Schema of the records below:
repo: string (lengths 7–54)
path: string (lengths 4–192)
url: string (lengths 87–284)
code: string (lengths 78–104k)
code_tokens: list
docstring: string (lengths 1–46.9k)
docstring_tokens: list
language: string (1 class)
partition: string (3 classes)
rmax/scrapydo
scrapydo/api.py
https://github.com/rmax/scrapydo/blob/b0f9e6d50a5ea9d2ba8335bffa877003109c3af5/scrapydo/api.py#L130-L153
def _fetch_in_reactor(url, spider_cls=DefaultSpider, **kwargs): """Fetches an URL and returns the response. Parameters ---------- url : str An URL to fetch. spider_cls : scrapy.Spider (default: DefaultSpider) A spider class to be used in the crawler. kwargs : dict, optional Additional arguments to be passed to ``_run_spider_in_reactor``. Returns ------- crochet.EventualResult """ def parse(self, response): self.response = response req = Request(url) if isinstance(url, six.string_types) else url req.dont_filter = True req.meta['handle_httpstatus_all'] = True spider_cls = override_start_requests(spider_cls, [req], parse=parse) return _run_spider_in_reactor(spider_cls, **kwargs)
[ "def", "_fetch_in_reactor", "(", "url", ",", "spider_cls", "=", "DefaultSpider", ",", "*", "*", "kwargs", ")", ":", "def", "parse", "(", "self", ",", "response", ")", ":", "self", ".", "response", "=", "response", "req", "=", "Request", "(", "url", ")", "if", "isinstance", "(", "url", ",", "six", ".", "string_types", ")", "else", "url", "req", ".", "dont_filter", "=", "True", "req", ".", "meta", "[", "'handle_httpstatus_all'", "]", "=", "True", "spider_cls", "=", "override_start_requests", "(", "spider_cls", ",", "[", "req", "]", ",", "parse", "=", "parse", ")", "return", "_run_spider_in_reactor", "(", "spider_cls", ",", "*", "*", "kwargs", ")" ]
Fetches an URL and returns the response. Parameters ---------- url : str An URL to fetch. spider_cls : scrapy.Spider (default: DefaultSpider) A spider class to be used in the crawler. kwargs : dict, optional Additional arguments to be passed to ``_run_spider_in_reactor``. Returns ------- crochet.EventualResult
[ "Fetches", "an", "URL", "and", "returns", "the", "response", "." ]
python
train
neuropsychology/NeuroKit.py
neurokit/statistics/statistics.py
https://github.com/neuropsychology/NeuroKit.py/blob/c9589348fbbde0fa7e986048c48f38e6b488adfe/neurokit/statistics/statistics.py#L125-L165
def find_outliers(data, treshold=2.58): """ Identify outliers (abnormal values) using the standart deviation. Parameters ---------- data : list or ndarray Data array treshold : float Maximum deviation (in terms of standart deviation). Rule of thumb of a gaussian distribution: 2.58 = rejecting 1%, 2.33 = rejecting 2%, 1.96 = 5% and 1.28 = rejecting 10%. Returns ---------- outliers : ndarray A list of True/False with True being the outliers. Example ---------- >>> import neurokit as nk >>> outliers = nk.find_outliers([1, 2, 1, 5, 666, 4, 1 ,3, 5]) Notes ---------- *Authors* - `Dominique Makowski <https://dominiquemakowski.github.io/>`_ *Dependencies* - numpy """ outliers = [] mean = np.mean(data) std = np.std(data) for i in data: if abs(i - mean)/std < treshold: outliers.append(False) else: outliers.append(True) outliers = np.array(outliers) return (outliers)
[ "def", "find_outliers", "(", "data", ",", "treshold", "=", "2.58", ")", ":", "outliers", "=", "[", "]", "mean", "=", "np", ".", "mean", "(", "data", ")", "std", "=", "np", ".", "std", "(", "data", ")", "for", "i", "in", "data", ":", "if", "abs", "(", "i", "-", "mean", ")", "/", "std", "<", "treshold", ":", "outliers", ".", "append", "(", "False", ")", "else", ":", "outliers", ".", "append", "(", "True", ")", "outliers", "=", "np", ".", "array", "(", "outliers", ")", "return", "(", "outliers", ")" ]
Identify outliers (abnormal values) using the standart deviation. Parameters ---------- data : list or ndarray Data array treshold : float Maximum deviation (in terms of standart deviation). Rule of thumb of a gaussian distribution: 2.58 = rejecting 1%, 2.33 = rejecting 2%, 1.96 = 5% and 1.28 = rejecting 10%. Returns ---------- outliers : ndarray A list of True/False with True being the outliers. Example ---------- >>> import neurokit as nk >>> outliers = nk.find_outliers([1, 2, 1, 5, 666, 4, 1 ,3, 5]) Notes ---------- *Authors* - `Dominique Makowski <https://dominiquemakowski.github.io/>`_ *Dependencies* - numpy
[ "Identify", "outliers", "(", "abnormal", "values", ")", "using", "the", "standart", "deviation", "." ]
python
train
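As a side note on the find_outliers record above, the per-element loop is equivalent to a single vectorized NumPy comparison; a minimal sketch using the docstring's example data (the variable names are illustrative, not part of the NeuroKit API):

import numpy as np

data = np.array([1, 2, 1, 5, 666, 4, 1, 3, 5])
threshold = 2.58
# True flags values lying more than `threshold` standard deviations from the mean
outliers = np.abs(data - data.mean()) / data.std() >= threshold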
chaoss/grimoirelab-perceval-mozilla
perceval/backends/mozilla/crates.py
https://github.com/chaoss/grimoirelab-perceval-mozilla/blob/4514f8d3d609d3cb79d83c72d51fcc4b4a7daeb4/perceval/backends/mozilla/crates.py#L91-L104
def fetch_items(self, category, **kwargs): """Fetch packages and summary from Crates.io :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ from_date = kwargs['from_date'] if category == CATEGORY_CRATES: return self.__fetch_crates(from_date) else: return self.__fetch_summary()
[ "def", "fetch_items", "(", "self", ",", "category", ",", "*", "*", "kwargs", ")", ":", "from_date", "=", "kwargs", "[", "'from_date'", "]", "if", "category", "==", "CATEGORY_CRATES", ":", "return", "self", ".", "__fetch_crates", "(", "from_date", ")", "else", ":", "return", "self", ".", "__fetch_summary", "(", ")" ]
Fetch packages and summary from Crates.io :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items
[ "Fetch", "packages", "and", "summary", "from", "Crates", ".", "io" ]
python
test
Workiva/furious
furious/extras/appengine/ndb_persistence.py
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/extras/appengine/ndb_persistence.py#L297-L307
def store_async_result(async_id, async_result): """Persist the Async's result to the datastore.""" logging.debug("Storing result for %s", async_id) key = FuriousAsyncMarker( id=async_id, result=json.dumps(async_result.to_dict()), status=async_result.status).put() logging.debug("Setting Async result %s using marker: %s.", async_result, key)
[ "def", "store_async_result", "(", "async_id", ",", "async_result", ")", ":", "logging", ".", "debug", "(", "\"Storing result for %s\"", ",", "async_id", ")", "key", "=", "FuriousAsyncMarker", "(", "id", "=", "async_id", ",", "result", "=", "json", ".", "dumps", "(", "async_result", ".", "to_dict", "(", ")", ")", ",", "status", "=", "async_result", ".", "status", ")", ".", "put", "(", ")", "logging", ".", "debug", "(", "\"Setting Async result %s using marker: %s.\"", ",", "async_result", ",", "key", ")" ]
Persist the Async's result to the datastore.
[ "Persist", "the", "Async", "s", "result", "to", "the", "datastore", "." ]
python
train
mattja/nsim
nsim/analyses1/freq.py
https://github.com/mattja/nsim/blob/ed62c41cd56b918fd97e09f7ad73c12c76a8c3e0/nsim/analyses1/freq.py#L207-L210
def hilbert_phase(ts): """Phase of the analytic signal, using the Hilbert transform""" output = np.angle(signal.hilbert(signal.detrend(ts, axis=0), axis=0)) return Timeseries(output, ts.tspan, labels=ts.labels)
[ "def", "hilbert_phase", "(", "ts", ")", ":", "output", "=", "np", ".", "angle", "(", "signal", ".", "hilbert", "(", "signal", ".", "detrend", "(", "ts", ",", "axis", "=", "0", ")", ",", "axis", "=", "0", ")", ")", "return", "Timeseries", "(", "output", ",", "ts", ".", "tspan", ",", "labels", "=", "ts", ".", "labels", ")" ]
Phase of the analytic signal, using the Hilbert transform
[ "Phase", "of", "the", "analytic", "signal", "using", "the", "Hilbert", "transform" ]
python
train
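For context, the hilbert_phase record above wraps a one-line SciPy computation; a standalone sketch of the same instantaneous-phase calculation on a plain NumPy array, without nsim's Timeseries wrapper (the test signal is illustrative):

import numpy as np
from scipy import signal

t = np.linspace(0, 1, 500, endpoint=False)
x = np.sin(2 * np.pi * 5 * t)  # 5 Hz test signal
# Detrend, take the analytic signal via the Hilbert transform, and read off its angle as the phase
phase = np.angle(signal.hilbert(signal.detrend(x)))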
rbarrois/restricted_pkg
restricted_pkg/base.py
https://github.com/rbarrois/restricted_pkg/blob/abbd3cb33ed85af02fbb531fd85dda9c1b070c85/restricted_pkg/base.py#L78-L91
def full_url(self): """The full URL, including username/password.""" if self.needs_auth: netloc = '%s:%s@%s' % (self.username, self.password, self.netloc) else: netloc = self.netloc return urlunparse(( self.scheme, netloc, self.path, self.params, self.query, self.fragment, ))
[ "def", "full_url", "(", "self", ")", ":", "if", "self", ".", "needs_auth", ":", "netloc", "=", "'%s:%s@%s'", "%", "(", "self", ".", "username", ",", "self", ".", "password", ",", "self", ".", "netloc", ")", "else", ":", "netloc", "=", "self", ".", "netloc", "return", "urlunparse", "(", "(", "self", ".", "scheme", ",", "netloc", ",", "self", ".", "path", ",", "self", ".", "params", ",", "self", ".", "query", ",", "self", ".", "fragment", ",", ")", ")" ]
The full URL, including username/password.
[ "The", "full", "URL", "including", "username", "/", "password", "." ]
python
train
elastic/elasticsearch-py
elasticsearch/client/indices.py
https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/client/indices.py#L988-L1015
def shrink(self, index, target, body=None, params=None): """ The shrink index API allows you to shrink an existing index into a new index with fewer primary shards. The number of primary shards in the target index must be a factor of the shards in the source index. For example an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards in the index is a prime number it can only be shrunk into a single primary shard. Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shrink-index.html>`_ :arg index: The name of the source index to shrink :arg target: The name of the target index to shrink into :arg body: The configuration for the target index (`settings` and `aliases`) :arg master_timeout: Specify timeout for connection to master :arg request_timeout: Explicit operation timeout :arg wait_for_active_shards: Set the number of active shards to wait for on the shrunken index before the operation returns. """ for param in (index, target): if param in SKIP_IN_PATH: raise ValueError("Empty value passed for a required argument.") return self.transport.perform_request( "PUT", _make_path(index, "_shrink", target), params=params, body=body )
[ "def", "shrink", "(", "self", ",", "index", ",", "target", ",", "body", "=", "None", ",", "params", "=", "None", ")", ":", "for", "param", "in", "(", "index", ",", "target", ")", ":", "if", "param", "in", "SKIP_IN_PATH", ":", "raise", "ValueError", "(", "\"Empty value passed for a required argument.\"", ")", "return", "self", ".", "transport", ".", "perform_request", "(", "\"PUT\"", ",", "_make_path", "(", "index", ",", "\"_shrink\"", ",", "target", ")", ",", "params", "=", "params", ",", "body", "=", "body", ")" ]
The shrink index API allows you to shrink an existing index into a new index with fewer primary shards. The number of primary shards in the target index must be a factor of the shards in the source index. For example an index with 8 primary shards can be shrunk into 4, 2 or 1 primary shards or an index with 15 primary shards can be shrunk into 5, 3 or 1. If the number of shards in the index is a prime number it can only be shrunk into a single primary shard. Before shrinking, a (primary or replica) copy of every shard in the index must be present on the same node. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-shrink-index.html>`_ :arg index: The name of the source index to shrink :arg target: The name of the target index to shrink into :arg body: The configuration for the target index (`settings` and `aliases`) :arg master_timeout: Specify timeout for connection to master :arg request_timeout: Explicit operation timeout :arg wait_for_active_shards: Set the number of active shards to wait for on the shrunken index before the operation returns.
[ "The", "shrink", "index", "API", "allows", "you", "to", "shrink", "an", "existing", "index", "into", "a", "new", "index", "with", "fewer", "primary", "shards", ".", "The", "number", "of", "primary", "shards", "in", "the", "target", "index", "must", "be", "a", "factor", "of", "the", "shards", "in", "the", "source", "index", ".", "For", "example", "an", "index", "with", "8", "primary", "shards", "can", "be", "shrunk", "into", "4", "2", "or", "1", "primary", "shards", "or", "an", "index", "with", "15", "primary", "shards", "can", "be", "shrunk", "into", "5", "3", "or", "1", ".", "If", "the", "number", "of", "shards", "in", "the", "index", "is", "a", "prime", "number", "it", "can", "only", "be", "shrunk", "into", "a", "single", "primary", "shard", ".", "Before", "shrinking", "a", "(", "primary", "or", "replica", ")", "copy", "of", "every", "shard", "in", "the", "index", "must", "be", "present", "on", "the", "same", "node", ".", "<http", ":", "//", "www", ".", "elastic", ".", "co", "/", "guide", "/", "en", "/", "elasticsearch", "/", "reference", "/", "current", "/", "indices", "-", "shrink", "-", "index", ".", "html", ">", "_" ]
python
train
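A usage sketch for the shrink record above, assuming an elasticsearch-py client of a matching version; the index names and node name are hypothetical. Per the docstring, every shard copy of the source index must first sit on one node and the index should be blocked for writes:

from elasticsearch import Elasticsearch

es = Elasticsearch()
# Prepare the source index: relocate shard copies to one node and block writes
es.indices.put_settings(index="logs", body={
    "index.routing.allocation.require._name": "shrink-node",
    "index.blocks.write": True,
})
# Shrink into a single-shard target index
es.indices.shrink(index="logs", target="logs-shrunk",
                  body={"settings": {"index.number_of_shards": 1}})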
saltstack/salt
salt/modules/inspectlib/query.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/inspectlib/query.py#L195-L211
def _configuration(self, *args, **kwargs): ''' Return configuration files. ''' data = dict() self.db.open() for pkg in self.db.get(Package): configs = list() for pkg_cfg in self.db.get(PackageCfgFile, eq={'pkgid': pkg.id}): configs.append(pkg_cfg.path) data[pkg.name] = configs if not data: raise InspectorQueryException("No inspected configuration yet available.") return data
[ "def", "_configuration", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "data", "=", "dict", "(", ")", "self", ".", "db", ".", "open", "(", ")", "for", "pkg", "in", "self", ".", "db", ".", "get", "(", "Package", ")", ":", "configs", "=", "list", "(", ")", "for", "pkg_cfg", "in", "self", ".", "db", ".", "get", "(", "PackageCfgFile", ",", "eq", "=", "{", "'pkgid'", ":", "pkg", ".", "id", "}", ")", ":", "configs", ".", "append", "(", "pkg_cfg", ".", "path", ")", "data", "[", "pkg", ".", "name", "]", "=", "configs", "if", "not", "data", ":", "raise", "InspectorQueryException", "(", "\"No inspected configuration yet available.\"", ")", "return", "data" ]
Return configuration files.
[ "Return", "configuration", "files", "." ]
python
train
pygeobuf/pygeobuf
geobuf/scripts/cli.py
https://github.com/pygeobuf/pygeobuf/blob/c9e055ab47532781626cfe2c931a8444820acf05/geobuf/scripts/cli.py#L65-L78
def decode(): """Given a Geobuf byte string on stdin, write a GeoJSON feature collection to stdout.""" logger = logging.getLogger('geobuf') stdin = click.get_binary_stream('stdin') sink = click.get_text_stream('stdout') try: pbf = stdin.read() data = geobuf.decode(pbf) json.dump(data, sink) sys.exit(0) except Exception: logger.exception("Failed. Exception caught") sys.exit(1)
[ "def", "decode", "(", ")", ":", "logger", "=", "logging", ".", "getLogger", "(", "'geobuf'", ")", "stdin", "=", "click", ".", "get_binary_stream", "(", "'stdin'", ")", "sink", "=", "click", ".", "get_text_stream", "(", "'stdout'", ")", "try", ":", "pbf", "=", "stdin", ".", "read", "(", ")", "data", "=", "geobuf", ".", "decode", "(", "pbf", ")", "json", ".", "dump", "(", "data", ",", "sink", ")", "sys", ".", "exit", "(", "0", ")", "except", "Exception", ":", "logger", ".", "exception", "(", "\"Failed. Exception caught\"", ")", "sys", ".", "exit", "(", "1", ")" ]
Given a Geobuf byte string on stdin, write a GeoJSON feature collection to stdout.
[ "Given", "a", "Geobuf", "byte", "string", "on", "stdin", "write", "a", "GeoJSON", "feature", "collection", "to", "stdout", "." ]
python
train
HewlettPackard/python-hpOneView
hpOneView/resources/servers/server_profiles.py
https://github.com/HewlettPackard/python-hpOneView/blob/3c6219723ef25e6e0c83d44a89007f89bc325b89/hpOneView/resources/servers/server_profiles.py#L302-L320
def get_available_targets(self, **kwargs): """ Retrieves a list of the target servers and empty device bays that are available for assignment to the server profile. Args: enclosureGroupUri (str): The URI of the enclosure group associated with the resource. serverHardwareTypeUri (str): The URI of the server hardware type associated with the resource. profileUri (str): The URI of the server profile associated with the resource. scopeUris (str): An expression to restrict the resources returned according to the scopes to which they are assigned. filter (list or str): A general filter/query string to narrow the list of items returned. The default is no filter, all resources are returned. Returns: list: List of available servers and bays. """ uri = self._helper.build_uri_with_query_string(kwargs, '/available-targets') return self._helper.do_get(uri)
[ "def", "get_available_targets", "(", "self", ",", "*", "*", "kwargs", ")", ":", "uri", "=", "self", ".", "_helper", ".", "build_uri_with_query_string", "(", "kwargs", ",", "'/available-targets'", ")", "return", "self", ".", "_helper", ".", "do_get", "(", "uri", ")" ]
Retrieves a list of the target servers and empty device bays that are available for assignment to the server profile. Args: enclosureGroupUri (str): The URI of the enclosure group associated with the resource. serverHardwareTypeUri (str): The URI of the server hardware type associated with the resource. profileUri (str): The URI of the server profile associated with the resource. scopeUris (str): An expression to restrict the resources returned according to the scopes to which they are assigned. filter (list or str): A general filter/query string to narrow the list of items returned. The default is no filter, all resources are returned. Returns: list: List of available servers and bays.
[ "Retrieves", "a", "list", "of", "the", "target", "servers", "and", "empty", "device", "bays", "that", "are", "available", "for", "assignment", "to", "the", "server", "profile", "." ]
python
train
kyuupichan/aiorpcX
aiorpcx/jsonrpc.py
https://github.com/kyuupichan/aiorpcX/blob/707c989ed1c67ac9a40cd20b0161b1ce1f4d7db0/aiorpcx/jsonrpc.py#L330-L336
def encode_payload(cls, payload): '''Encode a Python object as JSON and convert it to bytes.''' try: return json.dumps(payload).encode() except TypeError: msg = f'JSON payload encoding error: {payload}' raise ProtocolError(cls.INTERNAL_ERROR, msg) from None
[ "def", "encode_payload", "(", "cls", ",", "payload", ")", ":", "try", ":", "return", "json", ".", "dumps", "(", "payload", ")", ".", "encode", "(", ")", "except", "TypeError", ":", "msg", "=", "f'JSON payload encoding error: {payload}'", "raise", "ProtocolError", "(", "cls", ".", "INTERNAL_ERROR", ",", "msg", ")", "from", "None" ]
Encode a Python object as JSON and convert it to bytes.
[ "Encode", "a", "Python", "object", "as", "JSON", "and", "convert", "it", "to", "bytes", "." ]
python
train
BlueBrain/nat
nat/zotero_wrap.py
https://github.com/BlueBrain/nat/blob/0934f06e48e6efedf55a9617b15becae0d7b277c/nat/zotero_wrap.py#L70-L79
def create_distant_reference(self, ref_data): """Validate and create the reference in Zotero and return the created item.""" self.validate_reference_data(ref_data) creation_status = self._zotero_lib.create_items([ref_data]) try: created_item = creation_status["successful"]["0"] return created_item except KeyError as e: print(creation_status) raise CreateZoteroItemError from e
[ "def", "create_distant_reference", "(", "self", ",", "ref_data", ")", ":", "self", ".", "validate_reference_data", "(", "ref_data", ")", "creation_status", "=", "self", ".", "_zotero_lib", ".", "create_items", "(", "[", "ref_data", "]", ")", "try", ":", "created_item", "=", "creation_status", "[", "\"successful\"", "]", "[", "\"0\"", "]", "return", "created_item", "except", "KeyError", "as", "e", ":", "print", "(", "creation_status", ")", "raise", "CreateZoteroItemError", "from", "e" ]
Validate and create the reference in Zotero and return the created item.
[ "Validate", "and", "create", "the", "reference", "in", "Zotero", "and", "return", "the", "created", "item", "." ]
python
train
pydata/xarray
xarray/core/formatting.py
https://github.com/pydata/xarray/blob/6d93a95d05bdbfc33fff24064f67d29dd891ab58/xarray/core/formatting.py#L139-L150
def format_item(x, timedelta_format=None, quote_strings=True): """Returns a succinct summary of an object as a string""" if isinstance(x, (np.datetime64, datetime)): return format_timestamp(x) if isinstance(x, (np.timedelta64, timedelta)): return format_timedelta(x, timedelta_format=timedelta_format) elif isinstance(x, (str, bytes)): return repr(x) if quote_strings else x elif isinstance(x, (float, np.float)): return '{0:.4}'.format(x) else: return str(x)
[ "def", "format_item", "(", "x", ",", "timedelta_format", "=", "None", ",", "quote_strings", "=", "True", ")", ":", "if", "isinstance", "(", "x", ",", "(", "np", ".", "datetime64", ",", "datetime", ")", ")", ":", "return", "format_timestamp", "(", "x", ")", "if", "isinstance", "(", "x", ",", "(", "np", ".", "timedelta64", ",", "timedelta", ")", ")", ":", "return", "format_timedelta", "(", "x", ",", "timedelta_format", "=", "timedelta_format", ")", "elif", "isinstance", "(", "x", ",", "(", "str", ",", "bytes", ")", ")", ":", "return", "repr", "(", "x", ")", "if", "quote_strings", "else", "x", "elif", "isinstance", "(", "x", ",", "(", "float", ",", "np", ".", "float", ")", ")", ":", "return", "'{0:.4}'", ".", "format", "(", "x", ")", "else", ":", "return", "str", "(", "x", ")" ]
Returns a succinct summary of an object as a string
[ "Returns", "a", "succinct", "summary", "of", "an", "object", "as", "a", "string" ]
python
train
bcbio/bcbio-nextgen
bcbio/illumina/demultiplex.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/illumina/demultiplex.py#L11-L30
def run_bcl2fastq(run_folder, ss_csv, config): """Run bcl2fastq for de-multiplexing and fastq generation. run_folder -- directory of Illumina outputs ss_csv -- Samplesheet CSV file describing samples. """ bc_dir = os.path.join(run_folder, "Data", "Intensities", "BaseCalls") output_dir = os.path.join(run_folder, "fastq") if not os.path.exists(os.path.join(output_dir, "Makefile")): subprocess.check_call(["configureBclToFastq.pl", "--no-eamss", "--input-dir", bc_dir, "--output-dir", output_dir, "--sample-sheet", ss_csv]) with utils.chdir(output_dir): cores = str(utils.get_in(config, ("algorithm", "num_cores"), 1)) cmd = ["make", "-j", cores] if "submit_cmd" in config["process"] and "bcl2fastq_batch" in config["process"]: _submit_and_wait(cmd, cores, config, output_dir) else: subprocess.check_call(cmd) return output_dir
[ "def", "run_bcl2fastq", "(", "run_folder", ",", "ss_csv", ",", "config", ")", ":", "bc_dir", "=", "os", ".", "path", ".", "join", "(", "run_folder", ",", "\"Data\"", ",", "\"Intensities\"", ",", "\"BaseCalls\"", ")", "output_dir", "=", "os", ".", "path", ".", "join", "(", "run_folder", ",", "\"fastq\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "output_dir", ",", "\"Makefile\"", ")", ")", ":", "subprocess", ".", "check_call", "(", "[", "\"configureBclToFastq.pl\"", ",", "\"--no-eamss\"", ",", "\"--input-dir\"", ",", "bc_dir", ",", "\"--output-dir\"", ",", "output_dir", ",", "\"--sample-sheet\"", ",", "ss_csv", "]", ")", "with", "utils", ".", "chdir", "(", "output_dir", ")", ":", "cores", "=", "str", "(", "utils", ".", "get_in", "(", "config", ",", "(", "\"algorithm\"", ",", "\"num_cores\"", ")", ",", "1", ")", ")", "cmd", "=", "[", "\"make\"", ",", "\"-j\"", ",", "cores", "]", "if", "\"submit_cmd\"", "in", "config", "[", "\"process\"", "]", "and", "\"bcl2fastq_batch\"", "in", "config", "[", "\"process\"", "]", ":", "_submit_and_wait", "(", "cmd", ",", "cores", ",", "config", ",", "output_dir", ")", "else", ":", "subprocess", ".", "check_call", "(", "cmd", ")", "return", "output_dir" ]
Run bcl2fastq for de-multiplexing and fastq generation. run_folder -- directory of Illumina outputs ss_csv -- Samplesheet CSV file describing samples.
[ "Run", "bcl2fastq", "for", "de", "-", "multiplexing", "and", "fastq", "generation", ".", "run_folder", "--", "directory", "of", "Illumina", "outputs", "ss_csv", "--", "Samplesheet", "CSV", "file", "describing", "samples", "." ]
python
train
pmneila/morphsnakes
morphsnakes_v1.py
https://github.com/pmneila/morphsnakes/blob/aab66e70f86308d7b1927d76869a1a562120f849/morphsnakes_v1.py#L74-L91
def operator_si(u): """operator_si operator.""" global _aux if np.ndim(u) == 2: P = _P2 elif np.ndim(u) == 3: P = _P3 else: raise ValueError("u has an invalid number of dimensions " "(should be 2 or 3)") if u.shape != _aux.shape[1:]: _aux = np.zeros((len(P),) + u.shape) for _aux_i, P_i in zip(_aux, P): _aux_i[:] = binary_erosion(u, P_i) return _aux.max(0)
[ "def", "operator_si", "(", "u", ")", ":", "global", "_aux", "if", "np", ".", "ndim", "(", "u", ")", "==", "2", ":", "P", "=", "_P2", "elif", "np", ".", "ndim", "(", "u", ")", "==", "3", ":", "P", "=", "_P3", "else", ":", "raise", "ValueError", "(", "\"u has an invalid number of dimensions \"", "\"(should be 2 or 3)\"", ")", "if", "u", ".", "shape", "!=", "_aux", ".", "shape", "[", "1", ":", "]", ":", "_aux", "=", "np", ".", "zeros", "(", "(", "len", "(", "P", ")", ",", ")", "+", "u", ".", "shape", ")", "for", "_aux_i", ",", "P_i", "in", "zip", "(", "_aux", ",", "P", ")", ":", "_aux_i", "[", ":", "]", "=", "binary_erosion", "(", "u", ",", "P_i", ")", "return", "_aux", ".", "max", "(", "0", ")" ]
operator_si operator.
[ "operator_si", "operator", "." ]
python
train
rflamary/POT
ot/dr.py
https://github.com/rflamary/POT/blob/c5108efc7b6702e1af3928bef1032e6b37734d1c/ot/dr.py#L25-L35
def sinkhorn(w1, w2, M, reg, k): """Sinkhorn algorithm with fixed number of iteration (autograd) """ K = np.exp(-M / reg) ui = np.ones((M.shape[0],)) vi = np.ones((M.shape[1],)) for i in range(k): vi = w2 / (np.dot(K.T, ui)) ui = w1 / (np.dot(K, vi)) G = ui.reshape((M.shape[0], 1)) * K * vi.reshape((1, M.shape[1])) return G
[ "def", "sinkhorn", "(", "w1", ",", "w2", ",", "M", ",", "reg", ",", "k", ")", ":", "K", "=", "np", ".", "exp", "(", "-", "M", "/", "reg", ")", "ui", "=", "np", ".", "ones", "(", "(", "M", ".", "shape", "[", "0", "]", ",", ")", ")", "vi", "=", "np", ".", "ones", "(", "(", "M", ".", "shape", "[", "1", "]", ",", ")", ")", "for", "i", "in", "range", "(", "k", ")", ":", "vi", "=", "w2", "/", "(", "np", ".", "dot", "(", "K", ".", "T", ",", "ui", ")", ")", "ui", "=", "w1", "/", "(", "np", ".", "dot", "(", "K", ",", "vi", ")", ")", "G", "=", "ui", ".", "reshape", "(", "(", "M", ".", "shape", "[", "0", "]", ",", "1", ")", ")", "*", "K", "*", "vi", ".", "reshape", "(", "(", "1", ",", "M", ".", "shape", "[", "1", "]", ")", ")", "return", "G" ]
Sinkhorn algorithm with fixed number of iteration (autograd)
[ "Sinkhorn", "algorithm", "with", "fixed", "number", "of", "iteration", "(", "autograd", ")" ]
python
train
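A toy usage sketch for the sinkhorn record above, assuming the function is importable from ot.dr as the record's path suggests; the marginals and cost matrix are illustrative:

import numpy as np
from ot.dr import sinkhorn  # import path inferred from the record; adjust if the module layout differs

w1 = np.array([0.5, 0.5])           # source marginal
w2 = np.array([0.5, 0.5])           # target marginal
M = np.array([[0.0, 1.0],
              [1.0, 0.0]])          # pairwise cost matrix
G = sinkhorn(w1, w2, M, reg=0.1, k=100)
# After enough iterations the rows of G sum approximately to w1 and the columns to w2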
hitchtest/hitchserve
hitchserve/service_bundle.py
https://github.com/hitchtest/hitchserve/blob/a2def19979264186d283e76f7f0c88f3ed97f2e0/hitchserve/service_bundle.py#L261-L285
def time_travel(self, datetime=None, timedelta=None, seconds=0, minutes=0, hours=0, days=0): """Mock moving forward or backward in time by shifting the system clock fed to the services tested. Note that all of these arguments can be used together, individually or not at all. The time traveled to will be the sum of all specified time deltas from datetime. If no datetime is specified, the deltas will be added to the current time. Args: datetime (Optional[datetime]): Time travel to specific datetime. timedelta (Optional[timedelta]): Time travel to 'timedelta' from now. seconds (Optional[number]): Time travel 'seconds' seconds from now. minutes (Optional[number]): Time travel 'minutes' minutes from now. hours (Optional[number]): Time travel 'hours' hours from now. days (Optional[number]): Time travel 'days' days from now. """ if datetime is not None: self.timedelta = datetime - python_datetime.now() if timedelta is not None: self.timedelta = self.timedelta + timedelta self.timedelta = self.timedelta + python_timedelta(seconds=seconds) self.timedelta = self.timedelta + python_timedelta(minutes=minutes) self.timedelta = self.timedelta + python_timedelta(hours=hours) self.timedelta = self.timedelta + python_timedelta(days=days) log("Time traveling to {}\n".format(humanize.naturaltime(self.now()))) faketime.change_time(self.hitch_dir.faketime(), self.now())
[ "def", "time_travel", "(", "self", ",", "datetime", "=", "None", ",", "timedelta", "=", "None", ",", "seconds", "=", "0", ",", "minutes", "=", "0", ",", "hours", "=", "0", ",", "days", "=", "0", ")", ":", "if", "datetime", "is", "not", "None", ":", "self", ".", "timedelta", "=", "datetime", "-", "python_datetime", ".", "now", "(", ")", "if", "timedelta", "is", "not", "None", ":", "self", ".", "timedelta", "=", "self", ".", "timedelta", "+", "timedelta", "self", ".", "timedelta", "=", "self", ".", "timedelta", "+", "python_timedelta", "(", "seconds", "=", "seconds", ")", "self", ".", "timedelta", "=", "self", ".", "timedelta", "+", "python_timedelta", "(", "minutes", "=", "minutes", ")", "self", ".", "timedelta", "=", "self", ".", "timedelta", "+", "python_timedelta", "(", "hours", "=", "hours", ")", "self", ".", "timedelta", "=", "self", ".", "timedelta", "+", "python_timedelta", "(", "days", "=", "days", ")", "log", "(", "\"Time traveling to {}\\n\"", ".", "format", "(", "humanize", ".", "naturaltime", "(", "self", ".", "now", "(", ")", ")", ")", ")", "faketime", ".", "change_time", "(", "self", ".", "hitch_dir", ".", "faketime", "(", ")", ",", "self", ".", "now", "(", ")", ")" ]
Mock moving forward or backward in time by shifting the system clock fed to the services tested. Note that all of these arguments can be used together, individually or not at all. The time traveled to will be the sum of all specified time deltas from datetime. If no datetime is specified, the deltas will be added to the current time. Args: datetime (Optional[datetime]): Time travel to specific datetime. timedelta (Optional[timedelta]): Time travel to 'timedelta' from now. seconds (Optional[number]): Time travel 'seconds' seconds from now. minutes (Optional[number]): Time travel 'minutes' minutes from now. hours (Optional[number]): Time travel 'hours' hours from now. days (Optional[number]): Time travel 'days' days from now.
[ "Mock", "moving", "forward", "or", "backward", "in", "time", "by", "shifting", "the", "system", "clock", "fed", "to", "the", "services", "tested", "." ]
python
train
CTPUG/wafer
wafer/registration/sso.py
https://github.com/CTPUG/wafer/blob/a20af3c399267f76373dc342f4d542a9bc457c35/wafer/registration/sso.py#L24-L41
def sso(user, desired_username, name, email, profile_fields=None): """ Create a user, if the provided `user` is None, from the parameters. Then log the user in, and return it. """ if not user: if not settings.REGISTRATION_OPEN: raise SSOError('Account registration is closed') user = _create_desired_user(desired_username) _configure_user(user, name, email, profile_fields) if not user.is_active: raise SSOError('Account disabled') # login() expects the logging in backend to be set on the user. # We are bypassing login, so fake it. user.backend = settings.AUTHENTICATION_BACKENDS[0] return user
[ "def", "sso", "(", "user", ",", "desired_username", ",", "name", ",", "email", ",", "profile_fields", "=", "None", ")", ":", "if", "not", "user", ":", "if", "not", "settings", ".", "REGISTRATION_OPEN", ":", "raise", "SSOError", "(", "'Account registration is closed'", ")", "user", "=", "_create_desired_user", "(", "desired_username", ")", "_configure_user", "(", "user", ",", "name", ",", "email", ",", "profile_fields", ")", "if", "not", "user", ".", "is_active", ":", "raise", "SSOError", "(", "'Account disabled'", ")", "# login() expects the logging in backend to be set on the user.", "# We are bypassing login, so fake it.", "user", ".", "backend", "=", "settings", ".", "AUTHENTICATION_BACKENDS", "[", "0", "]", "return", "user" ]
Create a user, if the provided `user` is None, from the parameters. Then log the user in, and return it.
[ "Create", "a", "user", "if", "the", "provided", "user", "is", "None", "from", "the", "parameters", ".", "Then", "log", "the", "user", "in", "and", "return", "it", "." ]
python
train
dh1tw/pyhamtools
pyhamtools/locator.py
https://github.com/dh1tw/pyhamtools/blob/ee7e4b8732e23c298da10e07163748156c16d0fa/pyhamtools/locator.py#L128-L167
def calculate_distance(locator1, locator2): """calculates the (shortpath) distance between two Maidenhead locators Args: locator1 (string): Locator, either 4 or 6 characters locator2 (string): Locator, either 4 or 6 characters Returns: float: Distance in km Raises: ValueError: When called with wrong or invalid input arg AttributeError: When args are not a string Example: The following calculates the distance between two Maidenhead locators in km >>> from pyhamtools.locator import calculate_distance >>> calculate_distance("JN48QM", "QF67bf") 16466.413 """ R = 6371 #earh radius lat1, long1 = locator_to_latlong(locator1) lat2, long2 = locator_to_latlong(locator2) d_lat = radians(lat2) - radians(lat1) d_long = radians(long2) - radians(long1) r_lat1 = radians(lat1) r_long1 = radians(long1) r_lat2 = radians(lat2) r_long2 = radians(long2) a = sin(d_lat/2) * sin(d_lat/2) + cos(r_lat1) * cos(r_lat2) * sin(d_long/2) * sin(d_long/2) c = 2 * atan2(sqrt(a), sqrt(1-a)) d = R * c #distance in km return d;
[ "def", "calculate_distance", "(", "locator1", ",", "locator2", ")", ":", "R", "=", "6371", "#earh radius", "lat1", ",", "long1", "=", "locator_to_latlong", "(", "locator1", ")", "lat2", ",", "long2", "=", "locator_to_latlong", "(", "locator2", ")", "d_lat", "=", "radians", "(", "lat2", ")", "-", "radians", "(", "lat1", ")", "d_long", "=", "radians", "(", "long2", ")", "-", "radians", "(", "long1", ")", "r_lat1", "=", "radians", "(", "lat1", ")", "r_long1", "=", "radians", "(", "long1", ")", "r_lat2", "=", "radians", "(", "lat2", ")", "r_long2", "=", "radians", "(", "long2", ")", "a", "=", "sin", "(", "d_lat", "/", "2", ")", "*", "sin", "(", "d_lat", "/", "2", ")", "+", "cos", "(", "r_lat1", ")", "*", "cos", "(", "r_lat2", ")", "*", "sin", "(", "d_long", "/", "2", ")", "*", "sin", "(", "d_long", "/", "2", ")", "c", "=", "2", "*", "atan2", "(", "sqrt", "(", "a", ")", ",", "sqrt", "(", "1", "-", "a", ")", ")", "d", "=", "R", "*", "c", "#distance in km", "return", "d" ]
calculates the (shortpath) distance between two Maidenhead locators Args: locator1 (string): Locator, either 4 or 6 characters locator2 (string): Locator, either 4 or 6 characters Returns: float: Distance in km Raises: ValueError: When called with wrong or invalid input arg AttributeError: When args are not a string Example: The following calculates the distance between two Maidenhead locators in km >>> from pyhamtools.locator import calculate_distance >>> calculate_distance("JN48QM", "QF67bf") 16466.413
[ "calculates", "the", "(", "shortpath", ")", "distance", "between", "two", "Maidenhead", "locators" ]
python
train
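The distance computation in the calculate_distance record above is the standard haversine formula; written out for reference, with R the Earth radius in km (matching R = 6371 in the code) and the latitude/longitude differences taken in radians:

a = \sin^2\!\left(\tfrac{\Delta\varphi}{2}\right) + \cos\varphi_1 \cos\varphi_2 \sin^2\!\left(\tfrac{\Delta\lambda}{2}\right), \qquad c = 2\,\operatorname{atan2}\!\left(\sqrt{a},\ \sqrt{1-a}\right), \qquad d = R\,c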
Cue/scales
src/greplin/scales/__init__.py
https://github.com/Cue/scales/blob/0aced26eb050ceb98ee9d5d6cdca8db448666986/src/greplin/scales/__init__.py#L415-L418
def update(self, instance, oldValue, newValue): """Updates the aggregate based on a change in the child value.""" self.__set__(instance, self.__get__(instance, None) + newValue - (oldValue or 0))
[ "def", "update", "(", "self", ",", "instance", ",", "oldValue", ",", "newValue", ")", ":", "self", ".", "__set__", "(", "instance", ",", "self", ".", "__get__", "(", "instance", ",", "None", ")", "+", "newValue", "-", "(", "oldValue", "or", "0", ")", ")" ]
Updates the aggregate based on a change in the child value.
[ "Updates", "the", "aggregate", "based", "on", "a", "change", "in", "the", "child", "value", "." ]
python
train
inspirehep/inspire-dojson
inspire_dojson/cds/rules.py
https://github.com/inspirehep/inspire-dojson/blob/17f3789cd3d5ae58efa1190dc0eea9efb9c8ca59/inspire_dojson/cds/rules.py#L400-L453
def urls(self, key, value): """Populate the ``8564`` MARC field. Also populate the ``FFT`` field through side effects. """ def _is_preprint(value): return value.get('y', '').lower() == 'preprint' def _is_fulltext(value): return value['u'].endswith('.pdf') and value['u'].startswith('http://cds.cern.ch') def _is_local_copy(value): return 'local copy' in value.get('y', '') def _is_ignored_domain(value): ignored_domains = ['http://cdsweb.cern.ch', 'http://cms.cern.ch', 'http://cmsdoc.cern.ch', 'http://documents.cern.ch', 'http://preprints.cern.ch', 'http://cds.cern.ch', 'http://arxiv.org'] return any(value['u'].startswith(domain) for domain in ignored_domains) field_8564 = self.get('8564_', []) field_FFT = self.get('FFT__', []) if 'u' not in value: return field_8564 url = escape_url(value['u']) if _is_fulltext(value) and not _is_preprint(value): if _is_local_copy(value): description = value.get('y', '').replace('local copy', 'on CERN Document Server') field_8564.append({ 'u': url, 'y': description, }) else: _, file_name = os.path.split(urllib.parse.urlparse(value['u']).path) _, extension = os.path.splitext(file_name) field_FFT.append({ 't': 'CDS', 'a': url, 'd': value.get('y', ''), 'n': file_name, 'f': extension, }) elif not _is_ignored_domain(value): field_8564.append({ 'u': url, 'y': value.get('y'), }) self['FFT__'] = field_FFT return field_8564
[ "def", "urls", "(", "self", ",", "key", ",", "value", ")", ":", "def", "_is_preprint", "(", "value", ")", ":", "return", "value", ".", "get", "(", "'y'", ",", "''", ")", ".", "lower", "(", ")", "==", "'preprint'", "def", "_is_fulltext", "(", "value", ")", ":", "return", "value", "[", "'u'", "]", ".", "endswith", "(", "'.pdf'", ")", "and", "value", "[", "'u'", "]", ".", "startswith", "(", "'http://cds.cern.ch'", ")", "def", "_is_local_copy", "(", "value", ")", ":", "return", "'local copy'", "in", "value", ".", "get", "(", "'y'", ",", "''", ")", "def", "_is_ignored_domain", "(", "value", ")", ":", "ignored_domains", "=", "[", "'http://cdsweb.cern.ch'", ",", "'http://cms.cern.ch'", ",", "'http://cmsdoc.cern.ch'", ",", "'http://documents.cern.ch'", ",", "'http://preprints.cern.ch'", ",", "'http://cds.cern.ch'", ",", "'http://arxiv.org'", "]", "return", "any", "(", "value", "[", "'u'", "]", ".", "startswith", "(", "domain", ")", "for", "domain", "in", "ignored_domains", ")", "field_8564", "=", "self", ".", "get", "(", "'8564_'", ",", "[", "]", ")", "field_FFT", "=", "self", ".", "get", "(", "'FFT__'", ",", "[", "]", ")", "if", "'u'", "not", "in", "value", ":", "return", "field_8564", "url", "=", "escape_url", "(", "value", "[", "'u'", "]", ")", "if", "_is_fulltext", "(", "value", ")", "and", "not", "_is_preprint", "(", "value", ")", ":", "if", "_is_local_copy", "(", "value", ")", ":", "description", "=", "value", ".", "get", "(", "'y'", ",", "''", ")", ".", "replace", "(", "'local copy'", ",", "'on CERN Document Server'", ")", "field_8564", ".", "append", "(", "{", "'u'", ":", "url", ",", "'y'", ":", "description", ",", "}", ")", "else", ":", "_", ",", "file_name", "=", "os", ".", "path", ".", "split", "(", "urllib", ".", "parse", ".", "urlparse", "(", "value", "[", "'u'", "]", ")", ".", "path", ")", "_", ",", "extension", "=", "os", ".", "path", ".", "splitext", "(", "file_name", ")", "field_FFT", ".", "append", "(", "{", "'t'", ":", "'CDS'", ",", "'a'", ":", "url", ",", "'d'", ":", "value", ".", "get", "(", "'y'", ",", "''", ")", ",", "'n'", ":", "file_name", ",", "'f'", ":", "extension", ",", "}", ")", "elif", "not", "_is_ignored_domain", "(", "value", ")", ":", "field_8564", ".", "append", "(", "{", "'u'", ":", "url", ",", "'y'", ":", "value", ".", "get", "(", "'y'", ")", ",", "}", ")", "self", "[", "'FFT__'", "]", "=", "field_FFT", "return", "field_8564" ]
Populate the ``8564`` MARC field. Also populate the ``FFT`` field through side effects.
[ "Populate", "the", "8564", "MARC", "field", "." ]
python
train
coderholic/pyradio
pyradio/player.py
https://github.com/coderholic/pyradio/blob/c5219d350bccbccd49dbd627c1f886a952ea1963/pyradio/player.py#L285-L319
def play(self, name, streamUrl, encoding = ''): """ use a multimedia player to play a stream """ self.close() self.name = name self.oldUserInput = {'Input': '', 'Volume': '', 'Title': ''} self.muted = False self.show_volume = True self.title_prefix = '' self.playback_is_on = False self.outputStream.write('Station: "{}"'.format(name), self.status_update_lock) if logger.isEnabledFor(logging.INFO): logger.info('Selected Station: "{}"'.format(name)) if encoding: self._station_encoding = encoding else: self._station_encoding = 'utf-8' opts = [] isPlayList = streamUrl.split("?")[0][-3:] in ['m3u', 'pls'] opts = self._buildStartOpts(streamUrl, isPlayList) self.process = subprocess.Popen(opts, shell=False, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT) t = threading.Thread(target=self.updateStatus, args=(self.status_update_lock, )) t.start() # start playback check timer thread try: self.connection_timeout_thread = threading.Timer(self.playback_timeout, self.playback_timeout_handler) self.connection_timeout_thread.start() except: self.connection_timeout_thread = None if (logger.isEnabledFor(logging.ERROR)): logger.error("playback detection thread start failed") if logger.isEnabledFor(logging.INFO): logger.info("Player started")
[ "def", "play", "(", "self", ",", "name", ",", "streamUrl", ",", "encoding", "=", "''", ")", ":", "self", ".", "close", "(", ")", "self", ".", "name", "=", "name", "self", ".", "oldUserInput", "=", "{", "'Input'", ":", "''", ",", "'Volume'", ":", "''", ",", "'Title'", ":", "''", "}", "self", ".", "muted", "=", "False", "self", ".", "show_volume", "=", "True", "self", ".", "title_prefix", "=", "''", "self", ".", "playback_is_on", "=", "False", "self", ".", "outputStream", ".", "write", "(", "'Station: \"{}\"'", ".", "format", "(", "name", ")", ",", "self", ".", "status_update_lock", ")", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "INFO", ")", ":", "logger", ".", "info", "(", "'Selected Station: \"{}\"'", ".", "format", "(", "name", ")", ")", "if", "encoding", ":", "self", ".", "_station_encoding", "=", "encoding", "else", ":", "self", ".", "_station_encoding", "=", "'utf-8'", "opts", "=", "[", "]", "isPlayList", "=", "streamUrl", ".", "split", "(", "\"?\"", ")", "[", "0", "]", "[", "-", "3", ":", "]", "in", "[", "'m3u'", ",", "'pls'", "]", "opts", "=", "self", ".", "_buildStartOpts", "(", "streamUrl", ",", "isPlayList", ")", "self", ".", "process", "=", "subprocess", ".", "Popen", "(", "opts", ",", "shell", "=", "False", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ")", "t", "=", "threading", ".", "Thread", "(", "target", "=", "self", ".", "updateStatus", ",", "args", "=", "(", "self", ".", "status_update_lock", ",", ")", ")", "t", ".", "start", "(", ")", "# start playback check timer thread", "try", ":", "self", ".", "connection_timeout_thread", "=", "threading", ".", "Timer", "(", "self", ".", "playback_timeout", ",", "self", ".", "playback_timeout_handler", ")", "self", ".", "connection_timeout_thread", ".", "start", "(", ")", "except", ":", "self", ".", "connection_timeout_thread", "=", "None", "if", "(", "logger", ".", "isEnabledFor", "(", "logging", ".", "ERROR", ")", ")", ":", "logger", ".", "error", "(", "\"playback detection thread start failed\"", ")", "if", "logger", ".", "isEnabledFor", "(", "logging", ".", "INFO", ")", ":", "logger", ".", "info", "(", "\"Player started\"", ")" ]
use a multimedia player to play a stream
[ "use", "a", "multimedia", "player", "to", "play", "a", "stream" ]
python
train
the01/python-paps
paps/si/app/message.py
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/si/app/message.py#L362-L401
def unpack(cls, data): """ Unpack packed data into an instance :param data: Packed data :type data: str :return: Object instance and remaining data :rtype: (APPHeader, str) """ size = struct.calcsize(APPHeader.fmt_header) ( version, msg_type, payload_len, timestamp, device_id, flags, ), payload = struct.unpack( APPHeader.fmt_header, data[:size] ), data[size:] ack_sequence_number = None sequence_number = None if flags & Flag.ACKSEQ: size = struct.calcsize(APPHeader.fmt_seq_ack) (ack_sequence_number,), payload = struct.unpack( APPHeader.fmt_seq_ack, payload[:size] ), payload[size:] if flags & Flag.SEQ: size = struct.calcsize(APPHeader.fmt_seq) (sequence_number,), payload = struct.unpack( APPHeader.fmt_seq, payload[:size] ), payload[size:] return cls( message_type=msg_type, version_major=version >> 4, version_minor=version & 0xf, payload_length=payload_len, device_id=device_id, sequence_number=sequence_number, flags=flags, timestamp=timestamp, ack_sequence_number=ack_sequence_number ), payload
[ "def", "unpack", "(", "cls", ",", "data", ")", ":", "size", "=", "struct", ".", "calcsize", "(", "APPHeader", ".", "fmt_header", ")", "(", "version", ",", "msg_type", ",", "payload_len", ",", "timestamp", ",", "device_id", ",", "flags", ",", ")", ",", "payload", "=", "struct", ".", "unpack", "(", "APPHeader", ".", "fmt_header", ",", "data", "[", ":", "size", "]", ")", ",", "data", "[", "size", ":", "]", "ack_sequence_number", "=", "None", "sequence_number", "=", "None", "if", "flags", "&", "Flag", ".", "ACKSEQ", ":", "size", "=", "struct", ".", "calcsize", "(", "APPHeader", ".", "fmt_seq_ack", ")", "(", "ack_sequence_number", ",", ")", ",", "payload", "=", "struct", ".", "unpack", "(", "APPHeader", ".", "fmt_seq_ack", ",", "payload", "[", ":", "size", "]", ")", ",", "payload", "[", "size", ":", "]", "if", "flags", "&", "Flag", ".", "SEQ", ":", "size", "=", "struct", ".", "calcsize", "(", "APPHeader", ".", "fmt_seq", ")", "(", "sequence_number", ",", ")", ",", "payload", "=", "struct", ".", "unpack", "(", "APPHeader", ".", "fmt_seq", ",", "payload", "[", ":", "size", "]", ")", ",", "payload", "[", "size", ":", "]", "return", "cls", "(", "message_type", "=", "msg_type", ",", "version_major", "=", "version", ">>", "4", ",", "version_minor", "=", "version", "&", "0xf", ",", "payload_length", "=", "payload_len", ",", "device_id", "=", "device_id", ",", "sequence_number", "=", "sequence_number", ",", "flags", "=", "flags", ",", "timestamp", "=", "timestamp", ",", "ack_sequence_number", "=", "ack_sequence_number", ")", ",", "payload" ]
Unpack packed data into an instance :param data: Packed data :type data: str :return: Object instance and remaining data :rtype: (APPHeader, str)
[ "Unpack", "packed", "data", "into", "an", "instance" ]
python
train
greyli/flask-dropzone
flask_dropzone/__init__.py
https://github.com/greyli/flask-dropzone/blob/eb1d5ef16d8f83a12e6fed1bb9412a0c12c6d584/flask_dropzone/__init__.py#L29-L134
def load(js_url='', css_url='', version='5.2.0'): """Load Dropzone resources with given version and init dropzone configuration. .. versionchanged:: 1.4.3 Added ``js_url`` and ``css_url`` parameters to pass custom resource URL. .. versionchanged:: 1.4.4 This method was deprecated due to inflexible. Now it's divided into three methods: 1. Use ``load_css()`` to load css resources. 2. Use ``load_js()`` to load js resources. 3. Use ``config()`` to configure Dropzone. :param js_url: The JavaScript url for Dropzone.js. :param css_url: The CSS url for Dropzone.js. :param version: The version of Dropzone.js. """ warnings.warn('The method will be removed in 2.0, see docs for more details.') js_filename = 'dropzone.min.js' css_filename = 'dropzone.min.css' upload_multiple = current_app.config['DROPZONE_UPLOAD_MULTIPLE'] parallel_uploads = current_app.config['DROPZONE_PARALLEL_UPLOADS'] if upload_multiple in [True, 'true', 'True', 1]: upload_multiple = 'true' else: upload_multiple = 'false' serve_local = current_app.config['DROPZONE_SERVE_LOCAL'] size = current_app.config['DROPZONE_MAX_FILE_SIZE'] param = current_app.config['DROPZONE_INPUT_NAME'] redirect_view = current_app.config['DROPZONE_REDIRECT_VIEW'] if redirect_view is not None: redirect_js = ''' this.on("queuecomplete", function(file) { // Called when all files in the queue finish uploading. window.location = "%s"; });''' % url_for(redirect_view) else: redirect_js = '' if not current_app.config['DROPZONE_ALLOWED_FILE_CUSTOM']: allowed_type = allowed_file_extensions[ current_app.config['DROPZONE_ALLOWED_FILE_TYPE']] else: allowed_type = current_app.config['DROPZONE_ALLOWED_FILE_TYPE'] max_files = current_app.config['DROPZONE_MAX_FILES'] default_message = current_app.config['DROPZONE_DEFAULT_MESSAGE'] invalid_file_type = current_app.config['DROPZONE_INVALID_FILE_TYPE'] file_too_big = current_app.config['DROPZONE_FILE_TOO_BIG'] server_error = current_app.config['DROPZONE_SERVER_ERROR'] browser_unsupported = current_app.config['DROPZONE_BROWSER_UNSUPPORTED'] max_files_exceeded = current_app.config['DROPZONE_MAX_FILE_EXCEED'] cancelUpload = current_app.config['DROPZONE_CANCEL_UPLOAD'] removeFile = current_app.config['DROPZONE_REMOVE_FILE'] cancelConfirmation = current_app.config['DROPZONE_CANCEL_CONFIRMATION'] uploadCanceled = current_app.config['DROPZONE_UPLOAD_CANCELED'] timeout = current_app.config['DROPZONE_TIMEOUT'] if timeout: timeout_js = 'timeout: %d,' % timeout else: timeout_js = '' if serve_local: js = '<script src="%s"></script>\n' % url_for('dropzone.static', filename=js_filename) css = '<link rel="stylesheet" href="%s" type="text/css">\n' % \ url_for('dropzone.static', filename=css_filename) else: js = '<script src="https://cdn.jsdelivr.net/npm/dropzone@%s/dist/%s"></script>\n' % (version, js_filename) css = '<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/dropzone@%s/dist/min/%s"' \ ' type="text/css">\n' % (version, css_filename) if js_url: js = '<script src="%s"></script>\n' % js_url if css_url: css = '<link rel="stylesheet" href="%s" type="text/css">\n' % css_url return Markup(''' %s%s<script> Dropzone.options.myDropzone = { init: function() {%s}, uploadMultiple: %s, parallelUploads: %d, paramName: "%s", // The name that will be used to transfer the file maxFilesize: %d, // MB acceptedFiles: "%s", maxFiles: %s, dictDefaultMessage: "%s", // message display on drop area dictFallbackMessage: "%s", dictInvalidFileType: "%s", dictFileTooBig: "%s", dictResponseError: "%s", dictMaxFilesExceeded: "%s", dictCancelUpload: "%s", 
dictRemoveFile: "%s", dictCancelUploadConfirmation: "%s", dictUploadCanceled: "%s", %s // timeout }; </script> ''' % (css, js, redirect_js, upload_multiple, parallel_uploads, param, size, allowed_type, max_files, default_message, browser_unsupported, invalid_file_type, file_too_big, server_error, max_files_exceeded, cancelUpload, removeFile, cancelConfirmation, uploadCanceled, timeout_js))
[ "def", "load", "(", "js_url", "=", "''", ",", "css_url", "=", "''", ",", "version", "=", "'5.2.0'", ")", ":", "warnings", ".", "warn", "(", "'The method will be removed in 2.0, see docs for more details.'", ")", "js_filename", "=", "'dropzone.min.js'", "css_filename", "=", "'dropzone.min.css'", "upload_multiple", "=", "current_app", ".", "config", "[", "'DROPZONE_UPLOAD_MULTIPLE'", "]", "parallel_uploads", "=", "current_app", ".", "config", "[", "'DROPZONE_PARALLEL_UPLOADS'", "]", "if", "upload_multiple", "in", "[", "True", ",", "'true'", ",", "'True'", ",", "1", "]", ":", "upload_multiple", "=", "'true'", "else", ":", "upload_multiple", "=", "'false'", "serve_local", "=", "current_app", ".", "config", "[", "'DROPZONE_SERVE_LOCAL'", "]", "size", "=", "current_app", ".", "config", "[", "'DROPZONE_MAX_FILE_SIZE'", "]", "param", "=", "current_app", ".", "config", "[", "'DROPZONE_INPUT_NAME'", "]", "redirect_view", "=", "current_app", ".", "config", "[", "'DROPZONE_REDIRECT_VIEW'", "]", "if", "redirect_view", "is", "not", "None", ":", "redirect_js", "=", "'''\n this.on(\"queuecomplete\", function(file) {\n // Called when all files in the queue finish uploading.\n window.location = \"%s\";\n });'''", "%", "url_for", "(", "redirect_view", ")", "else", ":", "redirect_js", "=", "''", "if", "not", "current_app", ".", "config", "[", "'DROPZONE_ALLOWED_FILE_CUSTOM'", "]", ":", "allowed_type", "=", "allowed_file_extensions", "[", "current_app", ".", "config", "[", "'DROPZONE_ALLOWED_FILE_TYPE'", "]", "]", "else", ":", "allowed_type", "=", "current_app", ".", "config", "[", "'DROPZONE_ALLOWED_FILE_TYPE'", "]", "max_files", "=", "current_app", ".", "config", "[", "'DROPZONE_MAX_FILES'", "]", "default_message", "=", "current_app", ".", "config", "[", "'DROPZONE_DEFAULT_MESSAGE'", "]", "invalid_file_type", "=", "current_app", ".", "config", "[", "'DROPZONE_INVALID_FILE_TYPE'", "]", "file_too_big", "=", "current_app", ".", "config", "[", "'DROPZONE_FILE_TOO_BIG'", "]", "server_error", "=", "current_app", ".", "config", "[", "'DROPZONE_SERVER_ERROR'", "]", "browser_unsupported", "=", "current_app", ".", "config", "[", "'DROPZONE_BROWSER_UNSUPPORTED'", "]", "max_files_exceeded", "=", "current_app", ".", "config", "[", "'DROPZONE_MAX_FILE_EXCEED'", "]", "cancelUpload", "=", "current_app", ".", "config", "[", "'DROPZONE_CANCEL_UPLOAD'", "]", "removeFile", "=", "current_app", ".", "config", "[", "'DROPZONE_REMOVE_FILE'", "]", "cancelConfirmation", "=", "current_app", ".", "config", "[", "'DROPZONE_CANCEL_CONFIRMATION'", "]", "uploadCanceled", "=", "current_app", ".", "config", "[", "'DROPZONE_UPLOAD_CANCELED'", "]", "timeout", "=", "current_app", ".", "config", "[", "'DROPZONE_TIMEOUT'", "]", "if", "timeout", ":", "timeout_js", "=", "'timeout: %d,'", "%", "timeout", "else", ":", "timeout_js", "=", "''", "if", "serve_local", ":", "js", "=", "'<script src=\"%s\"></script>\\n'", "%", "url_for", "(", "'dropzone.static'", ",", "filename", "=", "js_filename", ")", "css", "=", "'<link rel=\"stylesheet\" href=\"%s\" type=\"text/css\">\\n'", "%", "url_for", "(", "'dropzone.static'", ",", "filename", "=", "css_filename", ")", "else", ":", "js", "=", "'<script src=\"https://cdn.jsdelivr.net/npm/dropzone@%s/dist/%s\"></script>\\n'", "%", "(", "version", ",", "js_filename", ")", "css", "=", "'<link rel=\"stylesheet\" href=\"https://cdn.jsdelivr.net/npm/dropzone@%s/dist/min/%s\"'", "' type=\"text/css\">\\n'", "%", "(", "version", ",", "css_filename", ")", "if", "js_url", ":", "js", "=", "'<script src=\"%s\"></script>\\n'", 
"%", "js_url", "if", "css_url", ":", "css", "=", "'<link rel=\"stylesheet\" href=\"%s\" type=\"text/css\">\\n'", "%", "css_url", "return", "Markup", "(", "'''\n %s%s<script>\nDropzone.options.myDropzone = {\n init: function() {%s},\n uploadMultiple: %s,\n parallelUploads: %d,\n paramName: \"%s\", // The name that will be used to transfer the file\n maxFilesize: %d, // MB\n acceptedFiles: \"%s\",\n maxFiles: %s,\n dictDefaultMessage: \"%s\", // message display on drop area\n dictFallbackMessage: \"%s\",\n dictInvalidFileType: \"%s\",\n dictFileTooBig: \"%s\",\n dictResponseError: \"%s\",\n dictMaxFilesExceeded: \"%s\",\n dictCancelUpload: \"%s\",\n dictRemoveFile: \"%s\",\n dictCancelUploadConfirmation: \"%s\",\n dictUploadCanceled: \"%s\",\n %s // timeout\n};\n </script>\n '''", "%", "(", "css", ",", "js", ",", "redirect_js", ",", "upload_multiple", ",", "parallel_uploads", ",", "param", ",", "size", ",", "allowed_type", ",", "max_files", ",", "default_message", ",", "browser_unsupported", ",", "invalid_file_type", ",", "file_too_big", ",", "server_error", ",", "max_files_exceeded", ",", "cancelUpload", ",", "removeFile", ",", "cancelConfirmation", ",", "uploadCanceled", ",", "timeout_js", ")", ")" ]
Load Dropzone resources with given version and init dropzone configuration. .. versionchanged:: 1.4.3 Added ``js_url`` and ``css_url`` parameters to pass custom resource URL. .. versionchanged:: 1.4.4 This method was deprecated due to inflexible. Now it's divided into three methods: 1. Use ``load_css()`` to load css resources. 2. Use ``load_js()`` to load js resources. 3. Use ``config()`` to configure Dropzone. :param js_url: The JavaScript url for Dropzone.js. :param css_url: The CSS url for Dropzone.js. :param version: The version of Dropzone.js.
[ "Load", "Dropzone", "resources", "with", "given", "version", "and", "init", "dropzone", "configuration", "." ]
python
train
CivicSpleen/ambry
ambry/orm/column.py
https://github.com/CivicSpleen/ambry/blob/d7f2be4bf1f7ffd086f3fadd4fcae60c32473e42/ambry/orm/column.py#L444-L457
def mangle_name(name): """Mangles a column name to a standard form, remoing illegal characters. :param name: :return: """ import re try: return re.sub('_+', '_', re.sub('[^\w_]', '_', name).lower()).rstrip('_') except TypeError: raise TypeError( 'Trying to mangle name with invalid type of: ' + str(type(name)))
[ "def", "mangle_name", "(", "name", ")", ":", "import", "re", "try", ":", "return", "re", ".", "sub", "(", "'_+'", ",", "'_'", ",", "re", ".", "sub", "(", "'[^\\w_]'", ",", "'_'", ",", "name", ")", ".", "lower", "(", ")", ")", ".", "rstrip", "(", "'_'", ")", "except", "TypeError", ":", "raise", "TypeError", "(", "'Trying to mangle name with invalid type of: '", "+", "str", "(", "type", "(", "name", ")", ")", ")" ]
Mangles a column name to a standard form, removing illegal characters. :param name: :return:
[ "Mangles", "a", "column", "name", "to", "a", "standard", "form", "removing", "illegal", "characters", "." ]
python
train
lawsie/guizero
guizero/tkmixins.py
https://github.com/lawsie/guizero/blob/84c7f0b314fa86f9fc88eb11c9a0f6c4b57155e2/guizero/tkmixins.py#L8-L11
def after(self, time, function, args = []): """Call `function` after `time` milliseconds.""" callback_id = self.tk.after(time, self._call_wrapper, time, function, *args) self._callback[function] = [callback_id, False]
[ "def", "after", "(", "self", ",", "time", ",", "function", ",", "args", "=", "[", "]", ")", ":", "callback_id", "=", "self", ".", "tk", ".", "after", "(", "time", ",", "self", ".", "_call_wrapper", ",", "time", ",", "function", ",", "*", "args", ")", "self", ".", "_callback", "[", "function", "]", "=", "[", "callback_id", ",", "False", "]" ]
Call `function` after `time` milliseconds.
[ "Call", "function", "after", "time", "milliseconds", "." ]
python
train
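A sketch of the ``after`` mixin above as it is typically called from a guizero App or widget; widget names and the delay are illustrative.

    from guizero import App, Text

    app = App(title='after() demo')
    label = Text(app, text='waiting...')

    def update(message):
        label.value = message

    # schedule `update` to run once, 2000 ms from now, with one positional argument
    app.after(2000, update, args=['done'])
    app.display()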
Loudr/pale
pale/doc.py
https://github.com/Loudr/pale/blob/dc002ee6032c856551143af222ff8f71ed9853fe/pale/doc.py#L818-L833
def document_endpoint(endpoint): """Extract the full documentation dictionary from the endpoint.""" descr = clean_description(py_doc_trim(endpoint.__doc__)) docs = { 'name': endpoint._route_name, 'http_method': endpoint._http_method, 'uri': endpoint._uri, 'description': descr, 'arguments': extract_endpoint_arguments(endpoint), 'returns': format_endpoint_returns_doc(endpoint), } if hasattr(endpoint, "_success"): docs["success"] = endpoint._success if hasattr(endpoint, "_requires_permission"): docs["requires_permission"] = endpoint._requires_permission return docs
[ "def", "document_endpoint", "(", "endpoint", ")", ":", "descr", "=", "clean_description", "(", "py_doc_trim", "(", "endpoint", ".", "__doc__", ")", ")", "docs", "=", "{", "'name'", ":", "endpoint", ".", "_route_name", ",", "'http_method'", ":", "endpoint", ".", "_http_method", ",", "'uri'", ":", "endpoint", ".", "_uri", ",", "'description'", ":", "descr", ",", "'arguments'", ":", "extract_endpoint_arguments", "(", "endpoint", ")", ",", "'returns'", ":", "format_endpoint_returns_doc", "(", "endpoint", ")", ",", "}", "if", "hasattr", "(", "endpoint", ",", "\"_success\"", ")", ":", "docs", "[", "\"success\"", "]", "=", "endpoint", ".", "_success", "if", "hasattr", "(", "endpoint", ",", "\"_requires_permission\"", ")", ":", "docs", "[", "\"requires_permission\"", "]", "=", "endpoint", ".", "_requires_permission", "return", "docs" ]
Extract the full documentation dictionary from the endpoint.
[ "Extract", "the", "full", "documentation", "dictionary", "from", "the", "endpoint", "." ]
python
train
VingtCinq/python-mailchimp
mailchimp3/entities/reportunsubscribes.py
https://github.com/VingtCinq/python-mailchimp/blob/1b472f1b64fdde974732ac4b7ed48908bb707260/mailchimp3/entities/reportunsubscribes.py#L25-L45
def all(self, campaign_id, get_all=False, **queryparams): """ Get information about members who have unsubscribed from a specific campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param get_all: Should the query get all results :type get_all: :py:class:`bool` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = [] queryparams['count'] = integer queryparams['offset'] = integer """ self.campaign_id = campaign_id self.subscriber_hash = None if get_all: return self._iterate(url=self._build_path(campaign_id, 'unsubscribed'), **queryparams) else: return self._mc_client._get(url=self._build_path(campaign_id, 'unsubscribed'), **queryparams)
[ "def", "all", "(", "self", ",", "campaign_id", ",", "get_all", "=", "False", ",", "*", "*", "queryparams", ")", ":", "self", ".", "campaign_id", "=", "campaign_id", "self", ".", "subscriber_hash", "=", "None", "if", "get_all", ":", "return", "self", ".", "_iterate", "(", "url", "=", "self", ".", "_build_path", "(", "campaign_id", ",", "'unsubscribed'", ")", ",", "*", "*", "queryparams", ")", "else", ":", "return", "self", ".", "_mc_client", ".", "_get", "(", "url", "=", "self", ".", "_build_path", "(", "campaign_id", ",", "'unsubscribed'", ")", ",", "*", "*", "queryparams", ")" ]
Get information about members who have unsubscribed from a specific campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param get_all: Should the query get all results :type get_all: :py:class:`bool` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = [] queryparams['count'] = integer queryparams['offset'] = integer
[ "Get", "information", "about", "members", "who", "have", "unsubscribed", "from", "a", "specific", "campaign", "." ]
python
valid
FNNDSC/med2image
med2image/systemMisc.py
https://github.com/FNNDSC/med2image/blob/638d5d230de47608af20f9764acf8e382c2bf2ff/med2image/systemMisc.py#L183-L215
def com_find2D(ar_grid, **kwargs): """ ARGS **kwargs ordering = 'rc' or 'xy' order the return either in (x,y) or (row, col) order. indexing = 'zero' or 'one' return positions relative to zero (i.e. python addressing) or one (i.e. MatLAB addressing) DESC Find the center of mass in 2D array grid <ar_grid>. Mass elements are grid index values. By using python idioms, his version is MUCH faster than the com_find() """ b_reorder = True b_oneOffset = True for key, value in kwargs.iteritems(): if key == 'ordering' and value == 'rc': b_reorder = False if key == 'ordering' and value == 'xy': b_reorder = True if key == 'indexing' and value == 'zero': b_oneOffset = False if key == 'indexing' and value == 'one': b_oneOffset = True f_Smass = ar_grid.sum() f_comX = (ar_grid[nonzero(ar_grid)] * (nonzero(ar_grid)[1] + 1)).sum() / f_Smass f_comY = (ar_grid[nonzero(ar_grid)] * (nonzero(ar_grid)[0] + 1)).sum() / f_Smass if b_reorder: ar_ret = array( (f_comX, f_comY) ) if not b_reorder: ar_ret = array( (f_comY, f_comX) ) if not b_oneOffset: ar_ret -= 1.0 return ar_ret
[ "def", "com_find2D", "(", "ar_grid", ",", "*", "*", "kwargs", ")", ":", "b_reorder", "=", "True", "b_oneOffset", "=", "True", "for", "key", ",", "value", "in", "kwargs", ".", "iteritems", "(", ")", ":", "if", "key", "==", "'ordering'", "and", "value", "==", "'rc'", ":", "b_reorder", "=", "False", "if", "key", "==", "'ordering'", "and", "value", "==", "'xy'", ":", "b_reorder", "=", "True", "if", "key", "==", "'indexing'", "and", "value", "==", "'zero'", ":", "b_oneOffset", "=", "False", "if", "key", "==", "'indexing'", "and", "value", "==", "'one'", ":", "b_oneOffset", "=", "True", "f_Smass", "=", "ar_grid", ".", "sum", "(", ")", "f_comX", "=", "(", "ar_grid", "[", "nonzero", "(", "ar_grid", ")", "]", "*", "(", "nonzero", "(", "ar_grid", ")", "[", "1", "]", "+", "1", ")", ")", ".", "sum", "(", ")", "/", "f_Smass", "f_comY", "=", "(", "ar_grid", "[", "nonzero", "(", "ar_grid", ")", "]", "*", "(", "nonzero", "(", "ar_grid", ")", "[", "0", "]", "+", "1", ")", ")", ".", "sum", "(", ")", "/", "f_Smass", "if", "b_reorder", ":", "ar_ret", "=", "array", "(", "(", "f_comX", ",", "f_comY", ")", ")", "if", "not", "b_reorder", ":", "ar_ret", "=", "array", "(", "(", "f_comY", ",", "f_comX", ")", ")", "if", "not", "b_oneOffset", ":", "ar_ret", "-=", "1.0", "return", "ar_ret" ]
ARGS **kwargs ordering = 'rc' or 'xy' order the return either in (x,y) or (row, col) order. indexing = 'zero' or 'one' return positions relative to zero (i.e. python addressing) or one (i.e. MatLAB addressing) DESC Find the center of mass in 2D array grid <ar_grid>. Mass elements are grid index values. By using python idioms, this version is MUCH faster than the com_find()
[ "ARGS", "**", "kwargs", "ordering", "=", "rc", "or", "xy", "order", "the", "return", "either", "in", "(", "x", "y", ")", "or", "(", "row", "col", ")", "order", ".", "indexing", "=", "zero", "or", "one", "return", "positions", "relative", "to", "zero", "(", "i", ".", "e", ".", "python", "addressing", ")", "or", "one", "(", "i", ".", "e", ".", "MatLAB", "addressing", ")" ]
python
train
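A small worked example for ``com_find2D`` above. The surrounding module appears to rely on a numpy star import (bare ``nonzero``/``array``) and Python 2 (``kwargs.iteritems()``), so the actual call is left commented and the expected result is derived by hand.

    import numpy as np

    grid = np.zeros((5, 5))
    grid[1, 3] = 2.0   # mass of 2 at row 1, col 3
    grid[4, 3] = 2.0   # mass of 2 at row 4, col 3

    # With ordering='xy' and indexing='zero' the expected centre of mass is
    # x (column) = 3, y (row) = (1*2 + 4*2) / 4 = 2.5
    # com = com_find2D(grid, ordering='xy', indexing='zero')
    # print(com)   # -> array([3. , 2.5])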
bcbio/bcbio-nextgen
bcbio/variation/vcfutils.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/variation/vcfutils.py#L383-L400
def _run_concat_variant_files_gatk4(input_file_list, out_file, config): """Use GATK4 GatherVcfs for concatenation of scattered VCFs. """ if not utils.file_exists(out_file): with file_transaction(config, out_file) as tx_out_file: params = ["-T", "GatherVcfs", "-I", input_file_list, "-O", tx_out_file] # Use GATK4 for merging, tools_off: [gatk4] applies to variant calling config = utils.deepish_copy(config) if "gatk4" in dd.get_tools_off({"config": config}): config["algorithm"]["tools_off"].remove("gatk4") # Allow specification of verbosity in the unique style this tool uses resources = config_utils.get_resources("gatk", config) opts = [str(x) for x in resources.get("options", [])] if "--verbosity" in opts: params += ["--VERBOSITY:%s" % opts[opts.index("--verbosity") + 1]] broad_runner = broad.runner_from_config(config) broad_runner.run_gatk(params) return out_file
[ "def", "_run_concat_variant_files_gatk4", "(", "input_file_list", ",", "out_file", ",", "config", ")", ":", "if", "not", "utils", ".", "file_exists", "(", "out_file", ")", ":", "with", "file_transaction", "(", "config", ",", "out_file", ")", "as", "tx_out_file", ":", "params", "=", "[", "\"-T\"", ",", "\"GatherVcfs\"", ",", "\"-I\"", ",", "input_file_list", ",", "\"-O\"", ",", "tx_out_file", "]", "# Use GATK4 for merging, tools_off: [gatk4] applies to variant calling", "config", "=", "utils", ".", "deepish_copy", "(", "config", ")", "if", "\"gatk4\"", "in", "dd", ".", "get_tools_off", "(", "{", "\"config\"", ":", "config", "}", ")", ":", "config", "[", "\"algorithm\"", "]", "[", "\"tools_off\"", "]", ".", "remove", "(", "\"gatk4\"", ")", "# Allow specification of verbosity in the unique style this tool uses", "resources", "=", "config_utils", ".", "get_resources", "(", "\"gatk\"", ",", "config", ")", "opts", "=", "[", "str", "(", "x", ")", "for", "x", "in", "resources", ".", "get", "(", "\"options\"", ",", "[", "]", ")", "]", "if", "\"--verbosity\"", "in", "opts", ":", "params", "+=", "[", "\"--VERBOSITY:%s\"", "%", "opts", "[", "opts", ".", "index", "(", "\"--verbosity\"", ")", "+", "1", "]", "]", "broad_runner", "=", "broad", ".", "runner_from_config", "(", "config", ")", "broad_runner", ".", "run_gatk", "(", "params", ")", "return", "out_file" ]
Use GATK4 GatherVcfs for concatenation of scattered VCFs.
[ "Use", "GATK4", "GatherVcfs", "for", "concatenation", "of", "scattered", "VCFs", "." ]
python
train
saltstack/salt
salt/modules/boto_kinesis.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto_kinesis.py#L124-L153
def get_stream_when_active(stream_name, region=None, key=None, keyid=None, profile=None): ''' Get complete stream info from AWS, returning only when the stream is in the ACTIVE state. Continues to retry when stream is updating or creating. If the stream is deleted during retries, the loop will catch the error and break. CLI example:: salt myminion boto_kinesis.get_stream_when_active my_stream region=us-east-1 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) stream_status = None # only get basic stream until it's active, # so we don't pull the full list of shards repeatedly (in case of very large stream) attempt = 0 max_retry_delay = 10 while stream_status != "ACTIVE": time.sleep(_jittered_backoff(attempt, max_retry_delay)) attempt += 1 stream_response = _get_basic_stream(stream_name, conn) if 'error' in stream_response: return stream_response stream_status = stream_response['result']["StreamDescription"]["StreamStatus"] # now it's active, get the full stream if necessary if stream_response['result']["StreamDescription"]["HasMoreShards"]: stream_response = _get_full_stream(stream_name, region, key, keyid, profile) return stream_response
[ "def", "get_stream_when_active", "(", "stream_name", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "conn", "=", "_get_conn", "(", "region", "=", "region", ",", "key", "=", "key", ",", "keyid", "=", "keyid", ",", "profile", "=", "profile", ")", "stream_status", "=", "None", "# only get basic stream until it's active,", "# so we don't pull the full list of shards repeatedly (in case of very large stream)", "attempt", "=", "0", "max_retry_delay", "=", "10", "while", "stream_status", "!=", "\"ACTIVE\"", ":", "time", ".", "sleep", "(", "_jittered_backoff", "(", "attempt", ",", "max_retry_delay", ")", ")", "attempt", "+=", "1", "stream_response", "=", "_get_basic_stream", "(", "stream_name", ",", "conn", ")", "if", "'error'", "in", "stream_response", ":", "return", "stream_response", "stream_status", "=", "stream_response", "[", "'result'", "]", "[", "\"StreamDescription\"", "]", "[", "\"StreamStatus\"", "]", "# now it's active, get the full stream if necessary", "if", "stream_response", "[", "'result'", "]", "[", "\"StreamDescription\"", "]", "[", "\"HasMoreShards\"", "]", ":", "stream_response", "=", "_get_full_stream", "(", "stream_name", ",", "region", ",", "key", ",", "keyid", ",", "profile", ")", "return", "stream_response" ]
Get complete stream info from AWS, returning only when the stream is in the ACTIVE state. Continues to retry when stream is updating or creating. If the stream is deleted during retries, the loop will catch the error and break. CLI example:: salt myminion boto_kinesis.get_stream_when_active my_stream region=us-east-1
[ "Get", "complete", "stream", "info", "from", "AWS", "returning", "only", "when", "the", "stream", "is", "in", "the", "ACTIVE", "state", ".", "Continues", "to", "retry", "when", "stream", "is", "updating", "or", "creating", ".", "If", "the", "stream", "is", "deleted", "during", "retries", "the", "loop", "will", "catch", "the", "error", "and", "break", "." ]
python
train
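The polling loop above sleeps for a jittered, capped back-off between stream lookups. ``_jittered_backoff`` is not shown in the record, so the helper below is only a guess at the kind of delay schedule it produces (a "full jitter" exponential back-off).

    import random

    def jittered_backoff(attempt, max_delay):
        # random delay in [0, min(max_delay, 2**attempt)] seconds
        return random.uniform(0, min(max_delay, 2 ** attempt))

    # attempt 0 -> up to 1s, attempt 1 -> up to 2s, ... capped at max_delay (10s above)
    for attempt in range(6):
        print(round(jittered_backoff(attempt, 10), 2))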
litl/rauth
rauth/oauth.py
https://github.com/litl/rauth/blob/a6d887d7737cf21ec896a8104f25c2754c694011/rauth/oauth.py#L115-L155
def sign(self, consumer_secret, access_token_secret, method, url, oauth_params, req_kwargs): '''Sign request parameters. :param consumer_secret: Consumer secret. :type consumer_secret: str :param access_token_secret: Access token secret. :type access_token_secret: str :param method: The method of this particular request. :type method: str :param url: The URL of this particular request. :type url: str :param oauth_params: OAuth parameters. :type oauth_params: dict :param req_kwargs: Keyworded args that will be sent to the request method. :type req_kwargs: dict ''' url = self._remove_qs(url) oauth_params = \ self._normalize_request_parameters(oauth_params, req_kwargs) parameters = map(self._escape, [method, url, oauth_params]) key = self._escape(consumer_secret) + b'&' if access_token_secret is not None: key += self._escape(access_token_secret) # build a Signature Base String signature_base_string = b'&'.join(parameters) # hash the string with HMAC-SHA1 hashed = hmac.new(key, signature_base_string, sha1) # return the signature return base64.b64encode(hashed.digest()).decode()
[ "def", "sign", "(", "self", ",", "consumer_secret", ",", "access_token_secret", ",", "method", ",", "url", ",", "oauth_params", ",", "req_kwargs", ")", ":", "url", "=", "self", ".", "_remove_qs", "(", "url", ")", "oauth_params", "=", "self", ".", "_normalize_request_parameters", "(", "oauth_params", ",", "req_kwargs", ")", "parameters", "=", "map", "(", "self", ".", "_escape", ",", "[", "method", ",", "url", ",", "oauth_params", "]", ")", "key", "=", "self", ".", "_escape", "(", "consumer_secret", ")", "+", "b'&'", "if", "access_token_secret", "is", "not", "None", ":", "key", "+=", "self", ".", "_escape", "(", "access_token_secret", ")", "# build a Signature Base String", "signature_base_string", "=", "b'&'", ".", "join", "(", "parameters", ")", "# hash the string with HMAC-SHA1", "hashed", "=", "hmac", ".", "new", "(", "key", ",", "signature_base_string", ",", "sha1", ")", "# return the signature", "return", "base64", ".", "b64encode", "(", "hashed", ".", "digest", "(", ")", ")", ".", "decode", "(", ")" ]
Sign request parameters. :param consumer_secret: Consumer secret. :type consumer_secret: str :param access_token_secret: Access token secret. :type access_token_secret: str :param method: The method of this particular request. :type method: str :param url: The URL of this particular request. :type url: str :param oauth_params: OAuth parameters. :type oauth_params: dict :param req_kwargs: Keyworded args that will be sent to the request method. :type req_kwargs: dict
[ "Sign", "request", "parameters", "." ]
python
train
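The signer above follows the usual OAuth 1.0a HMAC-SHA1 recipe: percent-encode method, URL and normalized parameters, join them with '&', key with consumer_secret '&' token_secret, and base64-encode the HMAC digest. A stripped-down standalone sketch of that recipe, independent of rauth's own helper methods:

    import base64
    import hmac
    from hashlib import sha1
    from urllib.parse import quote

    def sign_oauth1_hmac_sha1(consumer_secret, token_secret, method, url, normalized_params):
        esc = lambda s: quote(s, safe='~').encode('utf-8')
        base_string = b'&'.join(esc(p) for p in (method, url, normalized_params))
        key = esc(consumer_secret) + b'&'
        if token_secret is not None:
            key += esc(token_secret)
        return base64.b64encode(hmac.new(key, base_string, sha1).digest()).decode()

    print(sign_oauth1_hmac_sha1('csecret', 'tsecret', 'GET',
                                'http://example.com/request', 'a=1&b=2'))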
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_map/__init__.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_map/__init__.py#L182-L189
def remove_rally(self, key): '''remove a rally point''' a = key.split(' ') if a[0] != 'Rally' or len(a) != 2: print("Bad rally object %s" % key) return i = int(a[1]) self.mpstate.functions.process_stdin('rally remove %u' % i)
[ "def", "remove_rally", "(", "self", ",", "key", ")", ":", "a", "=", "key", ".", "split", "(", "' '", ")", "if", "a", "[", "0", "]", "!=", "'Rally'", "or", "len", "(", "a", ")", "!=", "2", ":", "print", "(", "\"Bad rally object %s\"", "%", "key", ")", "return", "i", "=", "int", "(", "a", "[", "1", "]", ")", "self", ".", "mpstate", ".", "functions", ".", "process_stdin", "(", "'rally remove %u'", "%", "i", ")" ]
remove a rally point
[ "remove", "a", "rally", "point" ]
python
train
googledatalab/pydatalab
solutionbox/structured_data/mltoolbox/_structured_data/trainer/util.py
https://github.com/googledatalab/pydatalab/blob/d9031901d5bca22fe0d5925d204e6698df9852e1/solutionbox/structured_data/mltoolbox/_structured_data/trainer/util.py#L367-L445
def get_estimator(output_dir, train_config, args): """Returns a tf learn estimator. We only support {DNN, Linear}Regressor and {DNN, Linear}Classifier. This is controlled by the values of model_type in the args. Args: output_dir: Modes are saved into outputdir/train train_config: our training config args: command line parameters Returns: TF lean estimator Raises: ValueError: if config is wrong. """ # Check the requested mode fits the preprocessed data. target_name = train_config['target_column'] if is_classification_model(args.model_type) and target_name not in \ train_config['categorical_columns']: raise ValueError('When using a classification model, the target must be a ' 'categorical variable.') if is_regression_model(args.model_type) and target_name not in \ train_config['numerical_columns']: raise ValueError('When using a regression model, the target must be a ' 'numerical variable.') # Check layers used for dnn models. if is_dnn_model(args.model_type) and not args.layer_sizes: raise ValueError('--layer-size* must be used with DNN models') if is_linear_model(args.model_type) and args.layer_sizes: raise ValueError('--layer-size* cannot be used with linear models') # Build tf.learn features feature_columns = _tflearn_features(train_config, args) # Set how often to run checkpointing in terms of time. config = tf.contrib.learn.RunConfig( save_checkpoints_secs=args.save_checkpoints_secs) train_dir = os.path.join(output_dir, 'train') if args.model_type == 'dnn_regression': estimator = tf.contrib.learn.DNNRegressor( feature_columns=feature_columns, hidden_units=args.layer_sizes, config=config, model_dir=train_dir, optimizer=tf.train.AdamOptimizer( args.learning_rate, epsilon=args.epsilon)) elif args.model_type == 'linear_regression': estimator = tf.contrib.learn.LinearRegressor( feature_columns=feature_columns, config=config, model_dir=train_dir, optimizer=tf.train.AdamOptimizer( args.learning_rate, epsilon=args.epsilon)) elif args.model_type == 'dnn_classification': estimator = tf.contrib.learn.DNNClassifier( feature_columns=feature_columns, hidden_units=args.layer_sizes, n_classes=train_config['vocab_stats'][target_name]['n_classes'], config=config, model_dir=train_dir, optimizer=tf.train.AdamOptimizer( args.learning_rate, epsilon=args.epsilon)) elif args.model_type == 'linear_classification': estimator = tf.contrib.learn.LinearClassifier( feature_columns=feature_columns, n_classes=train_config['vocab_stats'][target_name]['n_classes'], config=config, model_dir=train_dir, optimizer=tf.train.AdamOptimizer( args.learning_rate, epsilon=args.epsilon)) else: raise ValueError('bad --model-type value') return estimator
[ "def", "get_estimator", "(", "output_dir", ",", "train_config", ",", "args", ")", ":", "# Check the requested mode fits the preprocessed data.", "target_name", "=", "train_config", "[", "'target_column'", "]", "if", "is_classification_model", "(", "args", ".", "model_type", ")", "and", "target_name", "not", "in", "train_config", "[", "'categorical_columns'", "]", ":", "raise", "ValueError", "(", "'When using a classification model, the target must be a '", "'categorical variable.'", ")", "if", "is_regression_model", "(", "args", ".", "model_type", ")", "and", "target_name", "not", "in", "train_config", "[", "'numerical_columns'", "]", ":", "raise", "ValueError", "(", "'When using a regression model, the target must be a '", "'numerical variable.'", ")", "# Check layers used for dnn models.", "if", "is_dnn_model", "(", "args", ".", "model_type", ")", "and", "not", "args", ".", "layer_sizes", ":", "raise", "ValueError", "(", "'--layer-size* must be used with DNN models'", ")", "if", "is_linear_model", "(", "args", ".", "model_type", ")", "and", "args", ".", "layer_sizes", ":", "raise", "ValueError", "(", "'--layer-size* cannot be used with linear models'", ")", "# Build tf.learn features", "feature_columns", "=", "_tflearn_features", "(", "train_config", ",", "args", ")", "# Set how often to run checkpointing in terms of time.", "config", "=", "tf", ".", "contrib", ".", "learn", ".", "RunConfig", "(", "save_checkpoints_secs", "=", "args", ".", "save_checkpoints_secs", ")", "train_dir", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "'train'", ")", "if", "args", ".", "model_type", "==", "'dnn_regression'", ":", "estimator", "=", "tf", ".", "contrib", ".", "learn", ".", "DNNRegressor", "(", "feature_columns", "=", "feature_columns", ",", "hidden_units", "=", "args", ".", "layer_sizes", ",", "config", "=", "config", ",", "model_dir", "=", "train_dir", ",", "optimizer", "=", "tf", ".", "train", ".", "AdamOptimizer", "(", "args", ".", "learning_rate", ",", "epsilon", "=", "args", ".", "epsilon", ")", ")", "elif", "args", ".", "model_type", "==", "'linear_regression'", ":", "estimator", "=", "tf", ".", "contrib", ".", "learn", ".", "LinearRegressor", "(", "feature_columns", "=", "feature_columns", ",", "config", "=", "config", ",", "model_dir", "=", "train_dir", ",", "optimizer", "=", "tf", ".", "train", ".", "AdamOptimizer", "(", "args", ".", "learning_rate", ",", "epsilon", "=", "args", ".", "epsilon", ")", ")", "elif", "args", ".", "model_type", "==", "'dnn_classification'", ":", "estimator", "=", "tf", ".", "contrib", ".", "learn", ".", "DNNClassifier", "(", "feature_columns", "=", "feature_columns", ",", "hidden_units", "=", "args", ".", "layer_sizes", ",", "n_classes", "=", "train_config", "[", "'vocab_stats'", "]", "[", "target_name", "]", "[", "'n_classes'", "]", ",", "config", "=", "config", ",", "model_dir", "=", "train_dir", ",", "optimizer", "=", "tf", ".", "train", ".", "AdamOptimizer", "(", "args", ".", "learning_rate", ",", "epsilon", "=", "args", ".", "epsilon", ")", ")", "elif", "args", ".", "model_type", "==", "'linear_classification'", ":", "estimator", "=", "tf", ".", "contrib", ".", "learn", ".", "LinearClassifier", "(", "feature_columns", "=", "feature_columns", ",", "n_classes", "=", "train_config", "[", "'vocab_stats'", "]", "[", "target_name", "]", "[", "'n_classes'", "]", ",", "config", "=", "config", ",", "model_dir", "=", "train_dir", ",", "optimizer", "=", "tf", ".", "train", ".", "AdamOptimizer", "(", "args", ".", "learning_rate", ",", "epsilon", "=", 
"args", ".", "epsilon", ")", ")", "else", ":", "raise", "ValueError", "(", "'bad --model-type value'", ")", "return", "estimator" ]
Returns a tf learn estimator. We only support {DNN, Linear}Regressor and {DNN, Linear}Classifier. This is controlled by the values of model_type in the args. Args: output_dir: Models are saved into output_dir/train train_config: our training config args: command line parameters Returns: TF learn estimator Raises: ValueError: if config is wrong.
[ "Returns", "a", "tf", "learn", "estimator", "." ]
python
train
DataBiosphere/toil
src/toil/jobStores/aws/jobStore.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/jobStores/aws/jobStore.py#L1226-L1241
def __getBucketVersioning(self, bucket): """ For newly created buckets get_versioning_status returns an empty dict. In the past we've seen None in this case. We map both to a return value of False. Otherwise, the 'Versioning' entry in the dictionary returned by get_versioning_status can be 'Enabled', 'Suspended' or 'Disabled' which we map to True, None and False respectively. Note that we've never seen a versioning status of 'Disabled', only the empty dictionary. Calling configure_versioning with False on a bucket will cause get_versioning_status to then return 'Suspended' even on a new bucket that never had versioning enabled. """ for attempt in retry_s3(): with attempt: status = bucket.get_versioning_status() return self.versionings[status['Versioning']] if status else False
[ "def", "__getBucketVersioning", "(", "self", ",", "bucket", ")", ":", "for", "attempt", "in", "retry_s3", "(", ")", ":", "with", "attempt", ":", "status", "=", "bucket", ".", "get_versioning_status", "(", ")", "return", "self", ".", "versionings", "[", "status", "[", "'Versioning'", "]", "]", "if", "status", "else", "False" ]
For newly created buckets get_versioning_status returns an empty dict. In the past we've seen None in this case. We map both to a return value of False. Otherwise, the 'Versioning' entry in the dictionary returned by get_versioning_status can be 'Enabled', 'Suspended' or 'Disabled' which we map to True, None and False respectively. Note that we've never seen a versioning status of 'Disabled', only the empty dictionary. Calling configure_versioning with False on a bucket will cause get_versioning_status to then return 'Suspended' even on a new bucket that never had versioning enabled.
[ "For", "newly", "created", "buckets", "get_versioning_status", "returns", "an", "empty", "dict", ".", "In", "the", "past", "we", "ve", "seen", "None", "in", "this", "case", ".", "We", "map", "both", "to", "a", "return", "value", "of", "False", "." ]
python
train
jbloomlab/phydms
phydmslib/models.py
https://github.com/jbloomlab/phydms/blob/9cdebc10bafbe543c552d79486c7f950780ed3c0/phydmslib/models.py#L2226-L2229
def branchScale(self): """See docs for `Model` abstract base class.""" bscales = [m.branchScale for m in self._models] return (self.catweights * bscales).sum()
[ "def", "branchScale", "(", "self", ")", ":", "bscales", "=", "[", "m", ".", "branchScale", "for", "m", "in", "self", ".", "_models", "]", "return", "(", "self", ".", "catweights", "*", "bscales", ")", ".", "sum", "(", ")" ]
See docs for `Model` abstract base class.
[ "See", "docs", "for", "Model", "abstract", "base", "class", "." ]
python
train
tjguk/networkzero
networkzero/messenger.py
https://github.com/tjguk/networkzero/blob/0e3e81d2e9200b25a83ac07741612283599486d7/networkzero/messenger.py#L59-L69
def wait_for_news_from(address, prefix=config.EVERYTHING, wait_for_s=config.FOREVER, is_raw=False): """Wait for news whose topic starts with `prefix`. :param address: a nw0 address, eg from `nw0.discover` :param prefix: any text object [default: all messages] :param wait_for_s: how many seconds to wait before giving up [default: forever] :returns: a 2-tuple of (topic, data) or (None, None) if out of time """ _logger.info("Listen on %s for news matching %s waiting for %s secs", address, prefix, wait_for_s) return sockets._sockets.wait_for_news_from(address, prefix, wait_for_s, is_raw)
[ "def", "wait_for_news_from", "(", "address", ",", "prefix", "=", "config", ".", "EVERYTHING", ",", "wait_for_s", "=", "config", ".", "FOREVER", ",", "is_raw", "=", "False", ")", ":", "_logger", ".", "info", "(", "\"Listen on %s for news matching %s waiting for %s secs\"", ",", "address", ",", "prefix", ",", "wait_for_s", ")", "return", "sockets", ".", "_sockets", ".", "wait_for_news_from", "(", "address", ",", "prefix", ",", "wait_for_s", ",", "is_raw", ")" ]
Wait for news whose topic starts with `prefix`. :param address: a nw0 address, eg from `nw0.discover` :param prefix: any text object [default: all messages] :param wait_for_s: how many seconds to wait before giving up [default: forever] :returns: a 2-tuple of (topic, data) or (None, None) if out of time
[ "Wait", "for", "news", "whose", "topic", "starts", "with", "prefix", ".", ":", "param", "address", ":", "a", "nw0", "address", "eg", "from", "nw0", ".", "discover", ":", "param", "prefix", ":", "any", "text", "object", "[", "default", ":", "all", "messages", "]", ":", "param", "wait_for_s", ":", "how", "many", "seconds", "to", "wait", "before", "giving", "up", "[", "default", ":", "forever", "]", ":", "returns", ":", "a", "2", "-", "tuple", "of", "(", "topic", "data", ")", "or", "(", "None", "None", ")", "if", "out", "of", "time" ]
python
train
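A hedged usage sketch for the networkzero helper above; the advertised service name and topic prefix are hypothetical, and the timeout behaviour follows the docstring (returns (None, None) when out of time).

    import networkzero as nw0

    address = nw0.discover('weather-station')   # hypothetical advertised name
    if address is not None:
        topic, data = nw0.wait_for_news_from(address, prefix='temperature', wait_for_s=5)
        if topic is None:
            print('no news within 5 seconds')
        else:
            print(topic, data)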
RJT1990/pyflux
pyflux/ssm/nllm.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/ssm/nllm.py#L628-L678
def plot_fit(self,intervals=True,**kwargs): """ Plots the fit of the model Returns ---------- None (plots data and the fit) """ import matplotlib.pyplot as plt import seaborn as sns figsize = kwargs.get('figsize',(10,7)) if self.latent_variables.estimated is False: raise Exception("No latent variables estimated!") else: date_index = copy.deepcopy(self.index) date_index = date_index[self.integ:self.data_original.shape[0]+1] t_params = self.transform_z() if self.model_name2 == 'Skewt': states_upper_95 = (self.states + 1.98*np.sqrt(self.states_ses)) + ((t_params[-3] - (1.0/t_params[-3]))*t_params[-2]*gas.SkewtScore.tv_variate_exp(t_params[-1])) states_lower_95 = (self.states - 1.98*np.sqrt(self.states_ses)) + ((t_params[-3] - (1.0/t_params[-3]))*t_params[-2]*gas.SkewtScore.tv_variate_exp(t_params[-1])) mean_states = self.states + ((t_params[-3] - (1.0/t_params[-3]))*t_params[-2]*gas.SkewtScore.tv_variate_exp(t_params[-1])) else: states_upper_95 = (self.states + 1.98*np.sqrt(self.states_ses)) states_lower_95 = (self.states - 1.98*np.sqrt(self.states_ses)) mean_states = self.states plt.figure(figsize=figsize) plt.subplot(2, 1, 1) plt.title(self.data_name + " Raw and Smoothed") if intervals == True: alpha =[0.15*i/float(100) for i in range(50,12,-2)] plt.fill_between(date_index, self.link(states_lower_95), self.link(states_upper_95), alpha=0.15,label='95% C.I.') plt.plot(date_index, self.data, label='Data') plt.plot(date_index, self.link(mean_states), label='Smoothed', c='black') plt.legend(loc=2) plt.subplot(2, 1, 2) plt.title(self.data_name + " Local Level") if intervals == True: alpha =[0.15*i/float(100) for i in range(50,12,-2)] plt.fill_between(date_index, self.link(states_lower_95), self.link(states_upper_95), alpha=0.15,label='95% C.I.') plt.plot(date_index, self.link(mean_states), label='Smoothed State') plt.legend(loc=2) plt.show()
[ "def", "plot_fit", "(", "self", ",", "intervals", "=", "True", ",", "*", "*", "kwargs", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "import", "seaborn", "as", "sns", "figsize", "=", "kwargs", ".", "get", "(", "'figsize'", ",", "(", "10", ",", "7", ")", ")", "if", "self", ".", "latent_variables", ".", "estimated", "is", "False", ":", "raise", "Exception", "(", "\"No latent variables estimated!\"", ")", "else", ":", "date_index", "=", "copy", ".", "deepcopy", "(", "self", ".", "index", ")", "date_index", "=", "date_index", "[", "self", ".", "integ", ":", "self", ".", "data_original", ".", "shape", "[", "0", "]", "+", "1", "]", "t_params", "=", "self", ".", "transform_z", "(", ")", "if", "self", ".", "model_name2", "==", "'Skewt'", ":", "states_upper_95", "=", "(", "self", ".", "states", "+", "1.98", "*", "np", ".", "sqrt", "(", "self", ".", "states_ses", ")", ")", "+", "(", "(", "t_params", "[", "-", "3", "]", "-", "(", "1.0", "/", "t_params", "[", "-", "3", "]", ")", ")", "*", "t_params", "[", "-", "2", "]", "*", "gas", ".", "SkewtScore", ".", "tv_variate_exp", "(", "t_params", "[", "-", "1", "]", ")", ")", "states_lower_95", "=", "(", "self", ".", "states", "-", "1.98", "*", "np", ".", "sqrt", "(", "self", ".", "states_ses", ")", ")", "+", "(", "(", "t_params", "[", "-", "3", "]", "-", "(", "1.0", "/", "t_params", "[", "-", "3", "]", ")", ")", "*", "t_params", "[", "-", "2", "]", "*", "gas", ".", "SkewtScore", ".", "tv_variate_exp", "(", "t_params", "[", "-", "1", "]", ")", ")", "mean_states", "=", "self", ".", "states", "+", "(", "(", "t_params", "[", "-", "3", "]", "-", "(", "1.0", "/", "t_params", "[", "-", "3", "]", ")", ")", "*", "t_params", "[", "-", "2", "]", "*", "gas", ".", "SkewtScore", ".", "tv_variate_exp", "(", "t_params", "[", "-", "1", "]", ")", ")", "else", ":", "states_upper_95", "=", "(", "self", ".", "states", "+", "1.98", "*", "np", ".", "sqrt", "(", "self", ".", "states_ses", ")", ")", "states_lower_95", "=", "(", "self", ".", "states", "-", "1.98", "*", "np", ".", "sqrt", "(", "self", ".", "states_ses", ")", ")", "mean_states", "=", "self", ".", "states", "plt", ".", "figure", "(", "figsize", "=", "figsize", ")", "plt", ".", "subplot", "(", "2", ",", "1", ",", "1", ")", "plt", ".", "title", "(", "self", ".", "data_name", "+", "\" Raw and Smoothed\"", ")", "if", "intervals", "==", "True", ":", "alpha", "=", "[", "0.15", "*", "i", "/", "float", "(", "100", ")", "for", "i", "in", "range", "(", "50", ",", "12", ",", "-", "2", ")", "]", "plt", ".", "fill_between", "(", "date_index", ",", "self", ".", "link", "(", "states_lower_95", ")", ",", "self", ".", "link", "(", "states_upper_95", ")", ",", "alpha", "=", "0.15", ",", "label", "=", "'95% C.I.'", ")", "plt", ".", "plot", "(", "date_index", ",", "self", ".", "data", ",", "label", "=", "'Data'", ")", "plt", ".", "plot", "(", "date_index", ",", "self", ".", "link", "(", "mean_states", ")", ",", "label", "=", "'Smoothed'", ",", "c", "=", "'black'", ")", "plt", ".", "legend", "(", "loc", "=", "2", ")", "plt", ".", "subplot", "(", "2", ",", "1", ",", "2", ")", "plt", ".", "title", "(", "self", ".", "data_name", "+", "\" Local Level\"", ")", "if", "intervals", "==", "True", ":", "alpha", "=", "[", "0.15", "*", "i", "/", "float", "(", "100", ")", "for", "i", "in", "range", "(", "50", ",", "12", ",", "-", "2", ")", "]", "plt", ".", "fill_between", "(", "date_index", ",", "self", ".", "link", "(", "states_lower_95", ")", ",", "self", ".", "link", "(", "states_upper_95", ")", ",", "alpha", "=", "0.15", ",", 
"label", "=", "'95% C.I.'", ")", "plt", ".", "plot", "(", "date_index", ",", "self", ".", "link", "(", "mean_states", ")", ",", "label", "=", "'Smoothed State'", ")", "plt", ".", "legend", "(", "loc", "=", "2", ")", "plt", ".", "show", "(", ")" ]
Plots the fit of the model Returns ---------- None (plots data and the fit)
[ "Plots", "the", "fit", "of", "the", "model" ]
python
train
mezz64/pyHik
pyhik/hikvision.py
https://github.com/mezz64/pyHik/blob/1e7afca926e2b045257a43cbf8b1236a435493c2/pyhik/hikvision.py#L225-L231
def _do_update_callback(self, msg): """Call registered callback functions.""" for callback, sensor in self._updateCallbacks: if sensor == msg: _LOGGING.debug('Update callback %s for sensor %s', callback, sensor) callback(msg)
[ "def", "_do_update_callback", "(", "self", ",", "msg", ")", ":", "for", "callback", ",", "sensor", "in", "self", ".", "_updateCallbacks", ":", "if", "sensor", "==", "msg", ":", "_LOGGING", ".", "debug", "(", "'Update callback %s for sensor %s'", ",", "callback", ",", "sensor", ")", "callback", "(", "msg", ")" ]
Call registered callback functions.
[ "Call", "registered", "callback", "functions", "." ]
python
train
ktbyers/netmiko
netmiko/_textfsm/_texttable.py
https://github.com/ktbyers/netmiko/blob/54e6116c0b4664de2123081937e0a9a27bdfdfea/netmiko/_textfsm/_texttable.py#L1036-L1103
def CsvToTable(self, buf, header=True, separator=","): """Parses buffer into tabular format. Strips off comments (preceded by '#'). Optionally parses and indexes by first line (header). Args: buf: String file buffer containing CSV data. header: Is the first line of buffer a header. separator: String that CSV is separated by. Returns: int, the size of the table created. Raises: TableError: A parsing error occurred. """ self.Reset() header_row = self.row_class() if header: line = buf.readline() header_str = "" while not header_str: # Remove comments. header_str = line.split("#")[0].strip() if not header_str: line = buf.readline() header_list = header_str.split(separator) header_length = len(header_list) for entry in header_list: entry = entry.strip() if entry in header_row: raise TableError("Duplicate header entry %r." % entry) header_row[entry] = entry header_row.row = 0 self._table[0] = header_row # xreadlines would be better but not supported by StringIO for testing. for line in buf: # Support commented lines, provide '#' is first character of line. if line.startswith("#"): continue lst = line.split(separator) lst = [l.strip() for l in lst] if header and len(lst) != header_length: # Silently drop illegal line entries continue if not header: header_row = self.row_class() header_length = len(lst) header_row.values = dict( zip(range(header_length), range(header_length)) ) self._table[0] = header_row header = True continue new_row = self.NewRow() new_row.values = lst header_row.row = self.size + 1 self._table.append(new_row) return self.size
[ "def", "CsvToTable", "(", "self", ",", "buf", ",", "header", "=", "True", ",", "separator", "=", "\",\"", ")", ":", "self", ".", "Reset", "(", ")", "header_row", "=", "self", ".", "row_class", "(", ")", "if", "header", ":", "line", "=", "buf", ".", "readline", "(", ")", "header_str", "=", "\"\"", "while", "not", "header_str", ":", "# Remove comments.", "header_str", "=", "line", ".", "split", "(", "\"#\"", ")", "[", "0", "]", ".", "strip", "(", ")", "if", "not", "header_str", ":", "line", "=", "buf", ".", "readline", "(", ")", "header_list", "=", "header_str", ".", "split", "(", "separator", ")", "header_length", "=", "len", "(", "header_list", ")", "for", "entry", "in", "header_list", ":", "entry", "=", "entry", ".", "strip", "(", ")", "if", "entry", "in", "header_row", ":", "raise", "TableError", "(", "\"Duplicate header entry %r.\"", "%", "entry", ")", "header_row", "[", "entry", "]", "=", "entry", "header_row", ".", "row", "=", "0", "self", ".", "_table", "[", "0", "]", "=", "header_row", "# xreadlines would be better but not supported by StringIO for testing.", "for", "line", "in", "buf", ":", "# Support commented lines, provide '#' is first character of line.", "if", "line", ".", "startswith", "(", "\"#\"", ")", ":", "continue", "lst", "=", "line", ".", "split", "(", "separator", ")", "lst", "=", "[", "l", ".", "strip", "(", ")", "for", "l", "in", "lst", "]", "if", "header", "and", "len", "(", "lst", ")", "!=", "header_length", ":", "# Silently drop illegal line entries", "continue", "if", "not", "header", ":", "header_row", "=", "self", ".", "row_class", "(", ")", "header_length", "=", "len", "(", "lst", ")", "header_row", ".", "values", "=", "dict", "(", "zip", "(", "range", "(", "header_length", ")", ",", "range", "(", "header_length", ")", ")", ")", "self", ".", "_table", "[", "0", "]", "=", "header_row", "header", "=", "True", "continue", "new_row", "=", "self", ".", "NewRow", "(", ")", "new_row", ".", "values", "=", "lst", "header_row", ".", "row", "=", "self", ".", "size", "+", "1", "self", ".", "_table", ".", "append", "(", "new_row", ")", "return", "self", ".", "size" ]
Parses buffer into tabular format. Strips off comments (preceded by '#'). Optionally parses and indexes by first line (header). Args: buf: String file buffer containing CSV data. header: Is the first line of buffer a header. separator: String that CSV is separated by. Returns: int, the size of the table created. Raises: TableError: A parsing error occurred.
[ "Parses", "buffer", "into", "tabular", "format", "." ]
python
train
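A sketch of feeding ``CsvToTable`` a file-like buffer. The containing class name is assumed from the upstream texttable module that netmiko vendors, so the import and call are left commented; only the buffer construction is concrete.

    import io

    csv_buf = io.StringIO(
        "# comment lines are skipped\n"
        "host,ip,vendor\n"
        "sw1,10.0.0.1,cisco\n"
        "sw2,10.0.0.2,juniper\n"
    )
    # from netmiko._textfsm._texttable import TextTable   # class name is an assumption
    # table = TextTable()
    # print(table.CsvToTable(csv_buf, header=True, separator=","))   # -> 2 data rows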
Gbps/fastlog
fastlog/log.py
https://github.com/Gbps/fastlog/blob/8edb2327d72191510302c4654ffaa1691fe31277/fastlog/log.py#L67-L78
def _log(self, lvl, msg, type, args, kwargs): """ Internal method to filter into the formatter before being passed to the main Python logger """ extra = kwargs.get('extra', {}) extra.setdefault("fastlog-type", type) extra.setdefault("fastlog-indent", self._indent) kwargs['extra'] = extra self._lastlevel = lvl self.inner.log(lvl, msg, *args, **kwargs)
[ "def", "_log", "(", "self", ",", "lvl", ",", "msg", ",", "type", ",", "args", ",", "kwargs", ")", ":", "extra", "=", "kwargs", ".", "get", "(", "'extra'", ",", "{", "}", ")", "extra", ".", "setdefault", "(", "\"fastlog-type\"", ",", "type", ")", "extra", ".", "setdefault", "(", "\"fastlog-indent\"", ",", "self", ".", "_indent", ")", "kwargs", "[", "'extra'", "]", "=", "extra", "self", ".", "_lastlevel", "=", "lvl", "self", ".", "inner", ".", "log", "(", "lvl", ",", "msg", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Internal method to filter into the formatter before being passed to the main Python logger
[ "Internal", "method", "to", "filter", "into", "the", "formatter", "before", "being", "passed", "to", "the", "main", "Python", "logger" ]
python
train
phoikoi/sisy
src/sisy/models.py
https://github.com/phoikoi/sisy/blob/840c5463ab65488d34e99531f230e61f755d2d69/src/sisy/models.py#L359-L364
def taskinfo_with_label(label): """Return task info dictionary from task label. Internal function, pretty much only used in migrations since the model methods aren't there.""" task = Task.objects.get(label=label) info = json.loads(task._func_info) return info
[ "def", "taskinfo_with_label", "(", "label", ")", ":", "task", "=", "Task", ".", "objects", ".", "get", "(", "label", "=", "label", ")", "info", "=", "json", ".", "loads", "(", "task", ".", "_func_info", ")", "return", "info" ]
Return task info dictionary from task label. Internal function, pretty much only used in migrations since the model methods aren't there.
[ "Return", "task", "info", "dictionary", "from", "task", "label", ".", "Internal", "function", "pretty", "much", "only", "used", "in", "migrations", "since", "the", "model", "methods", "aren", "t", "there", "." ]
python
test
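Usage sketch for the sisy helper above; the label is hypothetical, the matching Task row must already exist, and Django has to be configured before the ORM call runs.

    # inside a configured Django project (e.g. from a data migration)
    from sisy.models import taskinfo_with_label

    info = taskinfo_with_label('nightly-report')   # hypothetical task label
    print(info)                                    # the JSON dict stored in Task._func_info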
icometrix/dicom2nifti
dicom2nifti/convert_dicom.py
https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/convert_dicom.py#L54-L96
def dicom_series_to_nifti(original_dicom_directory, output_file=None, reorient_nifti=True): """ Converts dicom single series (see pydicom) to nifty, mimicking SPM Examples: See unit test will return a dictionary containing - the NIFTI under key 'NIFTI' - the NIFTI file path under 'NII_FILE' - the BVAL file path under 'BVAL_FILE' (only for dti) - the BVEC file path under 'BVEC_FILE' (only for dti) IMPORTANT: If no specific sequence type can be found it will default to anatomical and try to convert. You should check that the data you are trying to convert is supported by this code Inspired by http://nipy.sourceforge.net/nibabel/dicom/spm_dicom.html Inspired by http://code.google.com/p/pydicom/source/browse/source/dicom/contrib/pydicom_series.py :param reorient_nifti: if True the nifti affine and data will be updated so the data is stored LAS oriented :param output_file: file path to write to if not set to None :param original_dicom_directory: directory with the dicom files for a single series/scan :return nibabel image """ # copy files so we can can modify without altering the original temp_directory = tempfile.mkdtemp() try: dicom_directory = os.path.join(temp_directory, 'dicom') shutil.copytree(original_dicom_directory, dicom_directory) dicom_input = common.read_dicom_directory(dicom_directory) return dicom_array_to_nifti(dicom_input, output_file, reorient_nifti) except AttributeError as exception: reraise( tp=ConversionError, value=ConversionError(str(exception)), tb=sys.exc_info()[2]) finally: # remove the copied data shutil.rmtree(temp_directory)
[ "def", "dicom_series_to_nifti", "(", "original_dicom_directory", ",", "output_file", "=", "None", ",", "reorient_nifti", "=", "True", ")", ":", "# copy files so we can can modify without altering the original", "temp_directory", "=", "tempfile", ".", "mkdtemp", "(", ")", "try", ":", "dicom_directory", "=", "os", ".", "path", ".", "join", "(", "temp_directory", ",", "'dicom'", ")", "shutil", ".", "copytree", "(", "original_dicom_directory", ",", "dicom_directory", ")", "dicom_input", "=", "common", ".", "read_dicom_directory", "(", "dicom_directory", ")", "return", "dicom_array_to_nifti", "(", "dicom_input", ",", "output_file", ",", "reorient_nifti", ")", "except", "AttributeError", "as", "exception", ":", "reraise", "(", "tp", "=", "ConversionError", ",", "value", "=", "ConversionError", "(", "str", "(", "exception", ")", ")", ",", "tb", "=", "sys", ".", "exc_info", "(", ")", "[", "2", "]", ")", "finally", ":", "# remove the copied data", "shutil", ".", "rmtree", "(", "temp_directory", ")" ]
Converts dicom single series (see pydicom) to nifti, mimicking SPM Examples: See unit test. Will return a dictionary containing - the NIFTI under key 'NIFTI' - the NIFTI file path under 'NII_FILE' - the BVAL file path under 'BVAL_FILE' (only for dti) - the BVEC file path under 'BVEC_FILE' (only for dti) IMPORTANT: If no specific sequence type can be found it will default to anatomical and try to convert. You should check that the data you are trying to convert is supported by this code Inspired by http://nipy.sourceforge.net/nibabel/dicom/spm_dicom.html Inspired by http://code.google.com/p/pydicom/source/browse/source/dicom/contrib/pydicom_series.py :param reorient_nifti: if True the nifti affine and data will be updated so the data is stored LAS oriented :param output_file: file path to write to if not set to None :param original_dicom_directory: directory with the dicom files for a single series/scan :return nibabel image
[ "Converts", "dicom", "single", "series", "(", "see", "pydicom", ")", "to", "nifti", "mimicking", "SPM" ]
python
train
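A typical call for the converter above, matching the signature shown in the record; the input and output paths are placeholders.

    import dicom2nifti.convert_dicom as convert_dicom

    result = convert_dicom.dicom_series_to_nifti(
        '/data/dicom/series_001',                      # placeholder: directory holding one DICOM series
        output_file='/data/nifti/series_001.nii.gz',   # placeholder output path
        reorient_nifti=True,
    )
    # per the docstring, the result should expose the written file and the in-memory image
    print(result['NII_FILE'])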
explosion/spaCy
spacy/pipeline/functions.py
https://github.com/explosion/spaCy/blob/8ee4100f8ffb336886208a1ea827bf4c745e2709/spacy/pipeline/functions.py#L39-L55
def merge_subtokens(doc, label="subtok"): """Merge subtokens into a single token. doc (Doc): The Doc object. label (unicode): The subtoken dependency label. RETURNS (Doc): The Doc object with merged subtokens. DOCS: https://spacy.io/api/pipeline-functions#merge_subtokens """ merger = Matcher(doc.vocab) merger.add("SUBTOK", None, [{"DEP": label, "op": "+"}]) matches = merger(doc) spans = [doc[start : end + 1] for _, start, end in matches] with doc.retokenize() as retokenizer: for span in spans: retokenizer.merge(span) return doc
[ "def", "merge_subtokens", "(", "doc", ",", "label", "=", "\"subtok\"", ")", ":", "merger", "=", "Matcher", "(", "doc", ".", "vocab", ")", "merger", ".", "add", "(", "\"SUBTOK\"", ",", "None", ",", "[", "{", "\"DEP\"", ":", "label", ",", "\"op\"", ":", "\"+\"", "}", "]", ")", "matches", "=", "merger", "(", "doc", ")", "spans", "=", "[", "doc", "[", "start", ":", "end", "+", "1", "]", "for", "_", ",", "start", ",", "end", "in", "matches", "]", "with", "doc", ".", "retokenize", "(", ")", "as", "retokenizer", ":", "for", "span", "in", "spans", ":", "retokenizer", ".", "merge", "(", "span", ")", "return", "doc" ]
Merge subtokens into a single token. doc (Doc): The Doc object. label (unicode): The subtoken dependency label. RETURNS (Doc): The Doc object with merged subtokens. DOCS: https://spacy.io/api/pipeline-functions#merge_subtokens
[ "Merge", "subtokens", "into", "a", "single", "token", "." ]
python
train
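A usage sketch for spaCy's ``merge_subtokens`` above. It assumes a parsed pipeline (the model name below must be installed); if no token carries the 'subtok' dependency the call is simply a no-op.

    import spacy
    from spacy.pipeline.functions import merge_subtokens

    nlp = spacy.load('en_core_web_sm')   # any pipeline with a parser
    doc = nlp('This is a sentence.')
    doc = merge_subtokens(doc, label='subtok')   # merges spans joined by 'subtok' deps, if any
    print([token.text for token in doc])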
googleapis/google-cloud-python
logging/google/cloud/logging/client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/logging/google/cloud/logging/client.py#L294-L318
def list_metrics(self, page_size=None, page_token=None): """List metrics for the project associated with this client. See https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/list :type page_size: int :param page_size: Optional. The maximum number of metrics in each page of results from this request. Non-positive values are ignored. Defaults to a sensible value set by the API. :type page_token: str :param page_token: Optional. If present, return the next batch of metrics, using the value, which must correspond to the ``nextPageToken`` value returned in the previous response. Deprecated: use the ``pages`` property of the returned iterator instead of manually passing the token. :rtype: :class:`~google.api_core.page_iterator.Iterator` :returns: Iterator of :class:`~google.cloud.logging.metric.Metric` accessible to the current client. """ return self.metrics_api.list_metrics(self.project, page_size, page_token)
[ "def", "list_metrics", "(", "self", ",", "page_size", "=", "None", ",", "page_token", "=", "None", ")", ":", "return", "self", ".", "metrics_api", ".", "list_metrics", "(", "self", ".", "project", ",", "page_size", ",", "page_token", ")" ]
List metrics for the project associated with this client. See https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/list :type page_size: int :param page_size: Optional. The maximum number of metrics in each page of results from this request. Non-positive values are ignored. Defaults to a sensible value set by the API. :type page_token: str :param page_token: Optional. If present, return the next batch of metrics, using the value, which must correspond to the ``nextPageToken`` value returned in the previous response. Deprecated: use the ``pages`` property of the returned iterator instead of manually passing the token. :rtype: :class:`~google.api_core.page_iterator.Iterator` :returns: Iterator of :class:`~google.cloud.logging.metric.Metric` accessible to the current client.
[ "List", "metrics", "for", "the", "project", "associated", "with", "this", "client", "." ]
python
train
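Usage sketch for the logging client method above; the project and credentials are assumed to come from the environment.

    from google.cloud import logging

    client = logging.Client()   # picks up project/credentials from the environment
    for metric in client.list_metrics(page_size=50):
        print(metric.name)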
openstack/horizon
openstack_dashboard/api/neutron.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/neutron.py#L361-L371
def list(self, **params): """Fetches a list of all security groups. :returns: List of SecurityGroup objects """ # This is to ensure tenant_id key is not populated # if tenant_id=None is specified. tenant_id = params.pop('tenant_id', self.request.user.tenant_id) if tenant_id: params['tenant_id'] = tenant_id return self._list(**params)
[ "def", "list", "(", "self", ",", "*", "*", "params", ")", ":", "# This is to ensure tenant_id key is not populated", "# if tenant_id=None is specified.", "tenant_id", "=", "params", ".", "pop", "(", "'tenant_id'", ",", "self", ".", "request", ".", "user", ".", "tenant_id", ")", "if", "tenant_id", ":", "params", "[", "'tenant_id'", "]", "=", "tenant_id", "return", "self", ".", "_list", "(", "*", "*", "params", ")" ]
Fetches a list of all security groups. :returns: List of SecurityGroup objects
[ "Fetches", "a", "list", "of", "all", "security", "groups", "." ]
python
train
CalebBell/fluids
fluids/fittings.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/fittings.py#L3693-L3758
def K_angle_stop_check_valve_Crane(D1, D2, fd=None, style=0): r'''Returns the loss coefficient for a angle stop check valve as shown in [1]_. If β = 1: .. math:: K = K_1 = K_2 = N\cdot f_d Otherwise: .. math:: K_2 = \frac{K + \left[0.5(1-\beta^2) + (1-\beta^2)^2\right]}{\beta^4} Style 0 is the standard form; style 1 has a restrition to force the flow up through the valve; style 2 is has the clearest flow area with no guides for the angle valve. N is 200, 350, and 55 for those cases respectively. Parameters ---------- D1 : float Diameter of the valve seat bore (must be smaller or equal to `D2`), [m] D2 : float Diameter of the pipe attached to the valve, [m] fd : float, optional Darcy friction factor calculated for the actual pipe flow in clean steel (roughness = 0.0018 inch) in the fully developed turbulent region; do not specify this to use the original Crane friction factor!, [-] style : int, optional One of 0, 1, or 2; refers to three different types of angle valves as shown in [1]_ [-] Returns ------- K : float Loss coefficient with respect to the pipe inside diameter [-] Notes ----- This method is not valid in the laminar regime and the pressure drop will be underestimated in those conditions. Examples -------- >>> K_angle_stop_check_valve_Crane(.1, .02, style=1) 4.525425593879809 References ---------- .. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane, 2009. ''' if fd is None: fd = ft_Crane(D2) try: K = angle_stop_check_valve_Crane_coeffs[style]*fd except KeyError: raise KeyError('Accepted valve styles are 0, 1, and 2 only') beta = D1/D2 if beta == 1: return K else: return (K + beta*(0.5*(1.0 - beta**2) + (1.0 - beta**2)**2))/beta**4
[ "def", "K_angle_stop_check_valve_Crane", "(", "D1", ",", "D2", ",", "fd", "=", "None", ",", "style", "=", "0", ")", ":", "if", "fd", "is", "None", ":", "fd", "=", "ft_Crane", "(", "D2", ")", "try", ":", "K", "=", "angle_stop_check_valve_Crane_coeffs", "[", "style", "]", "*", "fd", "except", "KeyError", ":", "raise", "KeyError", "(", "'Accepted valve styles are 0, 1, and 2 only'", ")", "beta", "=", "D1", "/", "D2", "if", "beta", "==", "1", ":", "return", "K", "else", ":", "return", "(", "K", "+", "beta", "*", "(", "0.5", "*", "(", "1.0", "-", "beta", "**", "2", ")", "+", "(", "1.0", "-", "beta", "**", "2", ")", "**", "2", ")", ")", "/", "beta", "**", "4" ]
r'''Returns the loss coefficient for an angle stop check valve as shown in [1]_. If β = 1: .. math:: K = K_1 = K_2 = N\cdot f_d Otherwise: .. math:: K_2 = \frac{K + \left[0.5(1-\beta^2) + (1-\beta^2)^2\right]}{\beta^4} Style 0 is the standard form; style 1 has a restriction to force the flow up through the valve; style 2 has the clearest flow area with no guides for the angle valve. N is 200, 350, and 55 for those cases respectively. Parameters ---------- D1 : float Diameter of the valve seat bore (must be smaller or equal to `D2`), [m] D2 : float Diameter of the pipe attached to the valve, [m] fd : float, optional Darcy friction factor calculated for the actual pipe flow in clean steel (roughness = 0.0018 inch) in the fully developed turbulent region; do not specify this to use the original Crane friction factor!, [-] style : int, optional One of 0, 1, or 2; refers to three different types of angle valves as shown in [1]_ [-] Returns ------- K : float Loss coefficient with respect to the pipe inside diameter [-] Notes ----- This method is not valid in the laminar regime and the pressure drop will be underestimated in those conditions. Examples -------- >>> K_angle_stop_check_valve_Crane(.1, .02, style=1) 4.525425593879809 References ---------- .. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane, 2009.
[ "r", "Returns", "the", "loss", "coefficient", "for", "a", "angle", "stop", "check", "valve", "as", "shown", "in", "[", "1", "]", "_", ".", "If", "β", "=", "1", ":", "..", "math", "::", "K", "=", "K_1", "=", "K_2", "=", "N", "\\", "cdot", "f_d", "Otherwise", ":", "..", "math", "::", "K_2", "=", "\\", "frac", "{", "K", "+", "\\", "left", "[", "0", ".", "5", "(", "1", "-", "\\", "beta^2", ")", "+", "(", "1", "-", "\\", "beta^2", ")", "^2", "\\", "right", "]", "}", "{", "\\", "beta^4", "}", "Style", "0", "is", "the", "standard", "form", ";", "style", "1", "has", "a", "restrition", "to", "force", "the", "flow", "up", "through", "the", "valve", ";", "style", "2", "is", "has", "the", "clearest", "flow", "area", "with", "no", "guides", "for", "the", "angle", "valve", ".", "N", "is", "200", "350", "and", "55", "for", "those", "cases", "respectively", ".", "Parameters", "----------", "D1", ":", "float", "Diameter", "of", "the", "valve", "seat", "bore", "(", "must", "be", "smaller", "or", "equal", "to", "D2", ")", "[", "m", "]", "D2", ":", "float", "Diameter", "of", "the", "pipe", "attached", "to", "the", "valve", "[", "m", "]", "fd", ":", "float", "optional", "Darcy", "friction", "factor", "calculated", "for", "the", "actual", "pipe", "flow", "in", "clean", "steel", "(", "roughness", "=", "0", ".", "0018", "inch", ")", "in", "the", "fully", "developed", "turbulent", "region", ";", "do", "not", "specify", "this", "to", "use", "the", "original", "Crane", "friction", "factor!", "[", "-", "]", "style", ":", "int", "optional", "One", "of", "0", "1", "or", "2", ";", "refers", "to", "three", "different", "types", "of", "angle", "valves", "as", "shown", "in", "[", "1", "]", "_", "[", "-", "]", "Returns", "-------", "K", ":", "float", "Loss", "coefficient", "with", "respect", "to", "the", "pipe", "inside", "diameter", "[", "-", "]" ]
python
train
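A small sketch of the seat-bore correction described in the docstring of the record above, reduced to plain Python; the helper name and the numeric inputs are invented for illustration and assume the β-weighted correction used by the code.

# Illustrative re-derivation of the beta correction; K1 and beta are made-up values, not Crane data.
def corrected_K(K1, beta):
    # beta == 1 means no seat-bore reduction, so the loss coefficient is just K1 = N*fd
    if beta == 1:
        return K1
    return (K1 + beta * (0.5 * (1.0 - beta**2) + (1.0 - beta**2)**2)) / beta**4

print(corrected_K(K1=4.0, beta=0.8))  # value is illustrative only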
pybel/pybel-tools
src/pybel_tools/analysis/neurommsig/algorithm.py
https://github.com/pybel/pybel-tools/blob/3491adea0ac4ee60f57275ef72f9b73da6dbfe0c/src/pybel_tools/analysis/neurommsig/algorithm.py#L166-L172
def neurommsig_gene_ora(graph: BELGraph, genes: List[Gene]) -> float: """Calculate the percentage of target genes mappable to the graph. Assume: graph central dogma inferred, collapsed to genes, collapsed variants """ graph_genes = set(get_nodes_by_function(graph, GENE)) return len(graph_genes.intersection(genes)) / len(graph_genes)
[ "def", "neurommsig_gene_ora", "(", "graph", ":", "BELGraph", ",", "genes", ":", "List", "[", "Gene", "]", ")", "->", "float", ":", "graph_genes", "=", "set", "(", "get_nodes_by_function", "(", "graph", ",", "GENE", ")", ")", "return", "len", "(", "graph_genes", ".", "intersection", "(", "genes", ")", ")", "/", "len", "(", "graph_genes", ")" ]
Calculate the percentage of target genes mappable to the graph. Assumes the graph has had central dogma inferred, has been collapsed to genes, and has had variants collapsed.
[ "Calculate", "the", "percentage", "of", "target", "genes", "mappable", "to", "the", "graph", ".", "Assume", ":", "graph", "central", "dogma", "inferred", "collapsed", "to", "genes", "collapsed", "variants" ]
python
valid
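A minimal sketch of the overlap computation performed by neurommsig_gene_ora above, using plain sets instead of a real BELGraph; the gene names are invented for illustration.

# Plain-set version of the same ratio: |graph genes ∩ target genes| / |graph genes|
graph_genes = {"MAPT", "APP", "PSEN1", "APOE"}   # stand-in for get_nodes_by_function(graph, GENE)
target_genes = {"APP", "APOE", "SNCA"}           # stand-in for the query gene list
print(len(graph_genes & target_genes) / len(graph_genes))  # 0.5 for this made-up data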
Stewori/pytypes
pytypes/type_util.py
https://github.com/Stewori/pytypes/blob/b814d38709e84c0e0825caf8b721c20eb5a8ab3b/pytypes/type_util.py#L125-L170
def get_iterable_itemtype(obj):
    """Attempts to get an iterable's itemtype without iterating over it,
    not even partly.
    Note that iterating over an iterable might modify its inner state,
    e.g. if it is an iterator.
    Note that obj is expected to be an iterable, not a typing.Iterable.
    This function leverages various alternative ways to obtain that
    info, e.g. by looking for type annotations of '__iter__' or
    '__getitem__'.
    It is intended for (unknown) iterables, where the type cannot be
    obtained via sampling without the risk of modifying inner state.
    """
    # support further specific iterables on demand
    try:
        if isinstance(obj, range):
            # build a plain tuple of element types (Union takes a single tuple argument)
            tpl = (deep_type(obj.start), deep_type(obj.stop), deep_type(obj.step))
            return Union[tpl]
    except TypeError:
        # We're running Python 2
        pass
    if type(obj) is tuple:
        tpl = tuple(deep_type(t) for t in obj)
        return Union[tpl]
    elif type(obj) is types.GeneratorType:
        return get_generator_yield_type(obj)
    else:
        tp = deep_type(obj)
        if is_Generic(tp):
            if issubclass(tp.__origin__, typing.Iterable):
                if len(tp.__args__) == 1:
                    return tp.__args__[0]
                return _select_Generic_superclass_parameters(tp, typing.Iterable)[0]
    if is_iterable(obj):
        if type(obj) is str:
            return str
        if hasattr(obj, '__iter__'):
            if has_type_hints(obj.__iter__):
                itrator = _funcsigtypes(obj.__iter__, True, obj.__class__)[1]
                if is_Generic(itrator) and itrator.__origin__ is typing.Iterator:
                    return itrator.__args__[0]
        if hasattr(obj, '__getitem__'):
            if has_type_hints(obj.__getitem__):
                itrator = _funcsigtypes(obj.__getitem__, True, obj.__class__)[1]
                if is_Generic(itrator) and itrator.__origin__ is typing.Iterator:
                    return itrator.__args__[0]
        return None  # means that type is unknown
    else:
        raise TypeError('Not an iterable: '+str(type(obj)))
[ "def", "get_iterable_itemtype", "(", "obj", ")", ":", "# support further specific iterables on demand", "try", ":", "if", "isinstance", "(", "obj", ",", "range", ")", ":", "tpl", "=", "tuple", "(", "deep_type", "(", "obj", ".", "start", ")", ",", "deep_type", "(", "obj", ".", "stop", ")", ",", "deep_type", "(", "obj", ".", "step", ")", ")", "return", "Union", "[", "tpl", "]", "except", "TypeError", ":", "# We're running Python 2", "pass", "if", "type", "(", "obj", ")", "is", "tuple", ":", "tpl", "=", "tuple", "(", "deep_type", "(", "t", ")", "for", "t", "in", "obj", ")", "return", "Union", "[", "tpl", "]", "elif", "type", "(", "obj", ")", "is", "types", ".", "GeneratorType", ":", "return", "get_generator_yield_type", "(", "obj", ")", "else", ":", "tp", "=", "deep_type", "(", "obj", ")", "if", "is_Generic", "(", "tp", ")", ":", "if", "issubclass", "(", "tp", ".", "__origin__", ",", "typing", ".", "Iterable", ")", ":", "if", "len", "(", "tp", ".", "__args__", ")", "==", "1", ":", "return", "tp", ".", "__args__", "[", "0", "]", "return", "_select_Generic_superclass_parameters", "(", "tp", ",", "typing", ".", "Iterable", ")", "[", "0", "]", "if", "is_iterable", "(", "obj", ")", ":", "if", "type", "(", "obj", ")", "is", "str", ":", "return", "str", "if", "hasattr", "(", "obj", ",", "'__iter__'", ")", ":", "if", "has_type_hints", "(", "obj", ".", "__iter__", ")", ":", "itrator", "=", "_funcsigtypes", "(", "obj", ".", "__iter__", ",", "True", ",", "obj", ".", "__class__", ")", "[", "1", "]", "if", "is_Generic", "(", "itrator", ")", "and", "itrator", ".", "__origin__", "is", "typing", ".", "Iterator", ":", "return", "itrator", ".", "__args__", "[", "0", "]", "if", "hasattr", "(", "obj", ",", "'__getitem__'", ")", ":", "if", "has_type_hints", "(", "obj", ".", "__getitem__", ")", ":", "itrator", "=", "_funcsigtypes", "(", "obj", ".", "__getitem__", ",", "True", ",", "obj", ".", "__class__", ")", "[", "1", "]", "if", "is_Generic", "(", "itrator", ")", "and", "itrator", ".", "__origin__", "is", "typing", ".", "Iterator", ":", "return", "itrator", ".", "__args__", "[", "0", "]", "return", "None", "# means that type is unknown", "else", ":", "raise", "TypeError", "(", "'Not an iterable: '", "+", "str", "(", "type", "(", "obj", ")", ")", ")" ]
Attempts to get an iterable's itemtype without iterating over it, not even partly. Note that iterating over an iterable might modify its inner state, e.g. if it is an iterator. Note that obj is expected to be an iterable, not a typing.Iterable. This function leverages various alternative ways to obtain that info, e.g. by looking for type annotations of '__iter__' or '__getitem__'. It is intended for (unknown) iterables, where the type cannot be obtained via sampling without the risk of modifying inner state.
[ "Attempts", "to", "get", "an", "iterable", "s", "itemtype", "without", "iterating", "over", "it", "not", "even", "partly", ".", "Note", "that", "iterating", "over", "an", "iterable", "might", "modify", "its", "inner", "state", "e", ".", "g", ".", "if", "it", "is", "an", "iterator", ".", "Note", "that", "obj", "is", "expected", "to", "be", "an", "iterable", "not", "a", "typing", ".", "Iterable", ".", "This", "function", "leverages", "various", "alternative", "ways", "to", "obtain", "that", "info", "e", ".", "g", ".", "by", "looking", "for", "type", "annotations", "of", "__iter__", "or", "__getitem__", ".", "It", "is", "intended", "for", "(", "unknown", ")", "iterables", "where", "the", "type", "cannot", "be", "obtained", "via", "sampling", "without", "the", "risk", "of", "modifying", "inner", "state", "." ]
python
train
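A hedged usage sketch for get_iterable_itemtype above; it assumes pytypes exposes the function at package level and that deep_type/Union behave as in the code, so the values in the comments are expectations rather than guarantees.

from pytypes import get_iterable_itemtype  # assumed import path

# A tuple is inspected element by element without being consumed.
print(get_iterable_itemtype((1, 2.5, 'a')))   # expected: typing.Union[int, float, str]

# A plain string is special-cased and reported as str.
print(get_iterable_itemtype('abc'))           # expected: <class 'str'>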
kensho-technologies/graphql-compiler
graphql_compiler/compiler/emit_sql.py
https://github.com/kensho-technologies/graphql-compiler/blob/f6079c6d10f64932f6b3af309b79bcea2123ca8f/graphql_compiler/compiler/emit_sql.py#L273-L286
def _transform_local_field_to_expression(expression, node, context): """Transform a LocalField compiler expression into its SQLAlchemy expression representation. Args: expression: expression, LocalField compiler expression. node: SqlNode, the SqlNode the expression applies to. context: CompilationContext, global compilation state and metadata. Returns: Expression, SQLAlchemy expression. """ column_name = expression.field_name column = sql_context_helpers.get_column(column_name, node, context) return column
[ "def", "_transform_local_field_to_expression", "(", "expression", ",", "node", ",", "context", ")", ":", "column_name", "=", "expression", ".", "field_name", "column", "=", "sql_context_helpers", ".", "get_column", "(", "column_name", ",", "node", ",", "context", ")", "return", "column" ]
Transform a LocalField compiler expression into its SQLAlchemy expression representation. Args: expression: expression, LocalField compiler expression. node: SqlNode, the SqlNode the expression applies to. context: CompilationContext, global compilation state and metadata. Returns: Expression, SQLAlchemy expression.
[ "Transform", "a", "LocalField", "compiler", "expression", "into", "its", "SQLAlchemy", "expression", "representation", "." ]
python
train
dpkp/kafka-python
kafka/client_async.py
https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/client_async.py#L704-L738
def least_loaded_node(self): """Choose the node with fewest outstanding requests, with fallbacks. This method will prefer a node with an existing connection and no in-flight-requests. If no such node is found, a node will be chosen randomly from disconnected nodes that are not "blacked out" (i.e., are not subject to a reconnect backoff). If no node metadata has been obtained, will return a bootstrap node (subject to exponential backoff). Returns: node_id or None if no suitable node was found """ nodes = [broker.nodeId for broker in self.cluster.brokers()] random.shuffle(nodes) inflight = float('inf') found = None for node_id in nodes: conn = self._conns.get(node_id) connected = conn is not None and conn.connected() blacked_out = conn is not None and conn.blacked_out() curr_inflight = len(conn.in_flight_requests) if conn is not None else 0 if connected and curr_inflight == 0: # if we find an established connection # with no in-flight requests, we can stop right away return node_id elif not blacked_out and curr_inflight < inflight: # otherwise if this is the best we have found so far, record that inflight = curr_inflight found = node_id if found is not None: return found return None
[ "def", "least_loaded_node", "(", "self", ")", ":", "nodes", "=", "[", "broker", ".", "nodeId", "for", "broker", "in", "self", ".", "cluster", ".", "brokers", "(", ")", "]", "random", ".", "shuffle", "(", "nodes", ")", "inflight", "=", "float", "(", "'inf'", ")", "found", "=", "None", "for", "node_id", "in", "nodes", ":", "conn", "=", "self", ".", "_conns", ".", "get", "(", "node_id", ")", "connected", "=", "conn", "is", "not", "None", "and", "conn", ".", "connected", "(", ")", "blacked_out", "=", "conn", "is", "not", "None", "and", "conn", ".", "blacked_out", "(", ")", "curr_inflight", "=", "len", "(", "conn", ".", "in_flight_requests", ")", "if", "conn", "is", "not", "None", "else", "0", "if", "connected", "and", "curr_inflight", "==", "0", ":", "# if we find an established connection", "# with no in-flight requests, we can stop right away", "return", "node_id", "elif", "not", "blacked_out", "and", "curr_inflight", "<", "inflight", ":", "# otherwise if this is the best we have found so far, record that", "inflight", "=", "curr_inflight", "found", "=", "node_id", "if", "found", "is", "not", "None", ":", "return", "found", "return", "None" ]
Choose the node with fewest outstanding requests, with fallbacks. This method will prefer a node with an existing connection and no in-flight-requests. If no such node is found, a node will be chosen randomly from disconnected nodes that are not "blacked out" (i.e., are not subject to a reconnect backoff). If no node metadata has been obtained, will return a bootstrap node (subject to exponential backoff). Returns: node_id or None if no suitable node was found
[ "Choose", "the", "node", "with", "fewest", "outstanding", "requests", "with", "fallbacks", "." ]
python
train
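A standalone sketch of the selection rule used in least_loaded_node above, reduced to plain tuples so it runs without a Kafka cluster; the node states are invented.

import random

# (node_id, connected, blacked_out, in_flight_requests) -- made-up sample state
nodes = [("n1", True, False, 3), ("n2", False, False, 0), ("n3", True, False, 0)]
random.shuffle(nodes)

best, best_inflight = None, float("inf")
for node_id, connected, blacked_out, inflight in nodes:
    if connected and inflight == 0:
        best = node_id          # an idle established connection wins immediately
        break
    if not blacked_out and inflight < best_inflight:
        best, best_inflight = node_id, inflight
print(best)  # always "n3" for this sample: connected with zero in-flight requests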
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/common.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/common.py#L178-L187
def validate_readable(option, value): """Validates that 'value' is file-like and readable. """ if value is None: return value # First make sure its a string py3.3 open(True, 'r') succeeds # Used in ssl cert checking due to poor ssl module error reporting value = validate_string(option, value) open(value, 'r').close() return value
[ "def", "validate_readable", "(", "option", ",", "value", ")", ":", "if", "value", "is", "None", ":", "return", "value", "# First make sure its a string py3.3 open(True, 'r') succeeds", "# Used in ssl cert checking due to poor ssl module error reporting", "value", "=", "validate_string", "(", "option", ",", "value", ")", "open", "(", "value", ",", "'r'", ")", ".", "close", "(", ")", "return", "value" ]
Validates that 'value' is file-like and readable.
[ "Validates", "that", "value", "is", "file", "-", "like", "and", "readable", "." ]
python
train
cisco-sas/kitty
kitty/model/low_level/field.py
https://github.com/cisco-sas/kitty/blob/cb0760989dcdfe079e43ac574d872d0b18953a32/kitty/model/low_level/field.py#L938-L944
def hash(self): ''' :rtype: int :return: hash of the field ''' hashed = super(_LibraryBitField, self).hash() return khash(hashed, self._length, self._signed, self._min_value, self._max_value)
[ "def", "hash", "(", "self", ")", ":", "hashed", "=", "super", "(", "_LibraryBitField", ",", "self", ")", ".", "hash", "(", ")", "return", "khash", "(", "hashed", ",", "self", ".", "_length", ",", "self", ".", "_signed", ",", "self", ".", "_min_value", ",", "self", ".", "_max_value", ")" ]
:rtype: int :return: hash of the field
[ ":", "rtype", ":", "int", ":", "return", ":", "hash", "of", "the", "field" ]
python
train
gem/oq-engine
openquake/hazardlib/gsim/nga_east.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/nga_east.py#L530-L534
def _get_tau(self, imt, mag): """ Returns the inter-event standard deviation (tau) """ return TAU_EXECUTION[self.tau_model](imt, mag, self.TAU)
[ "def", "_get_tau", "(", "self", ",", "imt", ",", "mag", ")", ":", "return", "TAU_EXECUTION", "[", "self", ".", "tau_model", "]", "(", "imt", ",", "mag", ",", "self", ".", "TAU", ")" ]
Returns the inter-event standard deviation (tau)
[ "Returns", "the", "inter", "-", "event", "standard", "deviation", "(", "tau", ")" ]
python
train
pyQode/pyqode.core
pyqode/core/widgets/splittable_tab_widget.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/widgets/splittable_tab_widget.py#L971-L979
def count(self): """ Returns the number of widgets currently displayed (takes child splits into account). """ c = self.main_tab_widget.count() for child in self.child_splitters: c += child.count() return c
[ "def", "count", "(", "self", ")", ":", "c", "=", "self", ".", "main_tab_widget", ".", "count", "(", ")", "for", "child", "in", "self", ".", "child_splitters", ":", "c", "+=", "child", ".", "count", "(", ")", "return", "c" ]
Returns the number of widgets currently displayed (takes child splits into account).
[ "Returns", "the", "number", "of", "widgets", "currently", "displayed", "(", "takes", "child", "splits", "into", "account", ")", "." ]
python
train
JarryShaw/PyPCAPKit
src/const/ipv6/routing.py
https://github.com/JarryShaw/PyPCAPKit/blob/c7f0da9aebc2cf210bf8f8b912f7d3cbb98ca10e/src/const/ipv6/routing.py#L21-L27
def get(key, default=-1): """Backport support for original codes.""" if isinstance(key, int): return Routing(key) if key not in Routing._member_map_: extend_enum(Routing, key, default) return Routing[key]
[ "def", "get", "(", "key", ",", "default", "=", "-", "1", ")", ":", "if", "isinstance", "(", "key", ",", "int", ")", ":", "return", "Routing", "(", "key", ")", "if", "key", "not", "in", "Routing", ".", "_member_map_", ":", "extend_enum", "(", "Routing", ",", "key", ",", "default", ")", "return", "Routing", "[", "key", "]" ]
Backport support for original codes.
[ "Backport", "support", "for", "original", "codes", "." ]
python
train
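A sketch of the same "extend on miss" backport pattern used by Routing.get above, written against a toy IntEnum so it does not depend on PyPCAPKit internals; aenum's extend_enum is assumed to be available.

from aenum import IntEnum, extend_enum

class Color(IntEnum):
    RED = 0
    GREEN = 1

def get(key, default=-1):
    if isinstance(key, int):
        return Color(key)                 # look up by value
    if key not in Color.__members__:
        extend_enum(Color, key, default)  # register the unknown name on the fly
    return Color[key]

print(get(1))        # Color.GREEN
print(get('BLUE'))   # newly added member with value -1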
improbable-research/keanu
keanu-python/keanu/vertex/generated.py
https://github.com/improbable-research/keanu/blob/73189a8f569078e156168e795f82c7366c59574b/keanu-python/keanu/vertex/generated.py#L523-L529
def Sum(input_vertex: vertex_constructor_param_types, label: Optional[str]=None) -> Vertex: """ Performs a sum across all dimensions :param input_vertex: the vertex to have its values summed """ return Double(context.jvm_view().SumVertex, label, cast_to_double_vertex(input_vertex))
[ "def", "Sum", "(", "input_vertex", ":", "vertex_constructor_param_types", ",", "label", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "Vertex", ":", "return", "Double", "(", "context", ".", "jvm_view", "(", ")", ".", "SumVertex", ",", "label", ",", "cast_to_double_vertex", "(", "input_vertex", ")", ")" ]
Performs a sum across all dimensions :param input_vertex: the vertex to have its values summed
[ "Performs", "a", "sum", "across", "all", "dimensions", ":", "param", "input_vertex", ":", "the", "vertex", "to", "have", "its", "values", "summed" ]
python
train
f3at/feat
src/feat/simulation/driver.py
https://github.com/f3at/feat/blob/15da93fc9d6ec8154f52a9172824e25821195ef8/src/feat/simulation/driver.py#L288-L297
def freeze_all(self): ''' Stop all activity of the agents running. ''' d = defer.succeed(None) for x in self.iter_agents(): d.addCallback(defer.drop_param, x._cancel_long_running_protocols) d.addCallback(defer.drop_param, x._cancel_all_delayed_calls) d.addCallback(defer.drop_param, x._kill_all_protocols) return d
[ "def", "freeze_all", "(", "self", ")", ":", "d", "=", "defer", ".", "succeed", "(", "None", ")", "for", "x", "in", "self", ".", "iter_agents", "(", ")", ":", "d", ".", "addCallback", "(", "defer", ".", "drop_param", ",", "x", ".", "_cancel_long_running_protocols", ")", "d", ".", "addCallback", "(", "defer", ".", "drop_param", ",", "x", ".", "_cancel_all_delayed_calls", ")", "d", ".", "addCallback", "(", "defer", ".", "drop_param", ",", "x", ".", "_kill_all_protocols", ")", "return", "d" ]
Stop all activity of the agents running.
[ "Stop", "all", "activity", "of", "the", "agents", "running", "." ]
python
train
Skype4Py/Skype4Py
Skype4Py/skype.py
https://github.com/Skype4Py/Skype4Py/blob/c48d83f7034109fe46315d45a066126002c6e0d4/Skype4Py/skype.py#L555-L567
def CreateChatWith(self, *Usernames): """Creates a chat with one or more users. :Parameters: Usernames : str One or more Skypenames of the users. :return: A chat object :rtype: `Chat` :see: `Chat.AddMembers` """ return Chat(self, chop(self._DoCommand('CHAT CREATE %s' % ', '.join(Usernames)), 2)[1])
[ "def", "CreateChatWith", "(", "self", ",", "*", "Usernames", ")", ":", "return", "Chat", "(", "self", ",", "chop", "(", "self", ".", "_DoCommand", "(", "'CHAT CREATE %s'", "%", "', '", ".", "join", "(", "Usernames", ")", ")", ",", "2", ")", "[", "1", "]", ")" ]
Creates a chat with one or more users. :Parameters: Usernames : str One or more Skypenames of the users. :return: A chat object :rtype: `Chat` :see: `Chat.AddMembers`
[ "Creates", "a", "chat", "with", "one", "or", "more", "users", "." ]
python
train
peterldowns/lggr
lggr/__init__.py
https://github.com/peterldowns/lggr/blob/622968f17133e02d9a46a4900dd20fb3b19fe961/lggr/__init__.py#L322-L332
def Printer(open_file=sys.stdout, closing=False): """ Prints items with a timestamp. """ try: while True: logstr = (yield) open_file.write(logstr) open_file.write('\n') # new line except GeneratorExit: if closing: try: open_file.close() except: pass
[ "def", "Printer", "(", "open_file", "=", "sys", ".", "stdout", ",", "closing", "=", "False", ")", ":", "try", ":", "while", "True", ":", "logstr", "=", "(", "yield", ")", "open_file", ".", "write", "(", "logstr", ")", "open_file", ".", "write", "(", "'\\n'", ")", "# new line", "except", "GeneratorExit", ":", "if", "closing", ":", "try", ":", "open_file", ".", "close", "(", ")", "except", ":", "pass" ]
Prints items with a timestamp.
[ "Prints", "items", "with", "a", "timestamp", "." ]
python
train
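A usage sketch for the Printer coroutine above; because it is a generator-based consumer it must be primed with next() before values can be sent, which is the detail the docstring leaves implicit. The output target and log string are illustrative.

import sys

printer = Printer(open_file=sys.stdout)   # assumes Printer from the record above is in scope
next(printer)                             # prime the coroutine so it reaches the first yield
printer.send("2024-01-01 12:00:00 something happened")  # written followed by a newline
printer.close()                           # raises GeneratorExit inside; file stays open (closing=False)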
gunthercox/ChatterBot
chatterbot/storage/sql_storage.py
https://github.com/gunthercox/ChatterBot/blob/1a03dcb45cba7bdc24d3db5e750582e0cb1518e2/chatterbot/storage/sql_storage.py#L176-L217
def create(self, **kwargs): """ Creates a new statement matching the keyword arguments specified. Returns the created statement. """ Statement = self.get_model('statement') Tag = self.get_model('tag') session = self.Session() tags = set(kwargs.pop('tags', [])) if 'search_text' not in kwargs: kwargs['search_text'] = self.tagger.get_bigram_pair_string(kwargs['text']) if 'search_in_response_to' not in kwargs: in_response_to = kwargs.get('in_response_to') if in_response_to: kwargs['search_in_response_to'] = self.tagger.get_bigram_pair_string(in_response_to) statement = Statement(**kwargs) for tag_name in tags: tag = session.query(Tag).filter_by(name=tag_name).first() if not tag: # Create the tag tag = Tag(name=tag_name) statement.tags.append(tag) session.add(statement) session.flush() session.refresh(statement) statement_object = self.model_to_object(statement) self._session_finish(session) return statement_object
[ "def", "create", "(", "self", ",", "*", "*", "kwargs", ")", ":", "Statement", "=", "self", ".", "get_model", "(", "'statement'", ")", "Tag", "=", "self", ".", "get_model", "(", "'tag'", ")", "session", "=", "self", ".", "Session", "(", ")", "tags", "=", "set", "(", "kwargs", ".", "pop", "(", "'tags'", ",", "[", "]", ")", ")", "if", "'search_text'", "not", "in", "kwargs", ":", "kwargs", "[", "'search_text'", "]", "=", "self", ".", "tagger", ".", "get_bigram_pair_string", "(", "kwargs", "[", "'text'", "]", ")", "if", "'search_in_response_to'", "not", "in", "kwargs", ":", "in_response_to", "=", "kwargs", ".", "get", "(", "'in_response_to'", ")", "if", "in_response_to", ":", "kwargs", "[", "'search_in_response_to'", "]", "=", "self", ".", "tagger", ".", "get_bigram_pair_string", "(", "in_response_to", ")", "statement", "=", "Statement", "(", "*", "*", "kwargs", ")", "for", "tag_name", "in", "tags", ":", "tag", "=", "session", ".", "query", "(", "Tag", ")", ".", "filter_by", "(", "name", "=", "tag_name", ")", ".", "first", "(", ")", "if", "not", "tag", ":", "# Create the tag", "tag", "=", "Tag", "(", "name", "=", "tag_name", ")", "statement", ".", "tags", ".", "append", "(", "tag", ")", "session", ".", "add", "(", "statement", ")", "session", ".", "flush", "(", ")", "session", ".", "refresh", "(", "statement", ")", "statement_object", "=", "self", ".", "model_to_object", "(", "statement", ")", "self", ".", "_session_finish", "(", "session", ")", "return", "statement_object" ]
Creates a new statement matching the keyword arguments specified. Returns the created statement.
[ "Creates", "a", "new", "statement", "matching", "the", "keyword", "arguments", "specified", ".", "Returns", "the", "created", "statement", "." ]
python
train
danilobellini/audiolazy
audiolazy/lazy_auditory.py
https://github.com/danilobellini/audiolazy/blob/dba0a278937909980ed40b976d866b8e97c35dee/audiolazy/lazy_auditory.py#L225-L290
def phon2dB(loudness=None): """ Loudness in phons to Sound Pressure Level (SPL) in dB using the ISO/FDIS 226:2003 model. This function needs Scipy, as ``scipy.interpolate.UnivariateSpline`` objects are used as interpolators. Parameters ---------- loudness : The loudness value in phons to be converted, or None (default) to get the threshold of hearing. Returns ------- A callable that returns the SPL dB value for each given frequency in hertz. Note ---- See ``phon2dB.iso226.schema`` and ``phon2dB.iso226.table`` to know the original frequency used for the result. The result for any other value is an interpolation (spline). Don't trust on values nor lower nor higher than the frequency limits there (20Hz and 12.5kHz) as they're not part of ISO226 and no value was collected to estimate them (they're just a spline interpolation to reach 1000dB at -30Hz and 32kHz). Likewise, the trustful loudness input range is from 20 to 90 phon, as written on ISO226, although other values aren't found by a spline interpolation but by using the formula on section 4.1 of ISO226. Hint ---- The ``phon2dB.iso226.table`` also have other useful information, such as the threshold values in SPL dB. """ from scipy.interpolate import UnivariateSpline table = phon2dB.iso226.table schema = phon2dB.iso226.schema freqs = [row[schema.index("freq")] for row in table] if loudness is None: # Threshold levels spl = [row[schema.index("threshold")] for row in table] else: # Curve for a specific phon value def get_pressure_level(freq, alpha, loudness_base, threshold): return 10 / alpha * math.log10( 4.47e-3 * (10 ** (.025 * loudness) - 1.14) + (.4 * 10 ** ((threshold + loudness_base) / 10 - 9)) ** alpha ) - loudness_base + 94 spl = [get_pressure_level(**dict(xzip(schema, args))) for args in table] interpolator = UnivariateSpline(freqs, spl, s=0) interpolator_low = UnivariateSpline([-30] + freqs, [1e3] + spl, s=0) interpolator_high = UnivariateSpline(freqs + [32000], spl + [1e3], s=0) @elementwise("freq", 0) def freq2dB_spl(freq): if freq < 20: return interpolator_low(freq).tolist() if freq > 12500: return interpolator_high(freq).tolist() return interpolator(freq).tolist() return freq2dB_spl
[ "def", "phon2dB", "(", "loudness", "=", "None", ")", ":", "from", "scipy", ".", "interpolate", "import", "UnivariateSpline", "table", "=", "phon2dB", ".", "iso226", ".", "table", "schema", "=", "phon2dB", ".", "iso226", ".", "schema", "freqs", "=", "[", "row", "[", "schema", ".", "index", "(", "\"freq\"", ")", "]", "for", "row", "in", "table", "]", "if", "loudness", "is", "None", ":", "# Threshold levels", "spl", "=", "[", "row", "[", "schema", ".", "index", "(", "\"threshold\"", ")", "]", "for", "row", "in", "table", "]", "else", ":", "# Curve for a specific phon value", "def", "get_pressure_level", "(", "freq", ",", "alpha", ",", "loudness_base", ",", "threshold", ")", ":", "return", "10", "/", "alpha", "*", "math", ".", "log10", "(", "4.47e-3", "*", "(", "10", "**", "(", ".025", "*", "loudness", ")", "-", "1.14", ")", "+", "(", ".4", "*", "10", "**", "(", "(", "threshold", "+", "loudness_base", ")", "/", "10", "-", "9", ")", ")", "**", "alpha", ")", "-", "loudness_base", "+", "94", "spl", "=", "[", "get_pressure_level", "(", "*", "*", "dict", "(", "xzip", "(", "schema", ",", "args", ")", ")", ")", "for", "args", "in", "table", "]", "interpolator", "=", "UnivariateSpline", "(", "freqs", ",", "spl", ",", "s", "=", "0", ")", "interpolator_low", "=", "UnivariateSpline", "(", "[", "-", "30", "]", "+", "freqs", ",", "[", "1e3", "]", "+", "spl", ",", "s", "=", "0", ")", "interpolator_high", "=", "UnivariateSpline", "(", "freqs", "+", "[", "32000", "]", ",", "spl", "+", "[", "1e3", "]", ",", "s", "=", "0", ")", "@", "elementwise", "(", "\"freq\"", ",", "0", ")", "def", "freq2dB_spl", "(", "freq", ")", ":", "if", "freq", "<", "20", ":", "return", "interpolator_low", "(", "freq", ")", ".", "tolist", "(", ")", "if", "freq", ">", "12500", ":", "return", "interpolator_high", "(", "freq", ")", ".", "tolist", "(", ")", "return", "interpolator", "(", "freq", ")", ".", "tolist", "(", ")", "return", "freq2dB_spl" ]
Loudness in phons to Sound Pressure Level (SPL) in dB using the ISO/FDIS 226:2003 model.

This function needs Scipy, as ``scipy.interpolate.UnivariateSpline`` objects are used as interpolators.

Parameters
----------
loudness :
    The loudness value in phons to be converted, or None (default) to get the threshold of hearing.

Returns
-------
A callable that returns the SPL dB value for each given frequency in hertz.

Note
----
See ``phon2dB.iso226.schema`` and ``phon2dB.iso226.table`` to know the original frequencies used for the result. The result for any other value is an interpolation (spline). Don't trust values below or above the frequency limits there (20 Hz and 12.5 kHz), as they're not part of ISO226 and no value was collected to estimate them (they're just a spline interpolation to reach 1000 dB at -30 Hz and 32 kHz). Likewise, the reliable loudness input range is from 20 to 90 phon, as written in ISO226, although other values aren't found by a spline interpolation but by using the formula in section 4.1 of ISO226.

Hint
----
The ``phon2dB.iso226.table`` also has other useful information, such as the threshold values in SPL dB.
[ "Loudness", "in", "phons", "to", "Sound", "Pressure", "Level", "(", "SPL", ")", "in", "dB", "using", "the", "ISO", "/", "FDIS", "226", ":", "2003", "model", "." ]
python
train
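A usage sketch for phon2dB above, showing that it returns a callable mapping frequency to SPL; it assumes SciPy is installed and that audiolazy exposes phon2dB at package level, and the printed numbers are not asserted here.

from audiolazy import phon2dB  # assumed public import

curve40 = phon2dB(40)          # equal-loudness curve for 40 phon
print(curve40(1000.))          # SPL in dB at 1 kHz (roughly 40 dB by construction of the phon scale)

threshold = phon2dB()          # loudness=None gives the threshold of hearing
print(threshold([250., 1000., 4000.]))  # applied elementwise over the list, per the elementwise decorator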
diffeo/rejester
rejester/_registry.py
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/_registry.py#L953-L979
def delete(self, dict_name): '''Delete an entire dictionary. This operation on its own is atomic and does not require a session lock, but a session lock is honored. :param str dict_name: name of the dictionary to delete :raises rejester.exceptions.LockError: if called with a session lock, but the system does not currently have that lock; or if called without a session lock but something else holds it ''' conn = redis.Redis(connection_pool=self.pool) script = conn.register_script(''' if redis.call("get", KEYS[1]) == ARGV[1] then redis.call("del", KEYS[2], KEYS[3]) return 0 else return -1 end ''') res = script(keys=[self._lock_name, self._namespace(dict_name), self._namespace(dict_name) + 'keys'], args=[self._session_lock_identifier]) if res == -1: raise LockError()
[ "def", "delete", "(", "self", ",", "dict_name", ")", ":", "conn", "=", "redis", ".", "Redis", "(", "connection_pool", "=", "self", ".", "pool", ")", "script", "=", "conn", ".", "register_script", "(", "'''\n if redis.call(\"get\", KEYS[1]) == ARGV[1]\n then\n redis.call(\"del\", KEYS[2], KEYS[3])\n return 0\n else\n return -1\n end\n '''", ")", "res", "=", "script", "(", "keys", "=", "[", "self", ".", "_lock_name", ",", "self", ".", "_namespace", "(", "dict_name", ")", ",", "self", ".", "_namespace", "(", "dict_name", ")", "+", "'keys'", "]", ",", "args", "=", "[", "self", ".", "_session_lock_identifier", "]", ")", "if", "res", "==", "-", "1", ":", "raise", "LockError", "(", ")" ]
Delete an entire dictionary. This operation on its own is atomic and does not require a session lock, but a session lock is honored. :param str dict_name: name of the dictionary to delete :raises rejester.exceptions.LockError: if called with a session lock, but the system does not currently have that lock; or if called without a session lock but something else holds it
[ "Delete", "an", "entire", "dictionary", "." ]
python
train
fermiPy/fermipy
fermipy/jobs/file_archive.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/file_archive.py#L632-L678
def register_file(self, filepath, creator, status=FileStatus.no_file, flags=FileFlags.no_flags): """Register a file in the archive. If the file already exists, this raises a `KeyError` Parameters ---------- filepath : str The path to the file creatror : int A unique key for the job that created this file status : `FileStatus` Enumeration giving current status of file flags : `FileFlags` Enumeration giving flags set on this file Returns `FileHandle` """ # check to see if the file already exists try: file_handle = self.get_handle(filepath) raise KeyError("File %s already exists in archive" % filepath) except KeyError: pass localpath = self._get_localpath(filepath) if status == FileStatus.exists: # Make sure the file really exists fullpath = self._get_fullpath(filepath) if not os.path.exists(fullpath): print("register_file called on called on mising file %s" % fullpath) status = FileStatus.missing timestamp = 0 else: timestamp = int(os.stat(fullpath).st_mtime) else: timestamp = 0 key = len(self._table) + 1 file_handle = FileHandle(path=localpath, key=key, creator=creator, timestamp=timestamp, status=status, flags=flags) file_handle.append_to_table(self._table) self._cache[localpath] = file_handle return file_handle
[ "def", "register_file", "(", "self", ",", "filepath", ",", "creator", ",", "status", "=", "FileStatus", ".", "no_file", ",", "flags", "=", "FileFlags", ".", "no_flags", ")", ":", "# check to see if the file already exists", "try", ":", "file_handle", "=", "self", ".", "get_handle", "(", "filepath", ")", "raise", "KeyError", "(", "\"File %s already exists in archive\"", "%", "filepath", ")", "except", "KeyError", ":", "pass", "localpath", "=", "self", ".", "_get_localpath", "(", "filepath", ")", "if", "status", "==", "FileStatus", ".", "exists", ":", "# Make sure the file really exists", "fullpath", "=", "self", ".", "_get_fullpath", "(", "filepath", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "fullpath", ")", ":", "print", "(", "\"register_file called on called on mising file %s\"", "%", "fullpath", ")", "status", "=", "FileStatus", ".", "missing", "timestamp", "=", "0", "else", ":", "timestamp", "=", "int", "(", "os", ".", "stat", "(", "fullpath", ")", ".", "st_mtime", ")", "else", ":", "timestamp", "=", "0", "key", "=", "len", "(", "self", ".", "_table", ")", "+", "1", "file_handle", "=", "FileHandle", "(", "path", "=", "localpath", ",", "key", "=", "key", ",", "creator", "=", "creator", ",", "timestamp", "=", "timestamp", ",", "status", "=", "status", ",", "flags", "=", "flags", ")", "file_handle", ".", "append_to_table", "(", "self", ".", "_table", ")", "self", ".", "_cache", "[", "localpath", "]", "=", "file_handle", "return", "file_handle" ]
Register a file in the archive. If the file already exists, this raises a `KeyError`.

Parameters
----------
filepath : str
    The path to the file
creator : int
    A unique key for the job that created this file
status : `FileStatus`
    Enumeration giving current status of file
flags : `FileFlags`
    Enumeration giving flags set on this file

Returns
-------
`FileHandle`
[ "Register", "a", "file", "in", "the", "archive", "." ]
python
train
HDI-Project/ballet
ballet/util/fs.py
https://github.com/HDI-Project/ballet/blob/6f4d4b87b8234cb6bb38b9e9484a58ef8fe8fdb2/ballet/util/fs.py#L51-L62
def splitext2(filepath): """Split filepath into root, filename, ext Args: filepath (str, path): file path Returns: str """ root, filename = os.path.split(safepath(filepath)) filename, ext = os.path.splitext(safepath(filename)) return root, filename, ext
[ "def", "splitext2", "(", "filepath", ")", ":", "root", ",", "filename", "=", "os", ".", "path", ".", "split", "(", "safepath", "(", "filepath", ")", ")", "filename", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "safepath", "(", "filename", ")", ")", "return", "root", ",", "filename", ",", "ext" ]
Split filepath into root, filename, ext

Args:
    filepath (str, path): file path

Returns:
    Tuple[str, str, str]: (root, filename, ext)
[ "Split", "filepath", "into", "root", "filename", "ext" ]
python
train
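A short usage sketch for splitext2 above, clarifying that it returns a 3-tuple rather than a single string; the path is invented and safepath is assumed to accept plain strings.

from ballet.util.fs import splitext2  # assumed import path

root, filename, ext = splitext2('/tmp/data/train.csv')
print(root, filename, ext)  # expected: /tmp/data train .csv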
saltstack/salt
salt/modules/snapper.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/snapper.py#L607-L643
def status(config='root', num_pre=None, num_post=None): ''' Returns a comparison between two snapshots config Configuration name. num_pre first snapshot ID to compare. Default is last snapshot num_post last snapshot ID to compare. Default is 0 (current state) CLI example: .. code-block:: bash salt '*' snapper.status salt '*' snapper.status num_pre=19 num_post=20 ''' try: pre, post = _get_num_interval(config, num_pre, num_post) snapper.CreateComparison(config, int(pre), int(post)) files = snapper.GetFiles(config, int(pre), int(post)) status_ret = {} SUBVOLUME = list_configs()[config]['SUBVOLUME'] for file in files: # In case of SUBVOLUME is included in filepath we remove it # to prevent from filepath starting with double '/' _filepath = file[0][len(SUBVOLUME):] if file[0].startswith(SUBVOLUME) else file[0] status_ret[os.path.normpath(SUBVOLUME + _filepath)] = {'status': status_to_string(file[1])} return status_ret except dbus.DBusException as exc: raise CommandExecutionError( 'Error encountered while listing changed files: {0}' .format(_dbus_exception_to_reason(exc, locals())) )
[ "def", "status", "(", "config", "=", "'root'", ",", "num_pre", "=", "None", ",", "num_post", "=", "None", ")", ":", "try", ":", "pre", ",", "post", "=", "_get_num_interval", "(", "config", ",", "num_pre", ",", "num_post", ")", "snapper", ".", "CreateComparison", "(", "config", ",", "int", "(", "pre", ")", ",", "int", "(", "post", ")", ")", "files", "=", "snapper", ".", "GetFiles", "(", "config", ",", "int", "(", "pre", ")", ",", "int", "(", "post", ")", ")", "status_ret", "=", "{", "}", "SUBVOLUME", "=", "list_configs", "(", ")", "[", "config", "]", "[", "'SUBVOLUME'", "]", "for", "file", "in", "files", ":", "# In case of SUBVOLUME is included in filepath we remove it", "# to prevent from filepath starting with double '/'", "_filepath", "=", "file", "[", "0", "]", "[", "len", "(", "SUBVOLUME", ")", ":", "]", "if", "file", "[", "0", "]", ".", "startswith", "(", "SUBVOLUME", ")", "else", "file", "[", "0", "]", "status_ret", "[", "os", ".", "path", ".", "normpath", "(", "SUBVOLUME", "+", "_filepath", ")", "]", "=", "{", "'status'", ":", "status_to_string", "(", "file", "[", "1", "]", ")", "}", "return", "status_ret", "except", "dbus", ".", "DBusException", "as", "exc", ":", "raise", "CommandExecutionError", "(", "'Error encountered while listing changed files: {0}'", ".", "format", "(", "_dbus_exception_to_reason", "(", "exc", ",", "locals", "(", ")", ")", ")", ")" ]
Returns a comparison between two snapshots config Configuration name. num_pre first snapshot ID to compare. Default is last snapshot num_post last snapshot ID to compare. Default is 0 (current state) CLI example: .. code-block:: bash salt '*' snapper.status salt '*' snapper.status num_pre=19 num_post=20
[ "Returns", "a", "comparison", "between", "two", "snapshots" ]
python
train
skorch-dev/skorch
skorch/cli.py
https://github.com/skorch-dev/skorch/blob/5b9b8b7b7712cb6e5aaa759d9608ea6269d5bcd3/skorch/cli.py#L138-L157
def parse_net_kwargs(kwargs): """Parse arguments for the estimator. Resolves dotted names and instantiated classes. Examples -------- >>> kwargs = {'lr': 0.1, 'module__nonlin': 'torch.nn.Hardtanh(-2, max_val=3)'} >>> parse_net_kwargs(kwargs) {'lr': 0.1, 'module__nonlin': Hardtanh(min_val=-2, max_val=3)} """ if not kwargs: return kwargs resolved = {} for k, v in kwargs.items(): resolved[k] = _resolve_dotted_name(v) return resolved
[ "def", "parse_net_kwargs", "(", "kwargs", ")", ":", "if", "not", "kwargs", ":", "return", "kwargs", "resolved", "=", "{", "}", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", ":", "resolved", "[", "k", "]", "=", "_resolve_dotted_name", "(", "v", ")", "return", "resolved" ]
Parse arguments for the estimator. Resolves dotted names and instantiated classes. Examples -------- >>> kwargs = {'lr': 0.1, 'module__nonlin': 'torch.nn.Hardtanh(-2, max_val=3)'} >>> parse_net_kwargs(kwargs) {'lr': 0.1, 'module__nonlin': Hardtanh(min_val=-2, max_val=3)}
[ "Parse", "arguments", "for", "the", "estimator", "." ]
python
train
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/engine.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/engine.py#L76-L91
def add_dependency (self, targets, sources): """Adds a dependency from 'targets' to 'sources' Both 'targets' and 'sources' can be either list of target names, or a single target name. """ if isinstance (targets, str): targets = [targets] if isinstance (sources, str): sources = [sources] assert is_iterable(targets) assert is_iterable(sources) for target in targets: for source in sources: self.do_add_dependency (target, source)
[ "def", "add_dependency", "(", "self", ",", "targets", ",", "sources", ")", ":", "if", "isinstance", "(", "targets", ",", "str", ")", ":", "targets", "=", "[", "targets", "]", "if", "isinstance", "(", "sources", ",", "str", ")", ":", "sources", "=", "[", "sources", "]", "assert", "is_iterable", "(", "targets", ")", "assert", "is_iterable", "(", "sources", ")", "for", "target", "in", "targets", ":", "for", "source", "in", "sources", ":", "self", ".", "do_add_dependency", "(", "target", ",", "source", ")" ]
Adds a dependency from 'targets' to 'sources'. Both 'targets' and 'sources' can be either a list of target names or a single target name.
[ "Adds", "a", "dependency", "from", "targets", "to", "sources" ]
python
train
open511/open511
open511/converter/tmdd.py
https://github.com/open511/open511/blob/3d573f59d7efa06ff1b5419ea5ff4d90a90b3cf8/open511/converter/tmdd.py#L64-L75
def _generate_automatic_headline(c): """The only field that maps closely to Open511 <headline>, a required field, is optional in TMDD. So we sometimes need to generate our own.""" # Start with the event type, e.g. "Incident" headline = c.data['event_type'].replace('_', ' ').title() if c.data['roads']: # Add the road name headline += ' on ' + c.data['roads'][0]['name'] direction = c.data['roads'][0].get('direction') if direction and direction not in ('BOTH', 'NONE'): headline += ' ' + direction return headline
[ "def", "_generate_automatic_headline", "(", "c", ")", ":", "# Start with the event type, e.g. \"Incident\"", "headline", "=", "c", ".", "data", "[", "'event_type'", "]", ".", "replace", "(", "'_'", ",", "' '", ")", ".", "title", "(", ")", "if", "c", ".", "data", "[", "'roads'", "]", ":", "# Add the road name", "headline", "+=", "' on '", "+", "c", ".", "data", "[", "'roads'", "]", "[", "0", "]", "[", "'name'", "]", "direction", "=", "c", ".", "data", "[", "'roads'", "]", "[", "0", "]", ".", "get", "(", "'direction'", ")", "if", "direction", "and", "direction", "not", "in", "(", "'BOTH'", ",", "'NONE'", ")", ":", "headline", "+=", "' '", "+", "direction", "return", "headline" ]
The only field that maps closely to Open511 <headline>, a required field, is optional in TMDD. So we sometimes need to generate our own.
[ "The", "only", "field", "that", "maps", "closely", "to", "Open511", "<headline", ">", "a", "required", "field", "is", "optional", "in", "TMDD", ".", "So", "we", "sometimes", "need", "to", "generate", "our", "own", "." ]
python
valid
lemieuxl/pyGenClean
pyGenClean/run_data_clean_up.py
https://github.com/lemieuxl/pyGenClean/blob/6173a48ccc0cf3a3bd711b1f2a1fa16248b8bf55/pyGenClean/run_data_clean_up.py#L3162-L3277
def check_input_files(prefix, the_type, required_type): """Check that the file is of a certain file type. :param prefix: the prefix of the input files. :param the_type: the type of the input files (bfile, tfile or file). :param required_type: the required type of the input files (bfile, tfile or file). :type prefix: str :type the_type: str :type required_type: str :returns: ``True`` if everything is OK. Checks if the files are of the required type, according to their current type. The available types are ``bfile`` (binary), ``tfile`` (transposed) and ``file`` (normal). """ # The files required for each type bfile_type = {".bed", ".bim", ".fam"} tfile_type = {".tped", ".tfam"} file_type = {".ped", ".map"} # Check if of bfile, tfile and file plink_command = ["plink", "--noweb", "--out", prefix] if required_type == "bfile": # We need bfile plink_command += ["--make-bed"] if the_type == "bfile": return True elif the_type == "tfile": # We have tfile, we need to create bfile from tfile plink_command += ["--tfile", prefix] elif the_type == "file": # We have file, we need to create bfile from file plink_command += ["--file", prefix] else: msg = "{}: no suitable input format...".format(prefix) raise ProgramError(msg) # We create the required files if os.path.isfile(prefix + ".log"): # There is a log file... we need to copy it shutil.copyfile(prefix + ".log", prefix + ".olog") logger.info("Converting {} from {} to {}".format( prefix, the_type, required_type, )) run_command(plink_command) # Everything is now fine return True elif required_type == "tfile": # We need a tfile plink_command += ["--recode", "--transpose", "--tab"] if the_type == "tfile": return True elif the_type == "bfile": # We have bfile, we need to create tfile from bfile plink_command += ["--bfile", prefix] elif the_type == "file": # We have file, we need to create tfile from file plink_command += ["--file", prefix] else: msg = "{}: no suitable input format...".format(prefix) raise ProgramError(msg) # We create the required files if os.path.isfile(prefix + ".log"): # There is a log file... we need to copy it shutil.copyfile(prefix + ".log", prefix + ".olog") logger.info("Converting {} from {} to {}".format( prefix, the_type, required_type, )) run_command(plink_command) # Everything is now fine return True elif required_type == "file": # We need a file plink_command += ["--recode", "--tab"] if the_type == "file": return True elif the_type == "bfile": # We have bfile, we need to create file from bfile plink_command += ["--bfile", prefix] elif the_type == "tfile": # We have tfile, we need to create file from tfile plink_command += ["--tfile", prefix] else: msg = "{}: no suitable input format...".format(prefix) raise ProgramError(msg) # We create the required files if os.path.isfile(prefix + ".log"): # There is a log file... we need to copy it shutil.copyfile(prefix + ".log", prefix + ".olog") logger.info("Converting {} from {} to {}".format( prefix, the_type, required_type, )) run_command(plink_command) # Everything is now fine return True else: msg = "{}: unknown file format".format(required_type) raise ProgramError(msg)
[ "def", "check_input_files", "(", "prefix", ",", "the_type", ",", "required_type", ")", ":", "# The files required for each type", "bfile_type", "=", "{", "\".bed\"", ",", "\".bim\"", ",", "\".fam\"", "}", "tfile_type", "=", "{", "\".tped\"", ",", "\".tfam\"", "}", "file_type", "=", "{", "\".ped\"", ",", "\".map\"", "}", "# Check if of bfile, tfile and file", "plink_command", "=", "[", "\"plink\"", ",", "\"--noweb\"", ",", "\"--out\"", ",", "prefix", "]", "if", "required_type", "==", "\"bfile\"", ":", "# We need bfile", "plink_command", "+=", "[", "\"--make-bed\"", "]", "if", "the_type", "==", "\"bfile\"", ":", "return", "True", "elif", "the_type", "==", "\"tfile\"", ":", "# We have tfile, we need to create bfile from tfile", "plink_command", "+=", "[", "\"--tfile\"", ",", "prefix", "]", "elif", "the_type", "==", "\"file\"", ":", "# We have file, we need to create bfile from file", "plink_command", "+=", "[", "\"--file\"", ",", "prefix", "]", "else", ":", "msg", "=", "\"{}: no suitable input format...\"", ".", "format", "(", "prefix", ")", "raise", "ProgramError", "(", "msg", ")", "# We create the required files", "if", "os", ".", "path", ".", "isfile", "(", "prefix", "+", "\".log\"", ")", ":", "# There is a log file... we need to copy it", "shutil", ".", "copyfile", "(", "prefix", "+", "\".log\"", ",", "prefix", "+", "\".olog\"", ")", "logger", ".", "info", "(", "\"Converting {} from {} to {}\"", ".", "format", "(", "prefix", ",", "the_type", ",", "required_type", ",", ")", ")", "run_command", "(", "plink_command", ")", "# Everything is now fine", "return", "True", "elif", "required_type", "==", "\"tfile\"", ":", "# We need a tfile", "plink_command", "+=", "[", "\"--recode\"", ",", "\"--transpose\"", ",", "\"--tab\"", "]", "if", "the_type", "==", "\"tfile\"", ":", "return", "True", "elif", "the_type", "==", "\"bfile\"", ":", "# We have bfile, we need to create tfile from bfile", "plink_command", "+=", "[", "\"--bfile\"", ",", "prefix", "]", "elif", "the_type", "==", "\"file\"", ":", "# We have file, we need to create tfile from file", "plink_command", "+=", "[", "\"--file\"", ",", "prefix", "]", "else", ":", "msg", "=", "\"{}: no suitable input format...\"", ".", "format", "(", "prefix", ")", "raise", "ProgramError", "(", "msg", ")", "# We create the required files", "if", "os", ".", "path", ".", "isfile", "(", "prefix", "+", "\".log\"", ")", ":", "# There is a log file... 
we need to copy it", "shutil", ".", "copyfile", "(", "prefix", "+", "\".log\"", ",", "prefix", "+", "\".olog\"", ")", "logger", ".", "info", "(", "\"Converting {} from {} to {}\"", ".", "format", "(", "prefix", ",", "the_type", ",", "required_type", ",", ")", ")", "run_command", "(", "plink_command", ")", "# Everything is now fine", "return", "True", "elif", "required_type", "==", "\"file\"", ":", "# We need a file", "plink_command", "+=", "[", "\"--recode\"", ",", "\"--tab\"", "]", "if", "the_type", "==", "\"file\"", ":", "return", "True", "elif", "the_type", "==", "\"bfile\"", ":", "# We have bfile, we need to create file from bfile", "plink_command", "+=", "[", "\"--bfile\"", ",", "prefix", "]", "elif", "the_type", "==", "\"tfile\"", ":", "# We have tfile, we need to create file from tfile", "plink_command", "+=", "[", "\"--tfile\"", ",", "prefix", "]", "else", ":", "msg", "=", "\"{}: no suitable input format...\"", ".", "format", "(", "prefix", ")", "raise", "ProgramError", "(", "msg", ")", "# We create the required files", "if", "os", ".", "path", ".", "isfile", "(", "prefix", "+", "\".log\"", ")", ":", "# There is a log file... we need to copy it", "shutil", ".", "copyfile", "(", "prefix", "+", "\".log\"", ",", "prefix", "+", "\".olog\"", ")", "logger", ".", "info", "(", "\"Converting {} from {} to {}\"", ".", "format", "(", "prefix", ",", "the_type", ",", "required_type", ",", ")", ")", "run_command", "(", "plink_command", ")", "# Everything is now fine", "return", "True", "else", ":", "msg", "=", "\"{}: unknown file format\"", ".", "format", "(", "required_type", ")", "raise", "ProgramError", "(", "msg", ")" ]
Check that the file is of a certain file type. :param prefix: the prefix of the input files. :param the_type: the type of the input files (bfile, tfile or file). :param required_type: the required type of the input files (bfile, tfile or file). :type prefix: str :type the_type: str :type required_type: str :returns: ``True`` if everything is OK. Checks if the files are of the required type, according to their current type. The available types are ``bfile`` (binary), ``tfile`` (transposed) and ``file`` (normal).
[ "Check", "that", "the", "file", "is", "of", "a", "certain", "file", "type", "." ]
python
train
elifesciences/elife-tools
elifetools/utils.py
https://github.com/elifesciences/elife-tools/blob/4b9e38cbe485c61a4ed7cbd8970c6b318334fd86/elifetools/utils.py#L78-L85
def coerce_to_int(val, default=0xDEADBEEF): """Attempts to cast given value to an integer, return the original value if failed or the default if one provided.""" try: return int(val) except (TypeError, ValueError): if default != 0xDEADBEEF: return default return val
[ "def", "coerce_to_int", "(", "val", ",", "default", "=", "0xDEADBEEF", ")", ":", "try", ":", "return", "int", "(", "val", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "if", "default", "!=", "0xDEADBEEF", ":", "return", "default", "return", "val" ]
Attempts to cast the given value to an integer, returning the original value if the cast fails, or the default if one is provided.
[ "Attempts", "to", "cast", "given", "value", "to", "an", "integer", "return", "the", "original", "value", "if", "failed", "or", "the", "default", "if", "one", "provided", "." ]
python
train
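A usage sketch for coerce_to_int above, illustrating the 0xDEADBEEF sentinel trick: without an explicit default the original value comes back on failure, with one the default is returned. The inputs are illustrative.

from elifetools.utils import coerce_to_int  # assumed import path

print(coerce_to_int('12'))                 # 12      (successful cast)
print(coerce_to_int('n/a'))                # 'n/a'   (no default given, original value returned)
print(coerce_to_int('n/a', default=None))  # None    (explicit default wins on failure)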
lrq3000/pyFileFixity
pyFileFixity/lib/reedsolomon/reedsolo.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/reedsolomon/reedsolo.py#L126-L178
def find_prime_polys(generator=2, c_exp=8, fast_primes=False, single=False): '''Compute the list of prime polynomials for the given generator and galois field characteristic exponent.''' # fast_primes will output less results but will be significantly faster. # single will output the first prime polynomial found, so if all you want is to just find one prime polynomial to generate the LUT for Reed-Solomon to work, then just use that. # A prime polynomial (necessarily irreducible) is necessary to reduce the multiplications in the Galois Field, so as to avoid overflows. # Why do we need a "prime polynomial"? Can't we just reduce modulo 255 (for GF(2^8) for example)? Because we need the values to be unique. # For example: if the generator (alpha) = 2 and c_exp = 8 (GF(2^8) == GF(256)), then the generated Galois Field (0, 1, α, α^1, α^2, ..., α^(p-1)) will be galois field it becomes 0, 1, 2, 4, 8, 16, etc. However, upon reaching 128, the next value will be doubled (ie, next power of 2), which will give 256. Then we must reduce, because we have overflowed above the maximum value of 255. But, if we modulo 255, this will generate 256 == 1. Then 2, 4, 8, 16, etc. giving us a repeating pattern of numbers. This is very bad, as it's then not anymore a bijection (ie, a non-zero value doesn't have a unique index). That's why we can't just modulo 255, but we need another number above 255, which is called the prime polynomial. # Why so much hassle? Because we are using precomputed look-up tables for multiplication: instead of multiplying a*b, we precompute alpha^a, alpha^b and alpha^(a+b), so that we can just use our lookup table at alpha^(a+b) and get our result. But just like in our original field we had 0,1,2,...,p-1 distinct unique values, in our "LUT" field using alpha we must have unique distinct values (we don't care that they are different from the original field as long as they are unique and distinct). That's why we need to avoid duplicated values, and to avoid duplicated values we need to use a prime irreducible polynomial. # Here is implemented a bruteforce approach to find all these prime polynomials, by generating every possible prime polynomials (ie, every integers between field_charac+1 and field_charac*2), and then we build the whole Galois Field, and we reject the candidate prime polynomial if it duplicates even one value or if it generates a value above field_charac (ie, cause an overflow). # Note that this algorithm is slow if the field is too big (above 12), because it's an exhaustive search algorithm. There are probabilistic approaches, and almost surely prime approaches, but there is no determistic polynomial time algorithm to find irreducible monic polynomials. More info can be found at: http://people.mpi-inf.mpg.de/~csaha/lectures/lec9.pdf # Another faster algorithm may be found at Adleman, Leonard M., and Hendrik W. Lenstra. "Finding irreducible polynomials over finite fields." Proceedings of the eighteenth annual ACM symposium on Theory of computing. ACM, 1986. 
# Prepare the finite field characteristic (2^p - 1), this also represent the maximum possible value in this field root_charac = 2 # we're in GF(2) field_charac = int(root_charac**c_exp - 1) field_charac_next = int(root_charac**(c_exp+1) - 1) prim_candidates = [] if fast_primes: prim_candidates = rwh_primes1(field_charac_next) # generate maybe prime polynomials and check later if they really are irreducible prim_candidates = [x for x in prim_candidates if x > field_charac] # filter out too small primes else: prim_candidates = xrange(field_charac+2, field_charac_next, root_charac) # try each possible prime polynomial, but skip even numbers (because divisible by 2 so necessarily not irreducible) # Start of the main loop correct_primes = [] for prim in prim_candidates: # try potential candidates primitive irreducible polys seen = bytearray(field_charac+1) # memory variable to indicate if a value was already generated in the field (value at index x is set to 1) or not (set to 0 by default) conflict = False # flag to know if there was at least one conflict # Second loop, build the whole Galois Field x = 1 for i in xrange(field_charac): # Compute the next value in the field (ie, the next power of alpha/generator) x = gf_mult_noLUT(x, generator, prim, field_charac+1) # Rejection criterion: if the value overflowed (above field_charac) or is a duplicate of a previously generated power of alpha, then we reject this polynomial (not prime) if x > field_charac or seen[x] == 1: conflict = True break # Else we flag this value as seen (to maybe detect future duplicates), and we continue onto the next power of alpha else: seen[x] = 1 # End of the second loop: if there's no conflict (no overflow nor duplicated value), this is a prime polynomial! if not conflict: correct_primes.append(prim) if single: return prim # Return the list of all prime polynomials return correct_primes
[ "def", "find_prime_polys", "(", "generator", "=", "2", ",", "c_exp", "=", "8", ",", "fast_primes", "=", "False", ",", "single", "=", "False", ")", ":", "# fast_primes will output less results but will be significantly faster.", "# single will output the first prime polynomial found, so if all you want is to just find one prime polynomial to generate the LUT for Reed-Solomon to work, then just use that.", "# A prime polynomial (necessarily irreducible) is necessary to reduce the multiplications in the Galois Field, so as to avoid overflows.", "# Why do we need a \"prime polynomial\"? Can't we just reduce modulo 255 (for GF(2^8) for example)? Because we need the values to be unique.", "# For example: if the generator (alpha) = 2 and c_exp = 8 (GF(2^8) == GF(256)), then the generated Galois Field (0, 1, α, α^1, α^2, ..., α^(p-1)) will be galois field it becomes 0, 1, 2, 4, 8, 16, etc. However, upon reaching 128, the next value will be doubled (ie, next power of 2), which will give 256. Then we must reduce, because we have overflowed above the maximum value of 255. But, if we modulo 255, this will generate 256 == 1. Then 2, 4, 8, 16, etc. giving us a repeating pattern of numbers. This is very bad, as it's then not anymore a bijection (ie, a non-zero value doesn't have a unique index). That's why we can't just modulo 255, but we need another number above 255, which is called the prime polynomial.", "# Why so much hassle? Because we are using precomputed look-up tables for multiplication: instead of multiplying a*b, we precompute alpha^a, alpha^b and alpha^(a+b), so that we can just use our lookup table at alpha^(a+b) and get our result. But just like in our original field we had 0,1,2,...,p-1 distinct unique values, in our \"LUT\" field using alpha we must have unique distinct values (we don't care that they are different from the original field as long as they are unique and distinct). That's why we need to avoid duplicated values, and to avoid duplicated values we need to use a prime irreducible polynomial.", "# Here is implemented a bruteforce approach to find all these prime polynomials, by generating every possible prime polynomials (ie, every integers between field_charac+1 and field_charac*2), and then we build the whole Galois Field, and we reject the candidate prime polynomial if it duplicates even one value or if it generates a value above field_charac (ie, cause an overflow).", "# Note that this algorithm is slow if the field is too big (above 12), because it's an exhaustive search algorithm. There are probabilistic approaches, and almost surely prime approaches, but there is no determistic polynomial time algorithm to find irreducible monic polynomials. More info can be found at: http://people.mpi-inf.mpg.de/~csaha/lectures/lec9.pdf", "# Another faster algorithm may be found at Adleman, Leonard M., and Hendrik W. Lenstra. \"Finding irreducible polynomials over finite fields.\" Proceedings of the eighteenth annual ACM symposium on Theory of computing. 
ACM, 1986.", "# Prepare the finite field characteristic (2^p - 1), this also represent the maximum possible value in this field", "root_charac", "=", "2", "# we're in GF(2)", "field_charac", "=", "int", "(", "root_charac", "**", "c_exp", "-", "1", ")", "field_charac_next", "=", "int", "(", "root_charac", "**", "(", "c_exp", "+", "1", ")", "-", "1", ")", "prim_candidates", "=", "[", "]", "if", "fast_primes", ":", "prim_candidates", "=", "rwh_primes1", "(", "field_charac_next", ")", "# generate maybe prime polynomials and check later if they really are irreducible", "prim_candidates", "=", "[", "x", "for", "x", "in", "prim_candidates", "if", "x", ">", "field_charac", "]", "# filter out too small primes", "else", ":", "prim_candidates", "=", "xrange", "(", "field_charac", "+", "2", ",", "field_charac_next", ",", "root_charac", ")", "# try each possible prime polynomial, but skip even numbers (because divisible by 2 so necessarily not irreducible)", "# Start of the main loop", "correct_primes", "=", "[", "]", "for", "prim", "in", "prim_candidates", ":", "# try potential candidates primitive irreducible polys", "seen", "=", "bytearray", "(", "field_charac", "+", "1", ")", "# memory variable to indicate if a value was already generated in the field (value at index x is set to 1) or not (set to 0 by default)", "conflict", "=", "False", "# flag to know if there was at least one conflict", "# Second loop, build the whole Galois Field", "x", "=", "1", "for", "i", "in", "xrange", "(", "field_charac", ")", ":", "# Compute the next value in the field (ie, the next power of alpha/generator)", "x", "=", "gf_mult_noLUT", "(", "x", ",", "generator", ",", "prim", ",", "field_charac", "+", "1", ")", "# Rejection criterion: if the value overflowed (above field_charac) or is a duplicate of a previously generated power of alpha, then we reject this polynomial (not prime)", "if", "x", ">", "field_charac", "or", "seen", "[", "x", "]", "==", "1", ":", "conflict", "=", "True", "break", "# Else we flag this value as seen (to maybe detect future duplicates), and we continue onto the next power of alpha", "else", ":", "seen", "[", "x", "]", "=", "1", "# End of the second loop: if there's no conflict (no overflow nor duplicated value), this is a prime polynomial!", "if", "not", "conflict", ":", "correct_primes", ".", "append", "(", "prim", ")", "if", "single", ":", "return", "prim", "# Return the list of all prime polynomials", "return", "correct_primes" ]
Compute the list of prime polynomials for the given generator and galois field characteristic exponent.
[ "Compute", "the", "list", "of", "prime", "polynomials", "for", "the", "given", "generator", "and", "galois", "field", "characteristic", "exponent", "." ]
python
train
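For the find_prime_polys record above, here is a small, self-contained sketch of the primitivity test its inner loop performs, specialised to generator=2 so that multiplication by alpha becomes a shift plus a conditional XOR with the candidate prime polynomial. It is an illustration, not the record's own gf_mult_noLUT helper; 0x11d and 0x11b are the usual Reed-Solomon and AES moduli for GF(2^8).

def is_two_primitive(prim, c_exp=8):
    # True if repeated multiplication by alpha = 2 modulo `prim` visits every non-zero field value exactly once.
    field_charac = 2 ** c_exp - 1
    seen = set()
    x = 1
    for _ in range(field_charac):
        x <<= 1                  # multiply by the generator alpha = 2
        if x > field_charac:     # overflow: reduce by XOR with the prime polynomial (GF(2) subtraction)
            x ^= prim
        if x in seen:            # duplicated power of alpha -> reject, exactly like the record's conflict flag
            return False
        seen.add(x)
    return len(seen) == field_charac

print(is_two_primitive(0x11d))   # True: 285 is the classic Reed-Solomon choice for GF(2^8)
print(is_two_primitive(0x11b))   # False: 2 is not a primitive element modulo the AES polynomial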
nion-software/nionswift
nion/typeshed/API_1_0.py
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/typeshed/API_1_0.py#L1110-L1119
def create_data_and_metadata_from_data(self, data: numpy.ndarray, intensity_calibration: Calibration.Calibration=None, dimensional_calibrations: typing.List[Calibration.Calibration]=None, metadata: dict=None, timestamp: str=None) -> DataAndMetadata.DataAndMetadata: """Create a data_and_metadata object from data. .. versionadded:: 1.0 .. deprecated:: 1.1 Use :py:meth:`~nion.swift.Facade.DataItem.create_data_and_metadata` instead. Scriptable: No """ ...
[ "def", "create_data_and_metadata_from_data", "(", "self", ",", "data", ":", "numpy", ".", "ndarray", ",", "intensity_calibration", ":", "Calibration", ".", "Calibration", "=", "None", ",", "dimensional_calibrations", ":", "typing", ".", "List", "[", "Calibration", ".", "Calibration", "]", "=", "None", ",", "metadata", ":", "dict", "=", "None", ",", "timestamp", ":", "str", "=", "None", ")", "->", "DataAndMetadata", ".", "DataAndMetadata", ":", "..." ]
Create a data_and_metadata object from data. .. versionadded:: 1.0 .. deprecated:: 1.1 Use :py:meth:`~nion.swift.Facade.DataItem.create_data_and_metadata` instead. Scriptable: No
[ "Create", "a", "data_and_metadata", "object", "from", "data", "." ]
python
train
Esri/ArcREST
src/arcrest/agol/services.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/agol/services.py#L796-L825
def synchronizeReplica(self, replicaID, transportType="esriTransportTypeUrl", replicaServerGen=None, returnIdsForAdds=False, edits=None, returnAttachmentDatabyURL=False, async=False, syncDirection="snapshot", syncLayers="perReplica", editsUploadID=None, editsUploadFormat=None, dataFormat="json", rollbackOnFailure=True): """ TODO: implement synchronize replica http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#//02r3000000vv000000 """ params = { "f" : "json", "replicaID" : replicaID, "transportType" : transportType, "dataFormat" : dataFormat, "rollbackOnFailure" : rollbackOnFailure, "async" : async, "returnIdsForAdds": returnIdsForAdds, "syncDirection" : syncDirection, "returnAttachmentDatabyURL" : returnAttachmentDatabyURL } return
[ "def", "synchronizeReplica", "(", "self", ",", "replicaID", ",", "transportType", "=", "\"esriTransportTypeUrl\"", ",", "replicaServerGen", "=", "None", ",", "returnIdsForAdds", "=", "False", ",", "edits", "=", "None", ",", "returnAttachmentDatabyURL", "=", "False", ",", "async", "=", "False", ",", "syncDirection", "=", "\"snapshot\"", ",", "syncLayers", "=", "\"perReplica\"", ",", "editsUploadID", "=", "None", ",", "editsUploadFormat", "=", "None", ",", "dataFormat", "=", "\"json\"", ",", "rollbackOnFailure", "=", "True", ")", ":", "params", "=", "{", "\"f\"", ":", "\"json\"", ",", "\"replicaID\"", ":", "replicaID", ",", "\"transportType\"", ":", "transportType", ",", "\"dataFormat\"", ":", "dataFormat", ",", "\"rollbackOnFailure\"", ":", "rollbackOnFailure", ",", "\"async\"", ":", "async", ",", "\"returnIdsForAdds\"", ":", "returnIdsForAdds", ",", "\"syncDirection\"", ":", "syncDirection", ",", "\"returnAttachmentDatabyURL\"", ":", "returnAttachmentDatabyURL", "}", "return" ]
TODO: implement synchronize replica http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#//02r3000000vv000000
[ "TODO", ":", "implement", "synchronize", "replica", "http", ":", "//", "resources", ".", "arcgis", ".", "com", "/", "en", "/", "help", "/", "arcgis", "-", "rest", "-", "api", "/", "index", ".", "html#", "//", "02r3000000vv000000" ]
python
train
OCA/openupgradelib
openupgradelib/openupgrade.py
https://github.com/OCA/openupgradelib/blob/b220b6498075d62c1b64073cc934513a465cfd85/openupgradelib/openupgrade.py#L1193-L1268
def map_values( cr, source_column, target_column, mapping, model=None, table=None, write='sql'): """ Map old values to new values within the same model or table. Old values presumably come from a legacy column. You will typically want to use it in post-migration scripts. :param cr: The database cursor :param source_column: the database column that contains old values to be \ mapped :param target_column: the database column, or model field (if 'write' is \ 'orm') that the new values are written to :param mapping: list of tuples [(old value, new value)] Old value True represents "is set", False "is not set". :param model: used for writing if 'write' is 'orm', or to retrieve the \ table if 'table' is not given. :param table: the database table used to query the old values, and write \ the new values (if 'write' is 'sql') :param write: Either 'orm' or 'sql'. Note that old ids are always \ identified by an sql read. This method does not support mapping m2m, o2m or property fields. \ For o2m you can migrate the inverse field's column instead. .. versionadded:: 8.0 """ if write not in ('sql', 'orm'): logger.exception( "map_values is called with unknown value for write param: %s", write) if not table: if not model: logger.exception("map_values is called with no table and no model") table = model._table if source_column == target_column: logger.exception( "map_values is called with the same value for source and old" " columns : %s", source_column) for old, new in mapping: new = "'%s'" % new if old is True: old = 'NOT NULL' op = 'IS' elif old is False: old = 'NULL' op = 'IS' else: old = "'%s'" % old op = '=' values = { 'table': table, 'source': source_column, 'target': target_column, 'old': old, 'new': new, 'op': op, } if write == 'sql': query = """UPDATE %(table)s SET %(target)s = %(new)s WHERE %(source)s %(op)s %(old)s""" % values else: query = """SELECT id FROM %(table)s WHERE %(source)s %(op)s %(old)s""" % values logged_query(cr, query, values) if write == 'orm': model.write( cr, SUPERUSER_ID, [row[0] for row in cr.fetchall()], {target_column: new})
[ "def", "map_values", "(", "cr", ",", "source_column", ",", "target_column", ",", "mapping", ",", "model", "=", "None", ",", "table", "=", "None", ",", "write", "=", "'sql'", ")", ":", "if", "write", "not", "in", "(", "'sql'", ",", "'orm'", ")", ":", "logger", ".", "exception", "(", "\"map_values is called with unknown value for write param: %s\"", ",", "write", ")", "if", "not", "table", ":", "if", "not", "model", ":", "logger", ".", "exception", "(", "\"map_values is called with no table and no model\"", ")", "table", "=", "model", ".", "_table", "if", "source_column", "==", "target_column", ":", "logger", ".", "exception", "(", "\"map_values is called with the same value for source and old\"", "\" columns : %s\"", ",", "source_column", ")", "for", "old", ",", "new", "in", "mapping", ":", "new", "=", "\"'%s'\"", "%", "new", "if", "old", "is", "True", ":", "old", "=", "'NOT NULL'", "op", "=", "'IS'", "elif", "old", "is", "False", ":", "old", "=", "'NULL'", "op", "=", "'IS'", "else", ":", "old", "=", "\"'%s'\"", "%", "old", "op", "=", "'='", "values", "=", "{", "'table'", ":", "table", ",", "'source'", ":", "source_column", ",", "'target'", ":", "target_column", ",", "'old'", ":", "old", ",", "'new'", ":", "new", ",", "'op'", ":", "op", ",", "}", "if", "write", "==", "'sql'", ":", "query", "=", "\"\"\"UPDATE %(table)s\n SET %(target)s = %(new)s\n WHERE %(source)s %(op)s %(old)s\"\"\"", "%", "values", "else", ":", "query", "=", "\"\"\"SELECT id FROM %(table)s\n WHERE %(source)s %(op)s %(old)s\"\"\"", "%", "values", "logged_query", "(", "cr", ",", "query", ",", "values", ")", "if", "write", "==", "'orm'", ":", "model", ".", "write", "(", "cr", ",", "SUPERUSER_ID", ",", "[", "row", "[", "0", "]", "for", "row", "in", "cr", ".", "fetchall", "(", ")", "]", ",", "{", "target_column", ":", "new", "}", ")" ]
Map old values to new values within the same model or table. Old values presumably come from a legacy column. You will typically want to use it in post-migration scripts. :param cr: The database cursor :param source_column: the database column that contains old values to be \ mapped :param target_column: the database column, or model field (if 'write' is \ 'orm') that the new values are written to :param mapping: list of tuples [(old value, new value)] Old value True represents "is set", False "is not set". :param model: used for writing if 'write' is 'orm', or to retrieve the \ table if 'table' is not given. :param table: the database table used to query the old values, and write \ the new values (if 'write' is 'sql') :param write: Either 'orm' or 'sql'. Note that old ids are always \ identified by an sql read. This method does not support mapping m2m, o2m or property fields. \ For o2m you can migrate the inverse field's column instead. .. versionadded:: 8.0
[ "Map", "old", "values", "to", "new", "values", "within", "the", "same", "model", "or", "table", ".", "Old", "values", "presumably", "come", "from", "a", "legacy", "column", ".", "You", "will", "typically", "want", "to", "use", "it", "in", "post", "-", "migration", "scripts", "." ]
python
train
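A hedged usage sketch for the map_values record above: the table, column and selection values are made up, `cr` is the cursor OpenUpgrade hands to a post-migration script, and get_legacy_name is the companion helper usually used to locate the renamed legacy column.

from openupgradelib import openupgrade

def migrate(cr, version):
    # Hypothetical example: remap legacy task states onto the new selection values via the SQL write path.
    openupgrade.map_values(
        cr,
        openupgrade.get_legacy_name('state'),   # column holding the pre-migration values
        'state',
        mapping=[('open', 'in_progress'), ('cancel', 'cancelled')],
        table='project_task',
    )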
bcbio/bcbio-nextgen
bcbio/upload/irods.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/upload/irods.py#L33-L47
def _upload_dir_icommands_cli(local_dir, irods_dir, config=None, metadata=None): """ Upload directory recursively via the standard icommands CLI. example: irsync -Kvar -R $resource $local_dir i:$irods_dir go to https://docs.irods.org/4.2.0/icommands/user/#irsync for more info """ args = ["-K","-v","-a","-r"] if config: if config.get("resource"): args += ["-R", config.get("resource")] _check_create_collection(irods_dir,isdir=True) cmd = ["irsync"] + args + [local_dir, "i:"+irods_dir] do.run(cmd, "Uploading to iRODS")
[ "def", "_upload_dir_icommands_cli", "(", "local_dir", ",", "irods_dir", ",", "config", "=", "None", ",", "metadata", "=", "None", ")", ":", "args", "=", "[", "\"-K\"", ",", "\"-v\"", ",", "\"-a\"", ",", "\"-r\"", "]", "if", "config", ":", "if", "config", ".", "get", "(", "\"resource\"", ")", ":", "args", "+=", "[", "\"-R\"", ",", "config", ".", "get", "(", "\"resource\"", ")", "]", "_check_create_collection", "(", "irods_dir", ",", "isdir", "=", "True", ")", "cmd", "=", "[", "\"irsync\"", "]", "+", "args", "+", "[", "local_dir", ",", "\"i:\"", "+", "irods_dir", "]", "do", ".", "run", "(", "cmd", ",", "\"Uploading to iRODS\"", ")" ]
Upload directory recursively via the standard icommands CLI. example: irsync -Kvar -R $resource $local_dir i:$irods_dir go to https://docs.irods.org/4.2.0/icommands/user/#irsync for more info
[ "Upload", "directory", "recursively", "via", "the", "standard", "icommands", "CLI", ".", "example", ":", "irsync", "-", "Kvar", "-", "R", "$resource", "$local_dir", "i", ":", "$irods_dir", "go", "to", "https", ":", "//", "docs", ".", "irods", ".", "org", "/", "4", ".", "2", ".", "0", "/", "icommands", "/", "user", "/", "#irsync", "for", "more", "info" ]
python
train
ska-sa/katcp-python
katcp/resource_client.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/resource_client.py#L1408-L1411
def until_not_synced(self, timeout=None): """Return a tornado Future; resolves when any subordinate client is not synced""" yield until_any(*[r.until_not_synced() for r in dict.values(self.children)], timeout=timeout)
[ "def", "until_not_synced", "(", "self", ",", "timeout", "=", "None", ")", ":", "yield", "until_any", "(", "*", "[", "r", ".", "until_not_synced", "(", ")", "for", "r", "in", "dict", ".", "values", "(", "self", ".", "children", ")", "]", ",", "timeout", "=", "timeout", ")" ]
Return a tornado Future; resolves when any subordinate client is not synced
[ "Return", "a", "tornado", "Future", ";", "resolves", "when", "any", "subordinate", "client", "is", "not", "synced" ]
python
train
marcelcaraciolo/foursquare
examples/django/example/djfoursquare/views.py
https://github.com/marcelcaraciolo/foursquare/blob/a8bda33cc2d61e25aa8df72011246269fd98aa13/examples/django/example/djfoursquare/views.py#L22-L30
def unauth(request): """ logout and remove all session data """ if check_key(request): api = get_api(request) request.session.clear() logout(request) return HttpResponseRedirect(reverse('main'))
[ "def", "unauth", "(", "request", ")", ":", "if", "check_key", "(", "request", ")", ":", "api", "=", "get_api", "(", "request", ")", "request", ".", "session", ".", "clear", "(", ")", "logout", "(", "request", ")", "return", "HttpResponseRedirect", "(", "reverse", "(", "'main'", ")", ")" ]
logout and remove all session data
[ "logout", "and", "remove", "all", "session", "data" ]
python
train
dwavesystems/dimod
dimod/reference/composites/higherordercomposites.py
https://github.com/dwavesystems/dimod/blob/beff1b7f86b559d923ac653c1de6d593876d6d38/dimod/reference/composites/higherordercomposites.py#L132-L161
def penalty_satisfaction(response, bqm): """ Creates a penalty satisfaction list Given a sampleSet and a bqm object, will create a binary list informing whether the penalties introduced during degree reduction are satisfied for each sample in sampleSet Args: response (:obj:`.SampleSet`): Samples corresponding to provided bqm bqm (:obj:`.BinaryQuadraticModel`): a bqm object that contains its reduction info. Returns: :obj:`numpy.ndarray`: a binary array of penalty satisfaction information """ record = response.record label_dict = response.variables.index if len(bqm.info['reduction']) == 0: return np.array([1] * len(record.sample)) penalty_vector = np.prod([record.sample[:, label_dict[qi]] * record.sample[:, label_dict[qj]] == record.sample[:, label_dict[valdict['product']]] for (qi, qj), valdict in bqm.info['reduction'].items()], axis=0) return penalty_vector
[ "def", "penalty_satisfaction", "(", "response", ",", "bqm", ")", ":", "record", "=", "response", ".", "record", "label_dict", "=", "response", ".", "variables", ".", "index", "if", "len", "(", "bqm", ".", "info", "[", "'reduction'", "]", ")", "==", "0", ":", "return", "np", ".", "array", "(", "[", "1", "]", "*", "len", "(", "record", ".", "sample", ")", ")", "penalty_vector", "=", "np", ".", "prod", "(", "[", "record", ".", "sample", "[", ":", ",", "label_dict", "[", "qi", "]", "]", "*", "record", ".", "sample", "[", ":", ",", "label_dict", "[", "qj", "]", "]", "==", "record", ".", "sample", "[", ":", ",", "label_dict", "[", "valdict", "[", "'product'", "]", "]", "]", "for", "(", "qi", ",", "qj", ")", ",", "valdict", "in", "bqm", ".", "info", "[", "'reduction'", "]", ".", "items", "(", ")", "]", ",", "axis", "=", "0", ")", "return", "penalty_vector" ]
Creates a penalty satisfaction list Given a sampleSet and a bqm object, will create a binary list informing whether the penalties introduced during degree reduction are satisfied for each sample in sampleSet Args: response (:obj:`.SampleSet`): Samples corresponding to provided bqm bqm (:obj:`.BinaryQuadraticModel`): a bqm object that contains its reduction info. Returns: :obj:`numpy.ndarray`: a binary array of penalty satisfaction information
[ "Creates", "a", "penalty", "satisfaction", "list" ]
python
train
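A hedged sketch of how the penalty_satisfaction record above is typically fed: in the dimod versions this module targets, dimod.make_quadratic records its degree-reduction bookkeeping in bqm.info['reduction'], which is exactly what the function reads. The polynomial is made up.

import dimod

# Reduce a cubic interaction to a quadratic model, keeping the reduction metadata.
poly = {('a',): -1.0, ('a', 'b', 'c'): 1.0}
bqm = dimod.make_quadratic(poly, strength=5.0, vartype=dimod.SPIN)

# Enumerate samples, then flag per sample whether every auxiliary product constraint holds.
sampleset = dimod.ExactSolver().sample(bqm)
satisfied = penalty_satisfaction(sampleset, bqm)   # function from the record above
print(satisfied)                                   # array of 0/1 values, one per sample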
pycontribs/pyrax
pyrax/clouddns.py
https://github.com/pycontribs/pyrax/blob/9ddfd5064b3a292d7337906f3b2d5dce95b50b99/pyrax/clouddns.py#L676-L698
def update_domain(self, domain, emailAddress=None, ttl=None, comment=None): """ Provides a way to modify the following attributes of a domain record: - email address - ttl setting - comment """ if not any((emailAddress, ttl, comment)): raise exc.MissingDNSSettings( "No settings provided to update_domain().") uri = "/domains/%s" % utils.get_id(domain) body = {"comment": comment, "ttl": ttl, "emailAddress": emailAddress, } none_keys = [key for key, val in body.items() if val is None] for none_key in none_keys: body.pop(none_key) resp, resp_body = self._async_call(uri, method="PUT", body=body, error_class=exc.DomainUpdateFailed, has_response=False) return resp_body
[ "def", "update_domain", "(", "self", ",", "domain", ",", "emailAddress", "=", "None", ",", "ttl", "=", "None", ",", "comment", "=", "None", ")", ":", "if", "not", "any", "(", "(", "emailAddress", ",", "ttl", ",", "comment", ")", ")", ":", "raise", "exc", ".", "MissingDNSSettings", "(", "\"No settings provided to update_domain().\"", ")", "uri", "=", "\"/domains/%s\"", "%", "utils", ".", "get_id", "(", "domain", ")", "body", "=", "{", "\"comment\"", ":", "comment", ",", "\"ttl\"", ":", "ttl", ",", "\"emailAddress\"", ":", "emailAddress", ",", "}", "none_keys", "=", "[", "key", "for", "key", ",", "val", "in", "body", ".", "items", "(", ")", "if", "val", "is", "None", "]", "for", "none_key", "in", "none_keys", ":", "body", ".", "pop", "(", "none_key", ")", "resp", ",", "resp_body", "=", "self", ".", "_async_call", "(", "uri", ",", "method", "=", "\"PUT\"", ",", "body", "=", "body", ",", "error_class", "=", "exc", ".", "DomainUpdateFailed", ",", "has_response", "=", "False", ")", "return", "resp_body" ]
Provides a way to modify the following attributes of a domain record: - email address - ttl setting - comment
[ "Provides", "a", "way", "to", "modify", "the", "following", "attributes", "of", "a", "domain", "record", ":", "-", "email", "address", "-", "ttl", "setting", "-", "comment" ]
python
train
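A hedged usage sketch for the update_domain record above. It assumes pyrax has already been authenticated so that pyrax.cloud_dns is available; the domain argument may be a domain object or a bare ID (utils.get_id accepts either), and all values shown are made up.

import pyrax

pyrax.set_credential_file("~/.pyrax.cfg")   # or pyrax.set_credentials(username, api_key)
dns = pyrax.cloud_dns
dns.update_domain("3500000", emailAddress="hostmaster@example.com", ttl=600,
                  comment="lowered TTL ahead of a migration")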
fracpete/python-weka-wrapper3
python/weka/core/stemmers.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/core/stemmers.py#L43-L52
def stem(self, s): """ Performs stemming on the string. :param s: the string to stem :type s: str :return: the stemmed string :rtype: str """ return javabridge.get_env().get_string(self.__stem(javabridge.get_env().new_string_utf(s)))
[ "def", "stem", "(", "self", ",", "s", ")", ":", "return", "javabridge", ".", "get_env", "(", ")", ".", "get_string", "(", "self", ".", "__stem", "(", "javabridge", ".", "get_env", "(", ")", ".", "new_string_utf", "(", "s", ")", ")", ")" ]
Performs stemming on the string. :param s: the string to stem :type s: str :return: the stemmed string :rtype: str
[ "Performs", "stemming", "on", "the", "string", "." ]
python
train
yougov/mongo-connector
mongo_connector/namespace_config.py
https://github.com/yougov/mongo-connector/blob/557cafd4b54c848cd54ef28a258391a154650cb4/mongo_connector/namespace_config.py#L560-L567
def namespace_to_regex(namespace): """Create a RegexObject from a wildcard namespace.""" db_name, coll_name = namespace.split(".", 1) # A database name cannot contain a '.' character db_regex = re.escape(db_name).replace(r"\*", "([^.]*)") # But a collection name can. coll_regex = re.escape(coll_name).replace(r"\*", "(.*)") return re.compile(r"\A" + db_regex + r"\." + coll_regex + r"\Z")
[ "def", "namespace_to_regex", "(", "namespace", ")", ":", "db_name", ",", "coll_name", "=", "namespace", ".", "split", "(", "\".\"", ",", "1", ")", "# A database name cannot contain a '.' character", "db_regex", "=", "re", ".", "escape", "(", "db_name", ")", ".", "replace", "(", "r\"\\*\"", ",", "\"([^.]*)\"", ")", "# But a collection name can.", "coll_regex", "=", "re", ".", "escape", "(", "coll_name", ")", ".", "replace", "(", "r\"\\*\"", ",", "\"(.*)\"", ")", "return", "re", ".", "compile", "(", "r\"\\A\"", "+", "db_regex", "+", "r\"\\.\"", "+", "coll_regex", "+", "r\"\\Z\"", ")" ]
Create a RegexObject from a wildcard namespace.
[ "Create", "a", "RegexObject", "from", "a", "wildcard", "namespace", "." ]
python
train
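A quick illustration of the wildcard behaviour described in the namespace_to_regex record above (the namespaces are made up). It shows the asymmetry the comments point out: the database wildcard cannot cross a dot, while the collection wildcard can.

regex = namespace_to_regex("db_*.coll_*")
print(bool(regex.match("db_1.coll_a.b")))    # True: the collection wildcard may span dots
print(bool(regex.match("db_1.sub.coll_a")))  # False: the database wildcard stops at the first dot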
pvlib/pvlib-python
pvlib/iotools/crn.py
https://github.com/pvlib/pvlib-python/blob/2e844a595b820b43d1170269781fa66bd0ccc8a3/pvlib/iotools/crn.py#L43-L106
def read_crn(filename): """ Read NOAA USCRN [1]_ [2]_ fixed-width file into pandas dataframe. Parameters ---------- filename: str filepath or url to read for the fixed-width file. Returns ------- data: Dataframe A dataframe with DatetimeIndex and all of the variables in the file. Notes ----- CRN files contain 5 minute averages labeled by the interval ending time. Here, missing data is flagged as NaN, rather than the lowest possible integer for a field (e.g. -999 or -99). Air temperature in deg C. Wind speed in m/s at a height of 1.5 m above ground level. Variables corresponding to standard pvlib variables are renamed, e.g. `SOLAR_RADIATION` becomes `ghi`. See the `pvlib.iotools.crn.VARIABLE_MAP` dict for the complete mapping. References ---------- .. [1] U.S. Climate Reference Network `https://www.ncdc.noaa.gov/crn/qcdatasets.html <https://www.ncdc.noaa.gov/crn/qcdatasets.html>`_ .. [2] Diamond, H. J. et. al., 2013: U.S. Climate Reference Network after one decade of operations: status and assessment. Bull. Amer. Meteor. Soc., 94, 489-498. :doi:`10.1175/BAMS-D-12-00170.1` """ # read in data data = pd.read_fwf(filename, header=None, names=HEADERS.split(' '), widths=WIDTHS) # loop here because dtype kwarg not supported in read_fwf until 0.20 for (col, _dtype) in zip(data.columns, DTYPES): data[col] = data[col].astype(_dtype) # set index # UTC_TIME does not have leading 0s, so must zfill(4) to comply # with %H%M format dts = data[['UTC_DATE', 'UTC_TIME']].astype(str) dtindex = pd.to_datetime(dts['UTC_DATE'] + dts['UTC_TIME'].str.zfill(4), format='%Y%m%d%H%M', utc=True) data = data.set_index(dtindex) try: # to_datetime(utc=True) does not work in older versions of pandas data = data.tz_localize('UTC') except TypeError: pass # set nans for val in [-99, -999, -9999]: data = data.where(data != val, np.nan) data = data.rename(columns=VARIABLE_MAP) return data
[ "def", "read_crn", "(", "filename", ")", ":", "# read in data", "data", "=", "pd", ".", "read_fwf", "(", "filename", ",", "header", "=", "None", ",", "names", "=", "HEADERS", ".", "split", "(", "' '", ")", ",", "widths", "=", "WIDTHS", ")", "# loop here because dtype kwarg not supported in read_fwf until 0.20", "for", "(", "col", ",", "_dtype", ")", "in", "zip", "(", "data", ".", "columns", ",", "DTYPES", ")", ":", "data", "[", "col", "]", "=", "data", "[", "col", "]", ".", "astype", "(", "_dtype", ")", "# set index", "# UTC_TIME does not have leading 0s, so must zfill(4) to comply", "# with %H%M format", "dts", "=", "data", "[", "[", "'UTC_DATE'", ",", "'UTC_TIME'", "]", "]", ".", "astype", "(", "str", ")", "dtindex", "=", "pd", ".", "to_datetime", "(", "dts", "[", "'UTC_DATE'", "]", "+", "dts", "[", "'UTC_TIME'", "]", ".", "str", ".", "zfill", "(", "4", ")", ",", "format", "=", "'%Y%m%d%H%M'", ",", "utc", "=", "True", ")", "data", "=", "data", ".", "set_index", "(", "dtindex", ")", "try", ":", "# to_datetime(utc=True) does not work in older versions of pandas", "data", "=", "data", ".", "tz_localize", "(", "'UTC'", ")", "except", "TypeError", ":", "pass", "# set nans", "for", "val", "in", "[", "-", "99", ",", "-", "999", ",", "-", "9999", "]", ":", "data", "=", "data", ".", "where", "(", "data", "!=", "val", ",", "np", ".", "nan", ")", "data", "=", "data", ".", "rename", "(", "columns", "=", "VARIABLE_MAP", ")", "return", "data" ]
Read NOAA USCRN [1]_ [2]_ fixed-width file into pandas dataframe. Parameters ---------- filename: str filepath or url to read for the fixed-width file. Returns ------- data: Dataframe A dataframe with DatetimeIndex and all of the variables in the file. Notes ----- CRN files contain 5 minute averages labeled by the interval ending time. Here, missing data is flagged as NaN, rather than the lowest possible integer for a field (e.g. -999 or -99). Air temperature in deg C. Wind speed in m/s at a height of 1.5 m above ground level. Variables corresponding to standard pvlib variables are renamed, e.g. `SOLAR_RADIATION` becomes `ghi`. See the `pvlib.iotools.crn.VARIABLE_MAP` dict for the complete mapping. References ---------- .. [1] U.S. Climate Reference Network `https://www.ncdc.noaa.gov/crn/qcdatasets.html <https://www.ncdc.noaa.gov/crn/qcdatasets.html>`_ .. [2] Diamond, H. J. et. al., 2013: U.S. Climate Reference Network after one decade of operations: status and assessment. Bull. Amer. Meteor. Soc., 94, 489-498. :doi:`10.1175/BAMS-D-12-00170.1`
[ "Read", "NOAA", "USCRN", "[", "1", "]", "_", "[", "2", "]", "_", "fixed", "-", "width", "file", "into", "pandas", "dataframe", "." ]
python
train
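A hedged usage sketch for the read_crn record above: the file name is made up, and the selected columns assume that the VARIABLE_MAP renaming mentioned in the docstring (e.g. SOLAR_RADIATION to ghi) also covers air temperature and wind speed in this pvlib version.

from pvlib.iotools import read_crn

df = read_crn("CRNS0101-05-2019-AK_Fairbanks_11_NE.txt")   # local USCRN sub-hourly file
print(df.index.tz)                                          # index is localized to UTC
print(df[["ghi", "temp_air", "wind_speed"]].head())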
Koed00/django-q
django_q/tasks.py
https://github.com/Koed00/django-q/blob/c84fd11a67c9a47d821786dfcdc189bb258c6f54/django_q/tasks.py#L532-L540
def run(self): """ Start queueing the chain to the worker cluster :return: the chain's group id """ self.group = async_chain(chain=self.chain[:], group=self.group, cached=self.cached, sync=self.sync, broker=self.broker) self.started = True return self.group
[ "def", "run", "(", "self", ")", ":", "self", ".", "group", "=", "async_chain", "(", "chain", "=", "self", ".", "chain", "[", ":", "]", ",", "group", "=", "self", ".", "group", ",", "cached", "=", "self", ".", "cached", ",", "sync", "=", "self", ".", "sync", ",", "broker", "=", "self", ".", "broker", ")", "self", ".", "started", "=", "True", "return", "self", ".", "group" ]
Start queueing the chain to the worker cluster :return: the chain's group id
[ "Start", "queueing", "the", "chain", "to", "the", "worker", "cluster", ":", "return", ":", "the", "chain", "s", "group", "id" ]
python
train
codelv/enaml-native
src/enamlnative/android/android_coordinator_layout.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/android/android_coordinator_layout.py#L35-L40
def create_widget(self): """ Create the underlying widget. """ d = self.declaration self.widget = CoordinatorLayout(self.get_context(), None, d.style)
[ "def", "create_widget", "(", "self", ")", ":", "d", "=", "self", ".", "declaration", "self", ".", "widget", "=", "CoordinatorLayout", "(", "self", ".", "get_context", "(", ")", ",", "None", ",", "d", ".", "style", ")" ]
Create the underlying widget.
[ "Create", "the", "underlying", "widget", "." ]
python
train
googlefonts/glyphsLib
Lib/glyphsLib/affine/__init__.py
https://github.com/googlefonts/glyphsLib/blob/9c12dc70c8d13f08d92b824e6710f6e3bb5037bb/Lib/glyphsLib/affine/__init__.py#L214-L243
def rotation(cls, angle, pivot=None): """Create a rotation transform at the specified angle, optionally about the specified pivot point. :param angle: Rotation angle in degrees :type angle: float :param pivot: Point to rotate about, if omitted the rotation is about the origin. :type pivot: sequence :rtype: Affine """ ca, sa = cos_sin_deg(angle) if pivot is None: return tuple.__new__(cls, (ca, sa, 0.0, -sa, ca, 0.0, 0.0, 0.0, 1.0)) else: px, py = pivot return tuple.__new__( cls, ( ca, sa, px - px * ca + py * sa, -sa, ca, py - px * sa - py * ca, 0.0, 0.0, 1.0, ), )
[ "def", "rotation", "(", "cls", ",", "angle", ",", "pivot", "=", "None", ")", ":", "ca", ",", "sa", "=", "cos_sin_deg", "(", "angle", ")", "if", "pivot", "is", "None", ":", "return", "tuple", ".", "__new__", "(", "cls", ",", "(", "ca", ",", "sa", ",", "0.0", ",", "-", "sa", ",", "ca", ",", "0.0", ",", "0.0", ",", "0.0", ",", "1.0", ")", ")", "else", ":", "px", ",", "py", "=", "pivot", "return", "tuple", ".", "__new__", "(", "cls", ",", "(", "ca", ",", "sa", ",", "px", "-", "px", "*", "ca", "+", "py", "*", "sa", ",", "-", "sa", ",", "ca", ",", "py", "-", "px", "*", "sa", "-", "py", "*", "ca", ",", "0.0", ",", "0.0", ",", "1.0", ",", ")", ",", ")" ]
Create a rotation transform at the specified angle, optionally about the specified pivot point. :param angle: Rotation angle in degrees :type angle: float :param pivot: Point to rotate about, if omitted the rotation is about the origin. :type pivot: sequence :rtype: Affine
[ "Create", "a", "rotation", "transform", "at", "the", "specified", "angle", "optionally", "about", "the", "specified", "pivot", "point", "." ]
python
train
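A small illustration for the Affine.rotation record above, relying only on what the shown code computes (and assuming the usual cos_sin_deg helper that returns exact values at right angles, so cos_sin_deg(90.0) is (0.0, 1.0)).

r = Affine.rotation(90.0)
print(tuple(r))          # (0.0, 1.0, 0.0, -1.0, 0.0, 0.0, 0.0, 0.0, 1.0)

r_pivot = Affine.rotation(90.0, pivot=(2.0, 3.0))
print(tuple(r_pivot))    # same linear part plus the translation terms (5.0, 1.0) for this pivot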
saltstack/salt
salt/modules/macdefaults.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/macdefaults.py#L93-L116
def delete(domain, key, user=None): ''' Delete a default from the system CLI Example: .. code-block:: bash salt '*' macdefaults.delete com.apple.CrashReporter DialogType salt '*' macdefaults.delete NSGlobalDomain ApplePersistence domain The name of the domain to delete from key The key of the given domain to delete user The user to delete the defaults with ''' cmd = 'defaults delete "{0}" "{1}"'.format(domain, key) return __salt__['cmd.run_all'](cmd, runas=user, output_loglevel='debug')
[ "def", "delete", "(", "domain", ",", "key", ",", "user", "=", "None", ")", ":", "cmd", "=", "'defaults delete \"{0}\" \"{1}\"'", ".", "format", "(", "domain", ",", "key", ")", "return", "__salt__", "[", "'cmd.run_all'", "]", "(", "cmd", ",", "runas", "=", "user", ",", "output_loglevel", "=", "'debug'", ")" ]
Delete a default from the system CLI Example: .. code-block:: bash salt '*' macdefaults.delete com.apple.CrashReporter DialogType salt '*' macdefaults.delete NSGlobalDomain ApplePersistence domain The name of the domain to delete from key The key of the given domain to delete user The user to delete the defaults with
[ "Delete", "a", "default", "from", "the", "system" ]
python
train
tell-k/django-modelsdoc
modelsdoc/templatetags/modelsdoc_tags.py
https://github.com/tell-k/django-modelsdoc/blob/c9d336e76251feb142347b3a41365430d3365436/modelsdoc/templatetags/modelsdoc_tags.py#L31-L54
def emptylineless(parser, token): """ Removes empty line. Example usage:: {% emptylineless %} test1 test2 test3 {% endemptylineless %} This example would return this HTML:: test1 test2 test3 """ nodelist = parser.parse(('endemptylineless',)) parser.delete_first_token() return EmptylinelessNode(nodelist)
[ "def", "emptylineless", "(", "parser", ",", "token", ")", ":", "nodelist", "=", "parser", ".", "parse", "(", "(", "'endemptylineless'", ",", ")", ")", "parser", ".", "delete_first_token", "(", ")", "return", "EmptylinelessNode", "(", "nodelist", ")" ]
Removes empty line. Example usage:: {% emptylineless %} test1 test2 test3 {% endemptylineless %} This example would return this HTML:: test1 test2 test3
[ "Removes", "empty", "line", "." ]
python
train
luckydonald/pytgbot
pytgbot/api_types/sendable/passport.py
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/pytgbot/api_types/sendable/passport.py#L91-L104
def to_array(self): """ Serializes this PassportElementErrorDataField to a dictionary. :return: dictionary representation of this object. :rtype: dict """ array = super(PassportElementErrorDataField, self).to_array() array['source'] = u(self.source) # py2: type unicode, py3: type str array['type'] = u(self.type) # py2: type unicode, py3: type str array['field_name'] = u(self.field_name) # py2: type unicode, py3: type str array['data_hash'] = u(self.data_hash) # py2: type unicode, py3: type str array['message'] = u(self.message) # py2: type unicode, py3: type str return array
[ "def", "to_array", "(", "self", ")", ":", "array", "=", "super", "(", "PassportElementErrorDataField", ",", "self", ")", ".", "to_array", "(", ")", "array", "[", "'source'", "]", "=", "u", "(", "self", ".", "source", ")", "# py2: type unicode, py3: type str", "array", "[", "'type'", "]", "=", "u", "(", "self", ".", "type", ")", "# py2: type unicode, py3: type str", "array", "[", "'field_name'", "]", "=", "u", "(", "self", ".", "field_name", ")", "# py2: type unicode, py3: type str", "array", "[", "'data_hash'", "]", "=", "u", "(", "self", ".", "data_hash", ")", "# py2: type unicode, py3: type str", "array", "[", "'message'", "]", "=", "u", "(", "self", ".", "message", ")", "# py2: type unicode, py3: type str", "return", "array" ]
Serializes this PassportElementErrorDataField to a dictionary. :return: dictionary representation of this object. :rtype: dict
[ "Serializes", "this", "PassportElementErrorDataField", "to", "a", "dictionary", "." ]
python
train
fhcrc/seqmagick
seqmagick/transform.py
https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/transform.py#L380-L389
def _update_id(record, new_id): """ Update a record id to new_id, also modifying the ID in record.description """ old_id = record.id record.id = new_id # At least for FASTA, record ID starts the description record.description = re.sub('^' + re.escape(old_id), new_id, record.description) return record
[ "def", "_update_id", "(", "record", ",", "new_id", ")", ":", "old_id", "=", "record", ".", "id", "record", ".", "id", "=", "new_id", "# At least for FASTA, record ID starts the description", "record", ".", "description", "=", "re", ".", "sub", "(", "'^'", "+", "re", ".", "escape", "(", "old_id", ")", ",", "new_id", ",", "record", ".", "description", ")", "return", "record" ]
Update a record id to new_id, also modifying the ID in record.description
[ "Update", "a", "record", "id", "to", "new_id", "also", "modifying", "the", "ID", "in", "record", ".", "description" ]
python
train
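A minimal illustration of the _update_id record above using a Biopython SeqRecord (the identifiers are made up). The point is that the leading copy of the old id inside record.description is rewritten along with record.id.

from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord

rec = SeqRecord(Seq("ACGT"), id="seq1", description="seq1 sample sequence")
rec = _update_id(rec, "renamed1")
print(rec.id)            # renamed1
print(rec.description)   # renamed1 sample sequence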
spacetelescope/drizzlepac
drizzlepac/wcs_functions.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/wcs_functions.py#L580-L708
def mergeWCS(default_wcs, user_pars): """ Merges the user specified WCS values given as dictionary derived from the input configObj object with the output PyWCS object computed using distortion.output_wcs(). The user_pars dictionary needs to have the following set of keys:: user_pars = {'ra':None,'dec':None,'scale':None,'rot':None, 'outnx':None,'outny':None,'crpix1':None,'crpix2':None} """ # # Start by making a copy of the input WCS... # outwcs = default_wcs.deepcopy() # If there are no user set parameters, just return a copy of # the original WCS: if all([upar is None for upar in user_pars.values()]): return outwcs if _check_custom_WCS_pars('ra', 'dec', user_pars): _crval = (user_pars['ra'], user_pars['dec']) else: _crval = None if ('scale' in user_pars and user_pars['scale'] is not None and not _check_close_scale(user_pars['scale'], outwcs.pscale)): _scale = user_pars['scale'] _ratio = outwcs.pscale / _scale else: _ratio = None _scale = None if ('rot' not in user_pars) or user_pars['rot'] is None: _delta_rot = None else: _delta_rot = outwcs.orientat - user_pars['rot'] if _delta_rot == 0.0: _delta_rot = None if _check_custom_WCS_pars('crpix1', 'crpix2', user_pars): _crpix = (user_pars['crpix1'], user_pars['crpix2']) else: _crpix = None shape = None if _check_custom_WCS_pars('outnx', 'outny', user_pars): shape = ( int(_py2round(user_pars['outnx'])), int(_py2round(user_pars['outny'])) ) if shape[0] < 1 or shape[1] < 1: raise ValueError("Custom WCS output image size smaller than 1") if _crpix is None: # make sure new image is centered on the CRPIX of the old WCS: _crpix = ((shape[0] + 1.0) / 2.0, (shape[1] + 1.0) / 2.0) else: naxis1, naxis2 = outwcs.pixel_shape if _delta_rot is None: # no rotation is involved if _ratio is not None: # apply scale only: # compute output image shape: shape = ( max(1, int(_py2round(_ratio * naxis1))), max(1, int(_py2round(_ratio * naxis2))) ) # update CRPIX: if _crpix is None: _crpix = 1.0 + _ratio * (outwcs.wcs.crpix - 1.0) else: _corners = np.array( [[0.5, 0.5], [naxis1 + 0.5, 0.5], [0.5, naxis2 + 0.5], [naxis1 + 0.5, naxis2 + 0.5]] ) - outwcs.wcs.crpix if _ratio is not None: # scale corners: _corners *= _ratio # rotate corners and find new image range: ((_xmin, _xmax), (_ymin, _ymax)) = util.getRotatedSize(_corners, _delta_rot) # compute output image shape: # NOTE: _py2round may be replaced with np.ceil shape = ( max(1, int(_py2round(_xmax - _xmin))), max(1, int(_py2round(_ymax - _ymin))) ) if _crpix is None: # update CRPIX: _crpix = (-_xmin + 0.5, -_ymin + 0.5) # Set up the new WCS based on values from old one: if _ratio is not None: # Update plate scale outwcs.wcs.cd = outwcs.wcs.cd / _ratio outwcs.pscale = _scale # update orientation if _delta_rot is not None: outwcs.wcs.cd = _rotateCD(outwcs.wcs.cd, _delta_rot) outwcs.orientat -= _delta_rot if shape is not None: # update size: outwcs.pixel_shape = shape # update reference position if _crpix is not None: outwcs.wcs.crpix = np.array(_crpix, dtype=np.float64) if _crval is not None: outwcs.wcs.crval = np.array(_crval, dtype=np.float64) return outwcs
[ "def", "mergeWCS", "(", "default_wcs", ",", "user_pars", ")", ":", "#", "# Start by making a copy of the input WCS...", "#", "outwcs", "=", "default_wcs", ".", "deepcopy", "(", ")", "# If there are no user set parameters, just return a copy of", "# the original WCS:", "if", "all", "(", "[", "upar", "is", "None", "for", "upar", "in", "user_pars", ".", "values", "(", ")", "]", ")", ":", "return", "outwcs", "if", "_check_custom_WCS_pars", "(", "'ra'", ",", "'dec'", ",", "user_pars", ")", ":", "_crval", "=", "(", "user_pars", "[", "'ra'", "]", ",", "user_pars", "[", "'dec'", "]", ")", "else", ":", "_crval", "=", "None", "if", "(", "'scale'", "in", "user_pars", "and", "user_pars", "[", "'scale'", "]", "is", "not", "None", "and", "not", "_check_close_scale", "(", "user_pars", "[", "'scale'", "]", ",", "outwcs", ".", "pscale", ")", ")", ":", "_scale", "=", "user_pars", "[", "'scale'", "]", "_ratio", "=", "outwcs", ".", "pscale", "/", "_scale", "else", ":", "_ratio", "=", "None", "_scale", "=", "None", "if", "(", "'rot'", "not", "in", "user_pars", ")", "or", "user_pars", "[", "'rot'", "]", "is", "None", ":", "_delta_rot", "=", "None", "else", ":", "_delta_rot", "=", "outwcs", ".", "orientat", "-", "user_pars", "[", "'rot'", "]", "if", "_delta_rot", "==", "0.0", ":", "_delta_rot", "=", "None", "if", "_check_custom_WCS_pars", "(", "'crpix1'", ",", "'crpix2'", ",", "user_pars", ")", ":", "_crpix", "=", "(", "user_pars", "[", "'crpix1'", "]", ",", "user_pars", "[", "'crpix2'", "]", ")", "else", ":", "_crpix", "=", "None", "shape", "=", "None", "if", "_check_custom_WCS_pars", "(", "'outnx'", ",", "'outny'", ",", "user_pars", ")", ":", "shape", "=", "(", "int", "(", "_py2round", "(", "user_pars", "[", "'outnx'", "]", ")", ")", ",", "int", "(", "_py2round", "(", "user_pars", "[", "'outny'", "]", ")", ")", ")", "if", "shape", "[", "0", "]", "<", "1", "or", "shape", "[", "1", "]", "<", "1", ":", "raise", "ValueError", "(", "\"Custom WCS output image size smaller than 1\"", ")", "if", "_crpix", "is", "None", ":", "# make sure new image is centered on the CRPIX of the old WCS:", "_crpix", "=", "(", "(", "shape", "[", "0", "]", "+", "1.0", ")", "/", "2.0", ",", "(", "shape", "[", "1", "]", "+", "1.0", ")", "/", "2.0", ")", "else", ":", "naxis1", ",", "naxis2", "=", "outwcs", ".", "pixel_shape", "if", "_delta_rot", "is", "None", ":", "# no rotation is involved", "if", "_ratio", "is", "not", "None", ":", "# apply scale only:", "# compute output image shape:", "shape", "=", "(", "max", "(", "1", ",", "int", "(", "_py2round", "(", "_ratio", "*", "naxis1", ")", ")", ")", ",", "max", "(", "1", ",", "int", "(", "_py2round", "(", "_ratio", "*", "naxis2", ")", ")", ")", ")", "# update CRPIX:", "if", "_crpix", "is", "None", ":", "_crpix", "=", "1.0", "+", "_ratio", "*", "(", "outwcs", ".", "wcs", ".", "crpix", "-", "1.0", ")", "else", ":", "_corners", "=", "np", ".", "array", "(", "[", "[", "0.5", ",", "0.5", "]", ",", "[", "naxis1", "+", "0.5", ",", "0.5", "]", ",", "[", "0.5", ",", "naxis2", "+", "0.5", "]", ",", "[", "naxis1", "+", "0.5", ",", "naxis2", "+", "0.5", "]", "]", ")", "-", "outwcs", ".", "wcs", ".", "crpix", "if", "_ratio", "is", "not", "None", ":", "# scale corners:", "_corners", "*=", "_ratio", "# rotate corners and find new image range:", "(", "(", "_xmin", ",", "_xmax", ")", ",", "(", "_ymin", ",", "_ymax", ")", ")", "=", "util", ".", "getRotatedSize", "(", "_corners", ",", "_delta_rot", ")", "# compute output image shape:", "# NOTE: _py2round may be replaced with np.ceil", "shape", "=", "(", 
"max", "(", "1", ",", "int", "(", "_py2round", "(", "_xmax", "-", "_xmin", ")", ")", ")", ",", "max", "(", "1", ",", "int", "(", "_py2round", "(", "_ymax", "-", "_ymin", ")", ")", ")", ")", "if", "_crpix", "is", "None", ":", "# update CRPIX:", "_crpix", "=", "(", "-", "_xmin", "+", "0.5", ",", "-", "_ymin", "+", "0.5", ")", "# Set up the new WCS based on values from old one:", "if", "_ratio", "is", "not", "None", ":", "# Update plate scale", "outwcs", ".", "wcs", ".", "cd", "=", "outwcs", ".", "wcs", ".", "cd", "/", "_ratio", "outwcs", ".", "pscale", "=", "_scale", "# update orientation", "if", "_delta_rot", "is", "not", "None", ":", "outwcs", ".", "wcs", ".", "cd", "=", "_rotateCD", "(", "outwcs", ".", "wcs", ".", "cd", ",", "_delta_rot", ")", "outwcs", ".", "orientat", "-=", "_delta_rot", "if", "shape", "is", "not", "None", ":", "# update size:", "outwcs", ".", "pixel_shape", "=", "shape", "# update reference position", "if", "_crpix", "is", "not", "None", ":", "outwcs", ".", "wcs", ".", "crpix", "=", "np", ".", "array", "(", "_crpix", ",", "dtype", "=", "np", ".", "float64", ")", "if", "_crval", "is", "not", "None", ":", "outwcs", ".", "wcs", ".", "crval", "=", "np", ".", "array", "(", "_crval", ",", "dtype", "=", "np", ".", "float64", ")", "return", "outwcs" ]
Merges the user specified WCS values given as dictionary derived from the input configObj object with the output PyWCS object computed using distortion.output_wcs(). The user_pars dictionary needs to have the following set of keys:: user_pars = {'ra':None,'dec':None,'scale':None,'rot':None, 'outnx':None,'outny':None,'crpix1':None,'crpix2':None}
[ "Merges", "the", "user", "specified", "WCS", "values", "given", "as", "dictionary", "derived", "from", "the", "input", "configObj", "object", "with", "the", "output", "PyWCS", "object", "computed", "using", "distortion", ".", "output_wcs", "()", "." ]
python
train
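A hedged sketch of the expected call for the mergeWCS record above; it is not runnable on its own because default_wcs must be an HSTWCS-like object (carrying pscale, orientat and pixel_shape) built elsewhere by drizzlepac/stwcs. The values are made up; unset keys are left at None, matching the user_pars layout quoted in the docstring, so the corresponding defaults survive.

user_pars = {
    'ra': None, 'dec': None,         # keep the default reference position
    'scale': 0.05,                   # target plate scale, same units as default_wcs.pscale
    'rot': 0.0,                      # target orientation angle
    'outnx': None, 'outny': None,    # let the routine derive the output size
    'crpix1': None, 'crpix2': None,  # and recentre CRPIX accordingly
}
out_wcs = mergeWCS(default_wcs, user_pars)   # default_wcs: pre-built HSTWCS-like object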
siznax/wptools
wptools/core.py
https://github.com/siznax/wptools/blob/100eaea585c34aa9ad87a9eda8982bb4898f6ec9/wptools/core.py#L107-L118
def _continue_params(self): """ Returns query string fragment continue parameters """ if not self.data.get('continue'): return params = [] for item in self.data['continue']: params.append("&%s=%s" % (item, self.data['continue'][item])) return ''.join(params)
[ "def", "_continue_params", "(", "self", ")", ":", "if", "not", "self", ".", "data", ".", "get", "(", "'continue'", ")", ":", "return", "params", "=", "[", "]", "for", "item", "in", "self", ".", "data", "[", "'continue'", "]", ":", "params", ".", "append", "(", "\"&%s=%s\"", "%", "(", "item", ",", "self", ".", "data", "[", "'continue'", "]", "[", "item", "]", ")", ")", "return", "''", ".", "join", "(", "params", ")" ]
Returns query string fragment continue parameters
[ "Returns", "query", "string", "fragment", "continue", "parameters" ]
python
train
LogicalDash/LiSE
allegedb/allegedb/query.py
https://github.com/LogicalDash/LiSE/blob/fe6fd4f0a7c1780e065f4c9babb9bc443af6bb84/allegedb/allegedb/query.py#L599-L605
def commit(self): """Commit the transaction""" self.flush() if hasattr(self, 'transaction') and self.transaction.is_active: self.transaction.commit() elif hasattr(self, 'connection'): self.connection.commit()
[ "def", "commit", "(", "self", ")", ":", "self", ".", "flush", "(", ")", "if", "hasattr", "(", "self", ",", "'transaction'", ")", "and", "self", ".", "transaction", ".", "is_active", ":", "self", ".", "transaction", ".", "commit", "(", ")", "elif", "hasattr", "(", "self", ",", "'connection'", ")", ":", "self", ".", "connection", ".", "commit", "(", ")" ]
Commit the transaction
[ "Commit", "the", "transaction" ]
python
train
anti1869/sunhead
src/sunhead/conf.py
https://github.com/anti1869/sunhead/blob/5117ec797a38eb82d955241d20547d125efe80f3/src/sunhead/conf.py#L120-L137
def discover_config_path(self, config_filename: str) -> str: """ Search for config file in a number of places. If there is no config file found, will return None. :param config_filename: Config file name or custom path to filename with config. :return: Path to the discovered config file or None. """ if config_filename and os.path.isfile(config_filename): return config_filename for place in _common_places: config_path = os.path.join(place, config_filename) if os.path.isfile(config_path): return config_path return
[ "def", "discover_config_path", "(", "self", ",", "config_filename", ":", "str", ")", "->", "str", ":", "if", "config_filename", "and", "os", ".", "path", ".", "isfile", "(", "config_filename", ")", ":", "return", "config_filename", "for", "place", "in", "_common_places", ":", "config_path", "=", "os", ".", "path", ".", "join", "(", "place", ",", "config_filename", ")", "if", "os", ".", "path", ".", "isfile", "(", "config_path", ")", ":", "return", "config_path", "return" ]
Search for config file in a number of places. If there is no config file found, will return None. :param config_filename: Config file name or custom path to filename with config. :return: Path to the discovered config file or None.
[ "Search", "for", "config", "file", "in", "a", "number", "of", "places", ".", "If", "there", "is", "no", "config", "file", "found", "will", "return", "None", "." ]
python
train
jason-weirather/py-seq-tools
seqtools/format/sam/__init__.py
https://github.com/jason-weirather/py-seq-tools/blob/f642c2c73ffef2acc83656a78059a476fc734ca1/seqtools/format/sam/__init__.py#L145-L163
def target_sequence_length(self): """ Get the length of the target sequence. length of the entire chromosome throws an error if there is no information available :return: length :rtype: int """ if not self.is_aligned(): raise ValueError("no length for reference when read is not not aligned") if self.entries.tlen: return self.entries.tlen #simplest is if tlen is set if self.header: if self.entries.rname in self.header.sequence_lengths: return self.header.sequence_lengths[self.entries.rname] elif self.reference: return len(self.reference[self.entries.rname]) else: raise ValueError("some reference needs to be set to go from psl to bam\n") raise ValueError("No reference available")
[ "def", "target_sequence_length", "(", "self", ")", ":", "if", "not", "self", ".", "is_aligned", "(", ")", ":", "raise", "ValueError", "(", "\"no length for reference when read is not not aligned\"", ")", "if", "self", ".", "entries", ".", "tlen", ":", "return", "self", ".", "entries", ".", "tlen", "#simplest is if tlen is set", "if", "self", ".", "header", ":", "if", "self", ".", "entries", ".", "rname", "in", "self", ".", "header", ".", "sequence_lengths", ":", "return", "self", ".", "header", ".", "sequence_lengths", "[", "self", ".", "entries", ".", "rname", "]", "elif", "self", ".", "reference", ":", "return", "len", "(", "self", ".", "reference", "[", "self", ".", "entries", ".", "rname", "]", ")", "else", ":", "raise", "ValueError", "(", "\"some reference needs to be set to go from psl to bam\\n\"", ")", "raise", "ValueError", "(", "\"No reference available\"", ")" ]
Get the length of the target sequence. length of the entire chromosome throws an error if there is no information available :return: length :rtype: int
[ "Get", "the", "length", "of", "the", "target", "sequence", ".", "length", "of", "the", "entire", "chromosome" ]
python
train
StanfordVL/robosuite
robosuite/utils/mjcf_utils.py
https://github.com/StanfordVL/robosuite/blob/65cd16810e2ed647e3ec88746af3412065b7f278/robosuite/utils/mjcf_utils.py#L82-L97
def new_site(name, rgba=RED, pos=(0, 0, 0), size=(0.005,), **kwargs): """ Creates a site element with attributes specified by @**kwargs. Args: name (str): site name. rgba: color and transparency. Defaults to solid red. pos: 3d position of the site. size ([float]): site size (sites are spherical by default). """ kwargs["rgba"] = array_to_string(rgba) kwargs["pos"] = array_to_string(pos) kwargs["size"] = array_to_string(size) kwargs["name"] = name element = ET.Element("site", attrib=kwargs) return element
[ "def", "new_site", "(", "name", ",", "rgba", "=", "RED", ",", "pos", "=", "(", "0", ",", "0", ",", "0", ")", ",", "size", "=", "(", "0.005", ",", ")", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "\"rgba\"", "]", "=", "array_to_string", "(", "rgba", ")", "kwargs", "[", "\"pos\"", "]", "=", "array_to_string", "(", "pos", ")", "kwargs", "[", "\"size\"", "]", "=", "array_to_string", "(", "size", ")", "kwargs", "[", "\"name\"", "]", "=", "name", "element", "=", "ET", ".", "Element", "(", "\"site\"", ",", "attrib", "=", "kwargs", ")", "return", "element" ]
Creates a site element with attributes specified by @**kwargs. Args: name (str): site name. rgba: color and transparency. Defaults to solid red. pos: 3d position of the site. size ([float]): site size (sites are spherical by default).
[ "Creates", "a", "site", "element", "with", "attributes", "specified", "by", "@", "**", "kwargs", "." ]
python
train
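A small illustration for the new_site record above: build a site marker and serialize it with ElementTree to see the string-converted attributes (the name is made up; attribute order in the output may vary).

import xml.etree.ElementTree as ET

site = new_site("grip_site", rgba=(0, 1, 0, 1), pos=(0, 0, 0.1), size=(0.01,))
print(ET.tostring(site).decode())
# e.g. <site rgba="0 1 0 1" pos="0 0 0.1" size="0.01" name="grip_site" />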
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_ntp.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_ntp.py#L12-L23
def show_ntp_input_rbridge_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_ntp = ET.Element("show_ntp") config = show_ntp input = ET.SubElement(show_ntp, "input") rbridge_id = ET.SubElement(input, "rbridge-id") rbridge_id.text = kwargs.pop('rbridge_id') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "show_ntp_input_rbridge_id", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "show_ntp", "=", "ET", ".", "Element", "(", "\"show_ntp\"", ")", "config", "=", "show_ntp", "input", "=", "ET", ".", "SubElement", "(", "show_ntp", ",", "\"input\"", ")", "rbridge_id", "=", "ET", ".", "SubElement", "(", "input", ",", "\"rbridge-id\"", ")", "rbridge_id", ".", "text", "=", "kwargs", ".", "pop", "(", "'rbridge_id'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
Yubico/yubikey-manager
ykman/cli/oath.py
https://github.com/Yubico/yubikey-manager/blob/3ac27bc59ae76a59db9d09a530494add2edbbabf/ykman/cli/oath.py#L403-L429
def delete(ctx, query, force): """ Delete a credential. Delete a credential from your YubiKey. Provide a query string to match the credential to delete. """ ensure_validated(ctx) controller = ctx.obj['controller'] creds = controller.list() hits = _search(creds, query) if len(hits) == 0: click.echo('No matches, nothing to be done.') elif len(hits) == 1: cred = hits[0] if force or (click.confirm( u'Delete credential: {} ?'.format(cred.printable_key), default=False, err=True )): controller.delete(cred) click.echo(u'Deleted {}.'.format(cred.printable_key)) else: click.echo('Deletion aborted by user.') else: _error_multiple_hits(ctx, hits)
[ "def", "delete", "(", "ctx", ",", "query", ",", "force", ")", ":", "ensure_validated", "(", "ctx", ")", "controller", "=", "ctx", ".", "obj", "[", "'controller'", "]", "creds", "=", "controller", ".", "list", "(", ")", "hits", "=", "_search", "(", "creds", ",", "query", ")", "if", "len", "(", "hits", ")", "==", "0", ":", "click", ".", "echo", "(", "'No matches, nothing to be done.'", ")", "elif", "len", "(", "hits", ")", "==", "1", ":", "cred", "=", "hits", "[", "0", "]", "if", "force", "or", "(", "click", ".", "confirm", "(", "u'Delete credential: {} ?'", ".", "format", "(", "cred", ".", "printable_key", ")", ",", "default", "=", "False", ",", "err", "=", "True", ")", ")", ":", "controller", ".", "delete", "(", "cred", ")", "click", ".", "echo", "(", "u'Deleted {}.'", ".", "format", "(", "cred", ".", "printable_key", ")", ")", "else", ":", "click", ".", "echo", "(", "'Deletion aborted by user.'", ")", "else", ":", "_error_multiple_hits", "(", "ctx", ",", "hits", ")" ]
Delete a credential. Delete a credential from your YubiKey. Provide a query string to match the credential to delete.
[ "Delete", "a", "credential", "." ]
python
train
tamasgal/km3pipe
km3pipe/utils/streamds.py
https://github.com/tamasgal/km3pipe/blob/7a9b59ac899a28775b5bdc5d391d9a5340d08040/km3pipe/utils/streamds.py#L56-L80
def get_data(stream, parameters, fmt): """Retrieve data for given stream and parameters, or None if not found""" sds = kp.db.StreamDS() if stream not in sds.streams: log.error("Stream '{}' not found in the database.".format(stream)) return params = {} if parameters: for parameter in parameters: if '=' not in parameter: log.error( "Invalid parameter syntax '{}'\n" "The correct syntax is 'parameter=value'". format(parameter) ) continue key, value = parameter.split('=') params[key] = value data = sds.get(stream, fmt, **params) if data is not None: with pd.option_context('display.max_rows', None, 'display.max_columns', None): print(data) else: sds.help(stream)
[ "def", "get_data", "(", "stream", ",", "parameters", ",", "fmt", ")", ":", "sds", "=", "kp", ".", "db", ".", "StreamDS", "(", ")", "if", "stream", "not", "in", "sds", ".", "streams", ":", "log", ".", "error", "(", "\"Stream '{}' not found in the database.\"", ".", "format", "(", "stream", ")", ")", "return", "params", "=", "{", "}", "if", "parameters", ":", "for", "parameter", "in", "parameters", ":", "if", "'='", "not", "in", "parameter", ":", "log", ".", "error", "(", "\"Invalid parameter syntax '{}'\\n\"", "\"The correct syntax is 'parameter=value'\"", ".", "format", "(", "parameter", ")", ")", "continue", "key", ",", "value", "=", "parameter", ".", "split", "(", "'='", ")", "params", "[", "key", "]", "=", "value", "data", "=", "sds", ".", "get", "(", "stream", ",", "fmt", ",", "*", "*", "params", ")", "if", "data", "is", "not", "None", ":", "with", "pd", ".", "option_context", "(", "'display.max_rows'", ",", "None", ",", "'display.max_columns'", ",", "None", ")", ":", "print", "(", "data", ")", "else", ":", "sds", ".", "help", "(", "stream", ")" ]
Retrieve data for given stream and parameters, or None if not found
[ "Retrieve", "data", "for", "given", "stream", "and", "parameters", "or", "None", "if", "not", "found" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_system_monitor_ext.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_system_monitor_ext.py#L154-L167
def show_system_monitor_output_switch_status_port_status_port_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_system_monitor = ET.Element("show_system_monitor") config = show_system_monitor output = ET.SubElement(show_system_monitor, "output") switch_status = ET.SubElement(output, "switch-status") port_status = ET.SubElement(switch_status, "port-status") port_name = ET.SubElement(port_status, "port-name") port_name.text = kwargs.pop('port_name') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "show_system_monitor_output_switch_status_port_status_port_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "show_system_monitor", "=", "ET", ".", "Element", "(", "\"show_system_monitor\"", ")", "config", "=", "show_system_monitor", "output", "=", "ET", ".", "SubElement", "(", "show_system_monitor", ",", "\"output\"", ")", "switch_status", "=", "ET", ".", "SubElement", "(", "output", ",", "\"switch-status\"", ")", "port_status", "=", "ET", ".", "SubElement", "(", "switch_status", ",", "\"port-status\"", ")", "port_name", "=", "ET", ".", "SubElement", "(", "port_status", ",", "\"port-name\"", ")", "port_name", ".", "text", "=", "kwargs", ".", "pop", "(", "'port_name'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train