Dataset columns and value statistics:
  repo              string, length 7 to 54
  path              string, length 4 to 192
  url               string, length 87 to 284
  code              string, length 78 to 104k
  code_tokens       sequence
  docstring         string, length 1 to 46.9k
  docstring_tokens  sequence
  language          string, 1 distinct value
  partition         string, 3 distinct values
python-diamond/Diamond
src/collectors/postgres/postgres.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/collectors/postgres/postgres.py#L74-L126
def collect(self): """ Do pre-flight checks, get list of db names, collect metrics, publish """ if psycopg2 is None: self.log.error('Unable to import module psycopg2') return {} # Get list of databases dbs = self._get_db_names() if len(dbs) == 0: self.log.error("I have 0 databases!") return {} if self.config['metrics']: metrics = self.config['metrics'] elif str_to_bool(self.config['extended']): metrics = registry['extended'] if str_to_bool(self.config['has_admin']) \ and 'WalSegmentStats' not in metrics: metrics.append('WalSegmentStats') else: metrics = registry['basic'] # Iterate every QueryStats class for metric_name in set(metrics): if metric_name not in metrics_registry: self.log.error( 'metric_name %s not found in metric registry' % metric_name) continue for dbase in dbs: conn = self._connect(database=dbase) try: klass = metrics_registry[metric_name] stat = klass(dbase, conn, underscore=self.config['underscore']) stat.fetch(self.config['pg_version']) for metric, value in stat: if value is not None: self.publish(metric, value) # Setting multi_db to True will run this query on all known # databases. This is bad for queries that hit views like # pg_database, which are shared across databases. # # If multi_db is False, bail early after the first query # iteration. Otherwise, continue to remaining databases. if stat.multi_db is False: break finally: conn.close()
[ "def", "collect", "(", "self", ")", ":", "if", "psycopg2", "is", "None", ":", "self", ".", "log", ".", "error", "(", "'Unable to import module psycopg2'", ")", "return", "{", "}", "# Get list of databases", "dbs", "=", "self", ".", "_get_db_names", "(", ")", "if", "len", "(", "dbs", ")", "==", "0", ":", "self", ".", "log", ".", "error", "(", "\"I have 0 databases!\"", ")", "return", "{", "}", "if", "self", ".", "config", "[", "'metrics'", "]", ":", "metrics", "=", "self", ".", "config", "[", "'metrics'", "]", "elif", "str_to_bool", "(", "self", ".", "config", "[", "'extended'", "]", ")", ":", "metrics", "=", "registry", "[", "'extended'", "]", "if", "str_to_bool", "(", "self", ".", "config", "[", "'has_admin'", "]", ")", "and", "'WalSegmentStats'", "not", "in", "metrics", ":", "metrics", ".", "append", "(", "'WalSegmentStats'", ")", "else", ":", "metrics", "=", "registry", "[", "'basic'", "]", "# Iterate every QueryStats class", "for", "metric_name", "in", "set", "(", "metrics", ")", ":", "if", "metric_name", "not", "in", "metrics_registry", ":", "self", ".", "log", ".", "error", "(", "'metric_name %s not found in metric registry'", "%", "metric_name", ")", "continue", "for", "dbase", "in", "dbs", ":", "conn", "=", "self", ".", "_connect", "(", "database", "=", "dbase", ")", "try", ":", "klass", "=", "metrics_registry", "[", "metric_name", "]", "stat", "=", "klass", "(", "dbase", ",", "conn", ",", "underscore", "=", "self", ".", "config", "[", "'underscore'", "]", ")", "stat", ".", "fetch", "(", "self", ".", "config", "[", "'pg_version'", "]", ")", "for", "metric", ",", "value", "in", "stat", ":", "if", "value", "is", "not", "None", ":", "self", ".", "publish", "(", "metric", ",", "value", ")", "# Setting multi_db to True will run this query on all known", "# databases. This is bad for queries that hit views like", "# pg_database, which are shared across databases.", "#", "# If multi_db is False, bail early after the first query", "# iteration. Otherwise, continue to remaining databases.", "if", "stat", ".", "multi_db", "is", "False", ":", "break", "finally", ":", "conn", ".", "close", "(", ")" ]
Do pre-flight checks, get list of db names, collect metrics, publish
[ "Do", "pre", "-", "flight", "checks", "get", "list", "of", "db", "names", "collect", "metrics", "publish" ]
python
train
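The main decision in the collector above is which QueryStats classes to run: an explicit metrics list in the config wins, otherwise the extended registry (plus WAL segment stats when has_admin is set), otherwise the basic registry. A standalone sketch of that selection logic, with Diamond's config and metric registry replaced by plain dicts whose contents are invented here:

    # Sketch only: registry contents and config keys below are illustrative,
    # and plain booleans stand in for Diamond's str_to_bool'd config values.
    registry = {
        'basic': ['DatabaseStats'],
        'extended': ['DatabaseStats', 'ConnectionStateStats'],
    }

    def pick_metrics(config):
        if config.get('metrics'):
            return list(config['metrics'])
        if config.get('extended'):
            metrics = list(registry['extended'])
            # has_admin additionally enables WAL segment statistics
            if config.get('has_admin') and 'WalSegmentStats' not in metrics:
                metrics.append('WalSegmentStats')
            return metrics
        return list(registry['basic'])

    print(pick_metrics({'extended': True, 'has_admin': True}))
    # ['DatabaseStats', 'ConnectionStateStats', 'WalSegmentStats']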
flyte/upnpclient
upnpclient/soap.py
https://github.com/flyte/upnpclient/blob/5529b950df33c0eaf0c24a9a307cf00fe627d0ad/upnpclient/soap.py#L62-L72
def _remove_extraneous_xml_declarations(xml_str): """ Sometimes devices return XML with more than one XML declaration in, such as when returning their own XML config files. This removes the extra ones and preserves the first one. """ xml_declaration = '' if xml_str.startswith('<?xml'): xml_declaration, xml_str = xml_str.split('?>', maxsplit=1) xml_declaration += '?>' xml_str = re.sub(r'<\?xml.*?\?>', '', xml_str, flags=re.I) return xml_declaration + xml_str
[ "def", "_remove_extraneous_xml_declarations", "(", "xml_str", ")", ":", "xml_declaration", "=", "''", "if", "xml_str", ".", "startswith", "(", "'<?xml'", ")", ":", "xml_declaration", ",", "xml_str", "=", "xml_str", ".", "split", "(", "'?>'", ",", "maxsplit", "=", "1", ")", "xml_declaration", "+=", "'?>'", "xml_str", "=", "re", ".", "sub", "(", "r'<\\?xml.*?\\?>'", ",", "''", ",", "xml_str", ",", "flags", "=", "re", ".", "I", ")", "return", "xml_declaration", "+", "xml_str" ]
Sometimes devices return XML with more than one XML declaration in, such as when returning their own XML config files. This removes the extra ones and preserves the first one.
[ "Sometimes", "devices", "return", "XML", "with", "more", "than", "one", "XML", "declaration", "in", "such", "as", "when", "returning", "their", "own", "XML", "config", "files", ".", "This", "removes", "the", "extra", "ones", "and", "preserves", "the", "first", "one", "." ]
python
train
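Because the helper above depends only on the standard library, its behaviour is easy to check in isolation. A minimal sketch that reuses the function body from the record on an invented two-declaration payload:

    import re

    def _remove_extraneous_xml_declarations(xml_str):
        # Keep the first XML declaration (if any), strip every later one.
        xml_declaration = ''
        if xml_str.startswith('<?xml'):
            xml_declaration, xml_str = xml_str.split('?>', maxsplit=1)
            xml_declaration += '?>'
        xml_str = re.sub(r'<\?xml.*?\?>', '', xml_str, flags=re.I)
        return xml_declaration + xml_str

    # Hypothetical device response that embeds its own config file,
    # carrying a second declaration.
    payload = ('<?xml version="1.0"?><root><config>'
               '<?xml version="1.0" encoding="utf-8"?><item/></config></root>')
    print(_remove_extraneous_xml_declarations(payload))
    # <?xml version="1.0"?><root><config><item/></config></root>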
jplusplus/statscraper
statscraper/scrapers/work_injury_scraper.py
https://github.com/jplusplus/statscraper/blob/932ec048b23d15b3dbdaf829facc55fd78ec0109/statscraper/scrapers/work_injury_scraper.py#L135-L154
def _fetch_itemslist(self, item): """ We define two collection: - Number of work injuries ("Arbetsolycka") - Number of workrelated diseases ("Arbetssjukdom") Each contains four datasets: - Per municipality and year - Per county and year - Per municipality and month - Per municipality and year """ if item.is_root: for c in ["Arbetsolycka", "Arbetssjukdom"]: yield Collection(c, blob=(c, None, None)) else: c = item.id for r in [u"kommun", u"län"]: for p in [u"år", u"månad"]: yield Dataset(u"%s-%s-%s" % (c, r, p), blob=(c, r, p), label=u"%s, antal per %s och %s" % (c, r, p))
[ "def", "_fetch_itemslist", "(", "self", ",", "item", ")", ":", "if", "item", ".", "is_root", ":", "for", "c", "in", "[", "\"Arbetsolycka\"", ",", "\"Arbetssjukdom\"", "]", ":", "yield", "Collection", "(", "c", ",", "blob", "=", "(", "c", ",", "None", ",", "None", ")", ")", "else", ":", "c", "=", "item", ".", "id", "for", "r", "in", "[", "u\"kommun\"", ",", "u\"län\"]", ":", "", "for", "p", "in", "[", "u\"år\",", " ", "\"månad\"]:", "", "", "yield", "Dataset", "(", "u\"%s-%s-%s\"", "%", "(", "c", ",", "r", ",", "p", ")", ",", "blob", "=", "(", "c", ",", "r", ",", "p", ")", ",", "label", "=", "u\"%s, antal per %s och %s\"", "%", "(", "c", ",", "r", ",", "p", ")", ")" ]
We define two collection: - Number of work injuries ("Arbetsolycka") - Number of workrelated diseases ("Arbetssjukdom") Each contains four datasets: - Per municipality and year - Per county and year - Per municipality and month - Per municipality and year
[ "We", "define", "two", "collection", ":", "-", "Number", "of", "work", "injuries", "(", "Arbetsolycka", ")", "-", "Number", "of", "workrelated", "diseases", "(", "Arbetssjukdom", ")", "Each", "contains", "four", "datasets", ":", "-", "Per", "municipality", "and", "year", "-", "Per", "county", "and", "year", "-", "Per", "municipality", "and", "month", "-", "Per", "municipality", "and", "year" ]
python
train
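For orientation, the nested loops in the record expand each collection into four datasets keyed by region (kommun/län) and period (år/månad). A sketch of just that naming scheme, leaving out the scraper's Collection and Dataset wrappers:

    # -*- coding: utf-8 -*-
    # Collection/Dataset objects are omitted; only the id/label scheme is shown.
    for c in [u"Arbetsolycka", u"Arbetssjukdom"]:
        for r in [u"kommun", u"län"]:        # municipality, county
            for p in [u"år", u"månad"]:      # year, month
                dataset_id = u"%s-%s-%s" % (c, r, p)
                label = u"%s, antal per %s och %s" % (c, r, p)
                print(dataset_id, u"->", label)
    # e.g. Arbetsolycka-kommun-år -> Arbetsolycka, antal per kommun och år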
offu/WeRoBot
werobot/pay.py
https://github.com/offu/WeRoBot/blob/fd42109105b03f9acf45ebd9dcabb9d5cff98f3c/werobot/pay.py#L168-L204
def pay_order_query(self, out_trade_no): """ 查询订单状态 一般用于无法确定 订单状态时候补偿 :param out_trade_no: 本地订单号 :return: 订单信息dict """ package = { 'partner': self.pay_partner_id, 'out_trade_no': out_trade_no, } _package = package.items() _package.sort() s = '&'.join( [ "%s=%s" % (p[0], str(p[1])) for p in (_package + [('key', self.pay_partner_key)]) ] ) package['sign'] = md5(s).hexdigest().upper() package = '&'.join(["%s=%s" % (p[0], p[1]) for p in package.items()]) params, sign, _ = self._pay_sign_dict( add_noncestr=False, package=package ) params['app_signature'] = sign params['sign_method'] = 'sha1' return self.post( url="https://api.weixin.qq.com/pay/orderquery", data=params )
[ "def", "pay_order_query", "(", "self", ",", "out_trade_no", ")", ":", "package", "=", "{", "'partner'", ":", "self", ".", "pay_partner_id", ",", "'out_trade_no'", ":", "out_trade_no", ",", "}", "_package", "=", "package", ".", "items", "(", ")", "_package", ".", "sort", "(", ")", "s", "=", "'&'", ".", "join", "(", "[", "\"%s=%s\"", "%", "(", "p", "[", "0", "]", ",", "str", "(", "p", "[", "1", "]", ")", ")", "for", "p", "in", "(", "_package", "+", "[", "(", "'key'", ",", "self", ".", "pay_partner_key", ")", "]", ")", "]", ")", "package", "[", "'sign'", "]", "=", "md5", "(", "s", ")", ".", "hexdigest", "(", ")", ".", "upper", "(", ")", "package", "=", "'&'", ".", "join", "(", "[", "\"%s=%s\"", "%", "(", "p", "[", "0", "]", ",", "p", "[", "1", "]", ")", "for", "p", "in", "package", ".", "items", "(", ")", "]", ")", "params", ",", "sign", ",", "_", "=", "self", ".", "_pay_sign_dict", "(", "add_noncestr", "=", "False", ",", "package", "=", "package", ")", "params", "[", "'app_signature'", "]", "=", "sign", "params", "[", "'sign_method'", "]", "=", "'sha1'", "return", "self", ".", "post", "(", "url", "=", "\"https://api.weixin.qq.com/pay/orderquery\"", ",", "data", "=", "params", ")" ]
Query the order status. Generally used as a compensation step when the order status cannot be determined. :param out_trade_no: local (merchant-side) order number :return: dict of order information
[ "查询订单状态", "一般用于无法确定", "订单状态时候补偿" ]
python
train
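The code in the record targets Python 2 (dict.items() is sorted in place as a list, and md5() is fed a str). The signing step it implements is a sort-join-MD5 over the package parameters plus the partner key; a sketch of the same scheme in Python 3, with invented parameter values:

    from hashlib import md5

    def legacy_pay_sign(package, partner_key):
        # Sort parameters, append the partner key, join as key=value pairs,
        # and take an uppercase MD5 digest -- the scheme used above.
        items = sorted(package.items()) + [('key', partner_key)]
        s = '&'.join("%s=%s" % (k, str(v)) for k, v in items)
        return md5(s.encode('utf-8')).hexdigest().upper()

    # Hypothetical values for illustration only.
    package = {'partner': '1900000109', 'out_trade_no': '20150806125346'}
    print(legacy_pay_sign(package, 'partner_key_here'))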
PyThaiNLP/pythainlp
pythainlp/ulmfit/__init__.py
https://github.com/PyThaiNLP/pythainlp/blob/e9a300b8a99dfd1a67a955e7c06f62e4afe0fbca/pythainlp/ulmfit/__init__.py#L42-L52
def _get_path(fname: str) -> str: """ :meth: download get path of file from pythainlp-corpus :param str fname: file name :return: path to downloaded file """ path = get_corpus_path(fname) if not path: download(fname) path = get_corpus_path(fname) return path
[ "def", "_get_path", "(", "fname", ":", "str", ")", "->", "str", ":", "path", "=", "get_corpus_path", "(", "fname", ")", "if", "not", "path", ":", "download", "(", "fname", ")", "path", "=", "get_corpus_path", "(", "fname", ")", "return", "path" ]
:meth: download get path of file from pythainlp-corpus :param str fname: file name :return: path to downloaded file
[ ":", "meth", ":", "download", "get", "path", "of", "file", "from", "pythainlp", "-", "corpus", ":", "param", "str", "fname", ":", "file", "name", ":", "return", ":", "path", "to", "downloaded", "file" ]
python
train
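A usage sketch of the same fetch-or-download pattern, calling the public pythainlp.corpus functions the helper wraps; the corpus name below is a placeholder, not necessarily one the ULMFiT module actually requests:

    from pythainlp.corpus import download, get_corpus_path

    name = "thwiki_lm"             # placeholder corpus/model name
    path = get_corpus_path(name)
    if not path:
        download(name)             # fetch from the pythainlp-corpus catalog
        path = get_corpus_path(name)
    print(path)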
aleju/imgaug
imgaug/augmentables/polys.py
https://github.com/aleju/imgaug/blob/786be74aa855513840113ea523c5df495dc6a8af/imgaug/augmentables/polys.py#L1024-L1045
def on(self, image): """ Project polygons from one image to a new one. Parameters ---------- image : ndarray or tuple of int New image onto which the polygons are to be projected. May also simply be that new image's shape tuple. Returns ------- imgaug.PolygonsOnImage Object containing all projected polygons. """ shape = normalize_shape(image) if shape[0:2] == self.shape[0:2]: return self.deepcopy() polygons = [poly.project(self.shape, shape) for poly in self.polygons] # TODO use deepcopy() here return PolygonsOnImage(polygons, shape)
[ "def", "on", "(", "self", ",", "image", ")", ":", "shape", "=", "normalize_shape", "(", "image", ")", "if", "shape", "[", "0", ":", "2", "]", "==", "self", ".", "shape", "[", "0", ":", "2", "]", ":", "return", "self", ".", "deepcopy", "(", ")", "polygons", "=", "[", "poly", ".", "project", "(", "self", ".", "shape", ",", "shape", ")", "for", "poly", "in", "self", ".", "polygons", "]", "# TODO use deepcopy() here", "return", "PolygonsOnImage", "(", "polygons", ",", "shape", ")" ]
Project polygons from one image to a new one. Parameters ---------- image : ndarray or tuple of int New image onto which the polygons are to be projected. May also simply be that new image's shape tuple. Returns ------- imgaug.PolygonsOnImage Object containing all projected polygons.
[ "Project", "polygons", "from", "one", "image", "to", "a", "new", "one", "." ]
python
valid
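A usage sketch for the method above, assuming the public Polygon and PolygonsOnImage classes from the same imgaug module; projecting onto a larger target rescales the polygon coordinates proportionally:

    from imgaug.augmentables.polys import Polygon, PolygonsOnImage

    # A square annotated on a 100x100 image ...
    psoi = PolygonsOnImage(
        [Polygon([(10, 10), (50, 10), (50, 50), (10, 50)])],
        shape=(100, 100, 3))

    # ... projected onto a 200x200 target; passing the target image's shape
    # tuple works the same as passing the image array itself.
    projected = psoi.on((200, 200, 3))
    print(projected.polygons[0].exterior)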
BD2KGenomics/protect
src/protect/mutation_calling/muse.py
https://github.com/BD2KGenomics/protect/blob/06310682c50dcf8917b912c8e551299ff7ee41ce/src/protect/mutation_calling/muse.py#L63-L101
def run_muse(job, tumor_bam, normal_bam, univ_options, muse_options): """ Spawn a MuSE job for each chromosome on the DNA bams. :param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq :param dict normal_bam: Dict of bam and bai for normal DNA-Seq :param dict univ_options: Dict of universal options used by almost all tools :param dict muse_options: Options specific to MuSE :return: Dict of results from running MuSE on every chromosome perchrom_muse: |- 'chr1': fsID |- 'chr2' fsID | |-... | +- 'chrM': fsID :rtype: dict """ # Get a list of chromosomes to handle if muse_options['chromosomes']: chromosomes = muse_options['chromosomes'] else: chromosomes = sample_chromosomes(job, muse_options['genome_fai']) perchrom_muse = defaultdict() for chrom in chromosomes: call = job.addChildJobFn(run_muse_perchrom, tumor_bam, normal_bam, univ_options, muse_options, chrom, disk=PromisedRequirement( muse_disk, tumor_bam['tumor_dna_fix_pg_sorted.bam'], normal_bam['normal_dna_fix_pg_sorted.bam'], muse_options['genome_fasta']), memory='6G') sump = call.addChildJobFn(run_muse_sump_perchrom, call.rv(), univ_options, muse_options, chrom, disk=PromisedRequirement(muse_sump_disk, muse_options['dbsnp_vcf']), memory='6G') perchrom_muse[chrom] = sump.rv() return perchrom_muse
[ "def", "run_muse", "(", "job", ",", "tumor_bam", ",", "normal_bam", ",", "univ_options", ",", "muse_options", ")", ":", "# Get a list of chromosomes to handle", "if", "muse_options", "[", "'chromosomes'", "]", ":", "chromosomes", "=", "muse_options", "[", "'chromosomes'", "]", "else", ":", "chromosomes", "=", "sample_chromosomes", "(", "job", ",", "muse_options", "[", "'genome_fai'", "]", ")", "perchrom_muse", "=", "defaultdict", "(", ")", "for", "chrom", "in", "chromosomes", ":", "call", "=", "job", ".", "addChildJobFn", "(", "run_muse_perchrom", ",", "tumor_bam", ",", "normal_bam", ",", "univ_options", ",", "muse_options", ",", "chrom", ",", "disk", "=", "PromisedRequirement", "(", "muse_disk", ",", "tumor_bam", "[", "'tumor_dna_fix_pg_sorted.bam'", "]", ",", "normal_bam", "[", "'normal_dna_fix_pg_sorted.bam'", "]", ",", "muse_options", "[", "'genome_fasta'", "]", ")", ",", "memory", "=", "'6G'", ")", "sump", "=", "call", ".", "addChildJobFn", "(", "run_muse_sump_perchrom", ",", "call", ".", "rv", "(", ")", ",", "univ_options", ",", "muse_options", ",", "chrom", ",", "disk", "=", "PromisedRequirement", "(", "muse_sump_disk", ",", "muse_options", "[", "'dbsnp_vcf'", "]", ")", ",", "memory", "=", "'6G'", ")", "perchrom_muse", "[", "chrom", "]", "=", "sump", ".", "rv", "(", ")", "return", "perchrom_muse" ]
Spawn a MuSE job for each chromosome on the DNA bams. :param dict tumor_bam: Dict of bam and bai for tumor DNA-Seq :param dict normal_bam: Dict of bam and bai for normal DNA-Seq :param dict univ_options: Dict of universal options used by almost all tools :param dict muse_options: Options specific to MuSE :return: Dict of results from running MuSE on every chromosome perchrom_muse: |- 'chr1': fsID |- 'chr2' fsID | |-... | +- 'chrM': fsID :rtype: dict
[ "Spawn", "a", "MuSE", "job", "for", "each", "chromosome", "on", "the", "DNA", "bams", "." ]
python
train
fake-name/ChromeController
ChromeController/Generator/Generated.py
https://github.com/fake-name/ChromeController/blob/914dd136184e8f1165c7aa6ef30418aaf10c61f0/ChromeController/Generator/Generated.py#L5100-L5117
def ServiceWorker_stopWorker(self, versionId): """ Function path: ServiceWorker.stopWorker Domain: ServiceWorker Method name: stopWorker Parameters: Required arguments: 'versionId' (type: string) -> No description No return value. """ assert isinstance(versionId, (str,) ), "Argument 'versionId' must be of type '['str']'. Received type: '%s'" % type( versionId) subdom_funcs = self.synchronous_command('ServiceWorker.stopWorker', versionId=versionId) return subdom_funcs
[ "def", "ServiceWorker_stopWorker", "(", "self", ",", "versionId", ")", ":", "assert", "isinstance", "(", "versionId", ",", "(", "str", ",", ")", ")", ",", "\"Argument 'versionId' must be of type '['str']'. Received type: '%s'\"", "%", "type", "(", "versionId", ")", "subdom_funcs", "=", "self", ".", "synchronous_command", "(", "'ServiceWorker.stopWorker'", ",", "versionId", "=", "versionId", ")", "return", "subdom_funcs" ]
Function path: ServiceWorker.stopWorker Domain: ServiceWorker Method name: stopWorker Parameters: Required arguments: 'versionId' (type: string) -> No description No return value.
[ "Function", "path", ":", "ServiceWorker", ".", "stopWorker", "Domain", ":", "ServiceWorker", "Method", "name", ":", "stopWorker", "Parameters", ":", "Required", "arguments", ":", "versionId", "(", "type", ":", "string", ")", "-", ">", "No", "description", "No", "return", "value", "." ]
python
train
openstack/horizon
openstack_auth/views.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_auth/views.py#L284-L353
def switch_keystone_provider(request, keystone_provider=None, redirect_field_name=auth.REDIRECT_FIELD_NAME): """Switches the user's keystone provider using K2K Federation If keystone_provider is given then we switch the user to the keystone provider using K2K federation. Otherwise if keystone_provider is None then we switch the user back to the Identity Provider Keystone which a non federated token auth will be used. """ base_token = request.session.get('k2k_base_unscoped_token', None) k2k_auth_url = request.session.get('k2k_auth_url', None) keystone_providers = request.session.get('keystone_providers', None) recent_project = request.COOKIES.get('recent_project') if not base_token or not k2k_auth_url: msg = _('K2K Federation not setup for this session') raise exceptions.KeystoneAuthException(msg) redirect_to = request.GET.get(redirect_field_name, '') if not is_safe_url(url=redirect_to, host=request.get_host()): redirect_to = settings.LOGIN_REDIRECT_URL unscoped_auth_ref = None keystone_idp_id = getattr( settings, 'KEYSTONE_PROVIDER_IDP_ID', 'localkeystone') if keystone_provider == keystone_idp_id: current_plugin = plugin.TokenPlugin() unscoped_auth = current_plugin.get_plugin(auth_url=k2k_auth_url, token=base_token) else: # Switch to service provider using K2K federation plugins = [plugin.TokenPlugin()] current_plugin = plugin.K2KAuthPlugin() unscoped_auth = current_plugin.get_plugin( auth_url=k2k_auth_url, service_provider=keystone_provider, plugins=plugins, token=base_token, recent_project=recent_project) try: # Switch to identity provider using token auth unscoped_auth_ref = current_plugin.get_access_info(unscoped_auth) except exceptions.KeystoneAuthException as exc: msg = 'Switching to Keystone Provider %s has failed. %s' \ % (keystone_provider, (six.text_type(exc))) messages.error(request, msg) if unscoped_auth_ref: try: request.user = auth.authenticate( request=request, auth_url=unscoped_auth.auth_url, token=unscoped_auth_ref.auth_token) except exceptions.KeystoneAuthException as exc: msg = 'Keystone provider switch failed: %s' % six.text_type(exc) res = django_http.HttpResponseRedirect(settings.LOGIN_URL) res.set_cookie('logout_reason', msg, max_age=10) return res auth.login(request, request.user) auth_user.set_session_from_user(request, request.user) request.session['keystone_provider_id'] = keystone_provider request.session['keystone_providers'] = keystone_providers request.session['k2k_base_unscoped_token'] = base_token request.session['k2k_auth_url'] = k2k_auth_url message = ( _('Switch to Keystone Provider "%(keystone_provider)s" ' 'successful.') % {'keystone_provider': keystone_provider}) messages.success(request, message) response = shortcuts.redirect(redirect_to) return response
[ "def", "switch_keystone_provider", "(", "request", ",", "keystone_provider", "=", "None", ",", "redirect_field_name", "=", "auth", ".", "REDIRECT_FIELD_NAME", ")", ":", "base_token", "=", "request", ".", "session", ".", "get", "(", "'k2k_base_unscoped_token'", ",", "None", ")", "k2k_auth_url", "=", "request", ".", "session", ".", "get", "(", "'k2k_auth_url'", ",", "None", ")", "keystone_providers", "=", "request", ".", "session", ".", "get", "(", "'keystone_providers'", ",", "None", ")", "recent_project", "=", "request", ".", "COOKIES", ".", "get", "(", "'recent_project'", ")", "if", "not", "base_token", "or", "not", "k2k_auth_url", ":", "msg", "=", "_", "(", "'K2K Federation not setup for this session'", ")", "raise", "exceptions", ".", "KeystoneAuthException", "(", "msg", ")", "redirect_to", "=", "request", ".", "GET", ".", "get", "(", "redirect_field_name", ",", "''", ")", "if", "not", "is_safe_url", "(", "url", "=", "redirect_to", ",", "host", "=", "request", ".", "get_host", "(", ")", ")", ":", "redirect_to", "=", "settings", ".", "LOGIN_REDIRECT_URL", "unscoped_auth_ref", "=", "None", "keystone_idp_id", "=", "getattr", "(", "settings", ",", "'KEYSTONE_PROVIDER_IDP_ID'", ",", "'localkeystone'", ")", "if", "keystone_provider", "==", "keystone_idp_id", ":", "current_plugin", "=", "plugin", ".", "TokenPlugin", "(", ")", "unscoped_auth", "=", "current_plugin", ".", "get_plugin", "(", "auth_url", "=", "k2k_auth_url", ",", "token", "=", "base_token", ")", "else", ":", "# Switch to service provider using K2K federation", "plugins", "=", "[", "plugin", ".", "TokenPlugin", "(", ")", "]", "current_plugin", "=", "plugin", ".", "K2KAuthPlugin", "(", ")", "unscoped_auth", "=", "current_plugin", ".", "get_plugin", "(", "auth_url", "=", "k2k_auth_url", ",", "service_provider", "=", "keystone_provider", ",", "plugins", "=", "plugins", ",", "token", "=", "base_token", ",", "recent_project", "=", "recent_project", ")", "try", ":", "# Switch to identity provider using token auth", "unscoped_auth_ref", "=", "current_plugin", ".", "get_access_info", "(", "unscoped_auth", ")", "except", "exceptions", ".", "KeystoneAuthException", "as", "exc", ":", "msg", "=", "'Switching to Keystone Provider %s has failed. 
%s'", "%", "(", "keystone_provider", ",", "(", "six", ".", "text_type", "(", "exc", ")", ")", ")", "messages", ".", "error", "(", "request", ",", "msg", ")", "if", "unscoped_auth_ref", ":", "try", ":", "request", ".", "user", "=", "auth", ".", "authenticate", "(", "request", "=", "request", ",", "auth_url", "=", "unscoped_auth", ".", "auth_url", ",", "token", "=", "unscoped_auth_ref", ".", "auth_token", ")", "except", "exceptions", ".", "KeystoneAuthException", "as", "exc", ":", "msg", "=", "'Keystone provider switch failed: %s'", "%", "six", ".", "text_type", "(", "exc", ")", "res", "=", "django_http", ".", "HttpResponseRedirect", "(", "settings", ".", "LOGIN_URL", ")", "res", ".", "set_cookie", "(", "'logout_reason'", ",", "msg", ",", "max_age", "=", "10", ")", "return", "res", "auth", ".", "login", "(", "request", ",", "request", ".", "user", ")", "auth_user", ".", "set_session_from_user", "(", "request", ",", "request", ".", "user", ")", "request", ".", "session", "[", "'keystone_provider_id'", "]", "=", "keystone_provider", "request", ".", "session", "[", "'keystone_providers'", "]", "=", "keystone_providers", "request", ".", "session", "[", "'k2k_base_unscoped_token'", "]", "=", "base_token", "request", ".", "session", "[", "'k2k_auth_url'", "]", "=", "k2k_auth_url", "message", "=", "(", "_", "(", "'Switch to Keystone Provider \"%(keystone_provider)s\" '", "'successful.'", ")", "%", "{", "'keystone_provider'", ":", "keystone_provider", "}", ")", "messages", ".", "success", "(", "request", ",", "message", ")", "response", "=", "shortcuts", ".", "redirect", "(", "redirect_to", ")", "return", "response" ]
Switches the user's keystone provider using K2K Federation If keystone_provider is given then we switch the user to the keystone provider using K2K federation. Otherwise if keystone_provider is None then we switch the user back to the Identity Provider Keystone which a non federated token auth will be used.
[ "Switches", "the", "user", "s", "keystone", "provider", "using", "K2K", "Federation" ]
python
train
teepark/junction
junction/hub.py
https://github.com/teepark/junction/blob/481d135d9e53acb55c72686e2eb4483432f35fa6/junction/hub.py#L149-L180
def publish(self, service, routing_id, method, args=None, kwargs=None, broadcast=False, udp=False): '''Send a 1-way message :param service: the service name (the routing top level) :type service: anything hash-able :param int routing_id: the id used for routing within the registered handlers of the service :param string method: the method name to call :param tuple args: The positional arguments to send along with the request. If the first positional argument is a generator object, the publish will be sent in chunks :ref:`(more info) <chunked-messages>`. :param dict kwargs: keyword arguments to send along with the request :param bool broadcast: if ``True``, send to every peer with a matching subscription. :param bool udp: deliver the message over UDP instead of the usual TCP :returns: None. use 'rpc' methods for requests with responses. :raises: :class:`Unroutable <junction.errors.Unroutable>` if no peers are registered to receive the message ''' if udp: func = self._dispatcher.send_publish_udp else: func = self._dispatcher.send_publish if not func(None, service, routing_id, method, args or (), kwargs or {}, singular=not broadcast): raise errors.Unroutable()
[ "def", "publish", "(", "self", ",", "service", ",", "routing_id", ",", "method", ",", "args", "=", "None", ",", "kwargs", "=", "None", ",", "broadcast", "=", "False", ",", "udp", "=", "False", ")", ":", "if", "udp", ":", "func", "=", "self", ".", "_dispatcher", ".", "send_publish_udp", "else", ":", "func", "=", "self", ".", "_dispatcher", ".", "send_publish", "if", "not", "func", "(", "None", ",", "service", ",", "routing_id", ",", "method", ",", "args", "or", "(", ")", ",", "kwargs", "or", "{", "}", ",", "singular", "=", "not", "broadcast", ")", ":", "raise", "errors", ".", "Unroutable", "(", ")" ]
Send a 1-way message :param service: the service name (the routing top level) :type service: anything hash-able :param int routing_id: the id used for routing within the registered handlers of the service :param string method: the method name to call :param tuple args: The positional arguments to send along with the request. If the first positional argument is a generator object, the publish will be sent in chunks :ref:`(more info) <chunked-messages>`. :param dict kwargs: keyword arguments to send along with the request :param bool broadcast: if ``True``, send to every peer with a matching subscription. :param bool udp: deliver the message over UDP instead of the usual TCP :returns: None. use 'rpc' methods for requests with responses. :raises: :class:`Unroutable <junction.errors.Unroutable>` if no peers are registered to receive the message
[ "Send", "a", "1", "-", "way", "message" ]
python
train
jeffknupp/sandman
sandman/sandman.py
https://github.com/jeffknupp/sandman/blob/253ea4d15cbccd9f0016d66fedd7478614cc0b2f/sandman/sandman.py#L88-L103
def _single_resource_json_response(resource, depth=0): """Return the JSON representation of *resource*. :param resource: :class:`sandman.model.Model` to render :type resource: :class:`sandman.model.Model` :rtype: :class:`flask.Response` """ links = resource.links() response = jsonify(**resource.as_dict(depth)) response.headers['Link'] = '' for link in links: response.headers['Link'] += '<{}>; rel="{}",'.format( link['uri'], link['rel']) response.headers['Link'] = response.headers['Link'][:-1] return response
[ "def", "_single_resource_json_response", "(", "resource", ",", "depth", "=", "0", ")", ":", "links", "=", "resource", ".", "links", "(", ")", "response", "=", "jsonify", "(", "*", "*", "resource", ".", "as_dict", "(", "depth", ")", ")", "response", ".", "headers", "[", "'Link'", "]", "=", "''", "for", "link", "in", "links", ":", "response", ".", "headers", "[", "'Link'", "]", "+=", "'<{}>; rel=\"{}\",'", ".", "format", "(", "link", "[", "'uri'", "]", ",", "link", "[", "'rel'", "]", ")", "response", ".", "headers", "[", "'Link'", "]", "=", "response", ".", "headers", "[", "'Link'", "]", "[", ":", "-", "1", "]", "return", "response" ]
Return the JSON representation of *resource*. :param resource: :class:`sandman.model.Model` to render :type resource: :class:`sandman.model.Model` :rtype: :class:`flask.Response`
[ "Return", "the", "JSON", "representation", "of", "*", "resource", "*", "." ]
python
train
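The only non-obvious step in the function above is the HTTP Link header it assembles from the resource's links. The same string-building in isolation, with invented link dicts:

    # The link dicts below are invented for illustration.
    links = [
        {'uri': '/artists/1', 'rel': 'self'},
        {'uri': '/artists', 'rel': 'collection'},
    ]

    header = ''
    for link in links:
        header += '<{}>; rel="{}",'.format(link['uri'], link['rel'])
    header = header[:-1]   # drop the trailing comma

    print(header)
    # </artists/1>; rel="self",</artists>; rel="collection"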
biocore/burrito-fillings
bfillings/usearch.py
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/usearch.py#L1153-L1476
def usearch_qf( fasta_filepath, refseqs_fp=None, output_dir=None, percent_id=0.97, percent_id_err=0.97, minsize=4, abundance_skew=2.0, db_filepath=None, rev=False, label_prefix="", label_suffix="", retain_label_as_comment=False, count_start=0, perc_id_blast=0.97, save_intermediate_files=False, HALT_EXEC=False, global_alignment=True, sizein=True, sizeout=True, w=64, slots=16769023, maxrejects=64, minlen=64, de_novo_chimera_detection=True, derep_fullseq=False, reference_chimera_detection=True, cluster_size_filtering=True, remove_usearch_logs=False, usersort=True, suppress_new_clusters=False, chimeras_retention="union", verbose=False ): """ Main convenience wrapper for using usearch to filter/cluster seqs The complete 'usearch_qf' process is a multistep process with many calls to usearch with various parameters. It is likely to change from the original implementation. A lot. fasta_filepath = fasta filepath to filtering/clustering (e.g., output seqs.fna file from split_libraries.py) refseqs_fp = fasta filepath for ref-based otu picking. output_dir = directory to store the otu mapping file, as well logs and the intermediate files created if save_intermediate_files is True. percent_ID = percent ID for clustering sequences. percent_ID_err = percent ID for filtering out chimeras minsize = Minimum size of cluster for retention after chimera removal. abundance_skew = threshold setting for chimera removal with de novo chimera detection. db_filepath = filepath of reference fasta sequence set for ref based chimera detection. rev = search plus and minus strands of sequences, used in ref based chimera detection. label_prefix = optional prefix added to filtered fasta file. label_suffix = optional suffix added to filtered fasta file. retain_label_as_comment = option to add usearch generated label to enumerated fasta labels. count_start = integer to begin counting at for sequence enumeration. perc_id_blast = percent identity setting for using blast algorithm to assign original sequence labels to filtered fasta. global_alignment = Setting for assignment of original seq labels to filtered seqs. sizein = not defined in usearch helpstring sizeout = not defined in usearch helpstring w = Word length for U-sorting slots = Size of compressed index table. Should be prime, e.g. 40000003. Should also specify --w, typical is --w 16 or --w 32. maxrejects = Max rejected targets, 0=ignore, default 32. save_intermediate_files = retain all the intermediate files created during this process. minlen = (not specified in usearch helpstring), but seems like a good bet that this refers to the minimum length of the sequences for dereplication. HALT_EXEC = used to debug app controller problems. de_novo_chimera_detection = If True, will detect chimeras de novo reference_chimera_detection = If True, will detect chimeras ref based cluster_size_filtering = If True, will filter OTUs according to seq counts. remove_usearch_logs = If True, will not call the --log function for each usearch call. usersort = Used for specifying custom sorting (i.e., non-length based sorting) with usearch/uclust. suppress_new_clusters = with reference based OTU picking, if enabled, will prevent new clusters that do not match the reference from being clustered. chimeras_retention = accepts either 'intersection' or 'union'. Will test for chimeras against the full input error clustered sequence set, and retain sequences flagged as non-chimeras by either (union) or only those flagged as non-chimeras by both (intersection). 
""" # Save a list of intermediate filepaths in case they are to be removed. intermediate_files = [] # Need absolute paths to avoid problems with app controller if output_dir: output_dir = abspath(output_dir) + '/' fasta_filepath = abspath(fasta_filepath) try: if verbose: print "Sorting sequences by length..." # Sort seqs by length app_result, output_filepath_len_sorted =\ usearch_fasta_sort_from_filepath(fasta_filepath, output_filepath= join( output_dir, 'len_sorted.fasta'), save_intermediate_files=save_intermediate_files, remove_usearch_logs=remove_usearch_logs, working_dir=output_dir, HALT_EXEC=HALT_EXEC) intermediate_files.append(output_filepath_len_sorted) if verbose: print "Dereplicating sequences..." # Dereplicate sequences app_result, output_filepath_dereplicated =\ usearch_dereplicate_exact_subseqs(output_filepath_len_sorted, output_filepath=join( output_dir, 'dereplicated_seqs.fasta'), minlen=minlen, w=w, slots=slots, sizeout=sizeout, maxrejects=maxrejects, save_intermediate_files=save_intermediate_files, remove_usearch_logs=remove_usearch_logs, working_dir=output_dir, HALT_EXEC=HALT_EXEC) intermediate_files.append(output_filepath_dereplicated) if verbose: print "Sorting by abundance..." # Sort by abundance, initially no filter based on seqs/otu app_result, output_fp =\ usearch_sort_by_abundance(output_filepath_dereplicated, output_filepath=join( output_dir, 'abundance_sorted.fasta'), usersort=True, sizein=sizein, sizeout=sizeout, minsize=0, remove_usearch_logs=remove_usearch_logs, working_dir=output_dir, HALT_EXEC=HALT_EXEC) intermediate_files.append(output_fp) if verbose: print "Clustering sequences for error correction..." # Create .uc file of clusters file, to identify original sequences # later output_uc_filepath = output_dir + 'err_corrected_clusters.uc' app_result, error_clustered_output_fp =\ usearch_cluster_error_correction(output_fp, output_filepath=join(output_dir, 'clustered_error_corrected.fasta'), output_uc_filepath=output_uc_filepath, usersort=True, percent_id_err=percent_id_err, sizein=sizein, sizeout=sizeout, w=w, slots=slots, maxrejects=maxrejects, remove_usearch_logs=remove_usearch_logs, save_intermediate_files=save_intermediate_files, working_dir=output_dir, HALT_EXEC=HALT_EXEC) intermediate_files.append(error_clustered_output_fp) intermediate_files.append(output_uc_filepath) # Series of conditional tests, using generic 'output_fp' name so the # conditional filtering, if any/all are selected, do not matter. if de_novo_chimera_detection: if verbose: print "Performing de novo chimera detection..." app_result, output_fp_de_novo_nonchimeras =\ usearch_chimera_filter_de_novo(error_clustered_output_fp, abundance_skew=abundance_skew, output_chimera_filepath= join( output_dir, 'de_novo_chimeras.fasta'), output_non_chimera_filepath=join( output_dir, 'de_novo_non_chimeras.fasta'), usersort=True, save_intermediate_files=save_intermediate_files, remove_usearch_logs=remove_usearch_logs, working_dir=output_dir, HALT_EXEC=HALT_EXEC) intermediate_files.append(output_fp_de_novo_nonchimeras) output_fp = output_fp_de_novo_nonchimeras if reference_chimera_detection: if verbose: print "Performing reference based chimera detection..." 
app_result, output_fp_ref_nonchimeras =\ usearch_chimera_filter_ref_based(error_clustered_output_fp, db_filepath=db_filepath, output_chimera_filepath= join( output_dir, 'reference_chimeras.fasta'), output_non_chimera_filepath= join(output_dir, 'reference_non_chimeras.fasta'), usersort=True, save_intermediate_files=save_intermediate_files, rev=rev, remove_usearch_logs=remove_usearch_logs, working_dir=output_dir, HALT_EXEC=HALT_EXEC) intermediate_files.append(output_fp_ref_nonchimeras) output_fp = output_fp_ref_nonchimeras # get intersection or union if both ref and de novo chimera detection if de_novo_chimera_detection and reference_chimera_detection: if verbose: print "Finding %s of non-chimeras..." % chimeras_retention output_fp = get_retained_chimeras( output_fp_de_novo_nonchimeras, output_fp_ref_nonchimeras, output_combined_fp= join(output_dir, 'combined_non_chimeras.fasta'), chimeras_retention=chimeras_retention) intermediate_files.append(output_fp) if cluster_size_filtering: # Test for empty filepath following filters, raise error if all seqs # have been removed if verbose: print "Filtering by cluster size..." # chimera detection was not performed, use output file of step 4 as input # to filtering by cluster size if not (reference_chimera_detection and de_novo_chimera_detection): output_fp = error_clustered_output_fp app_result, output_fp =\ usearch_sort_by_abundance(output_fp, output_filepath= join(output_dir, 'abundance_sorted_minsize_' + str(minsize) + '.fasta'), minsize=minsize, sizein=sizein, sizeout=sizeout, remove_usearch_logs=remove_usearch_logs, working_dir=output_dir, HALT_EXEC=HALT_EXEC) intermediate_files.append(output_fp) # cluster seqs # Should we add in option to use alternative OTU picking here? # Seems like it will be a bit of a mess...maybe after we determine # if usearch_qf should become standard. if refseqs_fp: if verbose: print "Clustering against reference sequences..." app_result, output_filepath =\ usearch_cluster_seqs_ref(output_fp, output_filepath= join( output_dir, 'ref_clustered_seqs.uc'), percent_id=percent_id, sizein=sizein, sizeout=sizeout, w=w, slots=slots, maxrejects=maxrejects, save_intermediate_files=save_intermediate_files, remove_usearch_logs=remove_usearch_logs, suppress_new_clusters=suppress_new_clusters, refseqs_fp=refseqs_fp, output_dir=output_dir, working_dir=output_dir, rev=rev, HALT_EXEC=HALT_EXEC ) else: if verbose: print "De novo clustering sequences..." app_result, output_filepath =\ usearch_cluster_seqs(output_fp, output_filepath= join(output_dir, 'clustered_seqs.fasta'), percent_id=percent_id, sizein=sizein, sizeout=sizeout, w=w, slots=slots, maxrejects=maxrejects, save_intermediate_files=save_intermediate_files, remove_usearch_logs=remove_usearch_logs, working_dir=output_dir, HALT_EXEC=HALT_EXEC) intermediate_files.append(output_filepath) # Enumerate the OTUs in the clusters if not suppress_new_clusters: if verbose: print "Enumerating OTUs..." output_filepath =\ enumerate_otus(output_filepath, output_filepath= join(output_dir, 'enumerated_otus.fasta'), label_prefix=label_prefix, label_suffix=label_suffix, count_start=count_start, retain_label_as_comment=retain_label_as_comment) intermediate_files.append(output_filepath) # Get original sequence label identities if verbose: print "Assigning sequences to clusters..." 
app_result, clusters_file = assign_reads_to_otus(fasta_filepath, filtered_fasta=output_filepath, output_filepath=join( output_dir, 'assign_reads_to_otus.uc'), perc_id_blast=percent_id, global_alignment=global_alignment, remove_usearch_logs=remove_usearch_logs, working_dir=output_dir, HALT_EXEC=HALT_EXEC) intermediate_files.append(clusters_file) except ApplicationError: raise ApplicationError('Error running usearch. Possible causes are ' 'unsupported version (current supported version is usearch ' + 'v5.2.236) is installed or improperly formatted input file was ' + 'provided') except ApplicationNotFoundError: remove_files(files_to_remove) raise ApplicationNotFoundError('usearch not found, is it properly ' + 'installed?') # Get dict of clusters, list of failures # Set OTU ID field to 9 for the case of closed reference OTU picking if suppress_new_clusters: otu_id_field = 9 else: otu_id_field = 1 clusters, failures = clusters_from_blast_uc_file(open(clusters_file, "U"), otu_id_field) # Remove temp files unless user specifies output filepath if not save_intermediate_files: remove_files(intermediate_files) return clusters, failures
[ "def", "usearch_qf", "(", "fasta_filepath", ",", "refseqs_fp", "=", "None", ",", "output_dir", "=", "None", ",", "percent_id", "=", "0.97", ",", "percent_id_err", "=", "0.97", ",", "minsize", "=", "4", ",", "abundance_skew", "=", "2.0", ",", "db_filepath", "=", "None", ",", "rev", "=", "False", ",", "label_prefix", "=", "\"\"", ",", "label_suffix", "=", "\"\"", ",", "retain_label_as_comment", "=", "False", ",", "count_start", "=", "0", ",", "perc_id_blast", "=", "0.97", ",", "save_intermediate_files", "=", "False", ",", "HALT_EXEC", "=", "False", ",", "global_alignment", "=", "True", ",", "sizein", "=", "True", ",", "sizeout", "=", "True", ",", "w", "=", "64", ",", "slots", "=", "16769023", ",", "maxrejects", "=", "64", ",", "minlen", "=", "64", ",", "de_novo_chimera_detection", "=", "True", ",", "derep_fullseq", "=", "False", ",", "reference_chimera_detection", "=", "True", ",", "cluster_size_filtering", "=", "True", ",", "remove_usearch_logs", "=", "False", ",", "usersort", "=", "True", ",", "suppress_new_clusters", "=", "False", ",", "chimeras_retention", "=", "\"union\"", ",", "verbose", "=", "False", ")", ":", "# Save a list of intermediate filepaths in case they are to be removed.", "intermediate_files", "=", "[", "]", "# Need absolute paths to avoid problems with app controller", "if", "output_dir", ":", "output_dir", "=", "abspath", "(", "output_dir", ")", "+", "'/'", "fasta_filepath", "=", "abspath", "(", "fasta_filepath", ")", "try", ":", "if", "verbose", ":", "print", "\"Sorting sequences by length...\"", "# Sort seqs by length", "app_result", ",", "output_filepath_len_sorted", "=", "usearch_fasta_sort_from_filepath", "(", "fasta_filepath", ",", "output_filepath", "=", "join", "(", "output_dir", ",", "'len_sorted.fasta'", ")", ",", "save_intermediate_files", "=", "save_intermediate_files", ",", "remove_usearch_logs", "=", "remove_usearch_logs", ",", "working_dir", "=", "output_dir", ",", "HALT_EXEC", "=", "HALT_EXEC", ")", "intermediate_files", ".", "append", "(", "output_filepath_len_sorted", ")", "if", "verbose", ":", "print", "\"Dereplicating sequences...\"", "# Dereplicate sequences", "app_result", ",", "output_filepath_dereplicated", "=", "usearch_dereplicate_exact_subseqs", "(", "output_filepath_len_sorted", ",", "output_filepath", "=", "join", "(", "output_dir", ",", "'dereplicated_seqs.fasta'", ")", ",", "minlen", "=", "minlen", ",", "w", "=", "w", ",", "slots", "=", "slots", ",", "sizeout", "=", "sizeout", ",", "maxrejects", "=", "maxrejects", ",", "save_intermediate_files", "=", "save_intermediate_files", ",", "remove_usearch_logs", "=", "remove_usearch_logs", ",", "working_dir", "=", "output_dir", ",", "HALT_EXEC", "=", "HALT_EXEC", ")", "intermediate_files", ".", "append", "(", "output_filepath_dereplicated", ")", "if", "verbose", ":", "print", "\"Sorting by abundance...\"", "# Sort by abundance, initially no filter based on seqs/otu", "app_result", ",", "output_fp", "=", "usearch_sort_by_abundance", "(", "output_filepath_dereplicated", ",", "output_filepath", "=", "join", "(", "output_dir", ",", "'abundance_sorted.fasta'", ")", ",", "usersort", "=", "True", ",", "sizein", "=", "sizein", ",", "sizeout", "=", "sizeout", ",", "minsize", "=", "0", ",", "remove_usearch_logs", "=", "remove_usearch_logs", ",", "working_dir", "=", "output_dir", ",", "HALT_EXEC", "=", "HALT_EXEC", ")", "intermediate_files", ".", "append", "(", "output_fp", ")", "if", "verbose", ":", "print", "\"Clustering sequences for error correction...\"", "# Create .uc file of clusters file, to 
identify original sequences", "# later", "output_uc_filepath", "=", "output_dir", "+", "'err_corrected_clusters.uc'", "app_result", ",", "error_clustered_output_fp", "=", "usearch_cluster_error_correction", "(", "output_fp", ",", "output_filepath", "=", "join", "(", "output_dir", ",", "'clustered_error_corrected.fasta'", ")", ",", "output_uc_filepath", "=", "output_uc_filepath", ",", "usersort", "=", "True", ",", "percent_id_err", "=", "percent_id_err", ",", "sizein", "=", "sizein", ",", "sizeout", "=", "sizeout", ",", "w", "=", "w", ",", "slots", "=", "slots", ",", "maxrejects", "=", "maxrejects", ",", "remove_usearch_logs", "=", "remove_usearch_logs", ",", "save_intermediate_files", "=", "save_intermediate_files", ",", "working_dir", "=", "output_dir", ",", "HALT_EXEC", "=", "HALT_EXEC", ")", "intermediate_files", ".", "append", "(", "error_clustered_output_fp", ")", "intermediate_files", ".", "append", "(", "output_uc_filepath", ")", "# Series of conditional tests, using generic 'output_fp' name so the", "# conditional filtering, if any/all are selected, do not matter.", "if", "de_novo_chimera_detection", ":", "if", "verbose", ":", "print", "\"Performing de novo chimera detection...\"", "app_result", ",", "output_fp_de_novo_nonchimeras", "=", "usearch_chimera_filter_de_novo", "(", "error_clustered_output_fp", ",", "abundance_skew", "=", "abundance_skew", ",", "output_chimera_filepath", "=", "join", "(", "output_dir", ",", "'de_novo_chimeras.fasta'", ")", ",", "output_non_chimera_filepath", "=", "join", "(", "output_dir", ",", "'de_novo_non_chimeras.fasta'", ")", ",", "usersort", "=", "True", ",", "save_intermediate_files", "=", "save_intermediate_files", ",", "remove_usearch_logs", "=", "remove_usearch_logs", ",", "working_dir", "=", "output_dir", ",", "HALT_EXEC", "=", "HALT_EXEC", ")", "intermediate_files", ".", "append", "(", "output_fp_de_novo_nonchimeras", ")", "output_fp", "=", "output_fp_de_novo_nonchimeras", "if", "reference_chimera_detection", ":", "if", "verbose", ":", "print", "\"Performing reference based chimera detection...\"", "app_result", ",", "output_fp_ref_nonchimeras", "=", "usearch_chimera_filter_ref_based", "(", "error_clustered_output_fp", ",", "db_filepath", "=", "db_filepath", ",", "output_chimera_filepath", "=", "join", "(", "output_dir", ",", "'reference_chimeras.fasta'", ")", ",", "output_non_chimera_filepath", "=", "join", "(", "output_dir", ",", "'reference_non_chimeras.fasta'", ")", ",", "usersort", "=", "True", ",", "save_intermediate_files", "=", "save_intermediate_files", ",", "rev", "=", "rev", ",", "remove_usearch_logs", "=", "remove_usearch_logs", ",", "working_dir", "=", "output_dir", ",", "HALT_EXEC", "=", "HALT_EXEC", ")", "intermediate_files", ".", "append", "(", "output_fp_ref_nonchimeras", ")", "output_fp", "=", "output_fp_ref_nonchimeras", "# get intersection or union if both ref and de novo chimera detection", "if", "de_novo_chimera_detection", "and", "reference_chimera_detection", ":", "if", "verbose", ":", "print", "\"Finding %s of non-chimeras...\"", "%", "chimeras_retention", "output_fp", "=", "get_retained_chimeras", "(", "output_fp_de_novo_nonchimeras", ",", "output_fp_ref_nonchimeras", ",", "output_combined_fp", "=", "join", "(", "output_dir", ",", "'combined_non_chimeras.fasta'", ")", ",", "chimeras_retention", "=", "chimeras_retention", ")", "intermediate_files", ".", "append", "(", "output_fp", ")", "if", "cluster_size_filtering", ":", "# Test for empty filepath following filters, raise error if all seqs", "# have been removed", 
"if", "verbose", ":", "print", "\"Filtering by cluster size...\"", "# chimera detection was not performed, use output file of step 4 as input", "# to filtering by cluster size", "if", "not", "(", "reference_chimera_detection", "and", "de_novo_chimera_detection", ")", ":", "output_fp", "=", "error_clustered_output_fp", "app_result", ",", "output_fp", "=", "usearch_sort_by_abundance", "(", "output_fp", ",", "output_filepath", "=", "join", "(", "output_dir", ",", "'abundance_sorted_minsize_'", "+", "str", "(", "minsize", ")", "+", "'.fasta'", ")", ",", "minsize", "=", "minsize", ",", "sizein", "=", "sizein", ",", "sizeout", "=", "sizeout", ",", "remove_usearch_logs", "=", "remove_usearch_logs", ",", "working_dir", "=", "output_dir", ",", "HALT_EXEC", "=", "HALT_EXEC", ")", "intermediate_files", ".", "append", "(", "output_fp", ")", "# cluster seqs", "# Should we add in option to use alternative OTU picking here?", "# Seems like it will be a bit of a mess...maybe after we determine", "# if usearch_qf should become standard.", "if", "refseqs_fp", ":", "if", "verbose", ":", "print", "\"Clustering against reference sequences...\"", "app_result", ",", "output_filepath", "=", "usearch_cluster_seqs_ref", "(", "output_fp", ",", "output_filepath", "=", "join", "(", "output_dir", ",", "'ref_clustered_seqs.uc'", ")", ",", "percent_id", "=", "percent_id", ",", "sizein", "=", "sizein", ",", "sizeout", "=", "sizeout", ",", "w", "=", "w", ",", "slots", "=", "slots", ",", "maxrejects", "=", "maxrejects", ",", "save_intermediate_files", "=", "save_intermediate_files", ",", "remove_usearch_logs", "=", "remove_usearch_logs", ",", "suppress_new_clusters", "=", "suppress_new_clusters", ",", "refseqs_fp", "=", "refseqs_fp", ",", "output_dir", "=", "output_dir", ",", "working_dir", "=", "output_dir", ",", "rev", "=", "rev", ",", "HALT_EXEC", "=", "HALT_EXEC", ")", "else", ":", "if", "verbose", ":", "print", "\"De novo clustering sequences...\"", "app_result", ",", "output_filepath", "=", "usearch_cluster_seqs", "(", "output_fp", ",", "output_filepath", "=", "join", "(", "output_dir", ",", "'clustered_seqs.fasta'", ")", ",", "percent_id", "=", "percent_id", ",", "sizein", "=", "sizein", ",", "sizeout", "=", "sizeout", ",", "w", "=", "w", ",", "slots", "=", "slots", ",", "maxrejects", "=", "maxrejects", ",", "save_intermediate_files", "=", "save_intermediate_files", ",", "remove_usearch_logs", "=", "remove_usearch_logs", ",", "working_dir", "=", "output_dir", ",", "HALT_EXEC", "=", "HALT_EXEC", ")", "intermediate_files", ".", "append", "(", "output_filepath", ")", "# Enumerate the OTUs in the clusters", "if", "not", "suppress_new_clusters", ":", "if", "verbose", ":", "print", "\"Enumerating OTUs...\"", "output_filepath", "=", "enumerate_otus", "(", "output_filepath", ",", "output_filepath", "=", "join", "(", "output_dir", ",", "'enumerated_otus.fasta'", ")", ",", "label_prefix", "=", "label_prefix", ",", "label_suffix", "=", "label_suffix", ",", "count_start", "=", "count_start", ",", "retain_label_as_comment", "=", "retain_label_as_comment", ")", "intermediate_files", ".", "append", "(", "output_filepath", ")", "# Get original sequence label identities", "if", "verbose", ":", "print", "\"Assigning sequences to clusters...\"", "app_result", ",", "clusters_file", "=", "assign_reads_to_otus", "(", "fasta_filepath", ",", "filtered_fasta", "=", "output_filepath", ",", "output_filepath", "=", "join", "(", "output_dir", ",", "'assign_reads_to_otus.uc'", ")", ",", "perc_id_blast", "=", "percent_id", ",", "global_alignment", 
"=", "global_alignment", ",", "remove_usearch_logs", "=", "remove_usearch_logs", ",", "working_dir", "=", "output_dir", ",", "HALT_EXEC", "=", "HALT_EXEC", ")", "intermediate_files", ".", "append", "(", "clusters_file", ")", "except", "ApplicationError", ":", "raise", "ApplicationError", "(", "'Error running usearch. Possible causes are '", "'unsupported version (current supported version is usearch '", "+", "'v5.2.236) is installed or improperly formatted input file was '", "+", "'provided'", ")", "except", "ApplicationNotFoundError", ":", "remove_files", "(", "files_to_remove", ")", "raise", "ApplicationNotFoundError", "(", "'usearch not found, is it properly '", "+", "'installed?'", ")", "# Get dict of clusters, list of failures", "# Set OTU ID field to 9 for the case of closed reference OTU picking", "if", "suppress_new_clusters", ":", "otu_id_field", "=", "9", "else", ":", "otu_id_field", "=", "1", "clusters", ",", "failures", "=", "clusters_from_blast_uc_file", "(", "open", "(", "clusters_file", ",", "\"U\"", ")", ",", "otu_id_field", ")", "# Remove temp files unless user specifies output filepath", "if", "not", "save_intermediate_files", ":", "remove_files", "(", "intermediate_files", ")", "return", "clusters", ",", "failures" ]
Main convenience wrapper for using usearch to filter/cluster seqs The complete 'usearch_qf' process is a multistep process with many calls to usearch with various parameters. It is likely to change from the original implementation. A lot. fasta_filepath = fasta filepath to filtering/clustering (e.g., output seqs.fna file from split_libraries.py) refseqs_fp = fasta filepath for ref-based otu picking. output_dir = directory to store the otu mapping file, as well logs and the intermediate files created if save_intermediate_files is True. percent_ID = percent ID for clustering sequences. percent_ID_err = percent ID for filtering out chimeras minsize = Minimum size of cluster for retention after chimera removal. abundance_skew = threshold setting for chimera removal with de novo chimera detection. db_filepath = filepath of reference fasta sequence set for ref based chimera detection. rev = search plus and minus strands of sequences, used in ref based chimera detection. label_prefix = optional prefix added to filtered fasta file. label_suffix = optional suffix added to filtered fasta file. retain_label_as_comment = option to add usearch generated label to enumerated fasta labels. count_start = integer to begin counting at for sequence enumeration. perc_id_blast = percent identity setting for using blast algorithm to assign original sequence labels to filtered fasta. global_alignment = Setting for assignment of original seq labels to filtered seqs. sizein = not defined in usearch helpstring sizeout = not defined in usearch helpstring w = Word length for U-sorting slots = Size of compressed index table. Should be prime, e.g. 40000003. Should also specify --w, typical is --w 16 or --w 32. maxrejects = Max rejected targets, 0=ignore, default 32. save_intermediate_files = retain all the intermediate files created during this process. minlen = (not specified in usearch helpstring), but seems like a good bet that this refers to the minimum length of the sequences for dereplication. HALT_EXEC = used to debug app controller problems. de_novo_chimera_detection = If True, will detect chimeras de novo reference_chimera_detection = If True, will detect chimeras ref based cluster_size_filtering = If True, will filter OTUs according to seq counts. remove_usearch_logs = If True, will not call the --log function for each usearch call. usersort = Used for specifying custom sorting (i.e., non-length based sorting) with usearch/uclust. suppress_new_clusters = with reference based OTU picking, if enabled, will prevent new clusters that do not match the reference from being clustered. chimeras_retention = accepts either 'intersection' or 'union'. Will test for chimeras against the full input error clustered sequence set, and retain sequences flagged as non-chimeras by either (union) or only those flagged as non-chimeras by both (intersection).
[ "Main", "convenience", "wrapper", "for", "using", "usearch", "to", "filter", "/", "cluster", "seqs" ]
python
train
gesellkammer/sndfileio
sndfileio/sndfileio.py
https://github.com/gesellkammer/sndfileio/blob/8e2b264cadb652f09d2e775f54090c0a3cb2ced2/sndfileio/sndfileio.py#L298-L322
def asmono(samples:np.ndarray, channel:Union[int, str]=0) -> np.ndarray: """ convert samples to mono if they are not mono already. The returned array will always have the shape (numframes,) channel: the channel number to use, or 'mix' to mix-down all channels """ if numchannels(samples) == 1: # it could be [1,2,3,4,...], or [[1], [2], [3], [4], ...] if isinstance(samples[0], float): return samples elif isinstance(samples[0], np.ndarray): return np.reshape(samples, (len(samples),)) else: raise TypeError("Samples should be numeric, found: %s" % str(type(samples[0]))) if isinstance(channel, int): return samples[:, channel] elif channel == 'mix': return _mix(samples, scale_by_numchannels=True) else: raise ValueError("channel has to be an integer indicating a channel," " or 'mix' to mix down all channels")
[ "def", "asmono", "(", "samples", ":", "np", ".", "ndarray", ",", "channel", ":", "Union", "[", "int", ",", "str", "]", "=", "0", ")", "->", "np", ".", "ndarray", ":", "if", "numchannels", "(", "samples", ")", "==", "1", ":", "# it could be [1,2,3,4,...], or [[1], [2], [3], [4], ...]", "if", "isinstance", "(", "samples", "[", "0", "]", ",", "float", ")", ":", "return", "samples", "elif", "isinstance", "(", "samples", "[", "0", "]", ",", "np", ".", "dnarray", ")", ":", "return", "np", ".", "reshape", "(", "samples", ",", "(", "len", "(", "samples", ")", ",", ")", ")", "else", ":", "raise", "TypeError", "(", "\"Samples should be numeric, found: %s\"", "%", "str", "(", "type", "(", "samples", "[", "0", "]", ")", ")", ")", "if", "isinstance", "(", "channel", ",", "int", ")", ":", "return", "samples", "[", ":", ",", "channel", "]", "elif", "channel", "==", "'mix'", ":", "return", "_mix", "(", "samples", ",", "scale_by_numchannels", "=", "True", ")", "else", ":", "raise", "ValueError", "(", "\"channel has to be an integer indicating a channel,\"", "\" or 'mix' to mix down all channels\"", ")" ]
convert samples to mono if they are not mono already. The returned array will always have the shape (numframes,) channel: the channel number to use, or 'mix' to mix-down all channels
[ "convert", "samples", "to", "mono", "if", "they", "are", "not", "mono", "already", "." ]
python
train
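A short usage sketch for asmono. The import below follows the module path shown in the record (sndfileio/sndfileio.py) and is assumed to be importable directly; the 'mix' branch relies on the private _mix helper visible in the code above.

import numpy as np
from sndfileio.sndfileio import asmono  # module path taken from this record; assumed importable

stereo = np.zeros((44100, 2))         # one second of silent 2-channel audio
left = asmono(stereo, channel=0)      # keep only the first channel
mono = asmono(stereo, channel='mix')  # average all channels down
assert left.shape == (44100,)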
udragon/pybrctl
pybrctl/pybrctl.py
https://github.com/udragon/pybrctl/blob/9e834a605b57bd969a81c56a886dee81f7d715c1/pybrctl/pybrctl.py#L145-L150
def _runshell(cmd, exception): """ Run a shell command. If it fails, raise a proper exception. """ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) if p.wait() != 0: raise BridgeException(exception) return p
[ "def", "_runshell", "(", "cmd", ",", "exception", ")", ":", "p", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "if", "p", ".", "wait", "(", ")", "!=", "0", ":", "raise", "BridgeException", "(", "exception", ")", "return", "p" ]
Run a shell command. If it fails, raise a proper exception.
[ "Run", "a", "shell", "command", ".", "if", "fails", "raise", "a", "proper", "exception", "." ]
python
train
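Because _runshell is a private helper, the sketch below restates the same pattern as a standalone function so it can be run outside pybrctl; BridgeException is redefined locally for that purpose.

import subprocess

class BridgeException(Exception):
    """Local stand-in for pybrctl's exception class."""

def runshell(cmd, message):
    # Same shape as _runshell above: run the command, swallow its output,
    # and raise a descriptive error on a non-zero exit status.
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if p.wait() != 0:
        raise BridgeException(message)
    return p

runshell(["true"], "`true` should never fail")    # returns the Popen object
# runshell(["false"], "expected failure")         # would raise BridgeException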
RedFantom/ttkwidgets
ttkwidgets/color/colorpicker.py
https://github.com/RedFantom/ttkwidgets/blob/02150322060f867b6e59a175522ef84b09168019/ttkwidgets/color/colorpicker.py#L394-L408
def _change_sel_color(self, event): """Respond to motion of the color selection cross.""" (r, g, b), (h, s, v), color = self.square.get() self.red.set(r) self.green.set(g) self.blue.set(b) self.saturation.set(s) self.value.set(v) self.hexa.delete(0, "end") self.hexa.insert(0, color.upper()) if self.alpha_channel: self.alphabar.set_color((r, g, b)) self.hexa.insert('end', ("%2.2x" % self.alpha.get()).upper()) self._update_preview()
[ "def", "_change_sel_color", "(", "self", ",", "event", ")", ":", "(", "r", ",", "g", ",", "b", ")", ",", "(", "h", ",", "s", ",", "v", ")", ",", "color", "=", "self", ".", "square", ".", "get", "(", ")", "self", ".", "red", ".", "set", "(", "r", ")", "self", ".", "green", ".", "set", "(", "g", ")", "self", ".", "blue", ".", "set", "(", "b", ")", "self", ".", "saturation", ".", "set", "(", "s", ")", "self", ".", "value", ".", "set", "(", "v", ")", "self", ".", "hexa", ".", "delete", "(", "0", ",", "\"end\"", ")", "self", ".", "hexa", ".", "insert", "(", "0", ",", "color", ".", "upper", "(", ")", ")", "if", "self", ".", "alpha_channel", ":", "self", ".", "alphabar", ".", "set_color", "(", "(", "r", ",", "g", ",", "b", ")", ")", "self", ".", "hexa", ".", "insert", "(", "'end'", ",", "(", "\"%2.2x\"", "%", "self", ".", "alpha", ".", "get", "(", ")", ")", ".", "upper", "(", ")", ")", "self", ".", "_update_preview", "(", ")" ]
Respond to motion of the color selection cross.
[ "Respond", "to", "motion", "of", "the", "color", "selection", "cross", "." ]
python
train
etingof/pyasn1
pyasn1/type/univ.py
https://github.com/etingof/pyasn1/blob/25cf116ef8d11bb0e08454c0f3635c9f4002c2d6/pyasn1/type/univ.py#L1867-L1897
def isValue(self): """Indicate that |ASN.1| object represents ASN.1 value. If *isValue* is `False` then this object represents just ASN.1 schema. If *isValue* is `True` then, in addition to its ASN.1 schema features, this object can also be used like a Python built-in object (e.g. `int`, `str`, `dict` etc.). Returns ------- : :class:`bool` :class:`False` if object represents just ASN.1 schema. :class:`True` if object represents ASN.1 schema and can be used as a normal value. Note ---- There is an important distinction between PyASN1 schema and value objects. The PyASN1 schema objects can only participate in ASN.1 schema-related operations (e.g. defining or testing the structure of the data). Most obvious uses of ASN.1 schema is to guide serialisation codecs whilst encoding/decoding serialised ASN.1 contents. The PyASN1 value objects can **additionally** participate in many operations involving regular Python objects (e.g. arithmetic, comprehension etc). """ for componentValue in self._componentValues: if componentValue is noValue or not componentValue.isValue: return False return True
[ "def", "isValue", "(", "self", ")", ":", "for", "componentValue", "in", "self", ".", "_componentValues", ":", "if", "componentValue", "is", "noValue", "or", "not", "componentValue", ".", "isValue", ":", "return", "False", "return", "True" ]
Indicate that |ASN.1| object represents ASN.1 value. If *isValue* is `False` then this object represents just ASN.1 schema. If *isValue* is `True` then, in addition to its ASN.1 schema features, this object can also be used like a Python built-in object (e.g. `int`, `str`, `dict` etc.). Returns ------- : :class:`bool` :class:`False` if object represents just ASN.1 schema. :class:`True` if object represents ASN.1 schema and can be used as a normal value. Note ---- There is an important distinction between PyASN1 schema and value objects. The PyASN1 schema objects can only participate in ASN.1 schema-related operations (e.g. defining or testing the structure of the data). Most obvious uses of ASN.1 schema is to guide serialisation codecs whilst encoding/decoding serialised ASN.1 contents. The PyASN1 value objects can **additionally** participate in many operations involving regular Python objects (e.g. arithmetic, comprehension etc).
[ "Indicate", "that", "|ASN", ".", "1|", "object", "represents", "ASN", ".", "1", "value", "." ]
python
train
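The schema-versus-value distinction described above can be seen with any pyasn1 type. The method shown in this record is the variant for constructed types (it checks every component), but the simple-type case below illustrates the same contract using pyasn1's public univ module.

from pyasn1.type import univ

schema_only = univ.Integer()     # pure ASN.1 schema, no value attached
with_value = univ.Integer(42)    # schema plus a value

assert not schema_only.isValue   # schema objects cannot be used as values
assert with_value.isValue
assert int(with_value) == 42     # value objects behave like Python built-ins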
nilp0inter/cpe
cpe/cpelang2_3.py
https://github.com/nilp0inter/cpe/blob/670d947472a7652af5149324977b50f9a7af9bcf/cpe/cpelang2_3.py#L145-L166
def _unbind(cls, boundname): """ Unbinds a bound form to a WFN. :param string boundname: CPE name :returns: WFN object associated with boundname. :rtype: CPE2_3_WFN """ try: fs = CPE2_3_FS(boundname) except: # CPE name is not formatted string try: uri = CPE2_3_URI(boundname) except: # CPE name is not URI but WFN return CPE2_3_WFN(boundname) else: return CPE2_3_WFN(uri.as_wfn()) else: return CPE2_3_WFN(fs.as_wfn())
[ "def", "_unbind", "(", "cls", ",", "boundname", ")", ":", "try", ":", "fs", "=", "CPE2_3_FS", "(", "boundname", ")", "except", ":", "# CPE name is not formatted string", "try", ":", "uri", "=", "CPE2_3_URI", "(", "boundname", ")", "except", ":", "# CPE name is not URI but WFN", "return", "CPE2_3_WFN", "(", "boundname", ")", "else", ":", "return", "CPE2_3_WFN", "(", "uri", ".", "as_wfn", "(", ")", ")", "else", ":", "return", "CPE2_3_WFN", "(", "fs", ".", "as_wfn", "(", ")", ")" ]
Unbinds a bound form to a WFN. :param string boundname: CPE name :returns: WFN object associated with boundname. :rtype: CPE2_3_WFN
[ "Unbinds", "a", "bound", "form", "to", "a", "WFN", "." ]
python
train
mar10/wsgidav
wsgidav/samples/mysql_dav_provider.py
https://github.com/mar10/wsgidav/blob/cec0d84222fc24bea01be1cea91729001963f172/wsgidav/samples/mysql_dav_provider.py#L295-L302
def set_property_value(self, name, value, dry_run=False): """Set or remove property value. See DAVResource.set_property_value() """ raise DAVError( HTTP_FORBIDDEN, err_condition=PRECONDITION_CODE_ProtectedProperty )
[ "def", "set_property_value", "(", "self", ",", "name", ",", "value", ",", "dry_run", "=", "False", ")", ":", "raise", "DAVError", "(", "HTTP_FORBIDDEN", ",", "err_condition", "=", "PRECONDITION_CODE_ProtectedProperty", ")" ]
Set or remove property value. See DAVResource.set_property_value()
[ "Set", "or", "remove", "property", "value", "." ]
python
valid
Kronuz/pyScss
scss/compiler.py
https://github.com/Kronuz/pyScss/blob/fb32b317f6e2b4b4aad2b86a74844658ac4aa11e/scss/compiler.py#L1295-L1314
def create_css(self, rules): """ Generate the final CSS string """ style = rules[0].legacy_compiler_options.get( 'style', self.compiler.output_style) debug_info = self.compiler.generate_source_map if style == 'legacy': sc, sp, tb, nst, srnl, nl, rnl, lnl, dbg = True, ' ', ' ', False, '', '\n', '\n', '\n', debug_info elif style == 'compressed': sc, sp, tb, nst, srnl, nl, rnl, lnl, dbg = False, '', '', False, '', '', '', '', False elif style == 'compact': sc, sp, tb, nst, srnl, nl, rnl, lnl, dbg = True, ' ', '', False, '\n', ' ', '\n', ' ', debug_info elif style == 'expanded': sc, sp, tb, nst, srnl, nl, rnl, lnl, dbg = True, ' ', ' ', False, '\n', '\n', '\n', '\n', debug_info else: # if style == 'nested': sc, sp, tb, nst, srnl, nl, rnl, lnl, dbg = True, ' ', ' ', True, '\n', '\n', '\n', ' ', debug_info return self._create_css(rules, sc, sp, tb, nst, srnl, nl, rnl, lnl, dbg)
[ "def", "create_css", "(", "self", ",", "rules", ")", ":", "style", "=", "rules", "[", "0", "]", ".", "legacy_compiler_options", ".", "get", "(", "'style'", ",", "self", ".", "compiler", ".", "output_style", ")", "debug_info", "=", "self", ".", "compiler", ".", "generate_source_map", "if", "style", "==", "'legacy'", ":", "sc", ",", "sp", ",", "tb", ",", "nst", ",", "srnl", ",", "nl", ",", "rnl", ",", "lnl", ",", "dbg", "=", "True", ",", "' '", ",", "' '", ",", "False", ",", "''", ",", "'\\n'", ",", "'\\n'", ",", "'\\n'", ",", "debug_info", "elif", "style", "==", "'compressed'", ":", "sc", ",", "sp", ",", "tb", ",", "nst", ",", "srnl", ",", "nl", ",", "rnl", ",", "lnl", ",", "dbg", "=", "False", ",", "''", ",", "''", ",", "False", ",", "''", ",", "''", ",", "''", ",", "''", ",", "False", "elif", "style", "==", "'compact'", ":", "sc", ",", "sp", ",", "tb", ",", "nst", ",", "srnl", ",", "nl", ",", "rnl", ",", "lnl", ",", "dbg", "=", "True", ",", "' '", ",", "''", ",", "False", ",", "'\\n'", ",", "' '", ",", "'\\n'", ",", "' '", ",", "debug_info", "elif", "style", "==", "'expanded'", ":", "sc", ",", "sp", ",", "tb", ",", "nst", ",", "srnl", ",", "nl", ",", "rnl", ",", "lnl", ",", "dbg", "=", "True", ",", "' '", ",", "' '", ",", "False", ",", "'\\n'", ",", "'\\n'", ",", "'\\n'", ",", "'\\n'", ",", "debug_info", "else", ":", "# if style == 'nested':", "sc", ",", "sp", ",", "tb", ",", "nst", ",", "srnl", ",", "nl", ",", "rnl", ",", "lnl", ",", "dbg", "=", "True", ",", "' '", ",", "' '", ",", "True", ",", "'\\n'", ",", "'\\n'", ",", "'\\n'", ",", "' '", ",", "debug_info", "return", "self", ".", "_create_css", "(", "rules", ",", "sc", ",", "sp", ",", "tb", ",", "nst", ",", "srnl", ",", "nl", ",", "rnl", ",", "lnl", ",", "dbg", ")" ]
Generate the final CSS string
[ "Generate", "the", "final", "CSS", "string" ]
python
train
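The nine single-letter locals make the style table above hard to scan. The sketch below spells out the 'compact' row as a dict; the values are copied from the code, but the long names are my own guesses at what the abbreviations stand for, not anything documented in this record.

# Values from the 'compact' branch above; expanded key names are inferred, not authoritative.
compact_style = {
    "semicolons": True,         # sc
    "space_after_colon": " ",   # sp
    "indent_unit": "",          # tb
    "nested_output": False,     # nst
    "rule_separator": "\n",     # srnl
    "decl_separator": " ",      # nl
    "rule_newline": "\n",       # rnl
    "last_newline": " ",        # lnl
    "debug_info": True,         # dbg, follows compiler.generate_source_map in the code
}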
AmesCornish/buttersink
buttersink/BestDiffs.py
https://github.com/AmesCornish/buttersink/blob/5cc37e30d9f8071fcf3497dca8b8a91b910321ea/buttersink/BestDiffs.py#L155-L270
def _analyzeDontMeasure(self, chunkSize, willMeasureLater, *sinks): """ Figure out the best diffs to use to reach all our required volumes. """ nodes = [None] height = 1 def sortKey(node): if node is None: return None return (node.intermediate, self._totalSize(node)) while len(nodes) > 0: logger.debug("Analyzing %d nodes for height %d...", len(nodes), height) nodes.sort(key=sortKey) for fromNode in nodes: if self._height(fromNode) >= height: continue if fromNode is not None and fromNode.diffSize is None: continue fromVol = fromNode.volume if fromNode else None logger.debug("Following edges from %s", fromVol) for sink in sinks: # logger.debug( # "Listing edges in %s", # sink # ) for edge in sink.getEdges(fromVol): toVol = edge.toVol # logger.debug("Edge: %s", edge) # Skip any edges already in the destination if sink != self.dest and self.dest.hasEdge(edge): continue if toVol in self.nodes: toNode = self.nodes[toVol] # Don't transfer any edges we won't need in the destination # elif sink != self.dest: # logger.debug("Won't transfer unnecessary %s", edge) # continue else: toNode = _Node(toVol, True) self.nodes[toVol] = toNode logger.debug("Considering %s", edge) edgeSize = edge.size if edge.sizeIsEstimated: if willMeasureLater: # Slight preference for accurate sizes edgeSize *= 1.2 else: # Large preference for accurate sizes edgeSize *= 2 newCost = self._cost(sink, edgeSize, fromNode, height) if toNode.diff is None: oldCost = None else: oldCost = self._cost( toNode.sink, toNode.diffSize, self._getNode(toNode.previous), self._height(toNode) ) # Don't use a more-expensive path if oldCost is not None and oldCost <= newCost: continue # Don't create circular paths if self._wouldLoop(fromVol, toVol): # logger.debug("Ignoring looping edge: %s", toVol.display(sink)) continue # if measureSize and sink != self.dest and edge.sizeIsEstimated: # sink.measureSize(edge, chunkSize) # newCost = self._cost(sink, edge.size, fromSize, height) # if oldCost is not None and oldCost <= newCost: # continue logger.debug( "Replacing edge (%s -> %s cost)\n%s", humanize(oldCost), humanize(newCost), toNode.display(sink) ) # logger.debug("Cost elements: %s", dict( # sink=str(sink), # edgeSize=humanize(edgeSize), # fromSize=humanize(fromSize), # height=height, # )) toNode.diff = edge nodes = [node for node in self.nodes.values() if self._height(node) == height] height += 1 self._prune() for node in self.nodes.values(): node.height = self._height(node) if node.diff is None: logger.error( "No source diffs for %s", node.volume.display(sinks[-1], detail="line"), )
[ "def", "_analyzeDontMeasure", "(", "self", ",", "chunkSize", ",", "willMeasureLater", ",", "*", "sinks", ")", ":", "nodes", "=", "[", "None", "]", "height", "=", "1", "def", "sortKey", "(", "node", ")", ":", "if", "node", "is", "None", ":", "return", "None", "return", "(", "node", ".", "intermediate", ",", "self", ".", "_totalSize", "(", "node", ")", ")", "while", "len", "(", "nodes", ")", ">", "0", ":", "logger", ".", "debug", "(", "\"Analyzing %d nodes for height %d...\"", ",", "len", "(", "nodes", ")", ",", "height", ")", "nodes", ".", "sort", "(", "key", "=", "sortKey", ")", "for", "fromNode", "in", "nodes", ":", "if", "self", ".", "_height", "(", "fromNode", ")", ">=", "height", ":", "continue", "if", "fromNode", "is", "not", "None", "and", "fromNode", ".", "diffSize", "is", "None", ":", "continue", "fromVol", "=", "fromNode", ".", "volume", "if", "fromNode", "else", "None", "logger", ".", "debug", "(", "\"Following edges from %s\"", ",", "fromVol", ")", "for", "sink", "in", "sinks", ":", "# logger.debug(", "# \"Listing edges in %s\",", "# sink", "# )", "for", "edge", "in", "sink", ".", "getEdges", "(", "fromVol", ")", ":", "toVol", "=", "edge", ".", "toVol", "# logger.debug(\"Edge: %s\", edge)", "# Skip any edges already in the destination", "if", "sink", "!=", "self", ".", "dest", "and", "self", ".", "dest", ".", "hasEdge", "(", "edge", ")", ":", "continue", "if", "toVol", "in", "self", ".", "nodes", ":", "toNode", "=", "self", ".", "nodes", "[", "toVol", "]", "# Don't transfer any edges we won't need in the destination", "# elif sink != self.dest:", "# logger.debug(\"Won't transfer unnecessary %s\", edge)", "# continue", "else", ":", "toNode", "=", "_Node", "(", "toVol", ",", "True", ")", "self", ".", "nodes", "[", "toVol", "]", "=", "toNode", "logger", ".", "debug", "(", "\"Considering %s\"", ",", "edge", ")", "edgeSize", "=", "edge", ".", "size", "if", "edge", ".", "sizeIsEstimated", ":", "if", "willMeasureLater", ":", "# Slight preference for accurate sizes", "edgeSize", "*=", "1.2", "else", ":", "# Large preference for accurate sizes", "edgeSize", "*=", "2", "newCost", "=", "self", ".", "_cost", "(", "sink", ",", "edgeSize", ",", "fromNode", ",", "height", ")", "if", "toNode", ".", "diff", "is", "None", ":", "oldCost", "=", "None", "else", ":", "oldCost", "=", "self", ".", "_cost", "(", "toNode", ".", "sink", ",", "toNode", ".", "diffSize", ",", "self", ".", "_getNode", "(", "toNode", ".", "previous", ")", ",", "self", ".", "_height", "(", "toNode", ")", ")", "# Don't use a more-expensive path", "if", "oldCost", "is", "not", "None", "and", "oldCost", "<=", "newCost", ":", "continue", "# Don't create circular paths", "if", "self", ".", "_wouldLoop", "(", "fromVol", ",", "toVol", ")", ":", "# logger.debug(\"Ignoring looping edge: %s\", toVol.display(sink))", "continue", "# if measureSize and sink != self.dest and edge.sizeIsEstimated:", "# sink.measureSize(edge, chunkSize)", "# newCost = self._cost(sink, edge.size, fromSize, height)", "# if oldCost is not None and oldCost <= newCost:", "# continue", "logger", ".", "debug", "(", "\"Replacing edge (%s -> %s cost)\\n%s\"", ",", "humanize", "(", "oldCost", ")", ",", "humanize", "(", "newCost", ")", ",", "toNode", ".", "display", "(", "sink", ")", ")", "# logger.debug(\"Cost elements: %s\", dict(", "# sink=str(sink),", "# edgeSize=humanize(edgeSize),", "# fromSize=humanize(fromSize),", "# height=height,", "# ))", "toNode", ".", "diff", "=", "edge", "nodes", "=", "[", "node", "for", "node", "in", "self", ".", "nodes", ".", "values", 
"(", ")", "if", "self", ".", "_height", "(", "node", ")", "==", "height", "]", "height", "+=", "1", "self", ".", "_prune", "(", ")", "for", "node", "in", "self", ".", "nodes", ".", "values", "(", ")", ":", "node", ".", "height", "=", "self", ".", "_height", "(", "node", ")", "if", "node", ".", "diff", "is", "None", ":", "logger", ".", "error", "(", "\"No source diffs for %s\"", ",", "node", ".", "volume", ".", "display", "(", "sinks", "[", "-", "1", "]", ",", "detail", "=", "\"line\"", ")", ",", ")" ]
Figure out the best diffs to use to reach all our required volumes.
[ "Figure", "out", "the", "best", "diffs", "to", "use", "to", "reach", "all", "our", "required", "volumes", "." ]
python
train
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/message.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/message.py#L407-L430
def update(collection_name, upsert, multi, spec, doc, safe, last_error_args, check_keys, opts): """Get an **update** message. """ options = 0 if upsert: options += 1 if multi: options += 2 data = _ZERO_32 data += bson._make_c_string(collection_name) data += struct.pack("<i", options) data += bson.BSON.encode(spec, False, opts) encoded = bson.BSON.encode(doc, check_keys, opts) data += encoded if safe: (_, update_message) = __pack_message(2001, data) (request_id, error_message, _) = __last_error(collection_name, last_error_args) return (request_id, update_message + error_message, len(encoded)) else: (request_id, update_message) = __pack_message(2001, data) return (request_id, update_message, len(encoded))
[ "def", "update", "(", "collection_name", ",", "upsert", ",", "multi", ",", "spec", ",", "doc", ",", "safe", ",", "last_error_args", ",", "check_keys", ",", "opts", ")", ":", "options", "=", "0", "if", "upsert", ":", "options", "+=", "1", "if", "multi", ":", "options", "+=", "2", "data", "=", "_ZERO_32", "data", "+=", "bson", ".", "_make_c_string", "(", "collection_name", ")", "data", "+=", "struct", ".", "pack", "(", "\"<i\"", ",", "options", ")", "data", "+=", "bson", ".", "BSON", ".", "encode", "(", "spec", ",", "False", ",", "opts", ")", "encoded", "=", "bson", ".", "BSON", ".", "encode", "(", "doc", ",", "check_keys", ",", "opts", ")", "data", "+=", "encoded", "if", "safe", ":", "(", "_", ",", "update_message", ")", "=", "__pack_message", "(", "2001", ",", "data", ")", "(", "request_id", ",", "error_message", ",", "_", ")", "=", "__last_error", "(", "collection_name", ",", "last_error_args", ")", "return", "(", "request_id", ",", "update_message", "+", "error_message", ",", "len", "(", "encoded", ")", ")", "else", ":", "(", "request_id", ",", "update_message", ")", "=", "__pack_message", "(", "2001", ",", "data", ")", "return", "(", "request_id", ",", "update_message", ",", "len", "(", "encoded", ")", ")" ]
Get an **update** message.
[ "Get", "an", "**", "update", "**", "message", "." ]
python
train
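The options value assembled at the top of update() is the flags field of the legacy OP_UPDATE wire message (opcode 2001, as passed to __pack_message): bit 0 requests an upsert and bit 1 a multi-document update. A minimal sketch of just that flags computation:

import struct

def op_update_flags(upsert, multi):
    options = 0
    if upsert:
        options += 1    # bit 0: Upsert
    if multi:
        options += 2    # bit 1: MultiUpdate
    return options

assert op_update_flags(upsert=True, multi=False) == 1
assert struct.pack("<i", op_update_flags(True, True)) == b"\x03\x00\x00\x00"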
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_vswitch.py#L1143-L1155
def vcenter_credentials_password(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") vcenter = ET.SubElement(config, "vcenter", xmlns="urn:brocade.com:mgmt:brocade-vswitch") id_key = ET.SubElement(vcenter, "id") id_key.text = kwargs.pop('id') credentials = ET.SubElement(vcenter, "credentials") password = ET.SubElement(credentials, "password") password.text = kwargs.pop('password') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "vcenter_credentials_password", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "vcenter", "=", "ET", ".", "SubElement", "(", "config", ",", "\"vcenter\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-vswitch\"", ")", "id_key", "=", "ET", ".", "SubElement", "(", "vcenter", ",", "\"id\"", ")", "id_key", ".", "text", "=", "kwargs", ".", "pop", "(", "'id'", ")", "credentials", "=", "ET", ".", "SubElement", "(", "vcenter", ",", "\"credentials\"", ")", "password", "=", "ET", ".", "SubElement", "(", "credentials", ",", "\"password\"", ")", "password", ".", "text", "=", "kwargs", ".", "pop", "(", "'password'", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
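Stripping away the NETCONF callback, the auto-generated method just builds a small XML document. The snippet below reproduces that payload with ElementTree and made-up values so the structure is visible; the id and password are examples, not anything from the record.

import xml.etree.ElementTree as ET

config = ET.Element("config")
vcenter = ET.SubElement(config, "vcenter",
                        xmlns="urn:brocade.com:mgmt:brocade-vswitch")
ET.SubElement(vcenter, "id").text = "vcenter1"          # example id
credentials = ET.SubElement(vcenter, "credentials")
ET.SubElement(credentials, "password").text = "s3cret"  # example password

print(ET.tostring(config).decode())
# Prints the serialized <config> element with the vcenter id and password nested inside.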
paylogic/pip-accel
pip_accel/req.py
https://github.com/paylogic/pip-accel/blob/ccad1b784927a322d996db593403b1d2d2e22666/pip_accel/req.py#L125-L136
def last_modified(self): """ The last modified time of the requirement's source distribution archive(s) (a number). The value of this property is based on the :attr:`related_archives` property. If no related archives are found the current time is reported. In the balance between not invalidating cached binary distributions enough and invalidating them too frequently, this property causes the latter to happen. """ mtimes = list(map(os.path.getmtime, self.related_archives)) return max(mtimes) if mtimes else time.time()
[ "def", "last_modified", "(", "self", ")", ":", "mtimes", "=", "list", "(", "map", "(", "os", ".", "path", ".", "getmtime", ",", "self", ".", "related_archives", ")", ")", "return", "max", "(", "mtimes", ")", "if", "mtimes", "else", "time", ".", "time", "(", ")" ]
The last modified time of the requirement's source distribution archive(s) (a number). The value of this property is based on the :attr:`related_archives` property. If no related archives are found the current time is reported. In the balance between not invalidating cached binary distributions enough and invalidating them too frequently, this property causes the latter to happen.
[ "The", "last", "modified", "time", "of", "the", "requirement", "s", "source", "distribution", "archive", "(", "s", ")", "(", "a", "number", ")", "." ]
python
train
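A stand-alone restatement of the property's logic, to make the fallback explicit: the newest mtime of any related archive wins, and when there are none the current time is returned, which always makes cached binaries look stale.

import os
import time

def last_modified(related_archives):
    # Mirrors the property above, minus the class and the related_archives lookup.
    mtimes = list(map(os.path.getmtime, related_archives))
    return max(mtimes) if mtimes else time.time()

print(last_modified([]))           # no archives -> "now", so caches are invalidated
print(last_modified([__file__]))   # otherwise the newest modification time wins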
garnaat/kappa
kappa/context.py
https://github.com/garnaat/kappa/blob/46709b6b790fead13294c2c18ffa5d63ea5133c7/kappa/context.py#L157-L178
def set_logger(self, logger_name, level=logging.INFO): """ Convenience function to quickly configure full debug output to go to the console. """ log = logging.getLogger(logger_name) log.setLevel(level) ch = logging.StreamHandler(None) ch.setLevel(level) # create formatter if level == logging.INFO: formatter = logging.Formatter(InfoFmtString) else: formatter = logging.Formatter(DebugFmtString) # add formatter to ch ch.setFormatter(formatter) # add ch to logger log.addHandler(ch)
[ "def", "set_logger", "(", "self", ",", "logger_name", ",", "level", "=", "logging", ".", "INFO", ")", ":", "log", "=", "logging", ".", "getLogger", "(", "logger_name", ")", "log", ".", "setLevel", "(", "level", ")", "ch", "=", "logging", ".", "StreamHandler", "(", "None", ")", "ch", ".", "setLevel", "(", "level", ")", "# create formatter", "if", "level", "==", "logging", ".", "INFO", ":", "formatter", "=", "logging", ".", "Formatter", "(", "InfoFmtString", ")", "else", ":", "formatter", "=", "logging", ".", "Formatter", "(", "DebugFmtString", ")", "# add formatter to ch", "ch", ".", "setFormatter", "(", "formatter", ")", "# add ch to logger", "log", ".", "addHandler", "(", "ch", ")" ]
Convenience function to quickly configure full debug output to go to the console.
[ "Convenience", "function", "to", "quickly", "configure", "full", "debug", "output", "to", "go", "to", "the", "console", "." ]
python
train
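The method is plain logging wiring; a self-contained sketch of the same setup follows. The InfoFmtString and DebugFmtString constants are not shown in this record, so a generic format string stands in for them.

import logging

def set_logger(logger_name, level=logging.INFO,
               fmt="%(asctime)s %(name)s [%(levelname)s] %(message)s"):
    log = logging.getLogger(logger_name)
    log.setLevel(level)
    ch = logging.StreamHandler(None)   # None -> sys.stderr, as in the original
    ch.setLevel(level)
    ch.setFormatter(logging.Formatter(fmt))
    log.addHandler(ch)

set_logger("kappa", logging.DEBUG)
logging.getLogger("kappa").debug("full debug output now goes to the console")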
jtwhite79/pyemu
pyemu/prototypes/da.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/prototypes/da.py#L162-L179
def forecast(self,parensemble=None): """for the enkf formulation, this simply moves the ensemble forward by running the model once for each realization""" if parensemble is None: parensemble = self.parensemble self.logger.log("evaluating ensemble") failed_runs, obsensemble = self._calc_obs(parensemble) if failed_runs is not None: self.logger.warn("dropping failed realizations") parensemble.loc[failed_runs, :] = np.NaN parensemble = parensemble.dropna() obsensemble.loc[failed_runs, :] = np.NaN obsensemble = obsensemble.dropna() self.logger.log("evaluating ensemble") return obsensemble
[ "def", "forecast", "(", "self", ",", "parensemble", "=", "None", ")", ":", "if", "parensemble", "is", "None", ":", "parensemble", "=", "self", ".", "parensemble", "self", ".", "logger", ".", "log", "(", "\"evaluating ensemble\"", ")", "failed_runs", ",", "obsensemble", "=", "self", ".", "_calc_obs", "(", "parensemble", ")", "if", "failed_runs", "is", "not", "None", ":", "self", ".", "logger", ".", "warn", "(", "\"dropping failed realizations\"", ")", "parensemble", ".", "loc", "[", "failed_runs", ",", ":", "]", "=", "np", ".", "NaN", "parensemble", "=", "parensemble", ".", "dropna", "(", ")", "obsensemble", ".", "loc", "[", "failed_runs", ",", ":", "]", "=", "np", ".", "NaN", "obsensemble", "=", "obsensemble", ".", "dropna", "(", ")", "self", ".", "logger", ".", "log", "(", "\"evaluating ensemble\"", ")", "return", "obsensemble" ]
for the enkf formulation, this simply moves the ensemble forward by running the model once for each realization
[ "for", "the", "enkf", "formulation", "this", "simply", "moves", "the", "ensemble", "forward", "by", "running", "the", "model", "once", "for", "each", "realization" ]
python
train
tensorflow/cleverhans
examples/nips17_adversarial_competition/eval_infra/code/worker.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/eval_infra/code/worker.py#L462-L489
def run(self, input_dir, output_file_path): """Runs defense inside Docker. Args: input_dir: directory with input (adversarial images). output_file_path: path of the output file. Returns: how long it took to run submission in seconds """ logging.info('Running defense %s', self.submission_id) tmp_run_dir = self.temp_copy_extracted_submission() output_dir = os.path.dirname(output_file_path) output_filename = os.path.basename(output_file_path) cmd = ['--network=none', '-m=24g', '--cpus=3.75', '-v', '{0}:/input_images:ro'.format(input_dir), '-v', '{0}:/output_data'.format(output_dir), '-v', '{0}:/code'.format(tmp_run_dir), '-w', '/code', self.container_name, './' + self.entry_point, '/input_images', '/output_data/' + output_filename] elapsed_time_sec = self.run_with_time_limit(cmd) sudo_remove_dirtree(tmp_run_dir) return elapsed_time_sec
[ "def", "run", "(", "self", ",", "input_dir", ",", "output_file_path", ")", ":", "logging", ".", "info", "(", "'Running defense %s'", ",", "self", ".", "submission_id", ")", "tmp_run_dir", "=", "self", ".", "temp_copy_extracted_submission", "(", ")", "output_dir", "=", "os", ".", "path", ".", "dirname", "(", "output_file_path", ")", "output_filename", "=", "os", ".", "path", ".", "basename", "(", "output_file_path", ")", "cmd", "=", "[", "'--network=none'", ",", "'-m=24g'", ",", "'--cpus=3.75'", ",", "'-v'", ",", "'{0}:/input_images:ro'", ".", "format", "(", "input_dir", ")", ",", "'-v'", ",", "'{0}:/output_data'", ".", "format", "(", "output_dir", ")", ",", "'-v'", ",", "'{0}:/code'", ".", "format", "(", "tmp_run_dir", ")", ",", "'-w'", ",", "'/code'", ",", "self", ".", "container_name", ",", "'./'", "+", "self", ".", "entry_point", ",", "'/input_images'", ",", "'/output_data/'", "+", "output_filename", "]", "elapsed_time_sec", "=", "self", ".", "run_with_time_limit", "(", "cmd", ")", "sudo_remove_dirtree", "(", "tmp_run_dir", ")", "return", "elapsed_time_sec" ]
Runs defense inside Docker. Args: input_dir: directory with input (adversarial images). output_file_path: path of the output file. Returns: how long it took to run submission in seconds
[ "Runs", "defense", "inside", "Docker", "." ]
python
train
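The cmd list holds only the docker arguments; run_with_time_limit() is not shown in this record and presumably prepends the actual `docker run`. A sketch of the full command line the method corresponds to, with hypothetical paths and a hypothetical image and entry point:

input_dir = "/tmp/adv_images"            # hypothetical paths
output_dir = "/tmp/defense_out"
tmp_run_dir = "/tmp/submission_copy"
full_cmd = ["docker", "run",
            "--network=none", "-m=24g", "--cpus=3.75",
            "-v", "{0}:/input_images:ro".format(input_dir),
            "-v", "{0}:/output_data".format(output_dir),
            "-v", "{0}:/code".format(tmp_run_dir),
            "-w", "/code",
            "some/defense-image:latest",   # container_name (made up)
            "./run_defense.sh",            # entry_point (made up)
            "/input_images", "/output_data/result.csv"]
print(" ".join(full_cmd))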
kapot65/python-df-parser
dfparser/rsh_parser.py
https://github.com/kapot65/python-df-parser/blob/bb3eec0fb7ca85d72cb1d9ed7415efe074594f26/dfparser/rsh_parser.py#L170-L272
def serialize_to_rsb(params: dict) -> bytes: """Сериализация JSON хедера rsb. @params -- параметры в формате JSON (dfparser.def_values.DEF_RSH_PARAMS) @return -- бинарный хедер (2048 bytes) """ header = bytearray(np.zeros(2048, np.byte).tostring()) if "text_header_size" in params: header[0:4] = struct.pack('I', params["text_header_size"]) if "events_num" in params: header[8:12] = struct.pack('i', params["events_num"]) if "start_time" in params: start_time = dateutil.parser.parse(params["start_time"]).timestamp() header[16:24] = struct.pack('Q', int(start_time)) if "end_time" in params: end_time = dateutil.parser.parse(params["end_time"]).timestamp() header[24:32] = struct.pack('Q', int(end_time)) header[32:32 + len(params["filepath"]) ] = params['filepath'].encode('cp1251') header[288:292] = struct.pack('i', params["num_blocks"]) header[292:296] = struct.pack('i', int(params["aquisition_time"])) header[296:300] = struct.pack('i', params["blocks_in_file"]) header[300:304] = struct.pack('i', int(params["waitTime"])) header[312:320] = struct.pack('d', params["threshold"]) sync_params = params["synchro_control"] sync_params_num = len(sync_params) header[336:340] = struct.pack('I', sync_params_num) for i in range(sync_params_num): if sync_params[i] == 'Default': code = 0 else: code = synchro_control[sync_params[i]] header[320 + i * 4:320 + (i + 1) * 4] = struct.pack('I', code) header[344:352] = struct.pack('d', params["sample_freq"]) header[352:356] = struct.pack('I', params["pre_history"]) header[356:360] = struct.pack('i', params["packet_number"]) header[360:364] = struct.pack('I', params["b_size"]) header[364:368] = struct.pack('I', params["hysteresis"]) header[368:372] = struct.pack('I', params["channel_number"]) for i in range(params["channel_number"]): off = 372 + 56 * i ch_param = params['channel'][i] header[off + 44: off + 52] = struct.pack('d', ch_param["adjustment"]) header[off + 52: off + 56] = struct.pack('I', ch_param["gain"]) header[off + 36: off + 40] = struct.pack('I', len(ch_param['params'])) for j, param in enumerate(ch_param['params']): if param == 'Default': code = 0 else: code = channel_control[param] header[off + 4 + j * 4: off + 4 + (j + 1) * 4] = struct.pack('I', code) synchro_channel = params['synchro_channel'] header[632:636] = struct.pack('I', len(synchro_channel['params'])) for i, param in enumerate(synchro_channel['params']): if param == 'Default': code = 0 else: code = synchro_channel_control[param] header[600 + i * 4: 600 + (i + 1) * 4] = struct.pack('I', code) sync_type = synchro_channel_types[synchro_channel['type']] header[304:308] = struct.pack('I', sync_type) header[636:640] = struct.pack('I', synchro_channel["gain"]) if "err_lang" in params: header[640:644] = struct.pack('I', params["err_lang"]) if "board_name" in params: header[644:644 + len(params["board_name"])] = \ params['board_name'].encode('cp1251') if "board_id" in params: header[900: 904] = struct.pack('I', params["board_id"]) return bytes(header)
[ "def", "serialize_to_rsb", "(", "params", ":", "dict", ")", "->", "bytes", ":", "header", "=", "bytearray", "(", "np", ".", "zeros", "(", "2048", ",", "np", ".", "byte", ")", ".", "tostring", "(", ")", ")", "if", "\"text_header_size\"", "in", "params", ":", "header", "[", "0", ":", "4", "]", "=", "struct", ".", "pack", "(", "'I'", ",", "params", "[", "\"text_header_size\"", "]", ")", "if", "\"events_num\"", "in", "params", ":", "header", "[", "8", ":", "12", "]", "=", "struct", ".", "pack", "(", "'i'", ",", "params", "[", "\"events_num\"", "]", ")", "if", "\"start_time\"", "in", "params", ":", "start_time", "=", "dateutil", ".", "parser", ".", "parse", "(", "params", "[", "\"start_time\"", "]", ")", ".", "timestamp", "(", ")", "header", "[", "16", ":", "24", "]", "=", "struct", ".", "pack", "(", "'Q'", ",", "int", "(", "start_time", ")", ")", "if", "\"end_time\"", "in", "params", ":", "end_time", "=", "dateutil", ".", "parser", ".", "parse", "(", "params", "[", "\"end_time\"", "]", ")", ".", "timestamp", "(", ")", "header", "[", "24", ":", "32", "]", "=", "struct", ".", "pack", "(", "'Q'", ",", "int", "(", "end_time", ")", ")", "header", "[", "32", ":", "32", "+", "len", "(", "params", "[", "\"filepath\"", "]", ")", "]", "=", "params", "[", "'filepath'", "]", ".", "encode", "(", "'cp1251'", ")", "header", "[", "288", ":", "292", "]", "=", "struct", ".", "pack", "(", "'i'", ",", "params", "[", "\"num_blocks\"", "]", ")", "header", "[", "292", ":", "296", "]", "=", "struct", ".", "pack", "(", "'i'", ",", "int", "(", "params", "[", "\"aquisition_time\"", "]", ")", ")", "header", "[", "296", ":", "300", "]", "=", "struct", ".", "pack", "(", "'i'", ",", "params", "[", "\"blocks_in_file\"", "]", ")", "header", "[", "300", ":", "304", "]", "=", "struct", ".", "pack", "(", "'i'", ",", "int", "(", "params", "[", "\"waitTime\"", "]", ")", ")", "header", "[", "312", ":", "320", "]", "=", "struct", ".", "pack", "(", "'d'", ",", "params", "[", "\"threshold\"", "]", ")", "sync_params", "=", "params", "[", "\"synchro_control\"", "]", "sync_params_num", "=", "len", "(", "sync_params", ")", "header", "[", "336", ":", "340", "]", "=", "struct", ".", "pack", "(", "'I'", ",", "sync_params_num", ")", "for", "i", "in", "range", "(", "sync_params_num", ")", ":", "if", "sync_params", "[", "i", "]", "==", "'Default'", ":", "code", "=", "0", "else", ":", "code", "=", "synchro_control", "[", "sync_params", "[", "i", "]", "]", "header", "[", "320", "+", "i", "*", "4", ":", "320", "+", "(", "i", "+", "1", ")", "*", "4", "]", "=", "struct", ".", "pack", "(", "'I'", ",", "code", ")", "header", "[", "344", ":", "352", "]", "=", "struct", ".", "pack", "(", "'d'", ",", "params", "[", "\"sample_freq\"", "]", ")", "header", "[", "352", ":", "356", "]", "=", "struct", ".", "pack", "(", "'I'", ",", "params", "[", "\"pre_history\"", "]", ")", "header", "[", "356", ":", "360", "]", "=", "struct", ".", "pack", "(", "'i'", ",", "params", "[", "\"packet_number\"", "]", ")", "header", "[", "360", ":", "364", "]", "=", "struct", ".", "pack", "(", "'I'", ",", "params", "[", "\"b_size\"", "]", ")", "header", "[", "364", ":", "368", "]", "=", "struct", ".", "pack", "(", "'I'", ",", "params", "[", "\"hysteresis\"", "]", ")", "header", "[", "368", ":", "372", "]", "=", "struct", ".", "pack", "(", "'I'", ",", "params", "[", "\"channel_number\"", "]", ")", "for", "i", "in", "range", "(", "params", "[", "\"channel_number\"", "]", ")", ":", "off", "=", "372", "+", "56", "*", "i", "ch_param", "=", "params", "[", "'channel'", "]", "[", "i", 
"]", "header", "[", "off", "+", "44", ":", "off", "+", "52", "]", "=", "struct", ".", "pack", "(", "'d'", ",", "ch_param", "[", "\"adjustment\"", "]", ")", "header", "[", "off", "+", "52", ":", "off", "+", "56", "]", "=", "struct", ".", "pack", "(", "'I'", ",", "ch_param", "[", "\"gain\"", "]", ")", "header", "[", "off", "+", "36", ":", "off", "+", "40", "]", "=", "struct", ".", "pack", "(", "'I'", ",", "len", "(", "ch_param", "[", "'params'", "]", ")", ")", "for", "j", ",", "param", "in", "enumerate", "(", "ch_param", "[", "'params'", "]", ")", ":", "if", "param", "==", "'Default'", ":", "code", "=", "0", "else", ":", "code", "=", "channel_control", "[", "param", "]", "header", "[", "off", "+", "4", "+", "j", "*", "4", ":", "off", "+", "4", "+", "(", "j", "+", "1", ")", "*", "4", "]", "=", "struct", ".", "pack", "(", "'I'", ",", "code", ")", "synchro_channel", "=", "params", "[", "'synchro_channel'", "]", "header", "[", "632", ":", "636", "]", "=", "struct", ".", "pack", "(", "'I'", ",", "len", "(", "synchro_channel", "[", "'params'", "]", ")", ")", "for", "i", ",", "param", "in", "enumerate", "(", "synchro_channel", "[", "'params'", "]", ")", ":", "if", "param", "==", "'Default'", ":", "code", "=", "0", "else", ":", "code", "=", "synchro_channel_control", "[", "param", "]", "header", "[", "600", "+", "i", "*", "4", ":", "600", "+", "(", "i", "+", "1", ")", "*", "4", "]", "=", "struct", ".", "pack", "(", "'I'", ",", "code", ")", "sync_type", "=", "synchro_channel_types", "[", "synchro_channel", "[", "'type'", "]", "]", "header", "[", "304", ":", "308", "]", "=", "struct", ".", "pack", "(", "'I'", ",", "sync_type", ")", "header", "[", "636", ":", "640", "]", "=", "struct", ".", "pack", "(", "'I'", ",", "synchro_channel", "[", "\"gain\"", "]", ")", "if", "\"err_lang\"", "in", "params", ":", "header", "[", "640", ":", "644", "]", "=", "struct", ".", "pack", "(", "'I'", ",", "params", "[", "\"err_lang\"", "]", ")", "if", "\"board_name\"", "in", "params", ":", "header", "[", "644", ":", "644", "+", "len", "(", "params", "[", "\"board_name\"", "]", ")", "]", "=", "params", "[", "'board_name'", "]", ".", "encode", "(", "'cp1251'", ")", "if", "\"board_id\"", "in", "params", ":", "header", "[", "900", ":", "904", "]", "=", "struct", ".", "pack", "(", "'I'", ",", "params", "[", "\"board_id\"", "]", ")", "return", "bytes", "(", "header", ")" ]
Serialization of the rsb JSON header. @params -- parameters in JSON format (dfparser.def_values.DEF_RSH_PARAMS) @return -- binary header (2048 bytes)
[ "Сериализация", "JSON", "хедера", "rsb", "." ]
python
train
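The serializer is a long series of struct.pack writes into fixed offsets of a 2048-byte buffer. A minimal sketch of that technique, reusing two offsets from the code above with example values:

import struct

header = bytearray(2048)                       # zero-filled, like the original buffer
header[288:292] = struct.pack('i', 4)          # num_blocks at offset 288
header[344:352] = struct.pack('d', 3.125e6)    # sample_freq at offset 344 (example value, Hz)

assert struct.unpack('i', header[288:292])[0] == 4
assert struct.unpack('d', header[344:352])[0] == 3.125e6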
Robpol86/libnl
libnl/linux_private/netlink.py
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/linux_private/netlink.py#L71-L73
def nl_family(self, value): """Family setter.""" self.bytearray[self._get_slicers(0)] = bytearray(c_uint(value or 0))
[ "def", "nl_family", "(", "self", ",", "value", ")", ":", "self", ".", "bytearray", "[", "self", ".", "_get_slicers", "(", "0", ")", "]", "=", "bytearray", "(", "c_uint", "(", "value", "or", "0", ")", ")" ]
Family setter.
[ "Family", "setter", "." ]
python
train
alexa/alexa-skills-kit-sdk-for-python
ask-sdk-runtime/ask_sdk_runtime/skill_builder.py
https://github.com/alexa/alexa-skills-kit-sdk-for-python/blob/097b6406aa12d5ca0b825b00c936861b530cbf39/ask-sdk-runtime/ask_sdk_runtime/skill_builder.py#L138-L180
def exception_handler(self, can_handle_func): # type: (Callable[[Input, Exception], bool]) -> Callable """Decorator that can be used to add exception handlers easily to the builder. The can_handle_func has to be a Callable instance, which takes two parameters and no varargs or kwargs. This is because of the ExceptionHandler class signature restrictions. The returned wrapper function can be applied as a decorator on any function that processes the exception raised during dispatcher and returns a response object by the skill. The function should follow the signature of the handle function in :py:class:`ask_sdk_runtime.dispatch_components.exception_components.AbstractExceptionHandler` class. :param can_handle_func: The function that validates if the exception can be handled. :type can_handle_func: Callable[[Input, Exception], bool] :return: Wrapper function that can be decorated on a handle function. """ def wrapper(handle_func): if not callable(can_handle_func) or not callable(handle_func): raise SkillBuilderException( "Exception Handler can_handle_func and handle_func input " "parameters should be callable") class_attributes = { "can_handle": ( lambda self, handler_input, exception: can_handle_func( handler_input, exception)), "handle": lambda self, handler_input, exception: handle_func( handler_input, exception) } exception_handler_class = type( "ExceptionHandler{}".format( handle_func.__name__.title().replace("_", "")), (AbstractExceptionHandler,), class_attributes) self.add_exception_handler( exception_handler=exception_handler_class()) return wrapper
[ "def", "exception_handler", "(", "self", ",", "can_handle_func", ")", ":", "# type: (Callable[[Input, Exception], bool]) -> Callable", "def", "wrapper", "(", "handle_func", ")", ":", "if", "not", "callable", "(", "can_handle_func", ")", "or", "not", "callable", "(", "handle_func", ")", ":", "raise", "SkillBuilderException", "(", "\"Exception Handler can_handle_func and handle_func input \"", "\"parameters should be callable\"", ")", "class_attributes", "=", "{", "\"can_handle\"", ":", "(", "lambda", "self", ",", "handler_input", ",", "exception", ":", "can_handle_func", "(", "handler_input", ",", "exception", ")", ")", ",", "\"handle\"", ":", "lambda", "self", ",", "handler_input", ",", "exception", ":", "handle_func", "(", "handler_input", ",", "exception", ")", "}", "exception_handler_class", "=", "type", "(", "\"ExceptionHandler{}\"", ".", "format", "(", "handle_func", ".", "__name__", ".", "title", "(", ")", ".", "replace", "(", "\"_\"", ",", "\"\"", ")", ")", ",", "(", "AbstractExceptionHandler", ",", ")", ",", "class_attributes", ")", "self", ".", "add_exception_handler", "(", "exception_handler", "=", "exception_handler_class", "(", ")", ")", "return", "wrapper" ]
Decorator that can be used to add exception handlers easily to the builder. The can_handle_func has to be a Callable instance, which takes two parameters and no varargs or kwargs. This is because of the ExceptionHandler class signature restrictions. The returned wrapper function can be applied as a decorator on any function that processes the exception raised during dispatcher and returns a response object by the skill. The function should follow the signature of the handle function in :py:class:`ask_sdk_runtime.dispatch_components.exception_components.AbstractExceptionHandler` class. :param can_handle_func: The function that validates if the exception can be handled. :type can_handle_func: Callable[[Input, Exception], bool] :return: Wrapper function that can be decorated on a handle function.
[ "Decorator", "that", "can", "be", "used", "to", "add", "exception", "handlers", "easily", "to", "the", "builder", "." ]
python
train
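The decorator is normally reached through a concrete SkillBuilder rather than the runtime base class shown here. The sketch below follows the usage the docstring describes, assuming the standard SkillBuilder from ask_sdk_core and a catch-all predicate.

from ask_sdk_core.skill_builder import SkillBuilder

sb = SkillBuilder()

@sb.exception_handler(can_handle_func=lambda handler_input, exception: True)
def catch_all(handler_input, exception):
    # Signature matches AbstractExceptionHandler.handle, as the docstring requires.
    return (handler_input.response_builder
            .speak("Sorry, something went wrong.")
            .response)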
RRZE-HPC/kerncraft
kerncraft/kernel.py
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L637-L643
def print_variables_info(self, output_file=sys.stdout): """Print variables information in human readble format.""" table = (' name | type size \n' + '---------+-------------------------\n') for name, var_info in list(self.variables.items()): table += '{:>8} | {:>6} {!s:<10}\n'.format(name, var_info[0], var_info[1]) print(prefix_indent('variables: ', table), file=output_file)
[ "def", "print_variables_info", "(", "self", ",", "output_file", "=", "sys", ".", "stdout", ")", ":", "table", "=", "(", "' name | type size \\n'", "+", "'---------+-------------------------\\n'", ")", "for", "name", ",", "var_info", "in", "list", "(", "self", ".", "variables", ".", "items", "(", ")", ")", ":", "table", "+=", "'{:>8} | {:>6} {!s:<10}\\n'", ".", "format", "(", "name", ",", "var_info", "[", "0", "]", ",", "var_info", "[", "1", "]", ")", "print", "(", "prefix_indent", "(", "'variables: '", ",", "table", ")", ",", "file", "=", "output_file", ")" ]
Print variables information in human readable format.
[ "Print", "variables", "information", "in", "human", "readble", "format", "." ]
python
test
pyrogram/pyrogram
pyrogram/client/client.py
https://github.com/pyrogram/pyrogram/blob/e7258a341ba905cfa86264c22040654db732ec1c/pyrogram/client/client.py#L1388-L1519
def save_file(self, path: str, file_id: int = None, file_part: int = 0, progress: callable = None, progress_args: tuple = ()): """Use this method to upload a file onto Telegram servers, without actually sending the message to anyone. This is a utility method intended to be used **only** when working with Raw Functions (i.e: a Telegram API method you wish to use which is not available yet in the Client class as an easy-to-use method), whenever an InputFile type is required. Args: path (``str``): The path of the file you want to upload that exists on your local machine. file_id (``int``, *optional*): In case a file part expired, pass the file_id and the file_part to retry uploading that specific chunk. file_part (``int``, *optional*): In case a file part expired, pass the file_id and the file_part to retry uploading that specific chunk. progress (``callable``, *optional*): Pass a callback function to view the upload progress. The function must take *(client, current, total, \*args)* as positional arguments (look at the section below for a detailed description). progress_args (``tuple``, *optional*): Extra custom arguments for the progress callback function. Useful, for example, if you want to pass a chat_id and a message_id in order to edit a message with the updated progress. Other Parameters: client (:obj:`Client <pyrogram.Client>`): The Client itself, useful when you want to call other API methods inside the callback function. current (``int``): The amount of bytes uploaded so far. total (``int``): The size of the file. *args (``tuple``, *optional*): Extra custom arguments as defined in the *progress_args* parameter. You can either keep *\*args* or add every single extra argument in your function signature. Returns: On success, the uploaded file is returned in form of an InputFile object. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error. 
""" part_size = 512 * 1024 file_size = os.path.getsize(path) if file_size == 0: raise ValueError("File size equals to 0 B") if file_size > 1500 * 1024 * 1024: raise ValueError("Telegram doesn't support uploading files bigger than 1500 MiB") file_total_parts = int(math.ceil(file_size / part_size)) is_big = True if file_size > 10 * 1024 * 1024 else False is_missing_part = True if file_id is not None else False file_id = file_id or self.rnd_id() md5_sum = md5() if not is_big and not is_missing_part else None session = Session(self, self.dc_id, self.auth_key, is_media=True) session.start() try: with open(path, "rb") as f: f.seek(part_size * file_part) while True: chunk = f.read(part_size) if not chunk: if not is_big: md5_sum = "".join([hex(i)[2:].zfill(2) for i in md5_sum.digest()]) break for _ in range(3): if is_big: rpc = functions.upload.SaveBigFilePart( file_id=file_id, file_part=file_part, file_total_parts=file_total_parts, bytes=chunk ) else: rpc = functions.upload.SaveFilePart( file_id=file_id, file_part=file_part, bytes=chunk ) if session.send(rpc): break else: raise AssertionError("Telegram didn't accept chunk #{} of {}".format(file_part, path)) if is_missing_part: return if not is_big: md5_sum.update(chunk) file_part += 1 if progress: progress(self, min(file_part * part_size, file_size), file_size, *progress_args) except Client.StopTransmission: raise except Exception as e: log.error(e, exc_info=True) else: if is_big: return types.InputFileBig( id=file_id, parts=file_total_parts, name=os.path.basename(path), ) else: return types.InputFile( id=file_id, parts=file_total_parts, name=os.path.basename(path), md5_checksum=md5_sum ) finally: session.stop()
[ "def", "save_file", "(", "self", ",", "path", ":", "str", ",", "file_id", ":", "int", "=", "None", ",", "file_part", ":", "int", "=", "0", ",", "progress", ":", "callable", "=", "None", ",", "progress_args", ":", "tuple", "=", "(", ")", ")", ":", "part_size", "=", "512", "*", "1024", "file_size", "=", "os", ".", "path", ".", "getsize", "(", "path", ")", "if", "file_size", "==", "0", ":", "raise", "ValueError", "(", "\"File size equals to 0 B\"", ")", "if", "file_size", ">", "1500", "*", "1024", "*", "1024", ":", "raise", "ValueError", "(", "\"Telegram doesn't support uploading files bigger than 1500 MiB\"", ")", "file_total_parts", "=", "int", "(", "math", ".", "ceil", "(", "file_size", "/", "part_size", ")", ")", "is_big", "=", "True", "if", "file_size", ">", "10", "*", "1024", "*", "1024", "else", "False", "is_missing_part", "=", "True", "if", "file_id", "is", "not", "None", "else", "False", "file_id", "=", "file_id", "or", "self", ".", "rnd_id", "(", ")", "md5_sum", "=", "md5", "(", ")", "if", "not", "is_big", "and", "not", "is_missing_part", "else", "None", "session", "=", "Session", "(", "self", ",", "self", ".", "dc_id", ",", "self", ".", "auth_key", ",", "is_media", "=", "True", ")", "session", ".", "start", "(", ")", "try", ":", "with", "open", "(", "path", ",", "\"rb\"", ")", "as", "f", ":", "f", ".", "seek", "(", "part_size", "*", "file_part", ")", "while", "True", ":", "chunk", "=", "f", ".", "read", "(", "part_size", ")", "if", "not", "chunk", ":", "if", "not", "is_big", ":", "md5_sum", "=", "\"\"", ".", "join", "(", "[", "hex", "(", "i", ")", "[", "2", ":", "]", ".", "zfill", "(", "2", ")", "for", "i", "in", "md5_sum", ".", "digest", "(", ")", "]", ")", "break", "for", "_", "in", "range", "(", "3", ")", ":", "if", "is_big", ":", "rpc", "=", "functions", ".", "upload", ".", "SaveBigFilePart", "(", "file_id", "=", "file_id", ",", "file_part", "=", "file_part", ",", "file_total_parts", "=", "file_total_parts", ",", "bytes", "=", "chunk", ")", "else", ":", "rpc", "=", "functions", ".", "upload", ".", "SaveFilePart", "(", "file_id", "=", "file_id", ",", "file_part", "=", "file_part", ",", "bytes", "=", "chunk", ")", "if", "session", ".", "send", "(", "rpc", ")", ":", "break", "else", ":", "raise", "AssertionError", "(", "\"Telegram didn't accept chunk #{} of {}\"", ".", "format", "(", "file_part", ",", "path", ")", ")", "if", "is_missing_part", ":", "return", "if", "not", "is_big", ":", "md5_sum", ".", "update", "(", "chunk", ")", "file_part", "+=", "1", "if", "progress", ":", "progress", "(", "self", ",", "min", "(", "file_part", "*", "part_size", ",", "file_size", ")", ",", "file_size", ",", "*", "progress_args", ")", "except", "Client", ".", "StopTransmission", ":", "raise", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "e", ",", "exc_info", "=", "True", ")", "else", ":", "if", "is_big", ":", "return", "types", ".", "InputFileBig", "(", "id", "=", "file_id", ",", "parts", "=", "file_total_parts", ",", "name", "=", "os", ".", "path", ".", "basename", "(", "path", ")", ",", ")", "else", ":", "return", "types", ".", "InputFile", "(", "id", "=", "file_id", ",", "parts", "=", "file_total_parts", ",", "name", "=", "os", ".", "path", ".", "basename", "(", "path", ")", ",", "md5_checksum", "=", "md5_sum", ")", "finally", ":", "session", ".", "stop", "(", ")" ]
Use this method to upload a file onto Telegram servers, without actually sending the message to anyone. This is a utility method intended to be used **only** when working with Raw Functions (i.e: a Telegram API method you wish to use which is not available yet in the Client class as an easy-to-use method), whenever an InputFile type is required. Args: path (``str``): The path of the file you want to upload that exists on your local machine. file_id (``int``, *optional*): In case a file part expired, pass the file_id and the file_part to retry uploading that specific chunk. file_part (``int``, *optional*): In case a file part expired, pass the file_id and the file_part to retry uploading that specific chunk. progress (``callable``, *optional*): Pass a callback function to view the upload progress. The function must take *(client, current, total, \*args)* as positional arguments (look at the section below for a detailed description). progress_args (``tuple``, *optional*): Extra custom arguments for the progress callback function. Useful, for example, if you want to pass a chat_id and a message_id in order to edit a message with the updated progress. Other Parameters: client (:obj:`Client <pyrogram.Client>`): The Client itself, useful when you want to call other API methods inside the callback function. current (``int``): The amount of bytes uploaded so far. total (``int``): The size of the file. *args (``tuple``, *optional*): Extra custom arguments as defined in the *progress_args* parameter. You can either keep *\*args* or add every single extra argument in your function signature. Returns: On success, the uploaded file is returned in form of an InputFile object. Raises: :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
[ "Use", "this", "method", "to", "upload", "a", "file", "onto", "Telegram", "servers", "without", "actually", "sending", "the", "message", "to", "anyone", "." ]
python
train
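A usage sketch following the docstring: save_file only uploads the bytes, so the returned InputFile/InputFileBig is meant to be passed to a later raw API call. The session name is an assumption and API credentials are assumed to be configured already.

from pyrogram import Client

app = Client("my_account")               # existing session name (assumption)
app.start()
try:
    media = app.save_file("photo.jpg")   # types.InputFile or types.InputFileBig
    print(media)
finally:
    app.stop()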
diffeo/rejester
rejester/_registry.py
https://github.com/diffeo/rejester/blob/5438a4a18be2801d7826c46e2079ba9639d2ecb4/rejester/_registry.py#L104-L129
def _acquire_lock(self, identifier, atime=30, ltime=5): '''Acquire a lock for a given identifier. If the lock cannot be obtained immediately, keep trying at random intervals, up to 3 seconds, until `atime` has passed. Once the lock has been obtained, continue to hold it for `ltime`. :param str identifier: lock token to write :param int atime: maximum time (in seconds) to acquire lock :param int ltime: maximum time (in seconds) to own lock :return: `identifier` if the lock was obtained, :const:`False` otherwise ''' conn = redis.Redis(connection_pool=self.pool) end = time.time() + atime while end > time.time(): if conn.set(self._lock_name, identifier, ex=ltime, nx=True): # logger.debug("won lock %s" % self._lock_name) return identifier sleep_time = random.uniform(0, 3) time.sleep(sleep_time) logger.warn('failed to acquire lock %s for %f seconds', self._lock_name, atime) return False
[ "def", "_acquire_lock", "(", "self", ",", "identifier", ",", "atime", "=", "30", ",", "ltime", "=", "5", ")", ":", "conn", "=", "redis", ".", "Redis", "(", "connection_pool", "=", "self", ".", "pool", ")", "end", "=", "time", ".", "time", "(", ")", "+", "atime", "while", "end", ">", "time", ".", "time", "(", ")", ":", "if", "conn", ".", "set", "(", "self", ".", "_lock_name", ",", "identifier", ",", "ex", "=", "ltime", ",", "nx", "=", "True", ")", ":", "# logger.debug(\"won lock %s\" % self._lock_name)", "return", "identifier", "sleep_time", "=", "random", ".", "uniform", "(", "0", ",", "3", ")", "time", ".", "sleep", "(", "sleep_time", ")", "logger", ".", "warn", "(", "'failed to acquire lock %s for %f seconds'", ",", "self", ".", "_lock_name", ",", "atime", ")", "return", "False" ]
Acquire a lock for a given identifier. If the lock cannot be obtained immediately, keep trying at random intervals, up to 3 seconds, until `atime` has passed. Once the lock has been obtained, continue to hold it for `ltime`. :param str identifier: lock token to write :param int atime: maximum time (in seconds) to acquire lock :param int ltime: maximum time (in seconds) to own lock :return: `identifier` if the lock was obtained, :const:`False` otherwise
[ "Acquire", "a", "lock", "for", "a", "given", "identifier", "." ]
python
train
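The method implements the standard single-owner Redis lock (SET with NX and EX, then poll until a deadline). A standalone sketch of the same pattern with redis-py, assuming a Redis server on localhost; the key name is made up.

import random
import time
import uuid
import redis

conn = redis.Redis()                 # localhost:6379 assumed
token = str(uuid.uuid4())            # unique identifier, like the registry's lock token
deadline = time.time() + 30          # atime: give up after 30 s

acquired = False
while time.time() < deadline:
    if conn.set("registry_lock", token, ex=5, nx=True):   # ltime: auto-expire in 5 s
        acquired = True
        break
    time.sleep(random.uniform(0, 3))  # same jittered retry as the original

print("lock acquired" if acquired else "timed out waiting for lock")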
materialsproject/pymatgen
pymatgen/electronic_structure/cohp.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/electronic_structure/cohp.py#L599-L716
def from_file(cls, fmt, filename=None, structure_file=None, are_coops=False): """ Creates a CompleteCohp object from an output file of a COHP calculation. Valid formats are either LMTO (for the Stuttgart LMTO-ASA code) or LOBSTER (for the LOBSTER code). Args: cohp_file: Name of the COHP output file. Defaults to COPL for LMTO and COHPCAR.lobster/COOPCAR.lobster for LOBSTER. are_coops: Indicates whether the populations are COOPs or COHPs. Defaults to False for COHPs. fmt: A string for the code that was used to calculate the COHPs so that the output file can be handled correctly. Can take the values "LMTO" or "LOBSTER". structure_file: Name of the file containing the structure. If no file name is given, use CTRL for LMTO and POSCAR for LOBSTER. Returns: A CompleteCohp object. """ fmt = fmt.upper() if fmt == "LMTO": # LMTO COOPs and orbital-resolved COHP cannot be handled yet. are_coops = False orb_res_cohp = None if structure_file is None: structure_file = "CTRL" if filename is None: filename = "COPL" cohp_file = LMTOCopl(filename=filename, to_eV=True) elif fmt == "LOBSTER": if structure_file is None: structure_file = "POSCAR" if filename is None: filename = "COOPCAR.lobster" if are_coops \ else "COHPCAR.lobster" warnings.warn( "The bond labels are currently consistent with ICOHPLIST.lobster/ICOOPLIST.lobster, not with COHPCAR.lobster/COOPCAR.lobster. Please be aware!") cohp_file = Cohpcar(filename=filename, are_coops=are_coops) orb_res_cohp = cohp_file.orb_res_cohp else: raise ValueError("Unknown format %s. Valid formats are LMTO " "and LOBSTER." % fmt) structure = Structure.from_file(structure_file) efermi = cohp_file.efermi cohp_data = cohp_file.cohp_data energies = cohp_file.energies # Lobster shifts the energies so that the Fermi energy is at zero. # Shifting should be done by the plotter object though. spins = [Spin.up, Spin.down] if cohp_file.is_spin_polarized \ else [Spin.up] if fmt == "LOBSTER": energies += efermi if orb_res_cohp is not None: # If no total COHPs are present, calculate the total # COHPs from the single-orbital populations. Total COHPs # may not be present when the cohpgenerator keyword is used # in LOBSTER versions 2.2.0 and earlier. # TODO: Test this more extensively for label in orb_res_cohp: if cohp_file.cohp_data[label]["COHP"] is None: # print(label) cohp_data[label]["COHP"] = { sp: np.sum([orb_res_cohp[label][orbs]["COHP"][sp] for orbs in orb_res_cohp[label]], axis=0) for sp in spins} if cohp_file.cohp_data[label]["ICOHP"] is None: cohp_data[label]["ICOHP"] = \ {sp: np.sum([orb_res_cohp[label][orbs]["ICOHP"][sp] for orbs in orb_res_cohp[label]], axis=0) for sp in spins} if fmt == "LMTO": # Calculate the average COHP for the LMTO file to be # consistent with LOBSTER output. 
avg_data = {"COHP": {}, "ICOHP": {}} for i in avg_data: for spin in spins: rows = np.array([cohp_data[label][i][spin] for label in cohp_data]) avg = np.average(rows, axis=0) # LMTO COHPs have 5 significant figures avg_data[i].update({spin: np.array([round_to_sigfigs(a, 5) for a in avg], dtype=float)}) avg_cohp = Cohp(efermi, energies, avg_data["COHP"], icohp=avg_data["ICOHP"]) else: avg_cohp = Cohp(efermi, energies, cohp_data["average"]["COHP"], icohp=cohp_data["average"]["COHP"], are_coops=are_coops) del cohp_data["average"] cohp_dict = {label: Cohp(efermi, energies, cohp_data[label]["COHP"], icohp=cohp_data[label]["ICOHP"], are_coops=are_coops) for label in cohp_data} bond_dict = {label: {"length": cohp_data[label]["length"], "sites": [structure.sites[site] for site in cohp_data[label]["sites"]]} for label in cohp_data} return CompleteCohp(structure, avg_cohp, cohp_dict, bonds=bond_dict, are_coops=are_coops, orb_res_cohp=orb_res_cohp)
[ "def", "from_file", "(", "cls", ",", "fmt", ",", "filename", "=", "None", ",", "structure_file", "=", "None", ",", "are_coops", "=", "False", ")", ":", "fmt", "=", "fmt", ".", "upper", "(", ")", "if", "fmt", "==", "\"LMTO\"", ":", "# LMTO COOPs and orbital-resolved COHP cannot be handled yet.", "are_coops", "=", "False", "orb_res_cohp", "=", "None", "if", "structure_file", "is", "None", ":", "structure_file", "=", "\"CTRL\"", "if", "filename", "is", "None", ":", "filename", "=", "\"COPL\"", "cohp_file", "=", "LMTOCopl", "(", "filename", "=", "filename", ",", "to_eV", "=", "True", ")", "elif", "fmt", "==", "\"LOBSTER\"", ":", "if", "structure_file", "is", "None", ":", "structure_file", "=", "\"POSCAR\"", "if", "filename", "is", "None", ":", "filename", "=", "\"COOPCAR.lobster\"", "if", "are_coops", "else", "\"COHPCAR.lobster\"", "warnings", ".", "warn", "(", "\"The bond labels are currently consistent with ICOHPLIST.lobster/ICOOPLIST.lobster, not with COHPCAR.lobster/COOPCAR.lobster. Please be aware!\"", ")", "cohp_file", "=", "Cohpcar", "(", "filename", "=", "filename", ",", "are_coops", "=", "are_coops", ")", "orb_res_cohp", "=", "cohp_file", ".", "orb_res_cohp", "else", ":", "raise", "ValueError", "(", "\"Unknown format %s. Valid formats are LMTO \"", "\"and LOBSTER.\"", "%", "fmt", ")", "structure", "=", "Structure", ".", "from_file", "(", "structure_file", ")", "efermi", "=", "cohp_file", ".", "efermi", "cohp_data", "=", "cohp_file", ".", "cohp_data", "energies", "=", "cohp_file", ".", "energies", "# Lobster shifts the energies so that the Fermi energy is at zero.", "# Shifting should be done by the plotter object though.", "spins", "=", "[", "Spin", ".", "up", ",", "Spin", ".", "down", "]", "if", "cohp_file", ".", "is_spin_polarized", "else", "[", "Spin", ".", "up", "]", "if", "fmt", "==", "\"LOBSTER\"", ":", "energies", "+=", "efermi", "if", "orb_res_cohp", "is", "not", "None", ":", "# If no total COHPs are present, calculate the total", "# COHPs from the single-orbital populations. 
Total COHPs", "# may not be present when the cohpgenerator keyword is used", "# in LOBSTER versions 2.2.0 and earlier.", "# TODO: Test this more extensively", "for", "label", "in", "orb_res_cohp", ":", "if", "cohp_file", ".", "cohp_data", "[", "label", "]", "[", "\"COHP\"", "]", "is", "None", ":", "# print(label)", "cohp_data", "[", "label", "]", "[", "\"COHP\"", "]", "=", "{", "sp", ":", "np", ".", "sum", "(", "[", "orb_res_cohp", "[", "label", "]", "[", "orbs", "]", "[", "\"COHP\"", "]", "[", "sp", "]", "for", "orbs", "in", "orb_res_cohp", "[", "label", "]", "]", ",", "axis", "=", "0", ")", "for", "sp", "in", "spins", "}", "if", "cohp_file", ".", "cohp_data", "[", "label", "]", "[", "\"ICOHP\"", "]", "is", "None", ":", "cohp_data", "[", "label", "]", "[", "\"ICOHP\"", "]", "=", "{", "sp", ":", "np", ".", "sum", "(", "[", "orb_res_cohp", "[", "label", "]", "[", "orbs", "]", "[", "\"ICOHP\"", "]", "[", "sp", "]", "for", "orbs", "in", "orb_res_cohp", "[", "label", "]", "]", ",", "axis", "=", "0", ")", "for", "sp", "in", "spins", "}", "if", "fmt", "==", "\"LMTO\"", ":", "# Calculate the average COHP for the LMTO file to be", "# consistent with LOBSTER output.", "avg_data", "=", "{", "\"COHP\"", ":", "{", "}", ",", "\"ICOHP\"", ":", "{", "}", "}", "for", "i", "in", "avg_data", ":", "for", "spin", "in", "spins", ":", "rows", "=", "np", ".", "array", "(", "[", "cohp_data", "[", "label", "]", "[", "i", "]", "[", "spin", "]", "for", "label", "in", "cohp_data", "]", ")", "avg", "=", "np", ".", "average", "(", "rows", ",", "axis", "=", "0", ")", "# LMTO COHPs have 5 significant figures", "avg_data", "[", "i", "]", ".", "update", "(", "{", "spin", ":", "np", ".", "array", "(", "[", "round_to_sigfigs", "(", "a", ",", "5", ")", "for", "a", "in", "avg", "]", ",", "dtype", "=", "float", ")", "}", ")", "avg_cohp", "=", "Cohp", "(", "efermi", ",", "energies", ",", "avg_data", "[", "\"COHP\"", "]", ",", "icohp", "=", "avg_data", "[", "\"ICOHP\"", "]", ")", "else", ":", "avg_cohp", "=", "Cohp", "(", "efermi", ",", "energies", ",", "cohp_data", "[", "\"average\"", "]", "[", "\"COHP\"", "]", ",", "icohp", "=", "cohp_data", "[", "\"average\"", "]", "[", "\"COHP\"", "]", ",", "are_coops", "=", "are_coops", ")", "del", "cohp_data", "[", "\"average\"", "]", "cohp_dict", "=", "{", "label", ":", "Cohp", "(", "efermi", ",", "energies", ",", "cohp_data", "[", "label", "]", "[", "\"COHP\"", "]", ",", "icohp", "=", "cohp_data", "[", "label", "]", "[", "\"ICOHP\"", "]", ",", "are_coops", "=", "are_coops", ")", "for", "label", "in", "cohp_data", "}", "bond_dict", "=", "{", "label", ":", "{", "\"length\"", ":", "cohp_data", "[", "label", "]", "[", "\"length\"", "]", ",", "\"sites\"", ":", "[", "structure", ".", "sites", "[", "site", "]", "for", "site", "in", "cohp_data", "[", "label", "]", "[", "\"sites\"", "]", "]", "}", "for", "label", "in", "cohp_data", "}", "return", "CompleteCohp", "(", "structure", ",", "avg_cohp", ",", "cohp_dict", ",", "bonds", "=", "bond_dict", ",", "are_coops", "=", "are_coops", ",", "orb_res_cohp", "=", "orb_res_cohp", ")" ]
Creates a CompleteCohp object from an output file of a COHP calculation. Valid formats are either LMTO (for the Stuttgart LMTO-ASA code) or LOBSTER (for the LOBSTER code). Args: cohp_file: Name of the COHP output file. Defaults to COPL for LMTO and COHPCAR.lobster/COOPCAR.lobster for LOBSTER. are_coops: Indicates whether the populations are COOPs or COHPs. Defaults to False for COHPs. fmt: A string for the code that was used to calculate the COHPs so that the output file can be handled correctly. Can take the values "LMTO" or "LOBSTER". structure_file: Name of the file containing the structure. If no file name is given, use CTRL for LMTO and POSCAR for LOBSTER. Returns: A CompleteCohp object.
[ "Creates", "a", "CompleteCohp", "object", "from", "an", "output", "file", "of", "a", "COHP", "calculation", ".", "Valid", "formats", "are", "either", "LMTO", "(", "for", "the", "Stuttgart", "LMTO", "-", "ASA", "code", ")", "or", "LOBSTER", "(", "for", "the", "LOBSTER", "code", ")", "." ]
python
train
pypa/pipenv
pipenv/vendor/urllib3/util/request.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/urllib3/util/request.py#L95-L118
def rewind_body(body, body_pos): """ Attempt to rewind body to a certain position. Primarily used for request redirects and retries. :param body: File-like object that supports seek. :param int pos: Position to seek to in file. """ body_seek = getattr(body, 'seek', None) if body_seek is not None and isinstance(body_pos, integer_types): try: body_seek(body_pos) except (IOError, OSError): raise UnrewindableBodyError("An error occurred when rewinding request " "body for redirect/retry.") elif body_pos is _FAILEDTELL: raise UnrewindableBodyError("Unable to record file position for rewinding " "request body during a redirect/retry.") else: raise ValueError("body_pos must be of type integer, " "instead it was %s." % type(body_pos))
[ "def", "rewind_body", "(", "body", ",", "body_pos", ")", ":", "body_seek", "=", "getattr", "(", "body", ",", "'seek'", ",", "None", ")", "if", "body_seek", "is", "not", "None", "and", "isinstance", "(", "body_pos", ",", "integer_types", ")", ":", "try", ":", "body_seek", "(", "body_pos", ")", "except", "(", "IOError", ",", "OSError", ")", ":", "raise", "UnrewindableBodyError", "(", "\"An error occurred when rewinding request \"", "\"body for redirect/retry.\"", ")", "elif", "body_pos", "is", "_FAILEDTELL", ":", "raise", "UnrewindableBodyError", "(", "\"Unable to record file position for rewinding \"", "\"request body during a redirect/retry.\"", ")", "else", ":", "raise", "ValueError", "(", "\"body_pos must be of type integer, \"", "\"instead it was %s.\"", "%", "type", "(", "body_pos", ")", ")" ]
Attempt to rewind body to a certain position. Primarily used for request redirects and retries. :param body: File-like object that supports seek. :param int pos: Position to seek to in file.
[ "Attempt", "to", "rewind", "body", "to", "a", "certain", "position", ".", "Primarily", "used", "for", "request", "redirects", "and", "retries", "." ]
python
train
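The `rewind_body` helper above only needs a seekable body and a previously recorded position. A small stdlib-only sketch of the success path it supports (the byte payload here is made up):

import io

# A seekable request body: record its position before the first send,
# then seek back to that position if the request must be retried.
body = io.BytesIO(b'{"hello": "world"}')
body_pos = body.tell()              # captured before sending, as urllib3 does

body.read()                         # pretend the first attempt consumed the body

body.seek(body_pos)                 # what rewind_body() effectively performs
assert body.read() == b'{"hello": "world"}'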
jroyal/pyIDS
pyIDS/ids.py
https://github.com/jroyal/pyIDS/blob/3c2d3ff4bdc7bfe116dfd02152dadd26f92f74b5/pyIDS/ids.py#L82-L92
def get_work_item_by_id(self, wi_id): ''' Retrieves a single work item based off of the supplied ID :param wi_id: The work item ID number :return: Workitem or None ''' work_items = self.get_work_items(id=wi_id) if work_items is not None: return work_items[0] return None
[ "def", "get_work_item_by_id", "(", "self", ",", "wi_id", ")", ":", "work_items", "=", "self", ".", "get_work_items", "(", "id", "=", "wi_id", ")", "if", "work_items", "is", "not", "None", ":", "return", "work_items", "[", "0", "]", "return", "None" ]
Retrieves a single work item based off of the supplied ID :param wi_id: The work item ID number :return: Workitem or None
[ "Retrieves", "a", "single", "work", "item", "based", "off", "of", "the", "supplied", "ID" ]
python
train
apache/incubator-heron
heron/statemgrs/src/python/config.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/statemgrs/src/python/config.py#L45-L50
def validate_state_locations(self): """ Names of all state locations must be unique. """ names = map(lambda loc: loc["name"], self.locations) assert len(names) == len(set(names)), "Names of state locations must be unique"
[ "def", "validate_state_locations", "(", "self", ")", ":", "names", "=", "map", "(", "lambda", "loc", ":", "loc", "[", "\"name\"", "]", ",", "self", ".", "locations", ")", "assert", "len", "(", "names", ")", "==", "len", "(", "set", "(", "names", ")", ")", ",", "\"Names of state locations must be unique\"" ]
Names of all state locations must be unique.
[ "Names", "of", "all", "state", "locations", "must", "be", "unique", "." ]
python
valid
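Note that `map()` returns a lazy iterator on Python 3, so `len(names)` in the record above would raise a TypeError there. A hedged sketch of an equivalent uniqueness check that works on both Python 2 and 3, written as a standalone function for illustration:

def validate_state_locations(locations):
    """Assert that every state location has a unique 'name'."""
    names = [loc["name"] for loc in locations]   # a real list, so len() is safe on Python 3
    assert len(names) == len(set(names)), "Names of state locations must be unique"


validate_state_locations([{"name": "zk-local"}, {"name": "zk-prod"}])   # passes silently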
edeposit/edeposit.amqp
edeposit/amqp/amqpdaemon.py
https://github.com/edeposit/edeposit.amqp/blob/7804b52028b90ab96302d54bc2430f88dc2ebf64/edeposit/amqp/amqpdaemon.py#L163-L182
def get_sendback(self, uuid, key): """ Return function for sending progress messages back to original caller. Args: uuid (str): UUID of the received message. key (str): Routing key. Returns: fn reference: Reference to function which takes only one data \ argument. """ def send_back_callback(data): self.sendResponse( serializers.serialize(data), uuid, key ) return send_back_callback
[ "def", "get_sendback", "(", "self", ",", "uuid", ",", "key", ")", ":", "def", "send_back_callback", "(", "data", ")", ":", "self", ".", "sendResponse", "(", "serializers", ".", "serialize", "(", "data", ")", ",", "uuid", ",", "key", ")", "return", "send_back_callback" ]
Return function for sending progress messages back to original caller. Args: uuid (str): UUID of the received message. key (str): Routing key. Returns: fn reference: Reference to function which takes only one data \ argument.
[ "Return", "function", "for", "sending", "progress", "messages", "back", "to", "original", "caller", "." ]
python
train
erdewit/ib_insync
ib_insync/ib.py
https://github.com/erdewit/ib_insync/blob/d0646a482590f5cb7bfddbd1f0870f8c4bc1df80/ib_insync/ib.py#L208-L228
def connect( self, host: str = '127.0.0.1', port: int = 7497, clientId: int = 1, timeout: float = 2): """ Connect to a running TWS or IB gateway application. After the connection is made the client is fully synchronized and ready to serve requests. This method is blocking. Args: host: Host name or IP address. port: Port number. clientId: ID number to use for this client; must be unique per connection. Setting clientId=0 will automatically merge manual TWS trading with this client. timeout: If establishing the connection takes longer than ``timeout`` seconds then the ``asyncio.TimeoutError`` exception is raised. Set to 0 to disable timeout. """ return self._run(self.connectAsync(host, port, clientId, timeout))
[ "def", "connect", "(", "self", ",", "host", ":", "str", "=", "'127.0.0.1'", ",", "port", ":", "int", "=", "7497", ",", "clientId", ":", "int", "=", "1", ",", "timeout", ":", "float", "=", "2", ")", ":", "return", "self", ".", "_run", "(", "self", ".", "connectAsync", "(", "host", ",", "port", ",", "clientId", ",", "timeout", ")", ")" ]
Connect to a running TWS or IB gateway application. After the connection is made the client is fully synchronized and ready to serve requests. This method is blocking. Args: host: Host name or IP address. port: Port number. clientId: ID number to use for this client; must be unique per connection. Setting clientId=0 will automatically merge manual TWS trading with this client. timeout: If establishing the connection takes longer than ``timeout`` seconds then the ``asyncio.TimeoutError`` exception is raised. Set to 0 to disable timeout.
[ "Connect", "to", "a", "running", "TWS", "or", "IB", "gateway", "application", ".", "After", "the", "connection", "is", "made", "the", "client", "is", "fully", "synchronized", "and", "ready", "to", "serve", "requests", "." ]
python
train
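A sketch of how the blocking `connect()` documented above is typically driven; it assumes a TWS or IB Gateway instance is actually listening on the given host and port, and that the surrounding `IB` client class comes from the same ib_insync package.

from ib_insync import IB

ib = IB()
# Blocks until the client is fully synchronized; raises asyncio.TimeoutError
# if the handshake takes longer than `timeout` seconds.
ib.connect(host='127.0.0.1', port=7497, clientId=1, timeout=2)

# ... issue requests against the connected client here ...

ib.disconnect()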
tjvr/kurt
kurt/__init__.py
https://github.com/tjvr/kurt/blob/fcccd80cae11dc233f6dd02b40ec9a388c62f259/kurt/__init__.py#L2358-L2371
def save(self, path): """Save the sound to a wave file at the given path. Uses :attr:`Waveform.save`, but if the path ends in a folder instead of a file, the filename is based on the project's :attr:`name`. :returns: Path to the saved file. """ (folder, filename) = os.path.split(path) if not filename: filename = _clean_filename(self.name) path = os.path.join(folder, filename) return self.waveform.save(path)
[ "def", "save", "(", "self", ",", "path", ")", ":", "(", "folder", ",", "filename", ")", "=", "os", ".", "path", ".", "split", "(", "path", ")", "if", "not", "filename", ":", "filename", "=", "_clean_filename", "(", "self", ".", "name", ")", "path", "=", "os", ".", "path", ".", "join", "(", "folder", ",", "filename", ")", "return", "self", ".", "waveform", ".", "save", "(", "path", ")" ]
Save the sound to a wave file at the given path. Uses :attr:`Waveform.save`, but if the path ends in a folder instead of a file, the filename is based on the project's :attr:`name`. :returns: Path to the saved file.
[ "Save", "the", "sound", "to", "a", "wave", "file", "at", "the", "given", "path", "." ]
python
train
UMIACS/qav
qav/validators.py
https://github.com/UMIACS/qav/blob/f92108855f9fcbe3ccea5fc6f683bd90a6e18e1b/qav/validators.py#L369-L383
def validate(self, value): """ Return True if the choice is an integer; False otherwise. If the value was cast successfully to an int, set the choice that will make its way into the answers dict to the cast int value, not the string representation. """ try: int_value = int(value) self._choice = int_value return True except ValueError: self.error_message = '%s is not a valid integer.' % value return False
[ "def", "validate", "(", "self", ",", "value", ")", ":", "try", ":", "int_value", "=", "int", "(", "value", ")", "self", ".", "_choice", "=", "int_value", "return", "True", "except", "ValueError", ":", "self", ".", "error_message", "=", "'%s is not a valid integer.'", "%", "value", "return", "False" ]
Return True if the choice is an integer; False otherwise. If the value was cast successfully to an int, set the choice that will make its way into the answers dict to the cast int value, not the string representation.
[ "Return", "True", "if", "the", "choice", "is", "an", "integer", ";", "False", "otherwise", "." ]
python
train
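The validator above boils down to a try/except around int(). A standalone sketch of the same cast-and-record logic, independent of the qav package (the function name is hypothetical):

def validate_integer(value):
    """Return (True, parsed_int) when value parses as an integer, else (False, message)."""
    try:
        return True, int(value)
    except ValueError:
        return False, '%s is not a valid integer.' % value


print(validate_integer("42"))       # (True, 42)
print(validate_integer("forty"))    # (False, 'forty is not a valid integer.')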
lepture/flask-oauthlib
flask_oauthlib/provider/oauth1.py
https://github.com/lepture/flask-oauthlib/blob/9e6f152a5bb360e7496210da21561c3e6d41b0e1/flask_oauthlib/provider/oauth1.py#L486-L518
def require_oauth(self, *realms, **kwargs): """Protect resource with specified scopes.""" def wrapper(f): @wraps(f) def decorated(*args, **kwargs): for func in self._before_request_funcs: func() if hasattr(request, 'oauth') and request.oauth: return f(*args, **kwargs) server = self.server uri, http_method, body, headers = extract_params() try: valid, req = server.validate_protected_resource_request( uri, http_method, body, headers, realms ) except Exception as e: log.warn('Exception: %r', e) e.urlencoded = urlencode([('error', 'unknown')]) e.status_code = 400 return _error_response(e) for func in self._after_request_funcs: valid, req = func(valid, req) if not valid: return abort(401) # alias user for convenience req.user = req.access_token.user request.oauth = req return f(*args, **kwargs) return decorated return wrapper
[ "def", "require_oauth", "(", "self", ",", "*", "realms", ",", "*", "*", "kwargs", ")", ":", "def", "wrapper", "(", "f", ")", ":", "@", "wraps", "(", "f", ")", "def", "decorated", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "for", "func", "in", "self", ".", "_before_request_funcs", ":", "func", "(", ")", "if", "hasattr", "(", "request", ",", "'oauth'", ")", "and", "request", ".", "oauth", ":", "return", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "server", "=", "self", ".", "server", "uri", ",", "http_method", ",", "body", ",", "headers", "=", "extract_params", "(", ")", "try", ":", "valid", ",", "req", "=", "server", ".", "validate_protected_resource_request", "(", "uri", ",", "http_method", ",", "body", ",", "headers", ",", "realms", ")", "except", "Exception", "as", "e", ":", "log", ".", "warn", "(", "'Exception: %r'", ",", "e", ")", "e", ".", "urlencoded", "=", "urlencode", "(", "[", "(", "'error'", ",", "'unknown'", ")", "]", ")", "e", ".", "status_code", "=", "400", "return", "_error_response", "(", "e", ")", "for", "func", "in", "self", ".", "_after_request_funcs", ":", "valid", ",", "req", "=", "func", "(", "valid", ",", "req", ")", "if", "not", "valid", ":", "return", "abort", "(", "401", ")", "# alias user for convenience", "req", ".", "user", "=", "req", ".", "access_token", ".", "user", "request", ".", "oauth", "=", "req", "return", "f", "(", "*", "args", ",", "*", "*", "kwargs", ")", "return", "decorated", "return", "wrapper" ]
Protect resource with specified scopes.
[ "Protect", "resource", "with", "specified", "scopes", "." ]
python
test
Azure/azure-cli-extensions
src/alias/azext_alias/custom.py
https://github.com/Azure/azure-cli-extensions/blob/3d4854205b0f0d882f688cfa12383d14506c2e35/src/alias/azext_alias/custom.py#L80-L97
def list_alias(): """ List all registered aliases. Returns: An array of dictionary containing the alias and the command that it points to. """ alias_table = get_alias_table() output = [] for alias in alias_table.sections(): if alias_table.has_option(alias, 'command'): output.append({ 'alias': alias, # Remove unnecessary whitespaces 'command': ' '.join(alias_table.get(alias, 'command').split()) }) return output
[ "def", "list_alias", "(", ")", ":", "alias_table", "=", "get_alias_table", "(", ")", "output", "=", "[", "]", "for", "alias", "in", "alias_table", ".", "sections", "(", ")", ":", "if", "alias_table", ".", "has_option", "(", "alias", ",", "'command'", ")", ":", "output", ".", "append", "(", "{", "'alias'", ":", "alias", ",", "# Remove unnecessary whitespaces", "'command'", ":", "' '", ".", "join", "(", "alias_table", ".", "get", "(", "alias", ",", "'command'", ")", ".", "split", "(", ")", ")", "}", ")", "return", "output" ]
List all registered aliases. Returns: An array of dictionary containing the alias and the command that it points to.
[ "List", "all", "registered", "aliases", "." ]
python
train
INM-6/hybridLFPy
hybridLFPy/postproc.py
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/postproc.py#L233-L255
def calc_lfp_layer(self): """ Calculate the LFP from concatenated subpopulations residing in a certain layer, e.g all L4E pops are summed, according to the `mapping_Yy` attribute of the `hybridLFPy.Population` objects. """ LFPdict = {} lastY = None for Y, y in self.mapping_Yy: if lastY != Y: try: LFPdict.update({Y : self.LFPdict[y]}) except KeyError: pass else: try: LFPdict[Y] += self.LFPdict[y] except KeyError: pass lastY = Y return LFPdict
[ "def", "calc_lfp_layer", "(", "self", ")", ":", "LFPdict", "=", "{", "}", "lastY", "=", "None", "for", "Y", ",", "y", "in", "self", ".", "mapping_Yy", ":", "if", "lastY", "!=", "Y", ":", "try", ":", "LFPdict", ".", "update", "(", "{", "Y", ":", "self", ".", "LFPdict", "[", "y", "]", "}", ")", "except", "KeyError", ":", "pass", "else", ":", "try", ":", "LFPdict", "[", "Y", "]", "+=", "self", ".", "LFPdict", "[", "y", "]", "except", "KeyError", ":", "pass", "lastY", "=", "Y", "return", "LFPdict" ]
Calculate the LFP from concatenated subpopulations residing in a certain layer, e.g all L4E pops are summed, according to the `mapping_Yy` attribute of the `hybridLFPy.Population` objects.
[ "Calculate", "the", "LFP", "from", "concatenated", "subpopulations", "residing", "in", "a", "certain", "layer", "e", ".", "g", "all", "L4E", "pops", "are", "summed", "according", "to", "the", "mapping_Yy", "attribute", "of", "the", "hybridLFPy", ".", "Population", "objects", "." ]
python
train
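The layer summation above walks a (Y, y) mapping and accumulates each subpopulation's signal into its parent layer. A small numpy sketch with made-up layer and population names:

import numpy as np

# Hypothetical mapping from layer Y to subpopulation y, plus per-population signals.
mapping_Yy = [("L4E", "p4"), ("L4E", "ss4"), ("L5E", "p5")]
lfp_by_pop = {"p4": np.ones(3), "ss4": 2 * np.ones(3), "p5": 3 * np.ones(3)}

lfp_by_layer = {}
for Y, y in mapping_Yy:
    if Y not in lfp_by_layer:
        lfp_by_layer[Y] = lfp_by_pop[y].copy()   # first population seen for this layer
    else:
        lfp_by_layer[Y] += lfp_by_pop[y]         # accumulate the remaining populations

print(lfp_by_layer["L4E"])   # [3. 3. 3.]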
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/external_ca/models/create_certificate_issuer_config.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/external_ca/models/create_certificate_issuer_config.py#L91-L106
def reference(self, reference): """ Sets the reference of this CreateCertificateIssuerConfig. The certificate name, as created in the factory, to which the certificate issuer configuration applies. The following names are reserved and cannot be configured: LwM2M, BOOTSTRAP. :param reference: The reference of this CreateCertificateIssuerConfig. :type: str """ if reference is None: raise ValueError("Invalid value for `reference`, must not be `None`") if reference is not None and len(reference) > 50: raise ValueError("Invalid value for `reference`, length must be less than or equal to `50`") if reference is not None and not re.search('(?!mbed\\.)[\\w-_.]{1,50}', reference): raise ValueError("Invalid value for `reference`, must be a follow pattern or equal to `/(?!mbed\\.)[\\w-_.]{1,50}/`") self._reference = reference
[ "def", "reference", "(", "self", ",", "reference", ")", ":", "if", "reference", "is", "None", ":", "raise", "ValueError", "(", "\"Invalid value for `reference`, must not be `None`\"", ")", "if", "reference", "is", "not", "None", "and", "len", "(", "reference", ")", ">", "50", ":", "raise", "ValueError", "(", "\"Invalid value for `reference`, length must be less than or equal to `50`\"", ")", "if", "reference", "is", "not", "None", "and", "not", "re", ".", "search", "(", "'(?!mbed\\\\.)[\\\\w-_.]{1,50}'", ",", "reference", ")", ":", "raise", "ValueError", "(", "\"Invalid value for `reference`, must be a follow pattern or equal to `/(?!mbed\\\\.)[\\\\w-_.]{1,50}/`\"", ")", "self", ".", "_reference", "=", "reference" ]
Sets the reference of this CreateCertificateIssuerConfig. The certificate name, as created in the factory, to which the certificate issuer configuration applies. The following names are reserved and cannot be configured: LwM2M, BOOTSTRAP. :param reference: The reference of this CreateCertificateIssuerConfig. :type: str
[ "Sets", "the", "reference", "of", "this", "CreateCertificateIssuerConfig", ".", "The", "certificate", "name", "as", "created", "in", "the", "factory", "to", "which", "the", "certificate", "issuer", "configuration", "applies", ".", "The", "following", "names", "are", "reserved", "and", "cannot", "be", "configured", ":", "LwM2M", "BOOTSTRAP", "." ]
python
train
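One detail worth flagging in the setter above: the character class `[\w-_.]` places the hyphen between `\w` and `_`, which Python's re module treats as a (bad) character range rather than a literal `-`. A hedged sketch of the intended check with the hyphen moved to the end of the class; re.fullmatch is used here for illustration, whereas the original code calls re.search:

import re

# Intended rule: 1-50 characters drawn from word characters, dot and hyphen,
# and the name must not start with the reserved prefix "mbed.".
pattern = re.compile(r'(?!mbed\.)[\w.-]{1,50}')

print(bool(pattern.fullmatch("my-cert_01")))      # True
print(bool(pattern.fullmatch("mbed.reserved")))   # False, blocked by the lookahead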
mitsei/dlkit
dlkit/json_/cataloging/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/cataloging/objects.py#L206-L221
def get_parent_catalog_nodes(self): """Gets the parents of this catalog. return: (osid.cataloging.CatalogNodeList) - the parents of the ``id`` *compliance: mandatory -- This method must be implemented.* """ parent_catalog_nodes = [] for node in self._my_map['parentNodes']: parent_catalog_nodes.append(CatalogNode( node._my_map, runtime=self._runtime, proxy=self._proxy, lookup_session=self._lookup_session)) return CatalogNodeList(parent_catalog_nodes)
[ "def", "get_parent_catalog_nodes", "(", "self", ")", ":", "parent_catalog_nodes", "=", "[", "]", "for", "node", "in", "self", ".", "_my_map", "[", "'parentNodes'", "]", ":", "parent_catalog_nodes", ".", "append", "(", "CatalogNode", "(", "node", ".", "_my_map", ",", "runtime", "=", "self", ".", "_runtime", ",", "proxy", "=", "self", ".", "_proxy", ",", "lookup_session", "=", "self", ".", "_lookup_session", ")", ")", "return", "CatalogNodeList", "(", "parent_catalog_nodes", ")" ]
Gets the parents of this catalog. return: (osid.cataloging.CatalogNodeList) - the parents of the ``id`` *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "parents", "of", "this", "catalog", "." ]
python
train
erikrose/more-itertools
more_itertools/more.py
https://github.com/erikrose/more-itertools/blob/6a91b4e25c8e12fcf9fc2b53cf8ee0fba293e6f9/more_itertools/more.py#L1035-L1053
def split_at(iterable, pred): """Yield lists of items from *iterable*, where each list is delimited by an item where callable *pred* returns ``True``. The lists do not include the delimiting items. >>> list(split_at('abcdcba', lambda x: x == 'b')) [['a'], ['c', 'd', 'c'], ['a']] >>> list(split_at(range(10), lambda n: n % 2 == 1)) [[0], [2], [4], [6], [8], []] """ buf = [] for item in iterable: if pred(item): yield buf buf = [] else: buf.append(item) yield buf
[ "def", "split_at", "(", "iterable", ",", "pred", ")", ":", "buf", "=", "[", "]", "for", "item", "in", "iterable", ":", "if", "pred", "(", "item", ")", ":", "yield", "buf", "buf", "=", "[", "]", "else", ":", "buf", ".", "append", "(", "item", ")", "yield", "buf" ]
Yield lists of items from *iterable*, where each list is delimited by an item where callable *pred* returns ``True``. The lists do not include the delimiting items. >>> list(split_at('abcdcba', lambda x: x == 'b')) [['a'], ['c', 'd', 'c'], ['a']] >>> list(split_at(range(10), lambda n: n % 2 == 1)) [[0], [2], [4], [6], [8], []]
[ "Yield", "lists", "of", "items", "from", "*", "iterable", "*", "where", "each", "list", "is", "delimited", "by", "an", "item", "where", "callable", "*", "pred", "*", "returns", "True", ".", "The", "lists", "do", "not", "include", "the", "delimiting", "items", "." ]
python
train
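Beyond the doctests above, a typical use of `split_at` is carving a flat token stream into records at a sentinel value:

from more_itertools import split_at

# Split wherever the sentinel '--' appears; the sentinel itself is dropped.
tokens = ['a', '1', '--', 'b', '2', '--', 'c', '3']
print(list(split_at(tokens, lambda t: t == '--')))
# [['a', '1'], ['b', '2'], ['c', '3']]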
chimera0/accel-brain-code
Automatic-Summarization/pysummarization/vectorizabletoken/encoder_decoder.py
https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Automatic-Summarization/pysummarization/vectorizabletoken/encoder_decoder.py#L54-L67
def vectorize(self, token_list): ''' Tokenize token list. Args: token_list: The list of tokens.. Returns: [vector of token, vector of token, vector of token, ...] ''' sentence_list = [token_list] test_observed_arr = self.__setup_dataset(sentence_list, self.__token_master_list) pred_arr = self.__controller.inference(test_observed_arr) return self.__controller.get_feature_points()
[ "def", "vectorize", "(", "self", ",", "token_list", ")", ":", "sentence_list", "=", "[", "token_list", "]", "test_observed_arr", "=", "self", ".", "__setup_dataset", "(", "sentence_list", ",", "self", ".", "__token_master_list", ")", "pred_arr", "=", "self", ".", "__controller", ".", "inference", "(", "test_observed_arr", ")", "return", "self", ".", "__controller", ".", "get_feature_points", "(", ")" ]
Vectorize token list. Args: token_list: The list of tokens. Returns: [vector of token, vector of token, vector of token, ...]
[ "Vectorize", "token", "list", ".", "Args", ":", "token_list", ":", "The", "list", "of", "tokens", ".", "Returns", ":", "[", "vector", "of", "token", "vector", "of", "token", "vector", "of", "token", "...", "]" ]
python
train
mitsei/dlkit
dlkit/json_/logging_/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/logging_/sessions.py#L2166-L2188
def alias_log(self, log_id, alias_id): """Adds an ``Id`` to a ``Log`` for the purpose of creating compatibility. The primary ``Id`` of the ``Log`` is determined by the provider. The new ``Id`` performs as an alias to the primary ``Id``. If the alias is a pointer to another log, it is reassigned to the given log ``Id``. arg: log_id (osid.id.Id): the ``Id`` of a ``Log`` arg: alias_id (osid.id.Id): the alias ``Id`` raise: AlreadyExists - ``alias_id`` is already assigned raise: NotFound - ``log_id`` not found raise: NullArgument - ``log_id`` or ``alias_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinLookupSession.alias_bin_template if self._catalog_session is not None: return self._catalog_session.alias_catalog(catalog_id=log_id, alias_id=alias_id) self._alias_id(primary_id=log_id, equivalent_id=alias_id)
[ "def", "alias_log", "(", "self", ",", "log_id", ",", "alias_id", ")", ":", "# Implemented from template for", "# osid.resource.BinLookupSession.alias_bin_template", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", ".", "alias_catalog", "(", "catalog_id", "=", "log_id", ",", "alias_id", "=", "alias_id", ")", "self", ".", "_alias_id", "(", "primary_id", "=", "log_id", ",", "equivalent_id", "=", "alias_id", ")" ]
Adds an ``Id`` to a ``Log`` for the purpose of creating compatibility. The primary ``Id`` of the ``Log`` is determined by the provider. The new ``Id`` performs as an alias to the primary ``Id``. If the alias is a pointer to another log, it is reassigned to the given log ``Id``. arg: log_id (osid.id.Id): the ``Id`` of a ``Log`` arg: alias_id (osid.id.Id): the alias ``Id`` raise: AlreadyExists - ``alias_id`` is already assigned raise: NotFound - ``log_id`` not found raise: NullArgument - ``log_id`` or ``alias_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Adds", "an", "Id", "to", "a", "Log", "for", "the", "purpose", "of", "creating", "compatibility", "." ]
python
train
bachiraoun/pyrep
OldRepository.py
https://github.com/bachiraoun/pyrep/blob/0449bf2fad3e3e8dda855d4686a8869efeefd433/OldRepository.py#L564-L583
def walk_files_relative_path(self, relativePath=""): """ Walk the repository and yield all found files relative path joined with file name. :parameters: #. relativePath (str): The relative path from which start the walk. """ def walk_files(directory, relativePath): directories = dict.__getitem__(directory, 'directories') files = dict.__getitem__(directory, 'files') for f in sorted(files): yield os.path.join(relativePath, f) for k in sorted(dict.keys(directories)): path = os.path.join(relativePath, k) dir = directories.__getitem__(k) for e in walk_files(dir, path): yield e dir, errorMessage = self.get_directory_info(relativePath) assert dir is not None, errorMessage return walk_files(dir, relativePath='')
[ "def", "walk_files_relative_path", "(", "self", ",", "relativePath", "=", "\"\"", ")", ":", "def", "walk_files", "(", "directory", ",", "relativePath", ")", ":", "directories", "=", "dict", ".", "__getitem__", "(", "directory", ",", "'directories'", ")", "files", "=", "dict", ".", "__getitem__", "(", "directory", ",", "'files'", ")", "for", "f", "in", "sorted", "(", "files", ")", ":", "yield", "os", ".", "path", ".", "join", "(", "relativePath", ",", "f", ")", "for", "k", "in", "sorted", "(", "dict", ".", "keys", "(", "directories", ")", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "relativePath", ",", "k", ")", "dir", "=", "directories", ".", "__getitem__", "(", "k", ")", "for", "e", "in", "walk_files", "(", "dir", ",", "path", ")", ":", "yield", "e", "dir", ",", "errorMessage", "=", "self", ".", "get_directory_info", "(", "relativePath", ")", "assert", "dir", "is", "not", "None", ",", "errorMessage", "return", "walk_files", "(", "dir", ",", "relativePath", "=", "''", ")" ]
Walk the repository and yield all found files relative path joined with file name. :parameters: #. relativePath (str): The relative path from which to start the walk.
[ "Walk", "the", "repository", "and", "yield", "all", "found", "files", "relative", "path", "joined", "with", "file", "name", "." ]
python
valid
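The walker above recurses over a nested mapping with 'directories' and 'files' keys. A self-contained sketch of the same traversal over a hand-built tree (the tree literal is invented for illustration):

import os


def walk_files(directory, relative_path=""):
    """Yield the relative path of every file in a nested {'directories', 'files'} mapping."""
    for name in sorted(directory['files']):
        yield os.path.join(relative_path, name)
    for name in sorted(directory['directories']):
        subdir = directory['directories'][name]
        for path in walk_files(subdir, os.path.join(relative_path, name)):
            yield path


tree = {
    'files': ['README.txt'],
    'directories': {
        'data': {'files': ['a.csv', 'b.csv'], 'directories': {}},
    },
}
print(list(walk_files(tree)))   # ['README.txt', 'data/a.csv', 'data/b.csv'] on POSIX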
hadrianl/huobi
huobitrade/service.py
https://github.com/hadrianl/huobi/blob/bbfa2036703ee84a76d5d8e9f89c25fc8a55f2c7/huobitrade/service.py#L451-L472
def send_margin_order(self, acc_id, amount, symbol, _type, price=0, _async=False): """ 创建并执行借贷订单 :param amount: :param symbol: :param _type: 可选值 {buy-market:市价买, sell-market:市价卖, buy-limit:限价买, sell-limit:限价卖} :param price: :return: """ params = { 'account-id': acc_id, 'amount': amount, 'symbol': symbol, 'type': _type, 'source': 'margin-api' } if price: params['price'] = price path = '/v1/order/orders/place' return api_key_post(params, path, _async=_async)
[ "def", "send_margin_order", "(", "self", ",", "acc_id", ",", "amount", ",", "symbol", ",", "_type", ",", "price", "=", "0", ",", "_async", "=", "False", ")", ":", "params", "=", "{", "'account-id'", ":", "acc_id", ",", "'amount'", ":", "amount", ",", "'symbol'", ":", "symbol", ",", "'type'", ":", "_type", ",", "'source'", ":", "'margin-api'", "}", "if", "price", ":", "params", "[", "'price'", "]", "=", "price", "path", "=", "'/v1/order/orders/place'", "return", "api_key_post", "(", "params", ",", "path", ",", "_async", "=", "_async", ")" ]
Create and execute a margin order :param amount: :param symbol: :param _type: allowed values {buy-market: market buy, sell-market: market sell, buy-limit: limit buy, sell-limit: limit sell} :param price: :return:
[ "Create and execute a margin order", ":", "param", "amount", ":", ":", "param", "symbol", ":", ":", "param", "_type", ":", "allowed values", "{", "buy", "-", "market: market buy", "sell", "-", "market: market sell", "buy", "-", "limit: limit buy", "sell", "-", "limit: limit sell", "}", ":", "param", "price", ":", ":", "return", ":" ]
python
train
fermiPy/fermipy
fermipy/jobs/chain.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/chain.py#L48-L53
def main(cls): """Hook to run this `Chain` from the command line """ chain = cls.create() args = chain._run_argparser(sys.argv[1:]) chain._run_chain(sys.stdout, args.dry_run) chain._finalize(args.dry_run)
[ "def", "main", "(", "cls", ")", ":", "chain", "=", "cls", ".", "create", "(", ")", "args", "=", "chain", ".", "_run_argparser", "(", "sys", ".", "argv", "[", "1", ":", "]", ")", "chain", ".", "_run_chain", "(", "sys", ".", "stdout", ",", "args", ".", "dry_run", ")", "chain", ".", "_finalize", "(", "args", ".", "dry_run", ")" ]
Hook to run this `Chain` from the command line
[ "Hook", "to", "run", "this", "Chain", "from", "the", "command", "line" ]
python
train
ssherar/hook
hook/model.py
https://github.com/ssherar/hook/blob/54160df554d8b2ed65d762168e5808487e873ed9/hook/model.py#L76-L91
def from_yaml(cls, defaults, **kwargs): """Creates a new instance of a rule by merging two dictionaries. This allows for independant configuration files to be merged into the defaults.""" # TODO: I hate myself for this. Fix it later mmkay? if "token" not in defaults: kwargs["token"] = None defaults = copy.deepcopy(defaults) return cls( defaults=defaults, token=kwargs.pop("token"), directory=kwargs.pop("directory"), **kwargs )
[ "def", "from_yaml", "(", "cls", ",", "defaults", ",", "*", "*", "kwargs", ")", ":", "# TODO: I hate myself for this. Fix it later mmkay?", "if", "\"token\"", "not", "in", "defaults", ":", "kwargs", "[", "\"token\"", "]", "=", "None", "defaults", "=", "copy", ".", "deepcopy", "(", "defaults", ")", "return", "cls", "(", "defaults", "=", "defaults", ",", "token", "=", "kwargs", ".", "pop", "(", "\"token\"", ")", ",", "directory", "=", "kwargs", ".", "pop", "(", "\"directory\"", ")", ",", "*", "*", "kwargs", ")" ]
Creates a new instance of a rule by merging two dictionaries. This allows for independent configuration files to be merged into the defaults.
[ "Creates", "a", "new", "instance", "of", "a", "rule", "by", "merging", "two", "dictionaries", "." ]
python
test
gwastro/pycbc-glue
pycbc_glue/markup.py
https://github.com/gwastro/pycbc-glue/blob/a3e906bae59fbfd707c3ff82e5d008d939ec5e24/pycbc_glue/markup.py#L396-L423
def _argsdicts( args, mydict ): """A utility generator that pads argument list and dictionary values, will only be called with len( args ) = 0, 1.""" if len( args ) == 0: args = None, elif len( args ) == 1: args = _totuple( args[0] ) else: raise Exception( "We should have never gotten here." ) mykeys = list( mydict.keys( ) ) myvalues = list( map( _totuple, list( mydict.values( ) ) ) ) maxlength = max( list( map( len, [ args ] + myvalues ) ) ) for i in range( maxlength ): thisdict = { } for key, value in zip( mykeys, myvalues ): try: thisdict[ key ] = value[i] except IndexError: thisdict[ key ] = value[-1] try: thisarg = args[i] except IndexError: thisarg = args[-1] yield thisarg, thisdict
[ "def", "_argsdicts", "(", "args", ",", "mydict", ")", ":", "if", "len", "(", "args", ")", "==", "0", ":", "args", "=", "None", ",", "elif", "len", "(", "args", ")", "==", "1", ":", "args", "=", "_totuple", "(", "args", "[", "0", "]", ")", "else", ":", "raise", "Exception", "(", "\"We should have never gotten here.\"", ")", "mykeys", "=", "list", "(", "mydict", ".", "keys", "(", ")", ")", "myvalues", "=", "list", "(", "map", "(", "_totuple", ",", "list", "(", "mydict", ".", "values", "(", ")", ")", ")", ")", "maxlength", "=", "max", "(", "list", "(", "map", "(", "len", ",", "[", "args", "]", "+", "myvalues", ")", ")", ")", "for", "i", "in", "range", "(", "maxlength", ")", ":", "thisdict", "=", "{", "}", "for", "key", ",", "value", "in", "zip", "(", "mykeys", ",", "myvalues", ")", ":", "try", ":", "thisdict", "[", "key", "]", "=", "value", "[", "i", "]", "except", "IndexError", ":", "thisdict", "[", "key", "]", "=", "value", "[", "-", "1", "]", "try", ":", "thisarg", "=", "args", "[", "i", "]", "except", "IndexError", ":", "thisarg", "=", "args", "[", "-", "1", "]", "yield", "thisarg", ",", "thisdict" ]
A utility generator that pads argument list and dictionary values; it will only be called with len( args ) = 0 or 1.
[ "A", "utility", "generator", "that", "pads", "argument", "list", "and", "dictionary", "values", ";", "it", "will", "only", "be", "called", "with", "len", "(", "args", ")", "=", "0", "or", "1", "." ]
python
train
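The padding behaviour of `_argsdicts` (shorter value sequences repeat their last element) can be shown with a small standalone generator; the helper below is a simplified stand-in, not the markup module's private `_totuple`-based implementation.

def pad_zip(args, mydict):
    """Yield (arg, {key: value}) rows, padding shorter sequences with their last element."""
    keys = list(mydict.keys())
    values = [v if isinstance(v, tuple) else (v,) for v in mydict.values()]
    maxlength = max(len(args), *(len(v) for v in values))
    for i in range(maxlength):
        row = {k: (v[i] if i < len(v) else v[-1]) for k, v in zip(keys, values)}
        yield (args[i] if i < len(args) else args[-1]), row


for text, attrs in pad_zip(('a', 'b', 'c'), {'href': ('/1', '/2', '/3'), 'class': 'nav'}):
    print(text, attrs)
# a {'href': '/1', 'class': 'nav'}
# b {'href': '/2', 'class': 'nav'}
# c {'href': '/3', 'class': 'nav'}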
foliant-docs/foliantcontrib.mkdocs
foliant/backends/mkdocs.py
https://github.com/foliant-docs/foliantcontrib.mkdocs/blob/5f71a47139ab1cb630f1b61d4cef1c0657001272/foliant/backends/mkdocs.py#L77-L106
def _get_page_with_optional_heading(self, page_file_path: str) -> str or Dict: '''Get the content of first heading of source Markdown file, if the file contains any headings. Return a data element of ``pages`` section of ``mkdocs.yml`` file. :param page_file_path: path to source Markdown file :returns: Unchanged file path or a dictionary: content of first heading, file path ''' self.logger.debug(f'Looking for the first heading in {page_file_path}') if page_file_path.endswith('.md'): page_file_full_path = self.project_path / self.config['src_dir'] / page_file_path with open(page_file_full_path, encoding='utf8') as page_file: content = page_file.read() headings_found = search( r'^\s*#{1,6}[ \t]+([^\r\n]+?)(?:[ \t]+\{#\S+\})?\s*[\r\n]+', content ) if headings_found: first_heading = headings_found.group(1) self.logger.debug(f'Heading found: {first_heading}') return {first_heading: page_file_path} self.logger.debug(f'No heading found, returning original file path.') return page_file_path
[ "def", "_get_page_with_optional_heading", "(", "self", ",", "page_file_path", ":", "str", ")", "->", "str", "or", "Dict", ":", "self", ".", "logger", ".", "debug", "(", "f'Looking for the first heading in {page_file_path}'", ")", "if", "page_file_path", ".", "endswith", "(", "'.md'", ")", ":", "page_file_full_path", "=", "self", ".", "project_path", "/", "self", ".", "config", "[", "'src_dir'", "]", "/", "page_file_path", "with", "open", "(", "page_file_full_path", ",", "encoding", "=", "'utf8'", ")", "as", "page_file", ":", "content", "=", "page_file", ".", "read", "(", ")", "headings_found", "=", "search", "(", "r'^\\s*#{1,6}[ \\t]+([^\\r\\n]+?)(?:[ \\t]+\\{#\\S+\\})?\\s*[\\r\\n]+'", ",", "content", ")", "if", "headings_found", ":", "first_heading", "=", "headings_found", ".", "group", "(", "1", ")", "self", ".", "logger", ".", "debug", "(", "f'Heading found: {first_heading}'", ")", "return", "{", "first_heading", ":", "page_file_path", "}", "self", ".", "logger", ".", "debug", "(", "f'No heading found, returning original file path.'", ")", "return", "page_file_path" ]
Get the content of first heading of source Markdown file, if the file contains any headings. Return a data element of ``pages`` section of ``mkdocs.yml`` file. :param page_file_path: path to source Markdown file :returns: Unchanged file path or a dictionary: content of first heading, file path
[ "Get", "the", "content", "of", "first", "heading", "of", "source", "Markdown", "file", "if", "the", "file", "contains", "any", "headings", ".", "Return", "a", "data", "element", "of", "pages", "section", "of", "mkdocs", ".", "yml", "file", "." ]
python
train
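The heading-extraction regex used above can be exercised in isolation; the Markdown snippet below is invented, and the optional `{#...}` anchor is stripped from the captured title just as in the backend:

from re import search

markdown = "# Getting Started {#intro}\n\nSome body text.\n"

match = search(r'^\s*#{1,6}[ \t]+([^\r\n]+?)(?:[ \t]+\{#\S+\})?\s*[\r\n]+', markdown)
if match:
    print(match.group(1))   # Getting Started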
glamp/bashplotlib
bashplotlib/utils/helpers.py
https://github.com/glamp/bashplotlib/blob/f7533172c4dc912b5accae42edd5c0f655d7468f/bashplotlib/utils/helpers.py#L67-L76
def abbreviate(labels, rfill=' '): """ Abbreviate labels without introducing ambiguities. """ max_len = max(len(l) for l in labels) for i in range(1, max_len): abbrev = [l[:i].ljust(i, rfill) for l in labels] if len(abbrev) == len(set(abbrev)): break return abbrev
[ "def", "abbreviate", "(", "labels", ",", "rfill", "=", "' '", ")", ":", "max_len", "=", "max", "(", "len", "(", "l", ")", "for", "l", "in", "labels", ")", "for", "i", "in", "range", "(", "1", ",", "max_len", ")", ":", "abbrev", "=", "[", "l", "[", ":", "i", "]", ".", "ljust", "(", "i", ",", "rfill", ")", "for", "l", "in", "labels", "]", "if", "len", "(", "abbrev", ")", "==", "len", "(", "set", "(", "abbrev", ")", ")", ":", "break", "return", "abbrev" ]
Abbreviate labels without introducing ambiguities.
[ "Abbreviate", "labels", "without", "introducing", "ambiguities", "." ]
python
train
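A quick usage example for `abbreviate`: the prefix length grows until every label is distinct (the import path follows the file path recorded above):

from bashplotlib.utils.helpers import abbreviate

# Two characters are already enough to tell these labels apart.
print(abbreviate(['monday', 'tuesday', 'thursday']))   # ['mo', 'tu', 'th']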
calmjs/calmjs
src/calmjs/base.py
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/base.py#L448-L455
def get_records_for_package(self, package_name): """ Get all records identified by package. """ result = [] result.extend(self.package_module_map.get(package_name)) return result
[ "def", "get_records_for_package", "(", "self", ",", "package_name", ")", ":", "result", "=", "[", "]", "result", ".", "extend", "(", "self", ".", "package_module_map", ".", "get", "(", "package_name", ")", ")", "return", "result" ]
Get all records identified by package.
[ "Get", "all", "records", "identified", "by", "package", "." ]
python
train
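One caveat in the record above: `dict.get(package_name)` returns None for an unknown package, and `list.extend(None)` raises TypeError. A hedged, standalone sketch of the same lookup with an explicit empty-sequence fallback (passing the map as a plain dict argument is an assumption; calmjs keeps it on the registry object):

def get_records_for_package(package_module_map, package_name):
    """Return a new list of records for the package; empty if the package is unknown."""
    result = []
    # Fall back to an empty tuple so extend() never sees None.
    result.extend(package_module_map.get(package_name) or ())
    return result


records = {'calmjs': ['calmjs.module', 'calmjs.loader']}     # invented example data
print(get_records_for_package(records, 'calmjs'))    # ['calmjs.module', 'calmjs.loader']
print(get_records_for_package(records, 'missing'))   # []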
michaelpb/omnic
omnic/cli/commands.py
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/cli/commands.py#L110-L122
async def _precache(url, to_type, force=False): ''' Helper function used by precache and precache-named which does the actual precaching ''' if force: cli.print('%s: force clearing' % url) _clear_cache(url) cli.print('%s: precaching "%s"' % (url, to_type)) with autodrain_worker(): await singletons.workers.async_enqueue_multiconvert(url, to_type) result = TypedResource(url, TypeString(to_type)) cli.print('%s: %s precached at: %s' % (url, to_type, result.cache_path))
[ "async", "def", "_precache", "(", "url", ",", "to_type", ",", "force", "=", "False", ")", ":", "if", "force", ":", "cli", ".", "print", "(", "'%s: force clearing'", "%", "url", ")", "_clear_cache", "(", "url", ")", "cli", ".", "print", "(", "'%s: precaching \"%s\"'", "%", "(", "url", ",", "to_type", ")", ")", "with", "autodrain_worker", "(", ")", ":", "await", "singletons", ".", "workers", ".", "async_enqueue_multiconvert", "(", "url", ",", "to_type", ")", "result", "=", "TypedResource", "(", "url", ",", "TypeString", "(", "to_type", ")", ")", "cli", ".", "print", "(", "'%s: %s precached at: %s'", "%", "(", "url", ",", "to_type", ",", "result", ".", "cache_path", ")", ")" ]
Helper function used by precache and precache-named which does the actual precaching
[ "Helper", "function", "used", "by", "precache", "and", "precache", "-", "named", "which", "does", "the", "actual", "precaching" ]
python
train
ultrabug/py3status
py3status/module.py
https://github.com/ultrabug/py3status/blob/4c105f1b44f7384ca4f7da5f821a47e468c7dee2/py3status/module.py#L232-L244
def force_update(self): """ Forces an update of the module. """ if self.disabled or self.terminated or not self.enabled: return # clear cached_until for each method to allow update for meth in self.methods: self.methods[meth]["cached_until"] = time() if self.config["debug"]: self._py3_wrapper.log("clearing cache for method {}".format(meth)) # set module to update self._py3_wrapper.timeout_queue_add(self)
[ "def", "force_update", "(", "self", ")", ":", "if", "self", ".", "disabled", "or", "self", ".", "terminated", "or", "not", "self", ".", "enabled", ":", "return", "# clear cached_until for each method to allow update", "for", "meth", "in", "self", ".", "methods", ":", "self", ".", "methods", "[", "meth", "]", "[", "\"cached_until\"", "]", "=", "time", "(", ")", "if", "self", ".", "config", "[", "\"debug\"", "]", ":", "self", ".", "_py3_wrapper", ".", "log", "(", "\"clearing cache for method {}\"", ".", "format", "(", "meth", ")", ")", "# set module to update", "self", ".", "_py3_wrapper", ".", "timeout_queue_add", "(", "self", ")" ]
Forces an update of the module.
[ "Forces", "an", "update", "of", "the", "module", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/utils/data.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/utils/data.py#L101-L104
def chop(seq, size): """Chop a sequence into chunks of the given size.""" chunk = lambda i: seq[i:i+size] return map(chunk,xrange(0,len(seq),size))
[ "def", "chop", "(", "seq", ",", "size", ")", ":", "chunk", "=", "lambda", "i", ":", "seq", "[", "i", ":", "i", "+", "size", "]", "return", "map", "(", "chunk", ",", "xrange", "(", "0", ",", "len", "(", "seq", ")", ",", "size", ")", ")" ]
Chop a sequence into chunks of the given size.
[ "Chop", "a", "sequence", "into", "chunks", "of", "the", "given", "size", "." ]
python
test
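`chop` uses the Python 2 idioms `map` and `xrange`; on Python 3 it would return a lazy map object rather than a list of chunks. A hedged Python 3 equivalent:

def chop(seq, size):
    """Chop a sequence into chunks of the given size (Python 3 version)."""
    return [seq[i:i + size] for i in range(0, len(seq), size)]


print(chop("abcdefg", 3))          # ['abc', 'def', 'g']
print(chop(list(range(5)), 2))     # [[0, 1], [2, 3], [4]]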
wummel/patool
patoolib/util.py
https://github.com/wummel/patool/blob/d7e64d9fd60faaa4b3f824bd97c43ce59b185c40/patoolib/util.py#L430-L441
def set_mode (filename, flags): """Set mode flags for given filename if not already set.""" try: mode = os.lstat(filename).st_mode except OSError: # ignore return if not (mode & flags): try: os.chmod(filename, flags | mode) except OSError as msg: log_error("could not set mode flags for `%s': %s" % (filename, msg))
[ "def", "set_mode", "(", "filename", ",", "flags", ")", ":", "try", ":", "mode", "=", "os", ".", "lstat", "(", "filename", ")", ".", "st_mode", "except", "OSError", ":", "# ignore", "return", "if", "not", "(", "mode", "&", "flags", ")", ":", "try", ":", "os", ".", "chmod", "(", "filename", ",", "flags", "|", "mode", ")", "except", "OSError", "as", "msg", ":", "log_error", "(", "\"could not set mode flags for `%s': %s\"", "%", "(", "filename", ",", "msg", ")", ")" ]
Set mode flags for given filename if not already set.
[ "Set", "mode", "flags", "for", "given", "filename", "if", "not", "already", "set", "." ]
python
train
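The flag-adding chmod pattern in `set_mode` is easy to reuse with the stdlib stat constants; the function below is a trimmed stand-in (it silently skips missing files, like the original, but raises instead of logging if chmod fails):

import os
import stat


def add_mode_flags(filename, flags):
    """Add permission flags (e.g. stat.S_IRUSR) to a file if they are not already set."""
    try:
        mode = os.lstat(filename).st_mode
    except OSError:
        return                        # missing file: nothing to do
    if not (mode & flags):
        os.chmod(filename, mode | flags)


# Hypothetical usage: make an extracted archive member owner-readable.
# add_mode_flags("extracted/readme.txt", stat.S_IRUSR)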
lemieuxl/pyplink
pyplink/pyplink.py
https://github.com/lemieuxl/pyplink/blob/31d47c86f589064bda98206314a2d0b20e7fd2f0/pyplink/pyplink.py#L198-L202
def _read_current_marker(self): """Reads the current marker and returns its genotypes.""" return self._geno_values[ np.frombuffer(self._bed.read(self._nb_bytes), dtype=np.uint8) ].flatten(order="C")[:self._nb_samples]
[ "def", "_read_current_marker", "(", "self", ")", ":", "return", "self", ".", "_geno_values", "[", "np", ".", "frombuffer", "(", "self", ".", "_bed", ".", "read", "(", "self", ".", "_nb_bytes", ")", ",", "dtype", "=", "np", ".", "uint8", ")", "]", ".", "flatten", "(", "order", "=", "\"C\"", ")", "[", ":", "self", ".", "_nb_samples", "]" ]
Reads the current marker and returns its genotypes.
[ "Reads", "the", "current", "marker", "and", "returns", "its", "genotypes", "." ]
python
train
codeinn/vcs
vcs/backends/base.py
https://github.com/codeinn/vcs/blob/e6cd94188e9c36d273411bf3adc0584ac6ab92a0/vcs/backends/base.py#L755-L770
def add(self, *filenodes): """ Marks given ``FileNode`` objects as *to be committed*. :raises ``NodeAlreadyExistsError``: if node with same path exists at latest changeset :raises ``NodeAlreadyAddedError``: if node with same path is already marked as *added* """ # Check if not already marked as *added* first for node in filenodes: if node.path in (n.path for n in self.added): raise NodeAlreadyAddedError("Such FileNode %s is already " "marked for addition" % node.path) for node in filenodes: self.added.append(node)
[ "def", "add", "(", "self", ",", "*", "filenodes", ")", ":", "# Check if not already marked as *added* first", "for", "node", "in", "filenodes", ":", "if", "node", ".", "path", "in", "(", "n", ".", "path", "for", "n", "in", "self", ".", "added", ")", ":", "raise", "NodeAlreadyAddedError", "(", "\"Such FileNode %s is already \"", "\"marked for addition\"", "%", "node", ".", "path", ")", "for", "node", "in", "filenodes", ":", "self", ".", "added", ".", "append", "(", "node", ")" ]
Marks given ``FileNode`` objects as *to be committed*. :raises ``NodeAlreadyExistsError``: if node with same path exists at latest changeset :raises ``NodeAlreadyAddedError``: if node with same path is already marked as *added*
[ "Marks", "given", "FileNode", "objects", "as", "*", "to", "be", "committed", "*", "." ]
python
train
tensorflow/cleverhans
cleverhans/attacks/spsa.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/cleverhans/attacks/spsa.py#L613-L758
def projected_optimization(loss_fn, input_image, label, epsilon, num_steps, clip_min=None, clip_max=None, optimizer=TensorAdam(), project_perturbation=_project_perturbation, early_stop_loss_threshold=None, is_debug=False): """Generic projected optimization, generalized to work with approximate gradients. Used for e.g. the SPSA attack. Args: :param loss_fn: A callable which takes `input_image` and `label` as arguments, and returns a batch of loss values. Same interface as TensorOptimizer. :param input_image: Tensor, a batch of images :param label: Tensor, a batch of labels :param epsilon: float, the L-infinity norm of the maximum allowable perturbation :param num_steps: int, the number of steps of gradient descent :param clip_min: float, minimum pixel value :param clip_max: float, maximum pixel value :param optimizer: A `TensorOptimizer` object :param project_perturbation: A function, which will be used to enforce some constraint. It should have the same signature as `_project_perturbation`. :param early_stop_loss_threshold: A float or None. If specified, the attack will end if the loss is below `early_stop_loss_threshold`. Enabling this option can have several different effects: - Setting the threshold to 0. guarantees that if a successful attack is found, it is returned. This increases the attack success rate, because without early stopping the optimizer can accidentally bounce back to a point where the attack fails. - Early stopping can make the attack run faster because it may run for fewer steps. - Early stopping can make the attack run slower because the loss must be calculated at each step. The loss is not calculated as part of the normal SPSA optimization procedure. For most reasonable choices of hyperparameters, early stopping makes the attack much faster because it decreases the number of steps dramatically. :param is_debug: A bool. If True, print debug info for attack progress. Returns: adversarial version of `input_image`, with L-infinity difference less than epsilon, which tries to minimize loss_fn. Note that this function is not intended as an Attack by itself. Rather, it is designed as a helper function which you can use to write your own attack methods. The method uses a tf.while_loop to optimize a loss function in a single sess.run() call. """ assert num_steps is not None if is_debug: with tf.device("/cpu:0"): input_image = tf.Print( input_image, [], "Starting PGD attack with epsilon: %s" % epsilon) init_perturbation = tf.random_uniform( tf.shape(input_image), minval=tf.cast(-epsilon, input_image.dtype), maxval=tf.cast(epsilon, input_image.dtype), dtype=input_image.dtype) init_perturbation = project_perturbation(init_perturbation, epsilon, input_image, clip_min=clip_min, clip_max=clip_max) init_optim_state = optimizer.init_state([init_perturbation]) nest = tf.contrib.framework.nest def loop_body(i, perturbation, flat_optim_state): """Update perturbation to input image.""" optim_state = nest.pack_sequence_as( structure=init_optim_state, flat_sequence=flat_optim_state) def wrapped_loss_fn(x): return loss_fn(input_image + x, label) new_perturbation_list, new_optim_state = optimizer.minimize( wrapped_loss_fn, [perturbation], optim_state) projected_perturbation = project_perturbation(new_perturbation_list[0], epsilon, input_image, clip_min=clip_min, clip_max=clip_max) # Be careful with this bool. A value of 0. is a valid threshold but evaluates to False, so we must explicitly # check whether the value is None. 
early_stop = early_stop_loss_threshold is not None compute_loss = is_debug or early_stop # Don't waste time building the loss graph if we're not going to use it if compute_loss: # NOTE: this step is not actually redundant with the optimizer step. # SPSA calculates the loss at randomly perturbed points but doesn't calculate the loss at the current point. loss = reduce_mean(wrapped_loss_fn(projected_perturbation), axis=0) if is_debug: with tf.device("/cpu:0"): loss = tf.Print(loss, [loss], "Total batch loss") if early_stop: i = tf.cond(tf.less(loss, early_stop_loss_threshold), lambda: float(num_steps), lambda: i) return i + 1, projected_perturbation, nest.flatten(new_optim_state) def cond(i, *_): return tf.less(i, num_steps) flat_init_optim_state = nest.flatten(init_optim_state) _, final_perturbation, _ = tf.while_loop( cond, loop_body, loop_vars=(tf.constant(0.), init_perturbation, flat_init_optim_state), parallel_iterations=1, back_prop=False, maximum_iterations=num_steps) if project_perturbation is _project_perturbation: # TODO: this assert looks totally wrong. # Not bothering to fix it now because it's only an assert. # 1) Multiplying by 1.1 gives a huge margin of error. This should probably # take the difference and allow a tolerance of 1e-6 or something like # that. # 2) I think it should probably check the *absolute value* of # final_perturbation perturbation_max = epsilon * 1.1 check_diff = utils_tf.assert_less_equal( final_perturbation, tf.cast(perturbation_max, final_perturbation.dtype), message="final_perturbation must change no pixel by more than " "%s" % perturbation_max) else: # TODO: let caller pass in a check_diff function as well as # project_perturbation check_diff = tf.no_op() if clip_min is None or clip_max is None: raise NotImplementedError("This function only supports clipping for now") check_range = [utils_tf.assert_less_equal(input_image, tf.cast(clip_max, input_image.dtype)), utils_tf.assert_greater_equal(input_image, tf.cast(clip_min, input_image.dtype))] with tf.control_dependencies([check_diff] + check_range): adversarial_image = input_image + final_perturbation return tf.stop_gradient(adversarial_image)
[ "def", "projected_optimization", "(", "loss_fn", ",", "input_image", ",", "label", ",", "epsilon", ",", "num_steps", ",", "clip_min", "=", "None", ",", "clip_max", "=", "None", ",", "optimizer", "=", "TensorAdam", "(", ")", ",", "project_perturbation", "=", "_project_perturbation", ",", "early_stop_loss_threshold", "=", "None", ",", "is_debug", "=", "False", ")", ":", "assert", "num_steps", "is", "not", "None", "if", "is_debug", ":", "with", "tf", ".", "device", "(", "\"/cpu:0\"", ")", ":", "input_image", "=", "tf", ".", "Print", "(", "input_image", ",", "[", "]", ",", "\"Starting PGD attack with epsilon: %s\"", "%", "epsilon", ")", "init_perturbation", "=", "tf", ".", "random_uniform", "(", "tf", ".", "shape", "(", "input_image", ")", ",", "minval", "=", "tf", ".", "cast", "(", "-", "epsilon", ",", "input_image", ".", "dtype", ")", ",", "maxval", "=", "tf", ".", "cast", "(", "epsilon", ",", "input_image", ".", "dtype", ")", ",", "dtype", "=", "input_image", ".", "dtype", ")", "init_perturbation", "=", "project_perturbation", "(", "init_perturbation", ",", "epsilon", ",", "input_image", ",", "clip_min", "=", "clip_min", ",", "clip_max", "=", "clip_max", ")", "init_optim_state", "=", "optimizer", ".", "init_state", "(", "[", "init_perturbation", "]", ")", "nest", "=", "tf", ".", "contrib", ".", "framework", ".", "nest", "def", "loop_body", "(", "i", ",", "perturbation", ",", "flat_optim_state", ")", ":", "\"\"\"Update perturbation to input image.\"\"\"", "optim_state", "=", "nest", ".", "pack_sequence_as", "(", "structure", "=", "init_optim_state", ",", "flat_sequence", "=", "flat_optim_state", ")", "def", "wrapped_loss_fn", "(", "x", ")", ":", "return", "loss_fn", "(", "input_image", "+", "x", ",", "label", ")", "new_perturbation_list", ",", "new_optim_state", "=", "optimizer", ".", "minimize", "(", "wrapped_loss_fn", ",", "[", "perturbation", "]", ",", "optim_state", ")", "projected_perturbation", "=", "project_perturbation", "(", "new_perturbation_list", "[", "0", "]", ",", "epsilon", ",", "input_image", ",", "clip_min", "=", "clip_min", ",", "clip_max", "=", "clip_max", ")", "# Be careful with this bool. A value of 0. 
is a valid threshold but evaluates to False, so we must explicitly", "# check whether the value is None.", "early_stop", "=", "early_stop_loss_threshold", "is", "not", "None", "compute_loss", "=", "is_debug", "or", "early_stop", "# Don't waste time building the loss graph if we're not going to use it", "if", "compute_loss", ":", "# NOTE: this step is not actually redundant with the optimizer step.", "# SPSA calculates the loss at randomly perturbed points but doesn't calculate the loss at the current point.", "loss", "=", "reduce_mean", "(", "wrapped_loss_fn", "(", "projected_perturbation", ")", ",", "axis", "=", "0", ")", "if", "is_debug", ":", "with", "tf", ".", "device", "(", "\"/cpu:0\"", ")", ":", "loss", "=", "tf", ".", "Print", "(", "loss", ",", "[", "loss", "]", ",", "\"Total batch loss\"", ")", "if", "early_stop", ":", "i", "=", "tf", ".", "cond", "(", "tf", ".", "less", "(", "loss", ",", "early_stop_loss_threshold", ")", ",", "lambda", ":", "float", "(", "num_steps", ")", ",", "lambda", ":", "i", ")", "return", "i", "+", "1", ",", "projected_perturbation", ",", "nest", ".", "flatten", "(", "new_optim_state", ")", "def", "cond", "(", "i", ",", "*", "_", ")", ":", "return", "tf", ".", "less", "(", "i", ",", "num_steps", ")", "flat_init_optim_state", "=", "nest", ".", "flatten", "(", "init_optim_state", ")", "_", ",", "final_perturbation", ",", "_", "=", "tf", ".", "while_loop", "(", "cond", ",", "loop_body", ",", "loop_vars", "=", "(", "tf", ".", "constant", "(", "0.", ")", ",", "init_perturbation", ",", "flat_init_optim_state", ")", ",", "parallel_iterations", "=", "1", ",", "back_prop", "=", "False", ",", "maximum_iterations", "=", "num_steps", ")", "if", "project_perturbation", "is", "_project_perturbation", ":", "# TODO: this assert looks totally wrong.", "# Not bothering to fix it now because it's only an assert.", "# 1) Multiplying by 1.1 gives a huge margin of error. This should probably", "# take the difference and allow a tolerance of 1e-6 or something like", "# that.", "# 2) I think it should probably check the *absolute value* of", "# final_perturbation", "perturbation_max", "=", "epsilon", "*", "1.1", "check_diff", "=", "utils_tf", ".", "assert_less_equal", "(", "final_perturbation", ",", "tf", ".", "cast", "(", "perturbation_max", ",", "final_perturbation", ".", "dtype", ")", ",", "message", "=", "\"final_perturbation must change no pixel by more than \"", "\"%s\"", "%", "perturbation_max", ")", "else", ":", "# TODO: let caller pass in a check_diff function as well as", "# project_perturbation", "check_diff", "=", "tf", ".", "no_op", "(", ")", "if", "clip_min", "is", "None", "or", "clip_max", "is", "None", ":", "raise", "NotImplementedError", "(", "\"This function only supports clipping for now\"", ")", "check_range", "=", "[", "utils_tf", ".", "assert_less_equal", "(", "input_image", ",", "tf", ".", "cast", "(", "clip_max", ",", "input_image", ".", "dtype", ")", ")", ",", "utils_tf", ".", "assert_greater_equal", "(", "input_image", ",", "tf", ".", "cast", "(", "clip_min", ",", "input_image", ".", "dtype", ")", ")", "]", "with", "tf", ".", "control_dependencies", "(", "[", "check_diff", "]", "+", "check_range", ")", ":", "adversarial_image", "=", "input_image", "+", "final_perturbation", "return", "tf", ".", "stop_gradient", "(", "adversarial_image", ")" ]
Generic projected optimization, generalized to work with approximate gradients. Used for e.g. the SPSA attack. Args: :param loss_fn: A callable which takes `input_image` and `label` as arguments, and returns a batch of loss values. Same interface as TensorOptimizer. :param input_image: Tensor, a batch of images :param label: Tensor, a batch of labels :param epsilon: float, the L-infinity norm of the maximum allowable perturbation :param num_steps: int, the number of steps of gradient descent :param clip_min: float, minimum pixel value :param clip_max: float, maximum pixel value :param optimizer: A `TensorOptimizer` object :param project_perturbation: A function, which will be used to enforce some constraint. It should have the same signature as `_project_perturbation`. :param early_stop_loss_threshold: A float or None. If specified, the attack will end if the loss is below `early_stop_loss_threshold`. Enabling this option can have several different effects: - Setting the threshold to 0. guarantees that if a successful attack is found, it is returned. This increases the attack success rate, because without early stopping the optimizer can accidentally bounce back to a point where the attack fails. - Early stopping can make the attack run faster because it may run for fewer steps. - Early stopping can make the attack run slower because the loss must be calculated at each step. The loss is not calculated as part of the normal SPSA optimization procedure. For most reasonable choices of hyperparameters, early stopping makes the attack much faster because it decreases the number of steps dramatically. :param is_debug: A bool. If True, print debug info for attack progress. Returns: adversarial version of `input_image`, with L-infinity difference less than epsilon, which tries to minimize loss_fn. Note that this function is not intended as an Attack by itself. Rather, it is designed as a helper function which you can use to write your own attack methods. The method uses a tf.while_loop to optimize a loss function in a single sess.run() call.
[ "Generic", "projected", "optimization", "generalized", "to", "work", "with", "approximate", "gradients", ".", "Used", "for", "e", ".", "g", ".", "the", "SPSA", "attack", "." ]
python
train
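A hedged usage sketch (not part of the record above): driving projected_optimization in TF1 graph mode with a toy differentiable loss instead of a real model. The import path cleverhans.attacks.spsa and the toy loss are assumptions; only the call signature follows the code shown here.

import numpy as np
import tensorflow as tf
from cleverhans.attacks.spsa import projected_optimization  # assumed module path

def toy_loss_fn(x, label):
    # must return one loss value per example; the attack minimizes it
    return tf.reduce_sum(tf.square(x - 0.5), axis=[1, 2, 3])

input_image = tf.placeholder(tf.float32, [None, 8, 8, 1])
label = tf.placeholder(tf.int64, [None])
adv_x = projected_optimization(toy_loss_fn, input_image, label,
                               epsilon=0.1, num_steps=20,
                               clip_min=0., clip_max=1.)

with tf.Session() as sess:
    x_np = np.random.uniform(size=(4, 8, 8, 1)).astype(np.float32)
    adv_np = sess.run(adv_x, feed_dict={input_image: x_np,
                                        label: np.zeros(4, dtype=np.int64)})
    print(np.abs(adv_np - x_np).max())  # stays within epsilon (up to clipping)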
orbingol/NURBS-Python
geomdl/utilities.py
https://github.com/orbingol/NURBS-Python/blob/b1c6a8b51cf143ff58761438e93ba6baef470627/geomdl/utilities.py#L192-L216
def evaluate_bounding_box(ctrlpts): """ Computes the minimum bounding box of the point set. The (minimum) bounding box is the smallest enclosure in which all the input points lie. :param ctrlpts: points :type ctrlpts: list, tuple :return: bounding box in the format [min, max] :rtype: tuple """ # Estimate dimension from the first element of the control points dimension = len(ctrlpts[0]) # Evaluate bounding box bbmin = [float('inf') for _ in range(0, dimension)] bbmax = [float('-inf') for _ in range(0, dimension)] for cpt in ctrlpts: for i, arr in enumerate(zip(cpt, bbmin)): if arr[0] < arr[1]: bbmin[i] = arr[0] for i, arr in enumerate(zip(cpt, bbmax)): if arr[0] > arr[1]: bbmax[i] = arr[0] return tuple(bbmin), tuple(bbmax)
[ "def", "evaluate_bounding_box", "(", "ctrlpts", ")", ":", "# Estimate dimension from the first element of the control points", "dimension", "=", "len", "(", "ctrlpts", "[", "0", "]", ")", "# Evaluate bounding box", "bbmin", "=", "[", "float", "(", "'inf'", ")", "for", "_", "in", "range", "(", "0", ",", "dimension", ")", "]", "bbmax", "=", "[", "float", "(", "'-inf'", ")", "for", "_", "in", "range", "(", "0", ",", "dimension", ")", "]", "for", "cpt", "in", "ctrlpts", ":", "for", "i", ",", "arr", "in", "enumerate", "(", "zip", "(", "cpt", ",", "bbmin", ")", ")", ":", "if", "arr", "[", "0", "]", "<", "arr", "[", "1", "]", ":", "bbmin", "[", "i", "]", "=", "arr", "[", "0", "]", "for", "i", ",", "arr", "in", "enumerate", "(", "zip", "(", "cpt", ",", "bbmax", ")", ")", ":", "if", "arr", "[", "0", "]", ">", "arr", "[", "1", "]", ":", "bbmax", "[", "i", "]", "=", "arr", "[", "0", "]", "return", "tuple", "(", "bbmin", ")", ",", "tuple", "(", "bbmax", ")" ]
Computes the minimum bounding box of the point set. The (minimum) bounding box is the smallest enclosure in which all the input points lie. :param ctrlpts: points :type ctrlpts: list, tuple :return: bounding box in the format [min, max] :rtype: tuple
[ "Computes", "the", "minimum", "bounding", "box", "of", "the", "point", "set", "." ]
python
train
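A hedged usage sketch for the record above; it assumes geomdl is installed and that evaluate_bounding_box is importable from geomdl.utilities, as the path suggests.

from geomdl import utilities

ctrlpts = [[0.0, 1.0, 2.0], [3.0, -1.0, 0.5], [1.5, 4.0, -2.0]]
bbmin, bbmax = utilities.evaluate_bounding_box(ctrlpts)
print(bbmin)  # (0.0, -1.0, -2.0)
print(bbmax)  # (3.0, 4.0, 2.0)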
OpenGeoVis/espatools
espatools/read.py
https://github.com/OpenGeoVis/espatools/blob/5c04daae0f035c7efcb4096bb85a26c6959ac9ea/espatools/read.py#L82-L134
def GenerateBand(self, band, meta_only=False, cast=False): """Genreate a Band object given band metadata Args: band (dict): dictionary containing metadata for a given band Return: Band : the loaded Band onject""" # Read the band data and add it to dictionary if not meta_only: fname = band.get('file_name') data = self.ReadTif('%s/%s' % (os.path.dirname(self.filename), fname)) # band['data'] = data # TODO: data is not a properties object so do not set yet def FixBitmap(d): p = d.get('bitmap_description') if p: lis = p.get('bit') bm = dict() # Fix bitmap_description from list of dicts to one dict for i in lis: key = i['num'] value = i['text'] bm[key] = value del d['bitmap_description'] d['bitmap_description'] = bm return d band = SetProperties(Band, FixBitmap(self.CleanDict(band))) if not meta_only: if cast: # cast as floats and fill bad values with nans data = data.astype(np.float32) data[data==band.fill_value] = -9999 if band.valid_range is not None: data[data<band.valid_range.min] = -9999 data[data>band.valid_range.max] = -9999 data[data==-9999] = np.nan else: data = np.ma.masked_where(data==band.fill_value, data) if band.valid_range is not None: data = np.ma.masked_where(data<band.valid_range.min, data) data = np.ma.masked_where(data>band.valid_range.max, data) # Flip y axis if requested if self.yflip: data = np.flip(data, 0) band.data = data if not meta_only: band.validate() return band
[ "def", "GenerateBand", "(", "self", ",", "band", ",", "meta_only", "=", "False", ",", "cast", "=", "False", ")", ":", "# Read the band data and add it to dictionary", "if", "not", "meta_only", ":", "fname", "=", "band", ".", "get", "(", "'file_name'", ")", "data", "=", "self", ".", "ReadTif", "(", "'%s/%s'", "%", "(", "os", ".", "path", ".", "dirname", "(", "self", ".", "filename", ")", ",", "fname", ")", ")", "# band['data'] = data # TODO: data is not a properties object so do not set yet", "def", "FixBitmap", "(", "d", ")", ":", "p", "=", "d", ".", "get", "(", "'bitmap_description'", ")", "if", "p", ":", "lis", "=", "p", ".", "get", "(", "'bit'", ")", "bm", "=", "dict", "(", ")", "# Fix bitmap_description from list of dicts to one dict", "for", "i", "in", "lis", ":", "key", "=", "i", "[", "'num'", "]", "value", "=", "i", "[", "'text'", "]", "bm", "[", "key", "]", "=", "value", "del", "d", "[", "'bitmap_description'", "]", "d", "[", "'bitmap_description'", "]", "=", "bm", "return", "d", "band", "=", "SetProperties", "(", "Band", ",", "FixBitmap", "(", "self", ".", "CleanDict", "(", "band", ")", ")", ")", "if", "not", "meta_only", ":", "if", "cast", ":", "# cast as floats and fill bad values with nans", "data", "=", "data", ".", "astype", "(", "np", ".", "float32", ")", "data", "[", "data", "==", "band", ".", "fill_value", "]", "=", "-", "9999", "if", "band", ".", "valid_range", "is", "not", "None", ":", "data", "[", "data", "<", "band", ".", "valid_range", ".", "min", "]", "=", "-", "9999", "data", "[", "data", ">", "band", ".", "valid_range", ".", "max", "]", "=", "-", "9999", "data", "[", "data", "==", "-", "9999", "]", "=", "np", ".", "nan", "else", ":", "data", "=", "np", ".", "ma", ".", "masked_where", "(", "data", "==", "band", ".", "fill_value", ",", "data", ")", "if", "band", ".", "valid_range", "is", "not", "None", ":", "data", "=", "np", ".", "ma", ".", "masked_where", "(", "data", "<", "band", ".", "valid_range", ".", "min", ",", "data", ")", "data", "=", "np", ".", "ma", ".", "masked_where", "(", "data", ">", "band", ".", "valid_range", ".", "max", ",", "data", ")", "# Flip y axis if requested", "if", "self", ".", "yflip", ":", "data", "=", "np", ".", "flip", "(", "data", ",", "0", ")", "band", ".", "data", "=", "data", "if", "not", "meta_only", ":", "band", ".", "validate", "(", ")", "return", "band" ]
Generate a Band object given band metadata

Args:
    band (dict): dictionary containing metadata for a given band

Return:
    Band : the loaded Band object
[ "Genreate", "a", "Band", "object", "given", "band", "metadata" ]
python
train
joshspeagle/dynesty
dynesty/utils.py
https://github.com/joshspeagle/dynesty/blob/9e482aafeb5cf84bedb896fa6f07a761d917983e/dynesty/utils.py#L704-L846
def unravel_run(res, save_proposals=True, print_progress=True): """ Unravels a run with `K` live points into `K` "strands" (a nested sampling run with only 1 live point). **WARNING: the anciliary quantities provided with each unraveled "strand" are only valid if the point was initialized from the prior.** Parameters ---------- res : :class:`~dynesty.results.Results` instance The :class:`~dynesty.results.Results` instance taken from a previous nested sampling run. save_proposals : bool, optional Whether to save a reference to the proposal distributions from the original run in each unraveled strand. Default is `True`. print_progress : bool, optional Whether to output the current progress to `~sys.stderr`. Default is `True`. Returns ------- new_res : list of :class:`~dynesty.results.Results` instances A list of new :class:`~dynesty.results.Results` instances for each individual strand. """ idxs = res.samples_id # label for each live/dead point # Check if we added in the last set of dead points. added_live = True try: if len(idxs) != (res.niter + res.nlive): added_live = False except: pass # Recreate the nested sampling run for each strand. new_res = [] nstrands = len(np.unique(idxs)) for counter, idx in enumerate(np.unique(idxs)): # Select strand `idx`. strand = (idxs == idx) nsamps = sum(strand) logl = res.logl[strand] # Assign log(volume) to samples. With K=1 live point, the expected # shrinking in `logvol` at each iteration is `-log(2)` (i.e. # shrinking by 1/2). If the final set of live points were added, # the expected value of the final live point is a uniform # sample and so has an expected value of half the volume # of the final dead point. if added_live: niter = nsamps - 1 logvol_dead = -math.log(2) * (1. + np.arange(niter)) if niter > 0: logvol_live = logvol_dead[-1] + math.log(0.5) logvol = np.append(logvol_dead, logvol_live) else: # point always live logvol = np.array([math.log(0.5)]) else: niter = nsamps logvol = -math.log(2) * (1. + np.arange(niter)) # Compute weights using quadratic estimator. h = 0. logz = -1.e300 loglstar = -1.e300 logzvar = 0. logvols_pad = np.concatenate(([0.], logvol)) logdvols = misc.logsumexp(a=np.c_[logvols_pad[:-1], logvols_pad[1:]], axis=1, b=np.c_[np.ones(nsamps), -np.ones(nsamps)]) logdvols += math.log(0.5) dlvs = logvols_pad[:-1] - logvols_pad[1:] saved_logwt, saved_logz, saved_logzvar, saved_h = [], [], [], [] for i in range(nsamps): loglstar_new = logl[i] logdvol, dlv = logdvols[i], dlvs[i] logwt = np.logaddexp(loglstar_new, loglstar) + logdvol logz_new = np.logaddexp(logz, logwt) lzterm = (math.exp(loglstar - logz_new) * loglstar + math.exp(loglstar_new - logz_new) * loglstar_new) h_new = (math.exp(logdvol) * lzterm + math.exp(logz - logz_new) * (h + logz) - logz_new) dh = h_new - h h = h_new logz = logz_new logzvar += dh * dlv loglstar = loglstar_new saved_logwt.append(logwt) saved_logz.append(logz) saved_logzvar.append(logzvar) saved_h.append(h) # Compute sampling efficiency. eff = 100. * nsamps / sum(res.ncall[strand]) # Save results. r = [('nlive', 1), ('niter', niter), ('ncall', res.ncall[strand]), ('eff', eff), ('samples', res.samples[strand]), ('samples_id', res.samples_id[strand]), ('samples_it', res.samples_it[strand]), ('samples_u', res.samples_u[strand]), ('logwt', np.array(saved_logwt)), ('logl', logl), ('logvol', logvol), ('logz', np.array(saved_logz)), ('logzerr', np.sqrt(np.array(saved_logzvar))), ('h', np.array(saved_h))] # Add proposal information (if available). 
if save_proposals: try: r.append(('prop', res.prop)) r.append(('prop_iter', res.prop_iter[strand])) r.append(('samples_prop', res.samples_prop[strand])) r.append(('scale', res.scale[strand])) except: pass # Add on batch information (if available). try: r.append(('samples_batch', res.samples_batch[strand])) r.append(('batch_bounds', res.batch_bounds)) except: pass # Append to list of strands. new_res.append(Results(r)) # Print progress. if print_progress: sys.stderr.write('\rStrand: {0}/{1} ' .format(counter + 1, nstrands)) return new_res
[ "def", "unravel_run", "(", "res", ",", "save_proposals", "=", "True", ",", "print_progress", "=", "True", ")", ":", "idxs", "=", "res", ".", "samples_id", "# label for each live/dead point", "# Check if we added in the last set of dead points.", "added_live", "=", "True", "try", ":", "if", "len", "(", "idxs", ")", "!=", "(", "res", ".", "niter", "+", "res", ".", "nlive", ")", ":", "added_live", "=", "False", "except", ":", "pass", "# Recreate the nested sampling run for each strand.", "new_res", "=", "[", "]", "nstrands", "=", "len", "(", "np", ".", "unique", "(", "idxs", ")", ")", "for", "counter", ",", "idx", "in", "enumerate", "(", "np", ".", "unique", "(", "idxs", ")", ")", ":", "# Select strand `idx`.", "strand", "=", "(", "idxs", "==", "idx", ")", "nsamps", "=", "sum", "(", "strand", ")", "logl", "=", "res", ".", "logl", "[", "strand", "]", "# Assign log(volume) to samples. With K=1 live point, the expected", "# shrinking in `logvol` at each iteration is `-log(2)` (i.e.", "# shrinking by 1/2). If the final set of live points were added,", "# the expected value of the final live point is a uniform", "# sample and so has an expected value of half the volume", "# of the final dead point.", "if", "added_live", ":", "niter", "=", "nsamps", "-", "1", "logvol_dead", "=", "-", "math", ".", "log", "(", "2", ")", "*", "(", "1.", "+", "np", ".", "arange", "(", "niter", ")", ")", "if", "niter", ">", "0", ":", "logvol_live", "=", "logvol_dead", "[", "-", "1", "]", "+", "math", ".", "log", "(", "0.5", ")", "logvol", "=", "np", ".", "append", "(", "logvol_dead", ",", "logvol_live", ")", "else", ":", "# point always live", "logvol", "=", "np", ".", "array", "(", "[", "math", ".", "log", "(", "0.5", ")", "]", ")", "else", ":", "niter", "=", "nsamps", "logvol", "=", "-", "math", ".", "log", "(", "2", ")", "*", "(", "1.", "+", "np", ".", "arange", "(", "niter", ")", ")", "# Compute weights using quadratic estimator.", "h", "=", "0.", "logz", "=", "-", "1.e300", "loglstar", "=", "-", "1.e300", "logzvar", "=", "0.", "logvols_pad", "=", "np", ".", "concatenate", "(", "(", "[", "0.", "]", ",", "logvol", ")", ")", "logdvols", "=", "misc", ".", "logsumexp", "(", "a", "=", "np", ".", "c_", "[", "logvols_pad", "[", ":", "-", "1", "]", ",", "logvols_pad", "[", "1", ":", "]", "]", ",", "axis", "=", "1", ",", "b", "=", "np", ".", "c_", "[", "np", ".", "ones", "(", "nsamps", ")", ",", "-", "np", ".", "ones", "(", "nsamps", ")", "]", ")", "logdvols", "+=", "math", ".", "log", "(", "0.5", ")", "dlvs", "=", "logvols_pad", "[", ":", "-", "1", "]", "-", "logvols_pad", "[", "1", ":", "]", "saved_logwt", ",", "saved_logz", ",", "saved_logzvar", ",", "saved_h", "=", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", "for", "i", "in", "range", "(", "nsamps", ")", ":", "loglstar_new", "=", "logl", "[", "i", "]", "logdvol", ",", "dlv", "=", "logdvols", "[", "i", "]", ",", "dlvs", "[", "i", "]", "logwt", "=", "np", ".", "logaddexp", "(", "loglstar_new", ",", "loglstar", ")", "+", "logdvol", "logz_new", "=", "np", ".", "logaddexp", "(", "logz", ",", "logwt", ")", "lzterm", "=", "(", "math", ".", "exp", "(", "loglstar", "-", "logz_new", ")", "*", "loglstar", "+", "math", ".", "exp", "(", "loglstar_new", "-", "logz_new", ")", "*", "loglstar_new", ")", "h_new", "=", "(", "math", ".", "exp", "(", "logdvol", ")", "*", "lzterm", "+", "math", ".", "exp", "(", "logz", "-", "logz_new", ")", "*", "(", "h", "+", "logz", ")", "-", "logz_new", ")", "dh", "=", "h_new", "-", "h", "h", "=", "h_new", "logz", "=", "logz_new", "logzvar", 
"+=", "dh", "*", "dlv", "loglstar", "=", "loglstar_new", "saved_logwt", ".", "append", "(", "logwt", ")", "saved_logz", ".", "append", "(", "logz", ")", "saved_logzvar", ".", "append", "(", "logzvar", ")", "saved_h", ".", "append", "(", "h", ")", "# Compute sampling efficiency.", "eff", "=", "100.", "*", "nsamps", "/", "sum", "(", "res", ".", "ncall", "[", "strand", "]", ")", "# Save results.", "r", "=", "[", "(", "'nlive'", ",", "1", ")", ",", "(", "'niter'", ",", "niter", ")", ",", "(", "'ncall'", ",", "res", ".", "ncall", "[", "strand", "]", ")", ",", "(", "'eff'", ",", "eff", ")", ",", "(", "'samples'", ",", "res", ".", "samples", "[", "strand", "]", ")", ",", "(", "'samples_id'", ",", "res", ".", "samples_id", "[", "strand", "]", ")", ",", "(", "'samples_it'", ",", "res", ".", "samples_it", "[", "strand", "]", ")", ",", "(", "'samples_u'", ",", "res", ".", "samples_u", "[", "strand", "]", ")", ",", "(", "'logwt'", ",", "np", ".", "array", "(", "saved_logwt", ")", ")", ",", "(", "'logl'", ",", "logl", ")", ",", "(", "'logvol'", ",", "logvol", ")", ",", "(", "'logz'", ",", "np", ".", "array", "(", "saved_logz", ")", ")", ",", "(", "'logzerr'", ",", "np", ".", "sqrt", "(", "np", ".", "array", "(", "saved_logzvar", ")", ")", ")", ",", "(", "'h'", ",", "np", ".", "array", "(", "saved_h", ")", ")", "]", "# Add proposal information (if available).", "if", "save_proposals", ":", "try", ":", "r", ".", "append", "(", "(", "'prop'", ",", "res", ".", "prop", ")", ")", "r", ".", "append", "(", "(", "'prop_iter'", ",", "res", ".", "prop_iter", "[", "strand", "]", ")", ")", "r", ".", "append", "(", "(", "'samples_prop'", ",", "res", ".", "samples_prop", "[", "strand", "]", ")", ")", "r", ".", "append", "(", "(", "'scale'", ",", "res", ".", "scale", "[", "strand", "]", ")", ")", "except", ":", "pass", "# Add on batch information (if available).", "try", ":", "r", ".", "append", "(", "(", "'samples_batch'", ",", "res", ".", "samples_batch", "[", "strand", "]", ")", ")", "r", ".", "append", "(", "(", "'batch_bounds'", ",", "res", ".", "batch_bounds", ")", ")", "except", ":", "pass", "# Append to list of strands.", "new_res", ".", "append", "(", "Results", "(", "r", ")", ")", "# Print progress.", "if", "print_progress", ":", "sys", ".", "stderr", ".", "write", "(", "'\\rStrand: {0}/{1} '", ".", "format", "(", "counter", "+", "1", ",", "nstrands", ")", ")", "return", "new_res" ]
Unravels a run with `K` live points into `K` "strands" (a nested sampling run with only 1 live point). **WARNING: the anciliary quantities provided with each unraveled "strand" are only valid if the point was initialized from the prior.** Parameters ---------- res : :class:`~dynesty.results.Results` instance The :class:`~dynesty.results.Results` instance taken from a previous nested sampling run. save_proposals : bool, optional Whether to save a reference to the proposal distributions from the original run in each unraveled strand. Default is `True`. print_progress : bool, optional Whether to output the current progress to `~sys.stderr`. Default is `True`. Returns ------- new_res : list of :class:`~dynesty.results.Results` instances A list of new :class:`~dynesty.results.Results` instances for each individual strand.
[ "Unravels", "a", "run", "with", "K", "live", "points", "into", "K", "strands", "(", "a", "nested", "sampling", "run", "with", "only", "1", "live", "point", ")", ".", "**", "WARNING", ":", "the", "anciliary", "quantities", "provided", "with", "each", "unraveled", "strand", "are", "only", "valid", "if", "the", "point", "was", "initialized", "from", "the", "prior", ".", "**" ]
python
train
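A hedged usage sketch; it assumes dynesty is installed and that unravel_run is reachable via dynesty.utils. The toy likelihood and prior transform are placeholders, not from the record.

import numpy as np
from dynesty import NestedSampler, utils as dyfunc

ndim = 3

def loglike(x):
    return -0.5 * np.sum(x ** 2)

def prior_transform(u):
    return 10.0 * u - 5.0  # map the unit cube to [-5, 5]

sampler = NestedSampler(loglike, prior_transform, ndim, nlive=50)
sampler.run_nested(print_progress=False)
res = sampler.results

strands = dyfunc.unravel_run(res, print_progress=False)
print(len(strands))  # one Results object per unique live-point strand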
bmweiner/skillful
skillful/interface.py
https://github.com/bmweiner/skillful/blob/8646f54faf62cb63f165f7699b8ace5b4a08233c/skillful/interface.py#L399-L419
def set_card_standard(self, title, text, smallImageUrl=None, largeImageUrl=None): """Set response card as standard type. title, text, and image cannot exceed 8,000 characters. Args: title: str. Title of Simple or Standard type card. text: str. Content of Standard type card. smallImageUrl: str. URL of small image. Cannot exceed 2,000 characters. Recommended pixel size: 720w x 480h. largeImageUrl: str. URL of large image. Cannot exceed 2,000 characters. Recommended pixel size: 1200w x 800h. """ self.response.card.type = 'Standard' self.response.card.title = title self.response.card.text = text if smallImageUrl: self.response.card.image.smallImageUrl = smallImageUrl if largeImageUrl: self.response.card.image.largeImageUrl = largeImageUrl
[ "def", "set_card_standard", "(", "self", ",", "title", ",", "text", ",", "smallImageUrl", "=", "None", ",", "largeImageUrl", "=", "None", ")", ":", "self", ".", "response", ".", "card", ".", "type", "=", "'Standard'", "self", ".", "response", ".", "card", ".", "title", "=", "title", "self", ".", "response", ".", "card", ".", "text", "=", "text", "if", "smallImageUrl", ":", "self", ".", "response", ".", "card", ".", "image", ".", "smallImageUrl", "=", "smallImageUrl", "if", "largeImageUrl", ":", "self", ".", "response", ".", "card", ".", "image", ".", "largeImageUrl", "=", "largeImageUrl" ]
Set response card as standard type. title, text, and image cannot exceed 8,000 characters. Args: title: str. Title of Simple or Standard type card. text: str. Content of Standard type card. smallImageUrl: str. URL of small image. Cannot exceed 2,000 characters. Recommended pixel size: 720w x 480h. largeImageUrl: str. URL of large image. Cannot exceed 2,000 characters. Recommended pixel size: 1200w x 800h.
[ "Set", "response", "card", "as", "standard", "type", "." ]
python
train
MechanicalSoup/MechanicalSoup
mechanicalsoup/form.py
https://github.com/MechanicalSoup/MechanicalSoup/blob/027a270febf5bcda6a75db60ea9838d631370f4b/mechanicalsoup/form.py#L190-L231
def set_select(self, data): """Set the *selected*-attribute of the first option element specified by ``data`` (i.e. select an option from a dropdown). :param data: Dict of ``{name: value, ...}``. Find the select element whose *name*-attribute is ``name``. Then select from among its children the option element whose *value*-attribute is ``value``. If no matching *value*-attribute is found, this will search for an option whose text matches ``value``. If the select element's *multiple*-attribute is set, then ``value`` can be a list or tuple to select multiple options. """ for (name, value) in data.items(): select = self.form.find("select", {"name": name}) if not select: raise InvalidFormMethod("No select named " + name) # Deselect all options first for option in select.find_all("option"): if "selected" in option.attrs: del option.attrs["selected"] # Wrap individual values in a 1-element tuple. # If value is a list/tuple, select must be a <select multiple>. if not isinstance(value, list) and not isinstance(value, tuple): value = (value,) elif "multiple" not in select.attrs: raise LinkNotFoundError("Cannot select multiple options!") for choice in value: option = select.find("option", {"value": choice}) # try to find with text instead of value if not option: option = select.find("option", string=choice) if not option: raise LinkNotFoundError( 'Option %s not found for select %s' % (choice, name) ) option.attrs["selected"] = "selected"
[ "def", "set_select", "(", "self", ",", "data", ")", ":", "for", "(", "name", ",", "value", ")", "in", "data", ".", "items", "(", ")", ":", "select", "=", "self", ".", "form", ".", "find", "(", "\"select\"", ",", "{", "\"name\"", ":", "name", "}", ")", "if", "not", "select", ":", "raise", "InvalidFormMethod", "(", "\"No select named \"", "+", "name", ")", "# Deselect all options first", "for", "option", "in", "select", ".", "find_all", "(", "\"option\"", ")", ":", "if", "\"selected\"", "in", "option", ".", "attrs", ":", "del", "option", ".", "attrs", "[", "\"selected\"", "]", "# Wrap individual values in a 1-element tuple.", "# If value is a list/tuple, select must be a <select multiple>.", "if", "not", "isinstance", "(", "value", ",", "list", ")", "and", "not", "isinstance", "(", "value", ",", "tuple", ")", ":", "value", "=", "(", "value", ",", ")", "elif", "\"multiple\"", "not", "in", "select", ".", "attrs", ":", "raise", "LinkNotFoundError", "(", "\"Cannot select multiple options!\"", ")", "for", "choice", "in", "value", ":", "option", "=", "select", ".", "find", "(", "\"option\"", ",", "{", "\"value\"", ":", "choice", "}", ")", "# try to find with text instead of value", "if", "not", "option", ":", "option", "=", "select", ".", "find", "(", "\"option\"", ",", "string", "=", "choice", ")", "if", "not", "option", ":", "raise", "LinkNotFoundError", "(", "'Option %s not found for select %s'", "%", "(", "choice", ",", "name", ")", ")", "option", ".", "attrs", "[", "\"selected\"", "]", "=", "\"selected\"" ]
Set the *selected*-attribute of the first option element specified by ``data`` (i.e. select an option from a dropdown). :param data: Dict of ``{name: value, ...}``. Find the select element whose *name*-attribute is ``name``. Then select from among its children the option element whose *value*-attribute is ``value``. If no matching *value*-attribute is found, this will search for an option whose text matches ``value``. If the select element's *multiple*-attribute is set, then ``value`` can be a list or tuple to select multiple options.
[ "Set", "the", "*", "selected", "*", "-", "attribute", "of", "the", "first", "option", "element", "specified", "by", "data", "(", "i", ".", "e", ".", "select", "an", "option", "from", "a", "dropdown", ")", "." ]
python
train
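A hedged usage sketch; StatefulBrowser.open_fake_page and select_form are part of MechanicalSoup's public API, while the tiny HTML form below is a made-up placeholder.

import mechanicalsoup

html = '''<form action="/post">
  <select name="size" multiple>
    <option value="s">Small</option>
    <option value="m">Medium</option>
    <option value="l">Large</option>
  </select>
</form>'''

browser = mechanicalsoup.StatefulBrowser()
browser.open_fake_page(html)
form = browser.select_form("form")
form.set_select({"size": ("s", "l")})   # tuple allowed because the select is `multiple`
form.set_select({"size": ("Medium",)})  # falls back to matching the option text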
Visgean/urljects
urljects/routemap.py
https://github.com/Visgean/urljects/blob/29a3ca03f639ea7a9ee2f795ed17941c86b278ba/urljects/routemap.py#L43-L58
def include(self, location, namespace=None, app_name=None): """ Return an object suitable for url_patterns. :param location: root URL for all URLs from this router :param namespace: passed to url() :param app_name: passed to url() """ sorted_entries = sorted(self.routes, key=operator.itemgetter(0), reverse=True) arg = [u for _, u in sorted_entries] return url(location, urls.include( arg=arg, namespace=namespace, app_name=app_name))
[ "def", "include", "(", "self", ",", "location", ",", "namespace", "=", "None", ",", "app_name", "=", "None", ")", ":", "sorted_entries", "=", "sorted", "(", "self", ".", "routes", ",", "key", "=", "operator", ".", "itemgetter", "(", "0", ")", ",", "reverse", "=", "True", ")", "arg", "=", "[", "u", "for", "_", ",", "u", "in", "sorted_entries", "]", "return", "url", "(", "location", ",", "urls", ".", "include", "(", "arg", "=", "arg", ",", "namespace", "=", "namespace", ",", "app_name", "=", "app_name", ")", ")" ]
Return an object suitable for url_patterns. :param location: root URL for all URLs from this router :param namespace: passed to url() :param app_name: passed to url()
[ "Return", "an", "object", "suitable", "for", "url_patterns", "." ]
python
train
biocore/burrito-fillings
bfillings/mothur.py
https://github.com/biocore/burrito-fillings/blob/02ab71a46119b40793bd56a4ae00ca15f6dc3329/bfillings/mothur.py#L348-L366
def _input_as_lines(self, data): """Write sequence of lines to temp file, return filename data: a sequence to be written to a file, each element of the sequence will compose a line in the file * Note: '\n' will be stripped off the end of each sequence element before writing to a file in order to avoid multiple new lines accidentally be written to a file """ self._input_filename = self.getTmpFilename( self.WorkingDir, suffix='.fasta') with open(self._input_filename, 'w') as f: # Use lazy iteration instead of list comprehension to # prevent reading entire file into memory for line in data: f.write(str(line).strip('\n')) f.write('\n') return self._input_filename
[ "def", "_input_as_lines", "(", "self", ",", "data", ")", ":", "self", ".", "_input_filename", "=", "self", ".", "getTmpFilename", "(", "self", ".", "WorkingDir", ",", "suffix", "=", "'.fasta'", ")", "with", "open", "(", "self", ".", "_input_filename", ",", "'w'", ")", "as", "f", ":", "# Use lazy iteration instead of list comprehension to", "# prevent reading entire file into memory", "for", "line", "in", "data", ":", "f", ".", "write", "(", "str", "(", "line", ")", ".", "strip", "(", "'\\n'", ")", ")", "f", ".", "write", "(", "'\\n'", ")", "return", "self", ".", "_input_filename" ]
Write sequence of lines to temp file, return filename data: a sequence to be written to a file, each element of the sequence will compose a line in the file * Note: '\n' will be stripped off the end of each sequence element before writing to a file in order to avoid multiple new lines accidentally be written to a file
[ "Write", "sequence", "of", "lines", "to", "temp", "file", "return", "filename" ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L10053-L10075
def local_position_ned_cov_send(self, time_boot_ms, time_utc, estimator_type, x, y, z, vx, vy, vz, ax, ay, az, covariance, force_mavlink1=False): ''' The filtered local position (e.g. fused computer vision and accelerometers). Coordinate frame is right-handed, Z-axis down (aeronautical frame, NED / north-east-down convention) time_boot_ms : Timestamp (milliseconds since system boot). 0 for system without monotonic timestamp (uint32_t) time_utc : Timestamp (microseconds since UNIX epoch) in UTC. 0 for unknown. Commonly filled by the precision time source of a GPS receiver. (uint64_t) estimator_type : Class id of the estimator this estimate originated from. (uint8_t) x : X Position (float) y : Y Position (float) z : Z Position (float) vx : X Speed (m/s) (float) vy : Y Speed (m/s) (float) vz : Z Speed (m/s) (float) ax : X Acceleration (m/s^2) (float) ay : Y Acceleration (m/s^2) (float) az : Z Acceleration (m/s^2) (float) covariance : Covariance matrix upper right triangular (first nine entries are the first ROW, next eight entries are the second row, etc.) (float) ''' return self.send(self.local_position_ned_cov_encode(time_boot_ms, time_utc, estimator_type, x, y, z, vx, vy, vz, ax, ay, az, covariance), force_mavlink1=force_mavlink1)
[ "def", "local_position_ned_cov_send", "(", "self", ",", "time_boot_ms", ",", "time_utc", ",", "estimator_type", ",", "x", ",", "y", ",", "z", ",", "vx", ",", "vy", ",", "vz", ",", "ax", ",", "ay", ",", "az", ",", "covariance", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", "(", "self", ".", "local_position_ned_cov_encode", "(", "time_boot_ms", ",", "time_utc", ",", "estimator_type", ",", "x", ",", "y", ",", "z", ",", "vx", ",", "vy", ",", "vz", ",", "ax", ",", "ay", ",", "az", ",", "covariance", ")", ",", "force_mavlink1", "=", "force_mavlink1", ")" ]
The filtered local position (e.g. fused computer vision and accelerometers). Coordinate frame is right-handed, Z-axis down (aeronautical frame, NED / north-east-down convention) time_boot_ms : Timestamp (milliseconds since system boot). 0 for system without monotonic timestamp (uint32_t) time_utc : Timestamp (microseconds since UNIX epoch) in UTC. 0 for unknown. Commonly filled by the precision time source of a GPS receiver. (uint64_t) estimator_type : Class id of the estimator this estimate originated from. (uint8_t) x : X Position (float) y : Y Position (float) z : Z Position (float) vx : X Speed (m/s) (float) vy : Y Speed (m/s) (float) vz : Z Speed (m/s) (float) ax : X Acceleration (m/s^2) (float) ay : Y Acceleration (m/s^2) (float) az : Z Acceleration (m/s^2) (float) covariance : Covariance matrix upper right triangular (first nine entries are the first ROW, next eight entries are the second row, etc.) (float)
[ "The", "filtered", "local", "position", "(", "e", ".", "g", ".", "fused", "computer", "vision", "and", "accelerometers", ")", ".", "Coordinate", "frame", "is", "right", "-", "handed", "Z", "-", "axis", "down", "(", "aeronautical", "frame", "NED", "/", "north", "-", "east", "-", "down", "convention", ")" ]
python
train
square/pylink
pylink/jlink.py
https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/pylink/jlink.py#L110-L142
def open_required(func): """Decorator to specify that the J-Link DLL must be opened, and a J-Link connection must be established. Args: func (function): function being decorated Returns: The wrapper function. """ @functools.wraps(func) def wrapper(self, *args, **kwargs): """Wrapper function to check that the given ``JLink`` has been opened. Args: self (JLink): the ``JLink`` instance args: list of arguments to pass to the wrapped function kwargs: key-word arguments dict to pass to the wrapped function Returns: The return value of the wrapped function. Raises: JLinkException: if the J-Link DLL is not open or the J-Link is disconnected. """ if not self.opened(): raise errors.JLinkException('J-Link DLL is not open.') elif not self.connected(): raise errors.JLinkException('J-Link connection has been lost.') return func(self, *args, **kwargs) return wrapper
[ "def", "open_required", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Wrapper function to check that the given ``JLink`` has been\n opened.\n\n Args:\n self (JLink): the ``JLink`` instance\n args: list of arguments to pass to the wrapped function\n kwargs: key-word arguments dict to pass to the wrapped function\n\n Returns:\n The return value of the wrapped function.\n\n Raises:\n JLinkException: if the J-Link DLL is not open or the J-Link is\n disconnected.\n \"\"\"", "if", "not", "self", ".", "opened", "(", ")", ":", "raise", "errors", ".", "JLinkException", "(", "'J-Link DLL is not open.'", ")", "elif", "not", "self", ".", "connected", "(", ")", ":", "raise", "errors", ".", "JLinkException", "(", "'J-Link connection has been lost.'", ")", "return", "func", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapper" ]
Decorator to specify that the J-Link DLL must be opened, and a J-Link connection must be established. Args: func (function): function being decorated Returns: The wrapper function.
[ "Decorator", "to", "specify", "that", "the", "J", "-", "Link", "DLL", "must", "be", "opened", "and", "a", "J", "-", "Link", "connection", "must", "be", "established", "." ]
python
train
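A self-contained toy (not pylink itself) showing the guard behaviour the decorator enforces; FakeJLink and FakeError are stand-ins for pylink.JLink and errors.JLinkException.

import functools

class FakeError(Exception):
    """Stand-in for errors.JLinkException."""

def open_required(func):
    # same pattern as the decorator above, but raising the stand-in exception
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if not self.opened():
            raise FakeError('J-Link DLL is not open.')
        elif not self.connected():
            raise FakeError('J-Link connection has been lost.')
        return func(self, *args, **kwargs)
    return wrapper

class FakeJLink(object):
    def __init__(self):
        self._open = False

    def opened(self):
        return self._open

    def connected(self):
        return self._open

    @open_required
    def core_id(self):
        return 0xBA01

jlink = FakeJLink()
try:
    jlink.core_id()
except FakeError as exc:
    print(exc)                 # J-Link DLL is not open.
jlink._open = True
print(hex(jlink.core_id()))    # 0xba01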
totalgood/nlpia
src/nlpia/anki.py
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/anki.py#L33-L46
def get_anki_phrases_english(limit=None): """ Return all the English phrases in the Anki translation flashcards >>> len(get_anki_phrases_english(limit=100)) > 700 True """ texts = set() for lang in ANKI_LANGUAGES: df = get_data(lang) phrases = df.eng.str.strip().values texts = texts.union(set(phrases)) if limit and len(texts) >= limit: break return sorted(texts)
[ "def", "get_anki_phrases_english", "(", "limit", "=", "None", ")", ":", "texts", "=", "set", "(", ")", "for", "lang", "in", "ANKI_LANGUAGES", ":", "df", "=", "get_data", "(", "lang", ")", "phrases", "=", "df", ".", "eng", ".", "str", ".", "strip", "(", ")", ".", "values", "texts", "=", "texts", ".", "union", "(", "set", "(", "phrases", ")", ")", "if", "limit", "and", "len", "(", "texts", ")", ">=", "limit", ":", "break", "return", "sorted", "(", "texts", ")" ]
Return all the English phrases in the Anki translation flashcards >>> len(get_anki_phrases_english(limit=100)) > 700 True
[ "Return", "all", "the", "English", "phrases", "in", "the", "Anki", "translation", "flashcards" ]
python
train
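A hedged usage sketch; it assumes the nlpia package is installed (the Anki flashcard data is downloaded on first use), with the module path nlpia.anki implied by src/nlpia/anki.py.

from nlpia.anki import get_anki_phrases_english

phrases = get_anki_phrases_english(limit=100)
# the loop only stops after merging a whole language, so the result may exceed `limit`
print(len(phrases))
print(phrases[:3])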
scikit-hep/probfit
probfit/oneshot.py
https://github.com/scikit-hep/probfit/blob/de3593798ea3877dd2785062bed6877dd9058a02/probfit/oneshot.py#L11-L30
def fit_uml(f, data, quiet=False, print_level=0, *arg, **kwd): """ perform unbinned likelihood fit :param f: pdf :param data: data :param quiet: if not quite draw latest fit on fail fit :param printlevel: minuit printlevel :return: """ uml = UnbinnedLH(f, data) minuit = Minuit(uml, print_level=print_level, **kwd) minuit.set_strategy(2) minuit.migrad() if not minuit.migrad_ok() or not minuit.matrix_accurate(): if not quiet: from matplotlib import pyplot as plt plt.figure() uml.show() print(minuit.values) return (uml, minuit)
[ "def", "fit_uml", "(", "f", ",", "data", ",", "quiet", "=", "False", ",", "print_level", "=", "0", ",", "*", "arg", ",", "*", "*", "kwd", ")", ":", "uml", "=", "UnbinnedLH", "(", "f", ",", "data", ")", "minuit", "=", "Minuit", "(", "uml", ",", "print_level", "=", "print_level", ",", "*", "*", "kwd", ")", "minuit", ".", "set_strategy", "(", "2", ")", "minuit", ".", "migrad", "(", ")", "if", "not", "minuit", ".", "migrad_ok", "(", ")", "or", "not", "minuit", ".", "matrix_accurate", "(", ")", ":", "if", "not", "quiet", ":", "from", "matplotlib", "import", "pyplot", "as", "plt", "plt", ".", "figure", "(", ")", "uml", ".", "show", "(", ")", "print", "(", "minuit", ".", "values", ")", "return", "(", "uml", ",", "minuit", ")" ]
perform unbinned likelihood fit
:param f: pdf
:param data: data
:param quiet: if not quiet, draw the latest fit on a failed fit
:param print_level: minuit print_level
:return:
[ "perform", "unbinned", "likelihood", "fit", ":", "param", "f", ":", "pdf", ":", "param", "data", ":", "data", ":", "param", "quiet", ":", "if", "not", "quite", "draw", "latest", "fit", "on", "fail", "fit", ":", "param", "printlevel", ":", "minuit", "printlevel", ":", "return", ":" ]
python
train
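A hedged usage sketch: fitting probfit's built-in gaussian pdf to toy data. It assumes probfit with a compatible pre-2.0 iminuit, and that gaussian is exported at the package top level; extra keywords are forwarded to Minuit as starting values.

import numpy as np
from probfit import gaussian            # assumed top-level export of the gaussian pdf
from probfit.oneshot import fit_uml

np.random.seed(0)
data = np.random.randn(10000) * 2.0 + 1.0

uml, minuit = fit_uml(gaussian, data, quiet=True, mean=0.0, sigma=1.0)
print(minuit.values)                    # fitted mean ~ 1.0, sigma ~ 2.0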
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L5573-L5592
def getmsg(option, lenout=_default_len_out): """ Retrieve the current short error message, the explanation of the short error message, or the long error message. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/getmsg_c.html :param option: Indicates type of error message. :type option: str :param lenout: Available space in the output string msg. :type lenout: int :return: The error message to be retrieved. :rtype: str """ option = stypes.stringToCharP(option) lenout = ctypes.c_int(lenout) msg = stypes.stringToCharP(lenout) libspice.getmsg_c(option, lenout, msg) return stypes.toPythonString(msg)
[ "def", "getmsg", "(", "option", ",", "lenout", "=", "_default_len_out", ")", ":", "option", "=", "stypes", ".", "stringToCharP", "(", "option", ")", "lenout", "=", "ctypes", ".", "c_int", "(", "lenout", ")", "msg", "=", "stypes", ".", "stringToCharP", "(", "lenout", ")", "libspice", ".", "getmsg_c", "(", "option", ",", "lenout", ",", "msg", ")", "return", "stypes", ".", "toPythonString", "(", "msg", ")" ]
Retrieve the current short error message, the explanation of the short error message, or the long error message. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/getmsg_c.html :param option: Indicates type of error message. :type option: str :param lenout: Available space in the output string msg. :type lenout: int :return: The error message to be retrieved. :rtype: str
[ "Retrieve", "the", "current", "short", "error", "message", "the", "explanation", "of", "the", "short", "error", "message", "or", "the", "long", "error", "message", "." ]
python
train
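A hedged usage sketch; with no error signaled the strings come back empty, so this mainly illustrates the three documented options. getmsg is typically called right after a CSPICE routine signals an error with the error action set to 'RETURN'.

import spiceypy as spice

short_msg = spice.getmsg('SHORT', 26)
explain = spice.getmsg('EXPLAIN', 80)
long_msg = spice.getmsg('LONG', 321)
print(repr(short_msg), repr(explain), repr(long_msg))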
davidfokkema/artist
artist/utils.py
https://github.com/davidfokkema/artist/blob/26ae7987522622710f2910980770c50012fda47d/artist/utils.py#L86-L98
def save_data(data, suffix='', dirname=None): """Save a dataset using caller's name. :param data: a list or numpy array containing the data :param suffix: optional suffix to add to name :param dirname: optional directory name """ if type(data) == list: data = np.array(data).T name = create_graph_name(suffix, dirname) + '.txt' np.savetxt(name, data)
[ "def", "save_data", "(", "data", ",", "suffix", "=", "''", ",", "dirname", "=", "None", ")", ":", "if", "type", "(", "data", ")", "==", "list", ":", "data", "=", "np", ".", "array", "(", "data", ")", ".", "T", "name", "=", "create_graph_name", "(", "suffix", ",", "dirname", ")", "+", "'.txt'", "np", ".", "savetxt", "(", "name", ",", "data", ")" ]
Save a dataset using caller's name. :param data: a list or numpy array containing the data :param suffix: optional suffix to add to name :param dirname: optional directory name
[ "Save", "a", "dataset", "using", "caller", "s", "name", "." ]
python
train
zetaops/pyoko
pyoko/manage.py
https://github.com/zetaops/pyoko/blob/236c509ad85640933ac0f89ad8f7ed95f62adf07/pyoko/manage.py#L934-L947
def _format_links_fields(self, links): """ Format the fields containing links into 4-tuples printable by _print_fields(). """ fields = list() for link in links: linked_model = link['mdl'](super_context) null = self._marker_true if link['null'] is True else self._marker_false # In LinkProxy, if reverse_name is empty then only reverse has the name # of the field on the link_source side field_name = link['field'] or link['reverse'] fields.append((self._field_prefix, field_name, '%s()' % linked_model.title, null)) fields.sort(key=lambda f: f[1]) return fields
[ "def", "_format_links_fields", "(", "self", ",", "links", ")", ":", "fields", "=", "list", "(", ")", "for", "link", "in", "links", ":", "linked_model", "=", "link", "[", "'mdl'", "]", "(", "super_context", ")", "null", "=", "self", ".", "_marker_true", "if", "link", "[", "'null'", "]", "is", "True", "else", "self", ".", "_marker_false", "# In LinkProxy, if reverse_name is empty then only reverse has the name", "# of the field on the link_source side", "field_name", "=", "link", "[", "'field'", "]", "or", "link", "[", "'reverse'", "]", "fields", ".", "append", "(", "(", "self", ".", "_field_prefix", ",", "field_name", ",", "'%s()'", "%", "linked_model", ".", "title", ",", "null", ")", ")", "fields", ".", "sort", "(", "key", "=", "lambda", "f", ":", "f", "[", "1", "]", ")", "return", "fields" ]
Format the fields containing links into 4-tuples printable by _print_fields().
[ "Format", "the", "fields", "containing", "links", "into", "4", "-", "tuples", "printable", "by", "_print_fields", "()", "." ]
python
train
saltstack/salt
salt/modules/bcache.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/bcache.py#L128-L155
def detach(dev=None): ''' Detach a backing device(s) from a cache set If no dev is given, all backing devices will be attached. Detaching a backing device will flush it's write cache. This should leave the underlying device in a consistent state, but might take a while. CLI example: .. code-block:: bash salt '*' bcache.detach sdc salt '*' bcache.detach bcache1 ''' if dev is None: res = {} for dev, data in status(alldevs=True).items(): if 'cache' in data: res[dev] = detach(dev) return res if res else None log.debug('Detaching %s', dev) if not _bcsys(dev, 'detach', 'goaway', 'error', 'Error detaching {0}'.format(dev)): return False return _wait(lambda: uuid(dev) is False, 'error', '{0} received detach, but did not comply'.format(dev), 300)
[ "def", "detach", "(", "dev", "=", "None", ")", ":", "if", "dev", "is", "None", ":", "res", "=", "{", "}", "for", "dev", ",", "data", "in", "status", "(", "alldevs", "=", "True", ")", ".", "items", "(", ")", ":", "if", "'cache'", "in", "data", ":", "res", "[", "dev", "]", "=", "detach", "(", "dev", ")", "return", "res", "if", "res", "else", "None", "log", ".", "debug", "(", "'Detaching %s'", ",", "dev", ")", "if", "not", "_bcsys", "(", "dev", ",", "'detach'", ",", "'goaway'", ",", "'error'", ",", "'Error detaching {0}'", ".", "format", "(", "dev", ")", ")", ":", "return", "False", "return", "_wait", "(", "lambda", ":", "uuid", "(", "dev", ")", "is", "False", ",", "'error'", ",", "'{0} received detach, but did not comply'", ".", "format", "(", "dev", ")", ",", "300", ")" ]
Detach a backing device(s) from a cache set If no dev is given, all backing devices will be attached. Detaching a backing device will flush it's write cache. This should leave the underlying device in a consistent state, but might take a while. CLI example: .. code-block:: bash salt '*' bcache.detach sdc salt '*' bcache.detach bcache1
[ "Detach", "a", "backing", "device", "(", "s", ")", "from", "a", "cache", "set", "If", "no", "dev", "is", "given", "all", "backing", "devices", "will", "be", "attached", "." ]
python
train
hatemile/hatemile-for-python
hatemile/implementation/navig.py
https://github.com/hatemile/hatemile-for-python/blob/1e914f9aa09f6f8d78282af131311546ecba9fb8/hatemile/implementation/navig.py#L291-L314
def _is_valid_heading(self): """ Check that the headings of page are sintatic correct. :return: True if the headings of page are sintatic correct or False if not. :rtype: bool """ elements = self.parser.find('h1,h2,h3,h4,h5,h6').list_results() last_level = 0 count_main_heading = 0 self.validate_heading = True for element in elements: level = self._get_heading_level(element) if level == 1: if count_main_heading == 1: return False else: count_main_heading = 1 if (level - last_level) > 1: return False last_level = level return True
[ "def", "_is_valid_heading", "(", "self", ")", ":", "elements", "=", "self", ".", "parser", ".", "find", "(", "'h1,h2,h3,h4,h5,h6'", ")", ".", "list_results", "(", ")", "last_level", "=", "0", "count_main_heading", "=", "0", "self", ".", "validate_heading", "=", "True", "for", "element", "in", "elements", ":", "level", "=", "self", ".", "_get_heading_level", "(", "element", ")", "if", "level", "==", "1", ":", "if", "count_main_heading", "==", "1", ":", "return", "False", "else", ":", "count_main_heading", "=", "1", "if", "(", "level", "-", "last_level", ")", ">", "1", ":", "return", "False", "last_level", "=", "level", "return", "True" ]
Check that the headings of the page are syntactically correct.

:return: True if the headings of the page are syntactically correct or
         False if not.
:rtype: bool
[ "Check", "that", "the", "headings", "of", "page", "are", "sintatic", "correct", "." ]
python
train
ishikota/PyPokerEngine
pypokerengine/players.py
https://github.com/ishikota/PyPokerEngine/blob/a52a048a15da276005eca4acae96fb6eeb4dc034/pypokerengine/players.py#L45-L48
def respond_to_ask(self, message): """Called from Dealer when ask message received from RoundManager""" valid_actions, hole_card, round_state = self.__parse_ask_message(message) return self.declare_action(valid_actions, hole_card, round_state)
[ "def", "respond_to_ask", "(", "self", ",", "message", ")", ":", "valid_actions", ",", "hole_card", ",", "round_state", "=", "self", ".", "__parse_ask_message", "(", "message", ")", "return", "self", ".", "declare_action", "(", "valid_actions", ",", "hole_card", ",", "round_state", ")" ]
Called from Dealer when an ask message is received from RoundManager
[ "Called", "from", "Dealer", "when", "ask", "message", "received", "from", "RoundManager" ]
python
train
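A hedged sketch of the other side of this dispatch: respond_to_ask ends up calling declare_action, which is what a user-defined player implements. CallBot follows the usual BasePokerPlayer pattern; the exact layout of valid_actions is an assumption based on the library's README.

from pypokerengine.players import BasePokerPlayer

class CallBot(BasePokerPlayer):
    def declare_action(self, valid_actions, hole_card, round_state):
        # valid_actions is typically [fold, call, raise] dicts; always call
        call_action = valid_actions[1]
        return call_action['action'], call_action['amount']

    def receive_game_start_message(self, game_info):
        pass

    def receive_round_start_message(self, round_count, hole_card, seats):
        pass

    def receive_street_start_message(self, street, round_state):
        pass

    def receive_game_update_message(self, action, round_state):
        pass

    def receive_round_result_message(self, winners, hand_info, round_state):
        pass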
jterrace/pyssim
ssim/utils.py
https://github.com/jterrace/pyssim/blob/ff9bd90c3eb7525013ad46babf66b7cc78391e89/ssim/utils.py#L31-L47
def to_grayscale(img): """Convert PIL image to numpy grayscale array and numpy alpha array. Args: img (PIL.Image): PIL Image object. Returns: (gray, alpha): both numpy arrays. """ gray = numpy.asarray(ImageOps.grayscale(img)).astype(numpy.float) imbands = img.getbands() alpha = None if 'A' in imbands: alpha = numpy.asarray(img.split()[-1]).astype(numpy.float) return gray, alpha
[ "def", "to_grayscale", "(", "img", ")", ":", "gray", "=", "numpy", ".", "asarray", "(", "ImageOps", ".", "grayscale", "(", "img", ")", ")", ".", "astype", "(", "numpy", ".", "float", ")", "imbands", "=", "img", ".", "getbands", "(", ")", "alpha", "=", "None", "if", "'A'", "in", "imbands", ":", "alpha", "=", "numpy", ".", "asarray", "(", "img", ".", "split", "(", ")", "[", "-", "1", "]", ")", ".", "astype", "(", "numpy", ".", "float", ")", "return", "gray", ",", "alpha" ]
Convert PIL image to numpy grayscale array and numpy alpha array. Args: img (PIL.Image): PIL Image object. Returns: (gray, alpha): both numpy arrays.
[ "Convert", "PIL", "image", "to", "numpy", "grayscale", "array", "and", "numpy", "alpha", "array", "." ]
python
test
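A hedged usage sketch; note that to_grayscale uses numpy.float internally, so it assumes an older NumPy where that alias still exists.

from PIL import Image
from ssim.utils import to_grayscale

img = Image.new('RGBA', (16, 16), (255, 0, 0, 128))
gray, alpha = to_grayscale(img)
print(gray.shape, gray.dtype)     # (16, 16) float64
print(alpha.shape, alpha[0, 0])   # (16, 16) 128.0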
StagPython/StagPy
stagpy/stagyyparsers.py
https://github.com/StagPython/StagPy/blob/18c4416cc4a1011db2fd736ee8b0ec29aa6e4fd4/stagpy/stagyyparsers.py#L259-L386
def fields(fieldfile, only_header=False, only_istep=False): """Extract fields data. Args: fieldfile (:class:`pathlib.Path`): path of the binary field file. only_header (bool): when True (and :data:`only_istep` is False), only :data:`header` is returned. only_istep (bool): when True, only :data:`istep` is returned. Returns: depends on flags.: :obj:`int`: istep If :data:`only_istep` is True, this function returns the time step at which the binary file was written. :obj:`dict`: header Else, if :data:`only_header` is True, this function returns a dict containing the header informations of the binary file. :class:`numpy.array`: fields Else, this function returns the tuple :data:`(header, fields)`. :data:`fields` is an array of scalar fields indexed by variable, x-direction, y-direction, z-direction, block. """ # something to skip header? if not fieldfile.is_file(): return None header = {} with fieldfile.open('rb') as fid: readbin = partial(_readbin, fid) magic = readbin() if magic > 8000: # 64 bits magic -= 8000 readbin() # need to read 4 more bytes readbin = partial(readbin, file64=True) # check nb components nval = 1 if magic > 400: nval = 4 elif magic > 300: nval = 3 magic %= 100 # extra ghost point in horizontal direction header['xyp'] = int(magic >= 9 and nval == 4) # total number of values in relevant space basis # (e1, e2, e3) = (theta, phi, radius) in spherical geometry # = (x, y, z) in cartesian geometry header['nts'] = readbin(nwords=3) # number of blocks, 2 for yinyang or cubed sphere header['ntb'] = readbin() if magic >= 7 else 1 # aspect ratio header['aspect'] = readbin('f', 2) # number of parallel subdomains header['ncs'] = readbin(nwords=3) # (e1, e2, e3) space header['ncb'] = readbin() if magic >= 8 else 1 # blocks # r - coordinates # rgeom[0:self.nrtot+1, 0] are edge radial position # rgeom[0:self.nrtot, 1] are cell-center radial position if magic >= 2: header['rgeom'] = readbin('f', header['nts'][2] * 2 + 1) else: header['rgeom'] = np.array(range(0, header['nts'][2] * 2 + 1))\ * 0.5 / header['nts'][2] header['rgeom'] = np.resize(header['rgeom'], (header['nts'][2] + 1, 2)) header['rcmb'] = readbin('f') if magic >= 7 else None header['ti_step'] = readbin() if magic >= 3 else 0 if only_istep: return header['ti_step'] header['ti_ad'] = readbin('f') if magic >= 3 else 0 header['erupta_total'] = readbin('f') if magic >= 5 else 0 header['bot_temp'] = readbin('f') if magic >= 6 else 1 if magic >= 4: header['e1_coord'] = readbin('f', header['nts'][0]) header['e2_coord'] = readbin('f', header['nts'][1]) header['e3_coord'] = readbin('f', header['nts'][2]) else: # could construct them from other info raise ParsingError(fieldfile, 'magic >= 4 expected to get grid geometry') if only_header: return header # READ FIELDS # number of points in (e1, e2, e3) directions PER CPU npc = header['nts'] // header['ncs'] # number of blocks per cpu nbk = header['ntb'] // header['ncb'] # number of values per 'read' block npi = (npc[0] + header['xyp']) * (npc[1] + header['xyp']) * npc[2] * \ nbk * nval header['scalefac'] = readbin('f') if nval > 1 else 1 flds = np.zeros((nval, header['nts'][0] + header['xyp'], header['nts'][1] + header['xyp'], header['nts'][2], header['ntb'])) # loop over parallel subdomains for icpu in product(range(header['ncb']), range(header['ncs'][2]), range(header['ncs'][1]), range(header['ncs'][0])): # read the data for one CPU data_cpu = readbin('f', npi) * header['scalefac'] # icpu is (icpu block, icpu z, icpu y, icpu x) # data from file is transposed to obtained a field # array 
indexed with (x, y, z, block), as in StagYY flds[:, icpu[3] * npc[0]:(icpu[3] + 1) * npc[0] + header['xyp'], # x icpu[2] * npc[1]:(icpu[2] + 1) * npc[1] + header['xyp'], # y icpu[1] * npc[2]:(icpu[1] + 1) * npc[2], # z icpu[0] * nbk:(icpu[0] + 1) * nbk # block ] = np.transpose(data_cpu.reshape( (nbk, npc[2], npc[1] + header['xyp'], npc[0] + header['xyp'], nval))) return header, flds
[ "def", "fields", "(", "fieldfile", ",", "only_header", "=", "False", ",", "only_istep", "=", "False", ")", ":", "# something to skip header?", "if", "not", "fieldfile", ".", "is_file", "(", ")", ":", "return", "None", "header", "=", "{", "}", "with", "fieldfile", ".", "open", "(", "'rb'", ")", "as", "fid", ":", "readbin", "=", "partial", "(", "_readbin", ",", "fid", ")", "magic", "=", "readbin", "(", ")", "if", "magic", ">", "8000", ":", "# 64 bits", "magic", "-=", "8000", "readbin", "(", ")", "# need to read 4 more bytes", "readbin", "=", "partial", "(", "readbin", ",", "file64", "=", "True", ")", "# check nb components", "nval", "=", "1", "if", "magic", ">", "400", ":", "nval", "=", "4", "elif", "magic", ">", "300", ":", "nval", "=", "3", "magic", "%=", "100", "# extra ghost point in horizontal direction", "header", "[", "'xyp'", "]", "=", "int", "(", "magic", ">=", "9", "and", "nval", "==", "4", ")", "# total number of values in relevant space basis", "# (e1, e2, e3) = (theta, phi, radius) in spherical geometry", "# = (x, y, z) in cartesian geometry", "header", "[", "'nts'", "]", "=", "readbin", "(", "nwords", "=", "3", ")", "# number of blocks, 2 for yinyang or cubed sphere", "header", "[", "'ntb'", "]", "=", "readbin", "(", ")", "if", "magic", ">=", "7", "else", "1", "# aspect ratio", "header", "[", "'aspect'", "]", "=", "readbin", "(", "'f'", ",", "2", ")", "# number of parallel subdomains", "header", "[", "'ncs'", "]", "=", "readbin", "(", "nwords", "=", "3", ")", "# (e1, e2, e3) space", "header", "[", "'ncb'", "]", "=", "readbin", "(", ")", "if", "magic", ">=", "8", "else", "1", "# blocks", "# r - coordinates", "# rgeom[0:self.nrtot+1, 0] are edge radial position", "# rgeom[0:self.nrtot, 1] are cell-center radial position", "if", "magic", ">=", "2", ":", "header", "[", "'rgeom'", "]", "=", "readbin", "(", "'f'", ",", "header", "[", "'nts'", "]", "[", "2", "]", "*", "2", "+", "1", ")", "else", ":", "header", "[", "'rgeom'", "]", "=", "np", ".", "array", "(", "range", "(", "0", ",", "header", "[", "'nts'", "]", "[", "2", "]", "*", "2", "+", "1", ")", ")", "*", "0.5", "/", "header", "[", "'nts'", "]", "[", "2", "]", "header", "[", "'rgeom'", "]", "=", "np", ".", "resize", "(", "header", "[", "'rgeom'", "]", ",", "(", "header", "[", "'nts'", "]", "[", "2", "]", "+", "1", ",", "2", ")", ")", "header", "[", "'rcmb'", "]", "=", "readbin", "(", "'f'", ")", "if", "magic", ">=", "7", "else", "None", "header", "[", "'ti_step'", "]", "=", "readbin", "(", ")", "if", "magic", ">=", "3", "else", "0", "if", "only_istep", ":", "return", "header", "[", "'ti_step'", "]", "header", "[", "'ti_ad'", "]", "=", "readbin", "(", "'f'", ")", "if", "magic", ">=", "3", "else", "0", "header", "[", "'erupta_total'", "]", "=", "readbin", "(", "'f'", ")", "if", "magic", ">=", "5", "else", "0", "header", "[", "'bot_temp'", "]", "=", "readbin", "(", "'f'", ")", "if", "magic", ">=", "6", "else", "1", "if", "magic", ">=", "4", ":", "header", "[", "'e1_coord'", "]", "=", "readbin", "(", "'f'", ",", "header", "[", "'nts'", "]", "[", "0", "]", ")", "header", "[", "'e2_coord'", "]", "=", "readbin", "(", "'f'", ",", "header", "[", "'nts'", "]", "[", "1", "]", ")", "header", "[", "'e3_coord'", "]", "=", "readbin", "(", "'f'", ",", "header", "[", "'nts'", "]", "[", "2", "]", ")", "else", ":", "# could construct them from other info", "raise", "ParsingError", "(", "fieldfile", ",", "'magic >= 4 expected to get grid geometry'", ")", "if", "only_header", ":", "return", "header", "# READ FIELDS", "# number of points in (e1, 
e2, e3) directions PER CPU", "npc", "=", "header", "[", "'nts'", "]", "//", "header", "[", "'ncs'", "]", "# number of blocks per cpu", "nbk", "=", "header", "[", "'ntb'", "]", "//", "header", "[", "'ncb'", "]", "# number of values per 'read' block", "npi", "=", "(", "npc", "[", "0", "]", "+", "header", "[", "'xyp'", "]", ")", "*", "(", "npc", "[", "1", "]", "+", "header", "[", "'xyp'", "]", ")", "*", "npc", "[", "2", "]", "*", "nbk", "*", "nval", "header", "[", "'scalefac'", "]", "=", "readbin", "(", "'f'", ")", "if", "nval", ">", "1", "else", "1", "flds", "=", "np", ".", "zeros", "(", "(", "nval", ",", "header", "[", "'nts'", "]", "[", "0", "]", "+", "header", "[", "'xyp'", "]", ",", "header", "[", "'nts'", "]", "[", "1", "]", "+", "header", "[", "'xyp'", "]", ",", "header", "[", "'nts'", "]", "[", "2", "]", ",", "header", "[", "'ntb'", "]", ")", ")", "# loop over parallel subdomains", "for", "icpu", "in", "product", "(", "range", "(", "header", "[", "'ncb'", "]", ")", ",", "range", "(", "header", "[", "'ncs'", "]", "[", "2", "]", ")", ",", "range", "(", "header", "[", "'ncs'", "]", "[", "1", "]", ")", ",", "range", "(", "header", "[", "'ncs'", "]", "[", "0", "]", ")", ")", ":", "# read the data for one CPU", "data_cpu", "=", "readbin", "(", "'f'", ",", "npi", ")", "*", "header", "[", "'scalefac'", "]", "# icpu is (icpu block, icpu z, icpu y, icpu x)", "# data from file is transposed to obtained a field", "# array indexed with (x, y, z, block), as in StagYY", "flds", "[", ":", ",", "icpu", "[", "3", "]", "*", "npc", "[", "0", "]", ":", "(", "icpu", "[", "3", "]", "+", "1", ")", "*", "npc", "[", "0", "]", "+", "header", "[", "'xyp'", "]", ",", "# x", "icpu", "[", "2", "]", "*", "npc", "[", "1", "]", ":", "(", "icpu", "[", "2", "]", "+", "1", ")", "*", "npc", "[", "1", "]", "+", "header", "[", "'xyp'", "]", ",", "# y", "icpu", "[", "1", "]", "*", "npc", "[", "2", "]", ":", "(", "icpu", "[", "1", "]", "+", "1", ")", "*", "npc", "[", "2", "]", ",", "# z", "icpu", "[", "0", "]", "*", "nbk", ":", "(", "icpu", "[", "0", "]", "+", "1", ")", "*", "nbk", "# block", "]", "=", "np", ".", "transpose", "(", "data_cpu", ".", "reshape", "(", "(", "nbk", ",", "npc", "[", "2", "]", ",", "npc", "[", "1", "]", "+", "header", "[", "'xyp'", "]", ",", "npc", "[", "0", "]", "+", "header", "[", "'xyp'", "]", ",", "nval", ")", ")", ")", "return", "header", ",", "flds" ]
Extract fields data. Args: fieldfile (:class:`pathlib.Path`): path of the binary field file. only_header (bool): when True (and :data:`only_istep` is False), only :data:`header` is returned. only_istep (bool): when True, only :data:`istep` is returned. Returns: depends on flags: :obj:`int`: istep If :data:`only_istep` is True, this function returns the time step at which the binary file was written. :obj:`dict`: header Else, if :data:`only_header` is True, this function returns a dict containing the header information of the binary file. :class:`numpy.array`: fields Else, this function returns the tuple :data:`(header, fields)`. :data:`fields` is an array of scalar fields indexed by variable, x-direction, y-direction, z-direction, block.
[ "Extract", "fields", "data", "." ]
python
train
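A minimal usage sketch of the fields() reader above, using only the signature and flags documented in this record; the file name is hypothetical and fields() is assumed to be importable into the current scope.

from pathlib import Path

fieldfile = Path("t_00100")                     # hypothetical StagYY binary field file
istep = fields(fieldfile, only_istep=True)      # just the snapshot's time step
header = fields(fieldfile, only_header=True)    # header dict only
header, flds = fields(fieldfile)                # full read: (header, fields)
# flds is indexed as flds[variable, x, y, z, block], matching the docstring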
robin900/gspread-dataframe
gspread_dataframe.py
https://github.com/robin900/gspread-dataframe/blob/b64fef7ec196bfed69362aa35c593f448830a735/gspread_dataframe.py#L118-L135
def get_as_dataframe(worksheet, evaluate_formulas=False, **options): """ Returns the worksheet contents as a DataFrame. :param worksheet: the worksheet. :param evaluate_formulas: if True, get the value of a cell after formula evaluation; otherwise get the formula itself if present. Defaults to False. :param \*\*options: all the options for pandas.io.parsers.TextParser, according to the version of pandas that is installed. (Note: TextParser supports only the default 'python' parser engine, not the C engine.) :returns: pandas.DataFrame """ all_values = _get_all_values(worksheet, evaluate_formulas) return TextParser(all_values, **options).read()
[ "def", "get_as_dataframe", "(", "worksheet", ",", "evaluate_formulas", "=", "False", ",", "*", "*", "options", ")", ":", "all_values", "=", "_get_all_values", "(", "worksheet", ",", "evaluate_formulas", ")", "return", "TextParser", "(", "all_values", ",", "*", "*", "options", ")", ".", "read", "(", ")" ]
Returns the worksheet contents as a DataFrame. :param worksheet: the worksheet. :param evaluate_formulas: if True, get the value of a cell after formula evaluation; otherwise get the formula itself if present. Defaults to False. :param \*\*options: all the options for pandas.io.parsers.TextParser, according to the version of pandas that is installed. (Note: TextParser supports only the default 'python' parser engine, not the C engine.) :returns: pandas.DataFrame
[ "Returns", "the", "worksheet", "contents", "as", "a", "DataFrame", "." ]
python
train
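A hedged usage sketch for get_as_dataframe(); the gspread auth flow and spreadsheet name are assumptions, and header/skiprows are ordinary pandas TextParser options passed through **options.

import gspread

gc = gspread.service_account()                  # assumed auth flow
ws = gc.open("budget-2024").sheet1              # hypothetical spreadsheet
df = get_as_dataframe(ws, evaluate_formulas=True, header=0, skiprows=0)
print(df.head())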
google/grumpy
third_party/stdlib/quopri.py
https://github.com/google/grumpy/blob/3ec87959189cfcdeae82eb68a47648ac25ceb10b/third_party/stdlib/quopri.py#L21-L33
def needsquoting(c, quotetabs, header): """Decide whether a particular character needs to be quoted. The 'quotetabs' flag indicates whether embedded tabs and spaces should be quoted. Note that line-ending tabs and spaces are always encoded, as per RFC 1521. """ if c in ' \t': return quotetabs # if header, we have to escape _ because _ is used to escape space if c == '_': return header return c == ESCAPE or not (' ' <= c <= '~')
[ "def", "needsquoting", "(", "c", ",", "quotetabs", ",", "header", ")", ":", "if", "c", "in", "' \\t'", ":", "return", "quotetabs", "# if header, we have to escape _ because _ is used to escape space", "if", "c", "==", "'_'", ":", "return", "header", "return", "c", "==", "ESCAPE", "or", "not", "(", "' '", "<=", "c", "<=", "'~'", ")" ]
Decide whether a particular character needs to be quoted. The 'quotetabs' flag indicates whether embedded tabs and spaces should be quoted. Note that line-ending tabs and spaces are always encoded, as per RFC 1521.
[ "Decide", "whether", "a", "particular", "character", "needs", "to", "be", "quoted", "." ]
python
valid
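A few illustrative calls to needsquoting() above; ESCAPE is defined here with the value the quopri module uses, so the snippet is self-contained when pasted alongside the function.

ESCAPE = '='   # module-level constant in quopri

print(needsquoting('=', quotetabs=False, header=False))   # True: the escape character itself
print(needsquoting('\t', quotetabs=True, header=False))   # True: tabs quoted when requested
print(needsquoting('_', quotetabs=False, header=True))    # True: '_' encodes space in headers
print(needsquoting('A', quotetabs=False, header=False))   # False: plain printable ASCII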
Autodesk/aomi
aomi/helpers.py
https://github.com/Autodesk/aomi/blob/84da2dfb0424837adf9c4ddc1aa352e942bb7a4a/aomi/helpers.py#L140-L155
def mount_for_path(path, client): """Returns the mountpoint for this path""" backend_data = client.list_secret_backends()['data'] backends = [mnt for mnt in backend_data.keys()] path_bits = path.split('/') if len(path_bits) == 1: vault_path = "%s/" % path if vault_path in backends: return vault_path[0:len(vault_path) - 1] else: for i in range(1, len(path_bits) + 1): vault_path = "%s/" % '/'.join(path_bits[0:i]) if vault_path in backends: return vault_path[0:len(vault_path) - 1] return None
[ "def", "mount_for_path", "(", "path", ",", "client", ")", ":", "backend_data", "=", "client", ".", "list_secret_backends", "(", ")", "[", "'data'", "]", "backends", "=", "[", "mnt", "for", "mnt", "in", "backend_data", ".", "keys", "(", ")", "]", "path_bits", "=", "path", ".", "split", "(", "'/'", ")", "if", "len", "(", "path_bits", ")", "==", "1", ":", "vault_path", "=", "\"%s/\"", "%", "path", "if", "vault_path", "in", "backends", ":", "return", "vault_path", "[", "0", ":", "len", "(", "vault_path", ")", "-", "1", "]", "else", ":", "for", "i", "in", "range", "(", "1", ",", "len", "(", "path_bits", ")", "+", "1", ")", ":", "vault_path", "=", "\"%s/\"", "%", "'/'", ".", "join", "(", "path_bits", "[", "0", ":", "i", "]", ")", "if", "vault_path", "in", "backends", ":", "return", "vault_path", "[", "0", ":", "len", "(", "vault_path", ")", "-", "1", "]", "return", "None" ]
Returns the mountpoint for this path
[ "Returns", "the", "mountpoint", "for", "this", "path" ]
python
train
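A stand-alone sketch of how mount_for_path() walks path prefixes until one matches a mount; the stub client and backend names are made up for illustration.

class FakeVaultClient(object):
    """Stub exposing only what mount_for_path() needs."""
    def list_secret_backends(self):
        return {"data": {"secret/": {}, "apps/team1/": {}}}

client = FakeVaultClient()
print(mount_for_path("apps/team1/db/creds", client))   # 'apps/team1'
print(mount_for_path("secret", client))                # 'secret'
print(mount_for_path("missing/creds", client))         # None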
BlackEarth/bxml
bxml/xt.py
https://github.com/BlackEarth/bxml/blob/8fbea5dad7fadc7b854ddbeff6ecfb55aaceeb77/bxml/xt.py#L45-L51
def get_match(self, elem): """for the given elem, return the @match function that will be applied""" for m in self.matches: if (m.expression is not None and eval(m.expression)==True) \ or (m.xpath is not None and len(elem.xpath(m.xpath, namespaces=m.namespaces)) > 0): LOG.debug("=> match: %r" % m.expression) return m
[ "def", "get_match", "(", "self", ",", "elem", ")", ":", "for", "m", "in", "self", ".", "matches", ":", "if", "(", "m", ".", "expression", "is", "not", "None", "and", "eval", "(", "m", ".", "expression", ")", "==", "True", ")", "or", "(", "m", ".", "xpath", "is", "not", "None", "and", "len", "(", "elem", ".", "xpath", "(", "m", ".", "xpath", ",", "namespaces", "=", "m", ".", "namespaces", ")", ")", ">", "0", ")", ":", "LOG", ".", "debug", "(", "\"=> match: %r\"", "%", "m", ".", "expression", ")", "return", "m" ]
for the given elem, return the @match function that will be applied
[ "for", "the", "given", "elem", "return", "the" ]
python
train
pkkid/python-plexapi
plexapi/media.py
https://github.com/pkkid/python-plexapi/blob/9efbde96441c2bfbf410eacfb46e811e108e8bbc/plexapi/media.py#L177-L188
def _loadData(self, data): """ Load attribute values from Plex XML response. """ self._data = data self.codec = data.attrib.get('codec') self.codecID = data.attrib.get('codecID') self.id = cast(int, data.attrib.get('id')) self.index = cast(int, data.attrib.get('index', '-1')) self.language = data.attrib.get('language') self.languageCode = data.attrib.get('languageCode') self.selected = cast(bool, data.attrib.get('selected', '0')) self.streamType = cast(int, data.attrib.get('streamType')) self.type = cast(int, data.attrib.get('streamType'))
[ "def", "_loadData", "(", "self", ",", "data", ")", ":", "self", ".", "_data", "=", "data", "self", ".", "codec", "=", "data", ".", "attrib", ".", "get", "(", "'codec'", ")", "self", ".", "codecID", "=", "data", ".", "attrib", ".", "get", "(", "'codecID'", ")", "self", ".", "id", "=", "cast", "(", "int", ",", "data", ".", "attrib", ".", "get", "(", "'id'", ")", ")", "self", ".", "index", "=", "cast", "(", "int", ",", "data", ".", "attrib", ".", "get", "(", "'index'", ",", "'-1'", ")", ")", "self", ".", "language", "=", "data", ".", "attrib", ".", "get", "(", "'language'", ")", "self", ".", "languageCode", "=", "data", ".", "attrib", ".", "get", "(", "'languageCode'", ")", "self", ".", "selected", "=", "cast", "(", "bool", ",", "data", ".", "attrib", ".", "get", "(", "'selected'", ",", "'0'", ")", ")", "self", ".", "streamType", "=", "cast", "(", "int", ",", "data", ".", "attrib", ".", "get", "(", "'streamType'", ")", ")", "self", ".", "type", "=", "cast", "(", "int", ",", "data", ".", "attrib", ".", "get", "(", "'streamType'", ")", ")" ]
Load attribute values from Plex XML response.
[ "Load", "attribute", "values", "from", "Plex", "XML", "response", "." ]
python
train
aboSamoor/polyglot
polyglot/base.py
https://github.com/aboSamoor/polyglot/blob/d0d2aa8d06cec4e03bd96618ae960030f7069a17/polyglot/base.py#L108-L132
def iter_delimiter(self, byte_size=8192): """ Generalization of the default iter file delimited by '\n'. Note: The newline string can be arbitrarily long; it need not be restricted to a single character. You can also set the read size and control whether or not the newline string is left on the end of the iterated lines. Setting newline to '\0' is particularly good for use with an input file created with something like "os.popen('find -print0')". Args: byte_size (integer): Number of bytes to be read at each time. """ partial = u'' while True: read_chars = self.read(byte_size) if not read_chars: break partial += read_chars lines = partial.split(self.delimiter) partial = lines.pop() for line in lines: yield line + self.delimiter if partial: yield partial
[ "def", "iter_delimiter", "(", "self", ",", "byte_size", "=", "8192", ")", ":", "partial", "=", "u''", "while", "True", ":", "read_chars", "=", "self", ".", "read", "(", "byte_size", ")", "if", "not", "read_chars", ":", "break", "partial", "+=", "read_chars", "lines", "=", "partial", ".", "split", "(", "self", ".", "delimiter", ")", "partial", "=", "lines", ".", "pop", "(", ")", "for", "line", "in", "lines", ":", "yield", "line", "+", "self", ".", "delimiter", "if", "partial", ":", "yield", "partial" ]
Generalization of the default iter file delimited by '\n'. Note: The newline string can be arbitrarily long; it need not be restricted to a single character. You can also set the read size and control whether or not the newline string is left on the end of the iterated lines. Setting newline to '\0' is particularly good for use with an input file created with something like "os.popen('find -print0')". Args: byte_size (integer): Number of bytes to be read at each time.
[ "Generalization", "of", "the", "default", "iter", "file", "delimited", "by", "\\", "n", ".", "Note", ":", "The", "newline", "string", "can", "be", "arbitrarily", "long", ";", "it", "need", "not", "be", "restricted", "to", "a", "single", "character", ".", "You", "can", "also", "set", "the", "read", "size", "and", "control", "whether", "or", "not", "the", "newline", "string", "is", "left", "on", "the", "end", "of", "the", "iterated", "lines", ".", "Setting", "newline", "to", "\\", "0", "is", "particularly", "good", "for", "use", "with", "an", "input", "file", "created", "with", "something", "like", "os", ".", "popen", "(", "find", "-", "print0", ")", "." ]
python
train
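The same chunked split-on-delimiter pattern as a stand-alone generator, shown against io.StringIO rather than polyglot's file wrapper; the function name and defaults here are illustrative.

import io

def iter_delimited(stream, delimiter="\n", byte_size=8192):
    """Yield delimiter-terminated pieces from stream.read() in fixed-size chunks."""
    partial = ""
    while True:
        chunk = stream.read(byte_size)
        if not chunk:
            break
        partial += chunk
        pieces = partial.split(delimiter)
        partial = pieces.pop()              # keep the trailing fragment for the next chunk
        for piece in pieces:
            yield piece + delimiter
    if partial:
        yield partial

print(list(iter_delimited(io.StringIO("a\0b\0c"), delimiter="\0")))
# ['a\x00', 'b\x00', 'c'] -- handy for 'find -print0' style input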
dls-controls/pymalcolm
malcolm/core/concurrency.py
https://github.com/dls-controls/pymalcolm/blob/80ea667e4da26365a6cebc0249f52fdc744bd983/malcolm/core/concurrency.py#L61-L67
def get(self, timeout=None): # type: (float) -> T """Return the result or raise the error the function has produced""" self.wait(timeout) if isinstance(self._result, Exception): raise self._result return self._result
[ "def", "get", "(", "self", ",", "timeout", "=", "None", ")", ":", "# type: (float) -> T", "self", ".", "wait", "(", "timeout", ")", "if", "isinstance", "(", "self", ".", "_result", ",", "Exception", ")", ":", "raise", "self", ".", "_result", "return", "self", ".", "_result" ]
Return the result or raise the error the function has produced
[ "Return", "the", "result", "or", "raise", "the", "error", "the", "function", "has", "produced" ]
python
train
Capitains/MyCapytain
MyCapytain/resolvers/cts/local.py
https://github.com/Capitains/MyCapytain/blob/b11bbf6b6ae141fc02be70471e3fbf6907be6593/MyCapytain/resolvers/cts/local.py#L111-L123
def read(self, identifier, path): """ Retrieve and parse a text given an identifier :param identifier: Identifier of the text :type identifier: str :param path: Path of the text :type path: str :return: Parsed Text :rtype: CapitainsCtsText """ with open(path) as f: o = self.classes["text"](urn=identifier, resource=self.xmlparse(f)) return o
[ "def", "read", "(", "self", ",", "identifier", ",", "path", ")", ":", "with", "open", "(", "path", ")", "as", "f", ":", "o", "=", "self", ".", "classes", "[", "\"text\"", "]", "(", "urn", "=", "identifier", ",", "resource", "=", "self", ".", "xmlparse", "(", "f", ")", ")", "return", "o" ]
Retrieve and parse a text given an identifier :param identifier: Identifier of the text :type identifier: str :param path: Path of the text :type path: str :return: Parsed Text :rtype: CapitainsCtsText
[ "Retrieve", "and", "parse", "a", "text", "given", "an", "identifier" ]
python
train
tanghaibao/goatools
goatools/cli/gosubdag_plot.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/cli/gosubdag_plot.py#L139-L157
def _rdtxt_gos(self, ret, go_file): """Read GO IDs from a file.""" if not os.path.exists(go_file): raise RuntimeError("CAN NOT READ: {FILE}\n".format(FILE=go_file)) goids = set() go2color = {} with open(go_file) as ifstrm: for line in ifstrm: goids_found = self.re_goids.findall(line) if goids_found: goids.update(goids_found) colors = self.re_color.findall(line) if colors: if len(goids_found) == len(colors): for goid, color in zip(goids_found, colors): go2color[goid] = color else: print("IGNORING: {L}".format(L=line),) self._update_ret(ret, goids, go2color)
[ "def", "_rdtxt_gos", "(", "self", ",", "ret", ",", "go_file", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "go_file", ")", ":", "raise", "RuntimeError", "(", "\"CAN NOT READ: {FILE}\\n\"", ".", "format", "(", "FILE", "=", "go_file", ")", ")", "goids", "=", "set", "(", ")", "go2color", "=", "{", "}", "with", "open", "(", "go_file", ")", "as", "ifstrm", ":", "for", "line", "in", "ifstrm", ":", "goids_found", "=", "self", ".", "re_goids", ".", "findall", "(", "line", ")", "if", "goids_found", ":", "goids", ".", "update", "(", "goids_found", ")", "colors", "=", "self", ".", "re_color", ".", "findall", "(", "line", ")", "if", "colors", ":", "if", "len", "(", "goids_found", ")", "==", "len", "(", "colors", ")", ":", "for", "goid", ",", "color", "in", "zip", "(", "goids_found", ",", "colors", ")", ":", "go2color", "[", "goid", "]", "=", "color", "else", ":", "print", "(", "\"IGNORING: {L}\"", ".", "format", "(", "L", "=", "line", ")", ",", ")", "self", ".", "_update_ret", "(", "ret", ",", "goids", ",", "go2color", ")" ]
Read GO IDs from a file.
[ "Read", "GO", "IDs", "from", "a", "file", "." ]
python
train
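An illustrative stand-alone version of the GO-ID/color extraction step; the regular expressions are assumptions that mirror the method above, not goatools' exact patterns.

import re

re_goids = re.compile(r"GO:\d{7}")
re_color = re.compile(r"#[0-9a-fA-F]{6}")

line = "GO:0008150 #ff0000  GO:0003674 #00ff00"
goids = re_goids.findall(line)
colors = re_color.findall(line)
# keep the pairing only when every GO ID on the line has a color
go2color = dict(zip(goids, colors)) if len(goids) == len(colors) else {}
print(goids, go2color)
# ['GO:0008150', 'GO:0003674'] {'GO:0008150': '#ff0000', 'GO:0003674': '#00ff00'}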
DLR-RM/RAFCON
source/rafcon/gui/mygaphas/view.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/mygaphas/view.py#L59-L115
def get_port_at_point(self, vpos, distance=10, exclude=None, exclude_port_fun=None): """ Find item with port closest to specified position. List of items to be ignored can be specified with `exclude` parameter. Tuple is returned - found item - closest, connectable port - closest point on found port (in view coordinates) :Parameters: vpos Position specified in view coordinates. distance Max distance from point to a port (default 10) exclude Set of items to ignore. """ # Method had to be inherited, as the base method has a bug: # It misses the statement max_dist = d v2i = self.get_matrix_v2i vx, vy = vpos max_dist = distance port = None glue_pos = None item = None rect = (vx - distance, vy - distance, distance * 2, distance * 2) items = self.get_items_in_rectangle(rect, reverse=True) for i in items: if exclude and i in exclude: continue for p in i.ports(): if not p.connectable: continue if exclude_port_fun and exclude_port_fun(p): continue ix, iy = v2i(i).transform_point(vx, vy) pg, d = p.glue((ix, iy)) if d > max_dist: continue max_dist = d item = i port = p # transform coordinates from connectable item space to view # space i2v = self.get_matrix_i2v(i).transform_point glue_pos = i2v(*pg) return item, port, glue_pos
[ "def", "get_port_at_point", "(", "self", ",", "vpos", ",", "distance", "=", "10", ",", "exclude", "=", "None", ",", "exclude_port_fun", "=", "None", ")", ":", "# Method had to be inherited, as the base method has a bug:", "# It misses the statement max_dist = d", "v2i", "=", "self", ".", "get_matrix_v2i", "vx", ",", "vy", "=", "vpos", "max_dist", "=", "distance", "port", "=", "None", "glue_pos", "=", "None", "item", "=", "None", "rect", "=", "(", "vx", "-", "distance", ",", "vy", "-", "distance", ",", "distance", "*", "2", ",", "distance", "*", "2", ")", "items", "=", "self", ".", "get_items_in_rectangle", "(", "rect", ",", "reverse", "=", "True", ")", "for", "i", "in", "items", ":", "if", "exclude", "and", "i", "in", "exclude", ":", "continue", "for", "p", "in", "i", ".", "ports", "(", ")", ":", "if", "not", "p", ".", "connectable", ":", "continue", "if", "exclude_port_fun", "and", "exclude_port_fun", "(", "p", ")", ":", "continue", "ix", ",", "iy", "=", "v2i", "(", "i", ")", ".", "transform_point", "(", "vx", ",", "vy", ")", "pg", ",", "d", "=", "p", ".", "glue", "(", "(", "ix", ",", "iy", ")", ")", "if", "d", ">", "max_dist", ":", "continue", "max_dist", "=", "d", "item", "=", "i", "port", "=", "p", "# transform coordinates from connectable item space to view", "# space", "i2v", "=", "self", ".", "get_matrix_i2v", "(", "i", ")", ".", "transform_point", "glue_pos", "=", "i2v", "(", "*", "pg", ")", "return", "item", ",", "port", ",", "glue_pos" ]
Find item with port closest to specified position. List of items to be ignored can be specified with `exclude` parameter. Tuple is returned - found item - closest, connectable port - closest point on found port (in view coordinates) :Parameters: vpos Position specified in view coordinates. distance Max distance from point to a port (default 10) exclude Set of items to ignore.
[ "Find", "item", "with", "port", "closest", "to", "specified", "position", "." ]
python
train
aio-libs/aioredis
aioredis/commands/cluster.py
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/commands/cluster.py#L74-L78
def cluster_reset(self, *, hard=False): """Reset a Redis Cluster node.""" reset = hard and b'HARD' or b'SOFT' fut = self.execute(b'CLUSTER', b'RESET', reset) return wait_ok(fut)
[ "def", "cluster_reset", "(", "self", ",", "*", ",", "hard", "=", "False", ")", ":", "reset", "=", "hard", "and", "b'HARD'", "or", "b'SOFT'", "fut", "=", "self", ".", "execute", "(", "b'CLUSTER'", ",", "b'RESET'", ",", "reset", ")", "return", "wait_ok", "(", "fut", ")" ]
Reset a Redis Cluster node.
[ "Reset", "a", "Redis", "Cluster", "node", "." ]
python
train
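A hedged usage sketch against the aioredis 1.x API; the node address is a placeholder and create_redis_pool is assumed to be the entry point in use.

import asyncio
import aioredis

async def reset_node():
    redis = await aioredis.create_redis_pool("redis://localhost:7000")
    try:
        await redis.cluster_reset(hard=True)   # issues CLUSTER RESET HARD
    finally:
        redis.close()
        await redis.wait_closed()

# asyncio.get_event_loop().run_until_complete(reset_node())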
Samreay/ChainConsumer
chainconsumer/chainconsumer.py
https://github.com/Samreay/ChainConsumer/blob/902288e4d85c2677a9051a2172e03128a6169ad7/chainconsumer/chainconsumer.py#L783-L816
def divide_chain(self, chain=0): """ Returns a ChainConsumer instance containing all the walks of a given chain as individual chains themselves. This method might be useful if, for example, your chain was made using MCMC with 4 walkers. To check the sampling of all 4 walkers agree, you could call this to get a ChainConsumer instance with one chain for ech of the four walks. If you then plot, hopefully all four contours you would see agree. Parameters ---------- chain : int|str, optional The index or name of the chain you want divided Returns ------- ChainConsumer A new ChainConsumer instance with the same settings as the parent instance, containing ``num_walker`` chains. """ indexes = self._get_chain(chain) con = ChainConsumer() for index in indexes: chain = self.chains[index] assert chain.walkers is not None, "The chain you have selected was not added with any walkers!" num_walkers = chain.walkers data = np.split(chain.chain, num_walkers) ws = np.split(chain.weights, num_walkers) for j, (c, w) in enumerate(zip(data, ws)): con.add_chain(c, weights=w, name="Chain %d" % j, parameters=chain.parameters) return con
[ "def", "divide_chain", "(", "self", ",", "chain", "=", "0", ")", ":", "indexes", "=", "self", ".", "_get_chain", "(", "chain", ")", "con", "=", "ChainConsumer", "(", ")", "for", "index", "in", "indexes", ":", "chain", "=", "self", ".", "chains", "[", "index", "]", "assert", "chain", ".", "walkers", "is", "not", "None", ",", "\"The chain you have selected was not added with any walkers!\"", "num_walkers", "=", "chain", ".", "walkers", "data", "=", "np", ".", "split", "(", "chain", ".", "chain", ",", "num_walkers", ")", "ws", "=", "np", ".", "split", "(", "chain", ".", "weights", ",", "num_walkers", ")", "for", "j", ",", "(", "c", ",", "w", ")", "in", "enumerate", "(", "zip", "(", "data", ",", "ws", ")", ")", ":", "con", ".", "add_chain", "(", "c", ",", "weights", "=", "w", ",", "name", "=", "\"Chain %d\"", "%", "j", ",", "parameters", "=", "chain", ".", "parameters", ")", "return", "con" ]
Returns a ChainConsumer instance containing all the walks of a given chain as individual chains themselves. This method might be useful if, for example, your chain was made using MCMC with 4 walkers. To check that the sampling of all 4 walkers agrees, you could call this to get a ChainConsumer instance with one chain for each of the four walks. If you then plot, you would hopefully see all four contours agree. Parameters ---------- chain : int|str, optional The index or name of the chain you want divided Returns ------- ChainConsumer A new ChainConsumer instance with the same settings as the parent instance, containing ``num_walker`` chains.
[ "Returns", "a", "ChainConsumer", "instance", "containing", "all", "the", "walks", "of", "a", "given", "chain", "as", "individual", "chains", "themselves", "." ]
python
train
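A sketch of splitting a 4-walker chain into per-walker chains for a visual agreement check; the samples are synthetic and the plotting call is left commented out.

import numpy as np
from chainconsumer import ChainConsumer

samples = np.random.normal(size=(4000, 2))          # e.g. 4 walkers x 1000 steps, 2 parameters
c = ChainConsumer()
c.add_chain(samples, parameters=["x", "y"], walkers=4)
per_walker = c.divide_chain()                       # ChainConsumer holding 4 chains
# per_walker.plotter.plot()                         # overlaid contours should agree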
ming060/robotframework-uiautomatorlibrary
uiautomatorlibrary/Mobile.py
https://github.com/ming060/robotframework-uiautomatorlibrary/blob/b70202b6a8aa68b4efd9d029c2845407fb33451a/uiautomatorlibrary/Mobile.py#L288-L294
def swipe_top(self, steps=10, *args, **selectors): """ Swipe the UI object with *selectors* from center to top See `Swipe Left` for more details. """ self.device(**selectors).swipe.up(steps=steps)
[ "def", "swipe_top", "(", "self", ",", "steps", "=", "10", ",", "*", "args", ",", "*", "*", "selectors", ")", ":", "self", ".", "device", "(", "*", "*", "selectors", ")", ".", "swipe", ".", "up", "(", "steps", "=", "steps", ")" ]
Swipe the UI object with *selectors* from center to top See `Swipe Left` for more details.
[ "Swipe", "the", "UI", "object", "with", "*", "selectors", "*", "from", "center", "to", "top" ]
python
train
scanny/python-pptx
pptx/oxml/table.py
https://github.com/scanny/python-pptx/blob/d6ab8234f8b03953d2f831ff9394b1852db34130/pptx/oxml/table.py#L479-L482
def dimensions(self): """(row_count, col_count) pair describing size of range.""" _, _, width, height = self._extents return height, width
[ "def", "dimensions", "(", "self", ")", ":", "_", ",", "_", ",", "width", ",", "height", "=", "self", ".", "_extents", "return", "height", ",", "width" ]
(row_count, col_count) pair describing size of range.
[ "(", "row_count", "col_count", ")", "pair", "describing", "size", "of", "range", "." ]
python
train
twisted/mantissa
xmantissa/website.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/website.py#L194-L223
def _produceIt(self, segments, thunk): """ Underlying implmeentation of L{PrefixURLMixin.produceResource} and L{PrefixURLMixin.sessionlessProduceResource}. @param segments: the URL segments to dispatch. @param thunk: a 0-argument callable which returns an L{IResource} provider, or None. @return: a 2-tuple of C{(resource, remainingSegments)}, or L{None}. """ if not self.prefixURL: needle = () else: needle = tuple(self.prefixURL.split('/')) S = len(needle) if segments[:S] == needle: if segments == JUST_SLASH: # I *HATE* THE WEB subsegments = segments else: subsegments = segments[S:] res = thunk() # Even though the URL matched up, sometimes we might still # decide to not handle this request (eg, some prerequisite # for our function is not met by the store). Allow None # to be returned by createResource to indicate this case. if res is not None: return res, subsegments
[ "def", "_produceIt", "(", "self", ",", "segments", ",", "thunk", ")", ":", "if", "not", "self", ".", "prefixURL", ":", "needle", "=", "(", ")", "else", ":", "needle", "=", "tuple", "(", "self", ".", "prefixURL", ".", "split", "(", "'/'", ")", ")", "S", "=", "len", "(", "needle", ")", "if", "segments", "[", ":", "S", "]", "==", "needle", ":", "if", "segments", "==", "JUST_SLASH", ":", "# I *HATE* THE WEB", "subsegments", "=", "segments", "else", ":", "subsegments", "=", "segments", "[", "S", ":", "]", "res", "=", "thunk", "(", ")", "# Even though the URL matched up, sometimes we might still", "# decide to not handle this request (eg, some prerequisite", "# for our function is not met by the store). Allow None", "# to be returned by createResource to indicate this case.", "if", "res", "is", "not", "None", ":", "return", "res", ",", "subsegments" ]
Underlying implementation of L{PrefixURLMixin.produceResource} and L{PrefixURLMixin.sessionlessProduceResource}. @param segments: the URL segments to dispatch. @param thunk: a 0-argument callable which returns an L{IResource} provider, or None. @return: a 2-tuple of C{(resource, remainingSegments)}, or L{None}.
[ "Underlying", "implmeentation", "of", "L", "{", "PrefixURLMixin", ".", "produceResource", "}", "and", "L", "{", "PrefixURLMixin", ".", "sessionlessProduceResource", "}", "." ]
python
train
yamins81/tabular
tabular/fast.py
https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/fast.py#L308-L354
def recarrayisin(X,Y,weak=True): """ Indices of elements in a numpy record array (or ndarray with structured dtype) that appear in another. Fast routine for determining indices of elements in numpy record array `X` that appear in numpy record array `Y`, returning a boolean array `Z` such that:: Z[i] = X[i] in Y Record array version of func:`tabular.fast.isin`. **Parameters** **X** : numpy recarray Numpy recarray to comapare to numpy recarray `Y`. For each element of `X`, ask if it is in `Y`. **Y** : numpy recarray Numpy recarray to which numpy recarray `X` is compared. For each element of `X`, ask if it is in `Y`. **Returns** **b** : numpy array (bool) Boolean numpy array, `len(b) = len(X)`. **See Also:** :func:`tabular.fast.isin`, :func:`tabular.fast.recarraydifference` """ if (weak and set(X.dtype.names) != set(Y.dtype.names)) or \ (not weak and X.dtype.names != Y.dtype.names): return np.zeros((len(X),),bool) else: if X.dtype.names != Y.dtype.names: Y = np.rec.fromarrays([Y[a] for a in X.dtype.names], names=X.dtype.names) NewX = np.array([str(l) for l in X]) NewY = np.array([str(l) for l in Y]) NewY.sort() return isin(NewX,NewY)
[ "def", "recarrayisin", "(", "X", ",", "Y", ",", "weak", "=", "True", ")", ":", "if", "(", "weak", "and", "set", "(", "X", ".", "dtype", ".", "names", ")", "!=", "set", "(", "Y", ".", "dtype", ".", "names", ")", ")", "or", "(", "not", "weak", "and", "X", ".", "dtype", ".", "names", "!=", "Y", ".", "dtype", ".", "names", ")", ":", "return", "np", ".", "zeros", "(", "(", "len", "(", "X", ")", ",", ")", ",", "bool", ")", "else", ":", "if", "X", ".", "dtype", ".", "names", "!=", "Y", ".", "dtype", ".", "names", ":", "Y", "=", "np", ".", "rec", ".", "fromarrays", "(", "[", "Y", "[", "a", "]", "for", "a", "in", "X", ".", "dtype", ".", "names", "]", ",", "names", "=", "X", ".", "dtype", ".", "names", ")", "NewX", "=", "np", ".", "array", "(", "[", "str", "(", "l", ")", "for", "l", "in", "X", "]", ")", "NewY", "=", "np", ".", "array", "(", "[", "str", "(", "l", ")", "for", "l", "in", "Y", "]", ")", "NewY", ".", "sort", "(", ")", "return", "isin", "(", "NewX", ",", "NewY", ")" ]
Indices of elements in a numpy record array (or ndarray with structured dtype) that appear in another. Fast routine for determining indices of elements in numpy record array `X` that appear in numpy record array `Y`, returning a boolean array `Z` such that:: Z[i] = X[i] in Y Record array version of func:`tabular.fast.isin`. **Parameters** **X** : numpy recarray Numpy recarray to compare to numpy recarray `Y`. For each element of `X`, ask if it is in `Y`. **Y** : numpy recarray Numpy recarray to which numpy recarray `X` is compared. For each element of `X`, ask if it is in `Y`. **Returns** **b** : numpy array (bool) Boolean numpy array, `len(b) = len(X)`. **See Also:** :func:`tabular.fast.isin`, :func:`tabular.fast.recarraydifference`
[ "Indices", "of", "elements", "in", "a", "numpy", "record", "array", "(", "or", "ndarray", "with", "structured", "dtype", ")", "that", "appear", "in", "another", "." ]
python
train
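A small synthetic example of recarrayisin(); the field names and values are arbitrary, and tabular's isin() helper is assumed to be in scope as it is inside the module above.

import numpy as np

X = np.rec.fromrecords([(1, "a"), (2, "b"), (3, "c")], names=["id", "tag"])
Y = np.rec.fromrecords([(2, "b"), (9, "z")], names=["id", "tag"])
print(recarrayisin(X, Y))   # [False  True False]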
JoelBender/bacpypes
py25/bacpypes/bsllservice.py
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/bsllservice.py#L1006-L1039
def process_npdu(self, npdu): """encode NPDUs from the service access point and send them downstream.""" if _debug: RouterToRouterService._debug("process_npdu %r", npdu) # encode the npdu as if it was about to be delivered to the network pdu = PDU() npdu.encode(pdu) if _debug: RouterToRouterService._debug(" - pdu: %r", pdu) # broadcast messages go to everyone if pdu.pduDestination.addrType == Address.localBroadcastAddr: destList = self.connections.keys() else: conn = self.connections.get(pdu.pduDestination, None) if not conn: if _debug: RouterToRouterService._debug(" - not a connected client") # start a connection attempt conn = self.connect(pdu.pduDestination) if not conn.connected: # keep a reference to this pdu to send after the ack comes back conn.pendingNPDU.append(pdu) return destList = [pdu.pduDestination] if _debug: RouterToRouterService._debug(" - destList: %r", destList) for dest in destList: # make a router-to-router NPDU xpdu = RouterToRouterNPDU(pdu) xpdu.pduDestination = dest # send it to the multiplexer self.service_request(xpdu)
[ "def", "process_npdu", "(", "self", ",", "npdu", ")", ":", "if", "_debug", ":", "RouterToRouterService", ".", "_debug", "(", "\"process_npdu %r\"", ",", "npdu", ")", "# encode the npdu as if it was about to be delivered to the network", "pdu", "=", "PDU", "(", ")", "npdu", ".", "encode", "(", "pdu", ")", "if", "_debug", ":", "RouterToRouterService", ".", "_debug", "(", "\" - pdu: %r\"", ",", "pdu", ")", "# broadcast messages go to everyone", "if", "pdu", ".", "pduDestination", ".", "addrType", "==", "Address", ".", "localBroadcastAddr", ":", "destList", "=", "self", ".", "connections", ".", "keys", "(", ")", "else", ":", "conn", "=", "self", ".", "connections", ".", "get", "(", "pdu", ".", "pduDestination", ",", "None", ")", "if", "not", "conn", ":", "if", "_debug", ":", "RouterToRouterService", ".", "_debug", "(", "\" - not a connected client\"", ")", "# start a connection attempt", "conn", "=", "self", ".", "connect", "(", "pdu", ".", "pduDestination", ")", "if", "not", "conn", ".", "connected", ":", "# keep a reference to this pdu to send after the ack comes back", "conn", ".", "pendingNPDU", ".", "append", "(", "pdu", ")", "return", "destList", "=", "[", "pdu", ".", "pduDestination", "]", "if", "_debug", ":", "RouterToRouterService", ".", "_debug", "(", "\" - destList: %r\"", ",", "destList", ")", "for", "dest", "in", "destList", ":", "# make a router-to-router NPDU", "xpdu", "=", "RouterToRouterNPDU", "(", "pdu", ")", "xpdu", ".", "pduDestination", "=", "dest", "# send it to the multiplexer", "self", ".", "service_request", "(", "xpdu", ")" ]
encode NPDUs from the service access point and send them downstream.
[ "encode", "NPDUs", "from", "the", "service", "access", "point", "and", "send", "them", "downstream", "." ]
python
train
klmitch/turnstile
turnstile/limits.py
https://github.com/klmitch/turnstile/blob/8fe9a359b45e505d3192ab193ecf9be177ab1a17/turnstile/limits.py#L464-L472
def dehydrate(self): """Return a dict representing this bucket.""" # Only concerned about very specific attributes result = {} for attr in self.attrs: result[attr] = getattr(self, attr) return result
[ "def", "dehydrate", "(", "self", ")", ":", "# Only concerned about very specific attributes", "result", "=", "{", "}", "for", "attr", "in", "self", ".", "attrs", ":", "result", "[", "attr", "]", "=", "getattr", "(", "self", ",", "attr", ")", "return", "result" ]
Return a dict representing this bucket.
[ "Return", "a", "dict", "representing", "this", "bucket", "." ]
python
train
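A stand-alone sketch of the attrs-driven dehydrate pattern shown above; the Bucket class and its attribute names here are hypothetical.

class Bucket(object):
    attrs = ("level", "unused", "last")

    def __init__(self, level=0.0, unused=0.0, last=None):
        self.level, self.unused, self.last = level, unused, last

    def dehydrate(self):
        # Same shape as above: only the attributes named in attrs are kept
        return {attr: getattr(self, attr) for attr in self.attrs}

print(Bucket(level=1.5, last=12345).dehydrate())
# {'level': 1.5, 'unused': 0.0, 'last': 12345}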