Dataset columns: Unnamed: 0 (int64, 0 to 389k) · code (string, lengths 26 to 79.6k) · docstring (string, lengths 1 to 46.9k)
26,200
import binascii

def inet_ntoa(address):
    if len(address) != 16:
        raise ValueError("IPv6 addresses are 16 bytes long")
    hex = binascii.hexlify(address)
    chunks = []
    i = 0
    l = len(hex)
    while i < l:
        # strip leading zeros within each 16-bit group; the loop body was
        # truncated in extraction and is reconstructed here (note: runs of
        # zero groups are not compressed by this sketch)
        chunk = hex[i : i + 4].decode().lstrip('0') or '0'
        chunks.append(chunk)
        i += 4
    return ':'.join(chunks)
Convert a network format IPv6 address into text. @param address: the binary address @type address: string @rtype: string @raises ValueError: the address isn't 16 bytes long
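A quick usage sketch of the reconstructed function above (output shown without zero-run compression, which the truncated original may have performed):

    addr = bytes.fromhex('20010db8000000000000000000000001')
    print(inet_ntoa(addr))  # -> '2001:db8:0:0:0:0:0:1'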
26,201
import os
import warnings

def initialize_sentry_integration():
    try:
        import sentry_sdk
        from sentry_sdk.integrations.pyramid import PyramidIntegration
        from sentry_sdk.integrations.celery import CeleryIntegration
    except ImportError:
        warnings.warn(
            "Sentry is not configured because the Sentry SDK "
            "(sentry_sdk package) is not installed",
            UserWarning,
        )
        return
    try:
        dsn = os.environ["SENTRY_DSN"]  # key inferred from the warning below
    except KeyError:
        warnings.warn(
            "Sentry is not configured because SENTRY_DSN was not supplied.",
            UserWarning,
        )
    else:
        sentry_sdk.init(
            dsn=dsn,
            integrations=[PyramidIntegration(), CeleryIntegration()],
        )
Used to optionally initialize the Sentry service with this app. See https://docs.sentry.io/platforms/python/pyramid/
26,202
def _parse_effects(self, effects_json=None):
    if isinstance(effects_json, list):
        return [ValidatorEffect.parse(effect) for effect in effects_json]
    elif isinstance(effects_json, dict):
        return ValidatorEffect.parse(effects_json)
    else:
        raise Exception("The provided json should be a list of valid effects, "
                        "or a single effect. Got {}".format(effects_json))
Parse multiple effects from an effects (list) JSON.
26,203
def query_invitations(cls, user, eager=False):
    if eager:
        eager = [Membership.group]
    return cls.query_by_user(user, state=MembershipState.PENDING_USER,
                             eager=eager)
Get all invitations for given user.
26,204
def _erase_vm_info(name):
    try:
        # delete the machine record; the dict keys and __utils__ entries were
        # stripped in extraction and are reconstructed from Salt's sdb API
        vm_ = get_vm_info(name)
        if vm_["machine"]:
            key = _build_machine_uri(vm_["machine"], vm_.get("cwd", "."))
            try:
                __utils__["sdb.sdb_delete"](key, __opts__)
            except KeyError:
                # some sdb drivers (e.g. SQLite) have no delete method,
                # so overwrite the entry with a blank value instead
                __utils__["sdb.sdb_set"](key, None, __opts__)
    except Exception:
        pass
    uri = _build_sdb_uri(name)
    try:
        __utils__["sdb.sdb_delete"](uri, __opts__)
    except KeyError:
        __utils__["sdb.sdb_set"](uri, {}, __opts__)
    except Exception:
        pass
Erase the information for a VM that we are destroying. Some sdb drivers (such as the SQLite driver we expect to use) do not have a `delete` method, so if the delete fails, we replace the entry with a blank one.
26,205
def _baseplot(cls, session, type, *args, **kwargs):
    if not type:
        raise Exception("Must provide a plot type")
    options, description = cls._clean_options(**kwargs)
    data = cls._clean_data(*args)
    # the stripped dictionary key is 'images' (inferred from the docstring)
    if 'images' in data and len(data) > 1:
        images = data['images']
        del data['images']
        viz = cls._create(session, data=data, type=type,
                          options=options, description=description)
        first_image, remaining_images = images[0], images[1:]
        viz._append_image(first_image)
        for image in remaining_images:
            viz._append_image(image)
    elif 'images' in data:
        images = data['images']
        viz = cls._create(session, images=images, type=type,
                          options=options, description=description)
    else:
        viz = cls._create(session, data=data, type=type,
                          options=options, description=description)
    return viz
Base method for plotting data and images. Applies a plot-type specific cleaning operation to generate a dictionary with the data, then creates a visualization with the data. Expects a session and a type, followed by all plot-type specific positional and keyword arguments, which will be handled by the clean method of the given plot type. If the dictionary contains only images, or only non-image data, they will be passed on their own. If the dictionary contains both images and non-image data, the images will be appended to the visualization.
26,206
def read_list_from_csv(filepath, dict_form=False, headers=None, **kwargs):
    stream = Stream(filepath, headers=headers, **kwargs)
    stream.open()
    result = stream.read(keyed=dict_form)
    stream.close()
    return result
Read a list of rows in dict or list form from a csv. (The headers argument is either a row number or a list of row numbers (in case of multi-line headers) to be considered as headers (rows start counting at 1), or the actual headers defined as a list of strings. If not set, all rows will be treated as containing values.) Args: filepath (str): Path to read from dict_form (bool): Return in dict form. Defaults to False. headers (Union[int, List[int], List[str], None]): Row number of headers. Defaults to None. **kwargs: Other arguments to pass to Tabulator Stream Returns: List[Union[Dict, List]]: List of rows in dict or list form
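A hedged usage example (assumes a hypothetical people.csv whose first row is a header; Stream comes from the tabulator package):

    rows = read_list_from_csv('people.csv', dict_form=True, headers=1)
    # e.g. [{'name': 'Ada', 'age': '36'}, ...]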
26,207
def _mkpda(self, nonterms, productions, productions_struct, terminals, splitstring=1):
    pda = PDA(self.alphabet)
    pda.nonterminals = nonterms
    pda.terminals = terminals
    # state 0: push the stack-bottom symbol (the original literal was lost
    # in extraction; 0 is used as a placeholder)
    pda.s[pda.n] = PDAState()
    pda.s[pda.n].id = pda.n
    pda.s[pda.n].sym = 0
    pda.s[pda.n].type = 1
    pda.s[pda.n].trans[1] = [0]
    pda.n = pda.n + 1
    # state 1: push the start symbol S
    pda.s[pda.n] = PDAState()
    pda.s[pda.n].id = pda.n
    pda.s[pda.n].type = 1
    pda.s[pda.n].sym = nonterms[0]
    pda.s[pda.n].trans[2] = [0]
    pda.n = pda.n + 1
    # state 2: pop
    pda.s[pda.n] = PDAState()
    pda.s[pda.n].id = pda.n
    pda.s[pda.n].type = 2
    pda.s[pda.n].trans[0] = []
    counter = 0
    i = 0
    while i < len(nonterms):
        j = 0
        while j < len(productions[nonterms[i]]):
            if productions_struct[counter].type == 1:
                # rule A -> a
                pda.n = pda.n + 1
                pda.s[pda.n] = PDAState()
                pda.s[pda.n].id = pda.n
                if pda.n not in pda.s[2].trans:
                    pda.s[2].trans[pda.n] = []
                pda.s[2].trans[pda.n].append(nonterms[i])
                if splitstring == 0:
                    pda.s[pda.n].type = 3
                    pda.s[pda.n].trans[2] = [productions_struct[counter].a]
                else:
                    if productions_struct[counter].a not in terminals or \
                            len(productions_struct[counter].a) == 1:
                        pda.s[pda.n].type = 3
                        pda.s[pda.n].trans[pda.n + 1] = [productions_struct[counter].a.lower()]
                        pda.n = pda.n + 1
                        pda.s[pda.n] = PDAState()
                        pda.s[pda.n].id = pda.n
                        pda.s[pda.n].type = 3
                        # the separator literal (a space, per the docstring's
                        # splitstring behavior) was stripped in extraction
                        pda.s[pda.n].trans[2] = [' ']
                    else:
                        # split a multi-character terminal one symbol at a time
                        pda.s[pda.n].type = 3
                        pda.s[pda.n].trans[pda.n + 1] = \
                            [productions_struct[counter].a[0].lower()]
                        k = 1
                        while k < len(productions_struct[counter].a) - 1:
                            pda.n = pda.n + 1
                            pda.s[pda.n] = PDAState()
                            pda.s[pda.n].id = pda.n
                            pda.s[pda.n].type = 3
                            pda.s[pda.n].trans[pda.n + 1] = \
                                [productions_struct[counter].a[k].lower()]
                            k = k + 1
                        pda.n = pda.n + 1
                        pda.s[pda.n] = PDAState()
                        pda.s[pda.n].id = pda.n
                        pda.s[pda.n].type = 3
                        pda.s[pda.n].trans[pda.n + 1] = \
                            [productions_struct[counter].a[-1].lower()]
                        pda.n = pda.n + 1
                        pda.s[pda.n] = PDAState()
                        pda.s[pda.n].id = pda.n
                        pda.s[pda.n].type = 3
                        pda.s[pda.n].trans[2] = [' ']
            else:
                # rule A -> B0 B1: push b1, then b0
                pda.n = pda.n + 1
                pda.s[pda.n] = PDAState()
                pda.s[pda.n].id = pda.n
                if pda.n not in pda.s[2].trans:
                    pda.s[2].trans[pda.n] = []
                pda.s[2].trans[pda.n].append(nonterms[i])
                pda.s[pda.n].type = 1
                pda.s[pda.n].sym = productions_struct[counter].b1
                pda.s[pda.n].trans[pda.n + 1] = [0]
                pda.n = pda.n + 1
                pda.s[pda.n] = PDAState()
                pda.s[pda.n].id = pda.n
                pda.s[pda.n].type = 1
                pda.s[pda.n].sym = productions_struct[counter].b0
                pda.s[pda.n].trans[2] = [0]
            j = j + 1
            counter = counter + 1
        i = i + 1
    return pda
This function generates a PDA from a CNF grammar as described in: - http://www.oit.edu/faculty/sherry.yang/CST229/Lectures/7_pda.pdf - http://www.eng.utah.edu/~cs3100/lectures/l18/pda-notes.pdf If all of the grammar productions are in the Chomsky Normal Form, then follow the template for constructing a pushdown automaton: 1. Start 2. Push S 3. Pop 4. Case: Nonterminal A: For every production rule of this form: A: BC, Push C and then Push B Args: nonterms (list): Non-terminals list productions (dict): productions in the CNF form: A -> a or A -> b0b1, or S -> e productions_struct (dict): productions in the CNF form in structure form object.a for A -> a, object.b0 and object.b1 for A -> b0b1 and object.type where type is 1 for A -> a and 2 for A -> b0b1 terminals (list): All terminals splitstring (bool): If enabled an extra space is added after each symbol. Returns: PDA: The generated PDA
26,208
def early_create_objects(self, raw_objects):
    types_creations = self.__class__.types_creations
    early_created_types = self.__class__.early_created_types
    logger.info("Creating objects...")
    for o_type in sorted(types_creations):
        if o_type in early_created_types:
            self.create_objects_for_type(raw_objects, o_type)
    logger.info("Done")
Create the objects needed for the post configuration file initialization :param raw_objects: dict with all objects with str values :type raw_objects: dict :return: None
26,209
def _attach(self, instruction, qargs, cargs):
    self.append(instruction, qargs, cargs)
DEPRECATED after 0.8
26,210
def pitch(self, shift, use_tree=False, segment=82, search=14.68, overlap=12):
    self.command.append('pitch')
    if use_tree:
        # the flag literal was stripped in extraction; sox's pitch effect
        # takes an optional quick-mode flag here
        self.command.append('-q')
    self.command.append(shift)
    self.command.append(segment)
    self.command.append(search)
    self.command.append(overlap)
    return self
pitch takes five parameters: shift, use_tree (True or False), segment, search and overlap.
26,211
def p_property_decl(self, p):
    l = len(p)
    p[0] = Property(list(p)[1:-1], p.lineno(l - 1))
property_decl : prop_open style_list t_semicolon | prop_open style_list css_important t_semicolon | prop_open empty t_semicolon
26,212
def emboss_pepstats_on_fasta(infile, outfile='', outdir='', outext='.pepstats',
                             force_rerun=False):
    outfile = ssbio.utils.outfile_maker(inname=infile, outname=outfile,
                                        outdir=outdir, outext=outext)
    # the command literals were stripped in extraction; reconstructed from
    # the EMBOSS pepstats command-line usage
    program = 'pepstats'
    pepstats_args = '-sequence="{}" -outfile="{}"'.format(infile, outfile)
    cmd_string = '{} {}'.format(program, pepstats_args)
    ssbio.utils.command_runner(cmd_string, force_rerun_flag=force_rerun,
                               outfile_checker=outfile, silent=True)
    return outfile
Run EMBOSS pepstats on a FASTA file. Args: infile: Path to FASTA file outfile: Name of output file without extension outdir: Path to output directory outext: Extension of results file, default is ".pepstats" force_rerun: Flag to rerun pepstats Returns: str: Path to output file.
26,213
def query_by_post(postid):
    return TabPost2Tag.select().where(
        TabPost2Tag.post_id == postid
    ).order_by(TabPost2Tag.order)
Query records by post.
26,214
def prepare(self, engine, mode, items) -> None:
    self.tx_id = str(uuid.uuid4()).replace("-", "")
    self.engine = engine
    self.mode = mode
    self.items = items
    self._prepare_request()
Create a unique transaction id and dumps the items into a cached request object.
26,215
def user_order_by(self, field):
    model_label = order.utils.resolve_labels('.'.join(
        [self.model._meta.app_label, self.model._meta.object_name]))
    orderitem_set = getattr(
        self.model,
        order.utils.resolve_order_item_related_set_name(model_label))
    order_model = orderitem_set.related.model
    db_table = order_model._meta.db_table
    pk_name = self.model._meta.pk.attname
    sanitized_field = field.lstrip('-')
    # the raw-SQL template literal was stripped in extraction; a subquery
    # selecting the ordering column from the order-item table is implied,
    # and the join column name below is a placeholder
    extra_select = {
        sanitized_field: '(SELECT %s FROM %s WHERE item_id = %s.%s)' %
            (sanitized_field, db_table, self.model._meta.db_table, pk_name)
    }
    return self.extra(select=extra_select).all().order_by(field)
Queryset method ordering objects by user ordering field.
26,216
def get_total_DOS(self):
    warnings.warn("Phonopy.get_total_DOS is deprecated. "
                  "Use Phonopy.get_total_dos_dict.", DeprecationWarning)
    dos = self.get_total_dos_dict()
    return dos['frequency_points'], dos['total_dos']
Return frequency points and total DOS as a tuple. Returns ------- A tuple with (frequency_points, total_dos). frequency_points: ndarray shape=(frequency_sampling_points, ), dtype='double' total_dos: shape=(frequency_sampling_points, ), dtype='double'
26,217
def suspendMember(self, clusterId, memberId):
    self.send_suspendMember(clusterId, memberId)
    return self.recv_suspendMember()
Parameters: - clusterId - memberId
26,218
def error(self):
    r = self.residuals.ravel()
    return np.dot(r, r)
Class property: Sum of the squared errors, :math:`E = \sum_i (D_i - M_i(\theta))^2`
26,219
def _transform_index(index, func, level=None):
    if isinstance(index, MultiIndex):
        if level is not None:
            items = [tuple(func(y) if i == level else y
                           for i, y in enumerate(x)) for x in index]
        else:
            items = [tuple(func(y) for y in x) for x in index]
        return MultiIndex.from_tuples(items, names=index.names)
    else:
        items = [func(x) for x in index]
        return Index(items, name=index.name, tupleize_cols=False)
Apply function to all values found in index. This includes transforming multiindex entries separately. Only apply function to one level of the MultiIndex if level is specified.
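For illustration, a small grounded example of the plain-Index case described above:

    import pandas as pd
    idx = pd.Index(['a', 'b'], name='letters')
    _transform_index(idx, str.upper)  # Index(['A', 'B'], name='letters')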
26,220
def teardown(self):
    for table_spec in reversed(self._table_specs):
        with self._conn:
            table_spec.teardown(self._conn)
Cleanup cache tables.
26,221
def get_ssh_client(ip, ssh_private_key_file, ssh_user='root', port=22,
                   timeout=600, wait_period=10):
    if ip in CLIENT_CACHE:
        return CLIENT_CACHE[ip]
    start = time.time()
    end = start + timeout
    client = None
    while time.time() < end:
        try:
            client = establish_ssh_connection(
                ip, ssh_private_key_file, ssh_user, port,
                timeout=wait_period
            )
            # the probe-command literal was stripped; any cheap command
            # that exercises the connection works
            execute_ssh_command(client, 'ls')
        except Exception:
            if client:
                client.close()
            wait_period += wait_period
        else:
            CLIENT_CACHE[ip] = client
            return client
    # the exception-message literal was stripped in extraction
    raise IpaSSHException('Failed to establish SSH connection in given timeout.')
Attempt to establish and test ssh connection.
26,222
def list_extmods():
    ret = {}
    # the path literals were stripped; external modules are cached under
    # <cachedir>/extmods in Salt
    ext_dir = os.path.join(__opts__['cachedir'], 'extmods')
    mod_types = os.listdir(ext_dir)
    for mod_type in mod_types:
        ret[mod_type] = set()
        for _, _, files in salt.utils.path.os_walk(os.path.join(ext_dir, mod_type)):
            for fh_ in files:
                ret[mod_type].add(fh_.split('.')[0])
        ret[mod_type] = list(ret[mod_type])
    return ret
.. versionadded:: 2017.7.0 List Salt modules which have been synced externally CLI Examples: .. code-block:: bash salt '*' saltutil.list_extmods
26,223
def get_magnitude_scaling_term(self, C, mag):
    mval = mag - 3.0
    # the coefficient-key literals were stripped in extraction;
    # 'c1'..'c4' below are placeholders for the GMPE's coefficient names
    return C['c1'] + C['c2'] * mval + C['c3'] * (mval ** 2.0) + \
        C['c4'] * (mval ** 3.0)
Returns the magnitude scaling term (equation 1)
26,224
def get_scope_names(self) -> list:
    lscope = []
    for scope in reversed(self.get_scope_list()):
        if scope.name is not None:
            lscope.append(scope.name)
    return lscope
Return the list of all contained scopes from global to local
26,225
def reads_overlapping_variant(
        samfile,
        variant,
        chromosome=None,
        use_duplicate_reads=USE_DUPLICATE_READS,
        use_secondary_alignments=USE_SECONDARY_ALIGNMENTS,
        min_mapping_quality=MIN_READ_MAPPING_QUALITY):
    logger.info("Gathering reads for %s", variant)
    if chromosome is None:
        chromosome = variant.contig
    logger.info(
        "Gathering variant reads for variant %s (chromosome = %s, gene names = %s)",
        variant, chromosome, variant.gene_names)
    base1_position, ref, alt = trim_variant(variant)
    if len(ref) == 0:
        base1_position_before_variant = base1_position
        base1_position_after_variant = base1_position + 1
    else:
        base1_position_before_variant = base1_position - 1
        base1_position_after_variant = base1_position + len(ref)
    locus_reads = locus_read_generator(
        samfile=samfile,
        chromosome=chromosome,
        base1_position_before_variant=base1_position_before_variant,
        base1_position_after_variant=base1_position_after_variant,
        use_duplicate_reads=use_duplicate_reads,
        use_secondary_alignments=use_secondary_alignments,
        min_mapping_quality=min_mapping_quality)
    allele_reads = allele_reads_from_locus_reads(
        locus_reads=locus_reads,
        n_ref=len(ref))
    return allele_reads
Find reads in the given SAM/BAM file which overlap the given variant and return them as a list of AlleleRead objects. Parameters ---------- samfile : pysam.AlignmentFile variant : varcode.Variant chromosome : str use_duplicate_reads : bool Should we use reads that have been marked as PCR duplicates use_secondary_alignments : bool Should we use reads at locations other than their best alignment min_mapping_quality : int Drop reads below this mapping quality only_alt_allele : bool Filter reads to only include those that support the alt allele of the variant. Returns sequence of AlleleRead objects.
26,226
def exec_python(*args, **kwargs):
    cmdargs, kwargs = __wrap_python(args, kwargs)
    return exec_command(*cmdargs, **kwargs)
Wrap running python script in a subprocess. Return stdout of the invoked command.
26,227
def frameAndSave(abf, tag="", dataType="plot", saveAsFname=False, closeWhenDone=True):
    print("closeWhenDone", closeWhenDone)
    plt.tight_layout()
    plt.subplots_adjust(top=.93, bottom=.07)
    # annotation keyword literals were stripped in extraction; figure-fraction
    # placement with corner alignment is implied by the coordinates used
    plt.annotate(tag, (.01, .99), xycoords='figure fraction',
                 ha='left', va='top', family='monospace', size=10, alpha=.5)
    msgBot = "%s [%s]" % (abf.ID, abf.protocomment)
    plt.annotate(msgBot, (.01, .01), xycoords='figure fraction',
                 ha='left', va='bottom', family='monospace', size=10, alpha=.5)
    fname = tag.lower().replace(" ", "_") + ".jpg"
    fname = dataType + "_" + fname
    plt.tight_layout()
    if IMAGE_SAVE:
        abf.log.info("saving [%s]", fname)
        try:
            if saveAsFname:
                saveAs = os.path.abspath(saveAsFname)
            else:
                saveAs = os.path.abspath(abf.outPre + fname)
            if not os.path.exists(abf.outFolder):
                os.mkdir(abf.outFolder)
            plt.savefig(saveAs)
        except Exception as E:
            abf.log.error("saving [%s] failed!", fname)
            print(E)
    if IMAGE_SHOW == True:
        if closeWhenDone == False:
            print("NOT SHOWING (because closeWhenDone==True and showing would mess things up)")
        else:
            abf.log.info("showing [%s]", fname)
            plt.show()
    if closeWhenDone:
        print("closing figure")
        plt.close()
frame the current matplotlib plot with ABF info, and optionally save it. Note that this is entirely independent of the ABFplot class object. if saveImage is False, show it instead. Datatype should be: * plot * experiment
26,228
def run_command(cmd, debug=False):
    if debug:
        # the message templates were stripped in extraction; these are
        # representative reconstructions
        msg = 'Run command from directory: {}'.format(os.getcwd())
        print_warn(msg)
        msg = 'Command: {}'.format(cmd)
        print_warn(msg)
    cmd()
Execute the given command and return None. :param cmd: A `sh.Command` object to execute. :param debug: An optional bool to toggle debug output. :return: None
26,229
def GetScripts(self, dest_dir):
    metadata_dict = self.watcher.GetMetadata() or {}
    try:
        # the metadata keys were stripped in extraction; instance/project
        # attribute sections are implied by the fallback logic
        instance_data = metadata_dict['instance']['attributes']
    except KeyError:
        instance_data = None
        self.logger.warning('Instance attributes were not found.')
    try:
        project_data = metadata_dict['project']['attributes']
    except KeyError:
        project_data = None
        self.logger.warning('Project attributes were not found.')
    return (self._GetAttributeScripts(instance_data, dest_dir)
            or self._GetAttributeScripts(project_data, dest_dir))
Retrieve the scripts to execute. Args: dest_dir: string, the path to a directory for storing metadata scripts. Returns: dict, a dictionary mapping set metadata keys with associated scripts.
26,230
def get_variant_phenotypes_with_suggested_changes(variant_id_list):
    variants = civic.get_variants_by_ids(variant_id_list)
    evidence = list()
    for variant in variants:
        evidence.extend(variant.evidence)
    for e in evidence:
        # the URL template and dict keys below were stripped in extraction;
        # they are reconstructed as plausible CIViC API names
        suggested_changes_url = f'https://civicdb.org/api/evidence_items/{e.id}/suggested_changes'
        resp = requests.get(suggested_changes_url)
        resp.raise_for_status()
        suggested_changes = dict()
        for suggested_change in resp.json():
            pheno_changes = suggested_change['suggested_changes'].get('phenotype_ids', None)
            if pheno_changes is None:
                continue
            a, b = pheno_changes
            added = set(b) - set(a)
            deleted = set(a) - set(b)
            rid = suggested_change['id']
            suggested_changes[rid] = {'added': added, 'deleted': deleted}
        yield e, {'suggested': suggested_changes,
                  'current': set([x.id for x in e.phenotypes])}
for each variant, yields evidence and associated phenotypes, both current and suggested
26,231
def load_edgegrid_client_settings():
    # the setting-name literal was stripped; an AKAMAI_CCU_* value per the
    # docstring is implied
    if getattr(settings, 'AKAMAI_CCU_CLIENT_SECRET', None):
        ...  # remainder of the function body was truncated in the source
Load Akamai EdgeGrid configuration returns a (hostname, EdgeGridAuth) tuple from the following locations: 1. Values specified directly in the Django settings:: AKAMAI_CCU_CLIENT_SECRET AKAMAI_CCU_HOST AKAMAI_CCU_ACCESS_TOKEN AKAMAI_CCU_CLIENT_TOKEN 2. An edgerc file specified in the AKAMAI_EDGERC_FILENAME settings 3. The default ~/.edgerc file Both edgerc file load options will return the values from the “CCU” section by default. This may be customized using the AKAMAI_EDGERC_CCU_SECTION setting.
26,232
def _restart_target(self):
    if self._server:
        if self._server.returncode is None:
            self._server.kill()
            time.sleep(0.2)
    self._server = subprocess.Popen("python session_server.py",
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    shell=True)
    time.sleep(0.2)
Restart our Target.
26,233
def end_task(self):
    self.progress(self.task_stack[-1].size)
    self.task_stack.pop()
Remove the current task from the stack.
26,234
def get_navigation(request):
    sections = _get_sections(request)
    trail = _get_trail(request, exclude_section=True)
    # template name inferred from the docstring
    return mark_safe(render_to_string('navigation.html',
                                      dict(sections=sections, trail=trail)))
Returns the rendered navigation block. Requires that the `navigation.html` template exists. Two context variables are passed to it: * sections (see :func:`get_breadcrumb_sections`) * trail (see :func:`get_breadcrumb_trail`)
26,235
def _wait_for_job_done(self, project_id, job_id, interval=30):
    if interval <= 0:
        raise ValueError("Interval must be > 0")
    while True:
        job = self._get_job(project_id, job_id)
        # the state literals were stripped; ML Engine's terminal job states
        # are assumed here
        if job['state'] in ['SUCCEEDED', 'FAILED', 'CANCELLED']:
            return job
        time.sleep(interval)
Waits for the Job to reach a terminal state. This method will periodically check the job state until the job reaches a terminal state. Raises: googleapiclient.errors.HttpError: if HTTP error is returned when getting the job
26,236
def freeze(self, progressbar=None):
    self._storage_broker.pre_freeze_hook()
    if progressbar:
        progressbar.label = "Freezing dataset"
    manifest = self.generate_manifest(progressbar=progressbar)
    self._storage_broker.put_manifest(manifest)
    overlays = self._generate_overlays()
    for overlay_name, overlay in overlays.items():
        self._put_overlay(overlay_name, overlay)
    datetime_obj = datetime.datetime.utcnow()
    metadata_update = {
        "type": "dataset",
        "frozen_at": dtoolcore.utils.timestamp(datetime_obj)
    }
    self._admin_metadata.update(metadata_update)
    self._storage_broker.put_admin_metadata(self._admin_metadata)
    self._storage_broker.post_freeze_hook()
Convert :class:`dtoolcore.ProtoDataSet` to :class:`dtoolcore.DataSet`.
26,237
def has_hlu(self, lun_or_snap, cg_member=None):
    hlu = self.get_hlu(lun_or_snap, cg_member=cg_member)
    return hlu is not None
Returns True if `lun_or_snap` is attached to the host. :param lun_or_snap: can be lun, lun snap, cg snap or a member snap of cg snap. :param cg_member: the member lun of cg if `lun_or_snap` is cg snap. :return: True - if `lun_or_snap` is attached, otherwise False.
26,238
def initialize_workflow(self, workflow):
    self.workflow = workflow()
    self.workflow.tasks = self.tasks
    self.workflow.input_file = self.input_file
    self.workflow.input_format = self.input_format
    self.workflow.target_file = self.target_file
    self.workflow.target_format = self.target_format
    self.workflow.run_id = self.run_id
    self.workflow.setup()
Create a workflow. workflow - a workflow class
26,239
def acm_certificate_arn(self, lookup, default=None):
    try:
        # the opening lines were truncated in extraction; lookup is
        # "region/domain" and an ACM client call is implied (the client
        # construction below is an assumption)
        region_name, domain_name = lookup.split('/')
        acm_client = boto3.client('acm', region_name=region_name)
        response = acm_client.list_certificates(
            CertificateStatuses=['ISSUED'],
            MaxItems=100
        )
    except Exception:
        return default
    if len(response["CertificateSummaryList"]) < 1:
        return default
    best_match_cert = None
    for cert_handle in response["CertificateSummaryList"]:
        if cert_handle["DomainName"] == domain_name:
            cert = acm_client.describe_certificate(
                CertificateArn=cert_handle["CertificateArn"])["Certificate"]
            # treat certs lacking IssuedAt as older than any issued cert
            if "IssuedAt" not in cert:
                cert[u"IssuedAt"] = datetime.datetime(1970, 1, 1, 0, 0)
            if best_match_cert is None:
                best_match_cert = cert
            elif cert["IssuedAt"] > best_match_cert["IssuedAt"]:
                best_match_cert = cert
    if best_match_cert is not None:
        return best_match_cert["CertificateArn"]
    return default
Args: lookup: region/domain on the certificate to be looked up default: the optional value to return if lookup failed; returns None if not set Returns: ARN of a certificate with status "Issued" for the region/domain, if found, or default/None if no match If more than one "Issued" certificate matches the region/domain: - if any matching cert was issued by Amazon, returns ARN of certificate with most recent IssuedAt timestamp - if no certs were issued by Amazon, returns ARN of an arbitrary matching certificate - certificates issued by Amazon take precedence over certificates not issued by Amazon
26,240
def safe_popen(*args, **kwargs):
    # on Windows we must not combine close_fds=True with redirected
    # stdin/stdout/stderr; see the docstring below
    close_fds = (os.name != 'nt')
    with popen_lock:
        return subprocess.Popen(*args, close_fds=close_fds, **kwargs)
This wrapper works around two major deadlock issues to do with pipes. The first is that, before Python 3.2 on POSIX systems, os.pipe() creates inheritable file descriptors, which leak to all child processes and prevent reads from reaching EOF. The workaround for this is to set close_fds=True on POSIX, which was not the default in those versions. See PEP 0446 for many details. The second issue arises on Windows, where we're not allowed to set close_fds=True while also setting stdin/stdout/stderr. Descriptors from os.pipe() on Windows have never been inheritable, so it would seem that we're safe. However, the Windows implementation of subprocess.Popen() creates temporary inheritable copies of its descriptors, and these can leak. The workaround for this is to protect Popen() with a global lock. See https://bugs.python.org/issue25565.
26,241
def minter(record_uuid, data, pid_type, key):
    pid = PersistentIdentifier.create(
        pid_type,
        data[key],
        object_type='rec',  # stripped literal; 'rec' follows Invenio convention
        object_uuid=record_uuid,
        status=PIDStatus.REGISTERED
    )
    # the dict key naming the extra identifiers was stripped in extraction;
    # 'identifiers' below is a placeholder
    for scheme, identifier in data['identifiers'].items():
        if identifier:
            PersistentIdentifier.create(
                scheme,
                identifier,
                object_type='rec',
                object_uuid=record_uuid,
                status=PIDStatus.REGISTERED
            )
    return pid
Mint PIDs for a record.
26,242
def from_context(cls):
    try:
        ctx = click.get_current_context()
    except RuntimeError:
        return cls()
    return ctx.find_object(cls)
Retrieve this class' instance from the current Click context. :return: Instance of this class. :rtype: Config
26,243
def _add_timeout_handler(self, handler):
    now = time.time()
    for dummy, method in inspect.getmembers(handler, callable):
        if not hasattr(method, "_pyxmpp_timeout"):
            continue
        self._timeout_handlers.append((now + method._pyxmpp_timeout, method))
    self._timeout_handlers.sort(key=lambda x: x[0])
Add a `TimeoutHandler` to the main loop.
26,244
def list_not_state(subset=None, show_ip=False, show_ipv4=None):
    show_ip = _show_ip_migration(show_ip, show_ipv4)
    connected = list_state(subset=None, show_ip=show_ip)
    key = salt.key.get_key(__opts__)
    keys = key.list_keys()
    not_connected = []
    for minion in keys[key.ACC]:
        if minion not in connected and (subset is None or minion in subset):
            not_connected.append(minion)
    return not_connected
.. versionadded:: 2015.8.0 .. versionchanged:: 2019.2.0 The 'show_ipv4' argument has been renamed to 'show_ip' as it now includes IPv6 addresses for IPv6-connected minions. Print a list of all minions that are NOT up according to Salt's presence detection (no commands will be sent to minions) subset : None Pass in a CIDR range to filter minions by IP address. show_ip : False Also show the IP address each minion is connecting from. CLI Example: .. code-block:: bash salt-run manage.list_not_state
26,245
def all(self, value, pos=None):
    value = bool(value)
    length = self.len
    if pos is None:
        pos = range(self.len)
    for p in pos:
        if p < 0:
            p += length
        if not 0 <= p < length:
            raise IndexError("Bit position {0} out of range.".format(p))
        if self._datastore.getbit(p) is not value:
            return False
    return True
Return True if one or many bits are all set to value. value -- If value is True then checks for bits set to 1, otherwise checks for bits set to 0. pos -- An iterable of bit positions. Negative numbers are treated in the same way as slice indices. Defaults to the whole bitstring.
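A short illustration using the bitstring package, whose Bits.all matches this signature (example values are mine):

    from bitstring import Bits
    b = Bits('0b1101')
    b.all(True, [0, 1, 3])  # True: bits 0, 1 and 3 are set
    b.all(True)             # False: bit 2 is zero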
26,246
def pack_bytes(self, obj_dict, encoding=None):
    assert self.dict_to_bytes or self.dict_to_string
    # the encoding and message literals were stripped in extraction;
    # a UTF-8 fallback is implied by the final return
    encoding = encoding or self.default_encoding or 'utf-8'
    LOGGER.debug('%r packing bytes with encoding %s', self, encoding)
    if self.dict_to_bytes:
        return None, self.dict_to_bytes(obj_dict)
    try:
        return encoding, self.dict_to_string(obj_dict).encode(encoding)
    except LookupError as error:
        raise web.HTTPError(
            406, 'unknown encoding: %s', error,
            reason='unknown encoding: {}'.format(encoding))
    except UnicodeEncodeError as error:
        LOGGER.warning('cannot encode with %s: %s', encoding, str(error))
        return 'utf-8', self.dict_to_string(obj_dict).encode()
Pack a dictionary into a byte stream.
26,247
def has(*permissions, **kwargs):
    # the kwargs key literals were stripped in extraction; 'target' is
    # implied by the docstring
    target = kwargs['target']
    kwargs['target'] = type_for(target)
    return target in filter_(*permissions, **kwargs)
Checks if the passed bearer has the passed permissions (optionally on the passed target).
26,248
def validate_base_url(base_url):
    parsed_url = urllib.parse.urlparse(base_url)
    if parsed_url.scheme and parsed_url.netloc:
        return parsed_url.geturl()
    else:
        error_message = "base_url must contain a valid scheme (protocol " \
                        "specifier) and network location (hostname)"
        raise ValueError(error_message)
Verify that base_url specifies a protocol and network location.
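Example behavior implied by the code (the URLs are illustrative):

    validate_base_url('https://api.example.com/v1')  # returns the URL
    validate_base_url('api.example.com')             # raises ValueError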
26,249
def convert_datetime(obj):
    if ' ' in obj:
        sep = ' '
    elif 'T' in obj:
        sep = 'T'
    else:
        return convert_date(obj)
    try:
        ymd, hms = obj.split(sep, 1)
        usecs = '0'
        if '.' in hms:
            hms, usecs = hms.split('.')
        usecs = float('0.' + usecs) * 1e6
        return datetime.datetime(*[int(x) for x in
                                   ymd.split('-') + hms.split(':') + [usecs]])
    except ValueError:
        return convert_date(obj)
Returns a DATETIME or TIMESTAMP column value as a datetime object: >>> datetime_or_None('2007-02-25 23:06:20') datetime.datetime(2007, 2, 25, 23, 6, 20) >>> datetime_or_None('2007-02-25T23:06:20') datetime.datetime(2007, 2, 25, 23, 6, 20) Illegal values are returned as None: >>> datetime_or_None('2007-02-31T23:06:20') is None True >>> datetime_or_None('0000-00-00 00:00:00') is None True
26,250
def translate_basic(usercode):
    # the dict key and message template were stripped in extraction;
    # 'fore' and the message layout below are placeholders
    codenum = get_code_num(codes['fore'][usercode])
    colorcode = codeformat(codenum)
    msg = '{} is translated to {} ({})'.format(usercode, codenum, colorcode)
    if disabled():
        return msg
    return str(C(msg, fore=usercode))
Translate a basic color name to color with explanation.
26,251
def copy(self, sleep=_unset, stop=_unset, wait=_unset, retry=_unset,
         before=_unset, after=_unset, before_sleep=_unset, reraise=_unset):
    if before_sleep is _unset:
        before_sleep = self.before_sleep
    return self.__class__(
        sleep=self.sleep if sleep is _unset else sleep,
        stop=self.stop if stop is _unset else stop,
        wait=self.wait if wait is _unset else wait,
        retry=self.retry if retry is _unset else retry,
        before=self.before if before is _unset else before,
        after=self.after if after is _unset else after,
        before_sleep=before_sleep,
        # fixed: the original tested `after` here instead of `reraise`
        reraise=self.reraise if reraise is _unset else reraise,
    )
Copy this object with some parameters changed if needed.
26,252
def writeRecord(self, infile):
    if self.bad:
        raise BadWOSRecord(
            "This record cannot be converted to a file as the input was "
            "malformed.\nThe original line number (if any) is: {} and the "
            "original file is: {}".format(self._sourceLine, self._sourceFile))
    else:
        for tag in self._fieldDict.keys():
            for i, value in enumerate(self._fieldDict[tag]):
                if i == 0:
                    infile.write(tag + ' ')
                else:
                    # continuation lines are padded to align with the values
                    # (the padding literal was stripped in extraction)
                    infile.write('   ')
                infile.write(value + '\n')
        infile.write("ER\n")
Writes to _infile_ the original contents of the Record. This is intended for use by [RecordCollections](./RecordCollection.html#metaknowledge.RecordCollection) to write to file. What is written to _infile_ is bit for bit identical to the original record file (if utf-8 is used). No newline is inserted above the write but the last character is a newline. # Parameters _infile_ : `file stream` > An open utf-8 encoded file
26,253
def patch_all():
    global _patched
    if _patched:
        return
    _patched = True
    patch_default_retcodes()
    patch_worker_run_task()
    patch_worker_factory()
    patch_keepalive_run()
    patch_cmdline_parser()
    logger.debug("applied law-specific luigi patches")
Runs all patches. This function ensures that a second invocation has no effect.
26,254
def UpdateManifestResourcesFromXML(dstpath, xmlstr, names=None, languages=None):
    logger.info("Updating manifest in %s", dstpath)
    if dstpath.lower().endswith(".exe"):
        name = 1
    else:
        name = 2
    winresource.UpdateResources(dstpath, xmlstr, RT_MANIFEST,
                                names or [name], languages or [0, "*"])
Update or add manifest XML as resource in dstpath
26,255
def assert_requirements(self):
    ...  # function body was lost in extraction
Asserts PEP 508 specifiers.
26,256
def redata(self, *args):
    if self.rulesview is None:
        Clock.schedule_once(self.redata, 0)
        return
    # the dict-key literals were stripped in extraction; the names below
    # are placeholders
    data = [
        {'rulesview': self.rulesview, 'rule': rule, 'index': i, 'ruleslist': self}
        for i, rule in enumerate(self.rulebook)
    ]
    self.data = data
Make my data represent what's in my rulebook right now
26,257
def do_plot_and_bestfit(self):
    fmt = str(self.kwargs.get("fmt", "k."))
    if "errors" in self.kwargs:
        errors = self.kwargs["errors"]
        if isinstance(errors, dict):
            self.subplot.errorbar(self.x, self.y, fmt=fmt,
                                  xerr=errors.get("xerr", None),
                                  yerr=errors.get("yerr", None))
        elif isinstance(errors, (collections.Sequence, np.ndarray, float)):
            self.subplot.errorbar(self.x, self.y, fmt=fmt, yerr=errors)
        else:
            self.subplot.plot(self.x, self.y, fmt)
    else:
        self.subplot.plot(self.x, self.y, fmt)
    bestfit = self.kwargs.get("bestfit", None)
    if bestfit is not None:
        bestfitlim = self.kwargs.get("bestfitlim", None)
        if bestfitlim is None:
            bestfitlim = self.kwargs.get("xlim", None)
        if bestfitlim is None:
            bestfitlim = (min(self.x), max(self.x))
        fit_args = bestfit.do_bestfit()
        bestfit_line = bestfit.get_bestfit_line(
            x_min=bestfitlim[0], x_max=bestfitlim[1])
        self.subplot.plot(
            bestfit_line[0], bestfit_line[1],
            self.kwargs.get("bestfitfmt", "k-")
        )
        self.outputdict["fit_args"] = fit_args
        self.outputdict["rmse"] = bestfit.get_rmse()
    return self
Create plot
26,258
def get_process_path(tshark_path=None, process_name="tshark"):
    config = get_config()
    possible_paths = [config.get(process_name, "%s_path" % process_name)]
    if tshark_path is not None:
        possible_paths.insert(0, tshark_path)
    # the search over default install locations was truncated in extraction;
    # the final check and error below are a reconstruction per the docstring
    for path in possible_paths:
        if os.path.exists(path):
            return path
    raise TSharkNotFoundException(
        "TShark not found. Searched these paths: {}".format(possible_paths))
Finds the path of the tshark executable. If the user has provided a path or specified a location in config.ini it will be used. Otherwise default locations will be searched. :param tshark_path: Path of the tshark binary :raises TSharkNotFoundException in case TShark is not found in any location.
26,259
def rank_path(graph, path, edge_ranking=None):
    edge_ranking = default_edge_ranking if edge_ranking is None else edge_ranking
    return sum(max(edge_ranking[d[RELATION]]
                   for d in graph.edge[u][v].values())
               for u, v in pairwise(path))
Takes in a path (a list of nodes in the graph) and calculates a score :param pybel.BELGraph graph: A BEL graph :param list[tuple] path: A list of nodes in the path (includes terminal nodes) :param dict edge_ranking: A dictionary of {relationship: score} :return: The score for the edge :rtype: int
26,260
def _static_folder_path(static_url, static_folder, static_asset):
    if not static_asset.startswith(static_folder):
        raise ValueError("%s static asset must be under %s static folder" %
                         (static_asset, static_folder))
    rel_asset = static_asset[len(static_folder):]
    # the join template was stripped in extraction; '%s/%s' with slash
    # trimming is implied
    return '%s/%s' % (static_url.rstrip('/'), rel_asset.lstrip('/'))
Returns a path to a file based on the static folder, and not on the filesystem holding the file. Returns a path relative to static_url for static_asset
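A worked call following the reconstructed return template above (values are illustrative):

    _static_folder_path('/static', '/app/static/', '/app/static/css/site.css')
    # -> '/static/css/site.css'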
26,261
async def can(self, identity, permission) -> bool:
    assert isinstance(permission, (str, enum.Enum)), permission
    assert permission
    identify = await self.identity_policy.identify(identity)
    access = await self.autz_policy.can(identify, permission)
    return access
Check user permissions. :return: ``True`` if the identity is allowed the permission, else return ``False``.
26,262
def enable_broadcasting(self):
    if self._broadcast_reports is not None:
        _clear_queue(self._broadcast_reports)
        return self._broadcast_reports
    self._broadcast_reports = queue.Queue()
    return self._broadcast_reports
Begin accumulating broadcast reports received from all devices. This method will allocate a queue to receive broadcast reports that will be filled asynchronously as broadcast reports are received. Returns: queue.Queue: A queue that will be filled with broadcast reports.
26,263
def get_user_info(self, access_token):
    params = dict(oxd_id=self.oxd_id, access_token=access_token)
    logger.debug("Sending command `get_user_info` with params %s", params)
    response = self.msgr.request("get_user_info", **params)
    logger.debug("Received response: %s", response)
    # the response-key literals were stripped in extraction; oxd responses
    # carry status/data/claims fields
    if response['status'] == 'error':
        raise OxdServerError(response['data'])
    return response['data']['claims']
Function to get the information about the user using the access code obtained from the OP Note: Refer to the /.well-known/openid-configuration URL of your OP for the complete list of the claims for different scopes. Parameters: * **access_token (string):** access token from the get_tokens_by_code function Returns: **dict:** The user data claims that are returned by the OP in format Example response:: { "sub": ["248289761001"], "name": ["Jane Doe"], "given_name": ["Jane"], "family_name": ["Doe"], "preferred_username": ["j.doe"], "email": ["[email protected]"], "picture": ["http://example.com/janedoe/me.jpg"] } Raises: **OxdServerError:** If the param access_token is empty OR if the oxd Server returns an error.
26,264
def set_states(self, states=None, value=None):
    assert self.binded and self.params_initialized
    self._curr_module.set_states(states, value)
Sets value for states. Only one of states & values can be specified. Parameters ---------- states : list of list of NDArrays Source states arrays formatted like ``[[state1_dev1, state1_dev2], [state2_dev1, state2_dev2]]``. value : number A single scalar value for all state arrays.
26,265
def get_or_set_default(self, section, option, value):
    try:
        ret = self.get(section, option)
    except MissingSetting:
        self.set(section, option, value)
        ret = value
    return ret
Base method to fetch values and to set defaults in case they don't exist.
26,266
def update(self, forecasts, observations):
    if len(observations.shape) == 1:
        obs_cdfs = np.zeros((observations.size, self.thresholds.size))
        for o, observation in enumerate(observations):
            obs_cdfs[o, self.thresholds >= observation] = 1
    else:
        obs_cdfs = observations
    self.errors["F_2"] += np.sum(forecasts ** 2, axis=0)
    self.errors["F_O"] += np.sum(forecasts * obs_cdfs, axis=0)
    self.errors["O_2"] += np.sum(obs_cdfs ** 2, axis=0)
    self.errors["O"] += np.sum(obs_cdfs, axis=0)
    self.num_forecasts += forecasts.shape[0]
Update the statistics with forecasts and observations. Args: forecasts: The discrete Cumulative Distribution Functions of the forecasts. observations: The observed values (1-D), or their discrete CDFs.
26,267
def find_magic(self, magic_name, magic_kind='line'):
    # the default literal was stripped; 'line' is IPython's default kind
    return self.magics_manager.magics[magic_kind].get(magic_name)
Find and return a magic of the given type by name. Returns None if the magic isn't found.
26,268
def create(self, parameters):
    # the path template literal was stripped in extraction; a POST to the
    # collection endpoint is implied
    response = self._client.session.post(
        '{url}'.format(url=self.endpoint_url),
        data=parameters
    )
    return self.process_response(response)
Create a new item (if supported) :param parameters: dict :return: dict|str
26,269
def __applytns(self, root):
    TNS = "targetNamespace"
    tns = root.get(TNS)
    if tns is None:
        tns = self.schema.tns[1]
        root.set(TNS, tns)
    else:
        if self.schema.tns[1] != tns:
            raise Exception("%s mismatch" % TNS)
Make sure included schema has the same target namespace.
26,270
def _node_type(st):
    _types = [
        (stat.S_ISBLK, "block device"),
        (stat.S_ISCHR, "character device"),
        (stat.S_ISFIFO, "named pipe"),
        (stat.S_ISSOCK, "socket")
    ]
    for t in _types:
        if t[0](st.st_mode):
            return t[1]
return a string indicating the type of special node represented by the stat buffer st (block, character, fifo, socket).
26,271
def _add_onchain_locksroot_to_snapshot(
        raiden: RaidenService,
        storage: SQLiteStorage,
        snapshot_record: StateChangeRecord,
) -> str:
    snapshot = json.loads(snapshot_record.data)
    # the dict-key literals were stripped in extraction; they are
    # reconstructed from Raiden's chain-state naming
    for payment_network in snapshot.get('identifiers_to_paymentnetworks', dict()).values():
        for token_network in payment_network.get('tokennetworks', list()):
            channelidentifiers_to_channels = token_network.get(
                'channelidentifiers_to_channels',
                dict(),
            )
            for channel in channelidentifiers_to_channels.values():
                our_locksroot, partner_locksroot = _get_onchain_locksroots(
                    raiden=raiden,
                    storage=storage,
                    token_network=token_network,
                    channel=channel,
                )
                channel['our_state']['onchain_locksroot'] = serialize_bytes(our_locksroot)
                channel['partner_state']['onchain_locksroot'] = serialize_bytes(partner_locksroot)
    return json.dumps(snapshot, indent=4), snapshot_record.identifier
Add `onchain_locksroot` to each NettingChannelEndState
26,272
def date_range_for_webtrends(cls, start_at=None, end_at=None):
    if start_at and end_at:
        start_date = cls.parse_standard_date_string_to_date(start_at)
        end_date = cls.parse_standard_date_string_to_date(end_at)
        numdays = (end_date - start_date).days + 1
        start_dates = [end_date - timedelta(days=x)
                       for x in reversed(range(0, numdays))]
        date_range = []
        for i, date in enumerate(start_dates):
            query_date = cls.parse_date_for_query(date)
            date_range.append((query_date, query_date))
        return date_range
    else:
        return [("current_day-1", "current_day-1")]
Get the day dates in between start and end formatted for query. This returns dates inclusive e.g. final day is (end_at, end_at+1 day)
26,273
def put(self, key, value):
    mem = sys.getsizeof(value)
    if self._mem + mem > self._maxmem:
        self.delete(self.last())
    LRUCache.put(self, key, (value, mem))
    self._mem += mem
>>> c = MemSizeLRUCache(maxmem=24*4) >>> c.put(1, 1) >>> c.mem() # 24-bytes per integer 24 >>> c.put(2, 2) >>> c.put(3, 3) >>> c.put(4, 4) >>> c.get(1) 1 >>> c.mem() 96 >>> c.size() 4 >>> c.put(5, 5) >>> c.size() 4 >>> c.get(2) Traceback (most recent call last): ... KeyError: 2
26,274
def json_query(data, expr):
    if jmespath is None:
        # the message literal was stripped; a missing-dependency error
        # is implied
        err = 'json_query requires jmespath library to be installed'
        log.error(err)
        raise RuntimeError(err)
    return jmespath.search(expr, data)
Query data using JMESPath language (http://jmespath.org).
26,275
def intrusion_sets(self, name, owner=None, **kwargs):
    return IntrusionSet(self.tcex, name, owner=owner, **kwargs)
Create the Intrusion Set TI object. Args: owner: name: **kwargs: Return:
26,276
def exists(path, profile=None, hosts=None, scheme=None, username=None,
           password=None, default_acl=None):
    conn = _get_zk_conn(profile=profile, hosts=hosts, scheme=scheme,
                        username=username, password=password,
                        default_acl=default_acl)
    return bool(conn.exists(path))
Check if path exists path path to check profile Configured Zookeeper profile to authenticate with (Default: None) hosts Lists of Zookeeper Hosts (Default: '127.0.0.1:2181') scheme Scheme to authenticate with (Default: 'digest') username Username to authenticate (Default: None) password Password to authenticate (Default: None) default_acl Default acls to assign if a node is created in this connection (Default: None) CLI Example: .. code-block:: bash salt minion1 zookeeper.exists /test/name profile=prod
26,277
def convert_dedent(self):
    if self.indent_amounts:
        self.indent_amounts.pop()
    tokenum = INDENT
    last_indent = 0
    if self.indent_amounts:
        last_indent = self.indent_amounts[-1]
    while self.result[-1][0] == INDENT:
        self.result.pop()
    value = self.indent_type * last_indent
    return tokenum, value
Convert a dedent into an indent
26,278
def fetch(cls, id, api_key=None, endpoint=None, add_headers=None, **kwargs):
    if endpoint is None:
        endpoint = cls.get_endpoint()
    inst = cls(api_key=api_key)
    parse_key = cls.sanitize_ep(endpoint).split("/")[-1]
    endpoint = '/'.join((endpoint, id))
    data = cls._parse(inst.request('GET', endpoint=endpoint,
                                   add_headers=add_headers,
                                   query_params=kwargs),
                      key=parse_key)
    inst._set(data)
    return inst
Fetch a single entity from the API endpoint. Used when you know the exact ID that must be queried.
26,279
def single_row_or_col_df_to_dict(desired_type: Type[T], single_rowcol_df: pd.DataFrame,
                                 logger: Logger, **kwargs) -> Dict[str, str]:
    if single_rowcol_df.shape[0] == 1:
        return single_rowcol_df.transpose()[0].to_dict()
    elif single_rowcol_df.shape[1] == 2 and isinstance(single_rowcol_df.index, pd.RangeIndex):
        d = single_rowcol_df.set_index(single_rowcol_df.columns[0])
        return d[d.columns[0]].to_dict()
    elif single_rowcol_df.shape[1] == 1:
        d = single_rowcol_df
        return d[d.columns[0]].to_dict()
    else:
        # the error-message literals were stripped in extraction
        raise ValueError('Unsupported dataframe shape: ' + str(single_rowcol_df.shape))
Helper method to convert a dataframe with one row or one or two columns into a dictionary :param desired_type: :param single_rowcol_df: :param logger: :param kwargs: :return:
26,280
def _stmt_list(self, stmts, indent=True):
    stmts = "\n".join(nstr for nstr in [n.accept(self) for n in stmts] if nstr)
    if indent:
        return self.indent + stmts.replace("\n", "\n" + self.indent)
    return stmts
return a list of nodes to string
26,281
def _render_content_list(self, content, depth, dstack, **settings):
    result = []
    i = 0
    size = len(content)
    for value in content:
        ds = [(depth, i, size)]
        ds = dstack + ds
        if isinstance(value, dict):
            result.append(self._render_item(ds, "[{}]".format(i), **settings))
            result += self._render_content_dict(value, depth + 1, ds, **settings)
        elif isinstance(value, list):
            result.append(self._render_item(ds, "[{}]".format(i), **settings))
            result += self._render_content_list(value, depth + 1, ds, **settings)
        else:
            result.append(self._render_item(ds, value, **settings))
        i += 1
    return result
Render the list.
26,282
def dt_str_to_posix(dt_str):
    # the separator literal was stripped; splitting on '.' isolates the
    # strptime-parsable part from the fractional seconds
    parsable, _ = dt_str.split('.')
    dt = datetime.datetime.strptime(parsable, _DT_FORMAT)
    return calendar.timegm(dt.utctimetuple())
format str to posix. datetime str is of format %Y-%m-%dT%H:%M:%S.%fZ, e.g. 2013-04-12T00:22:27.978Z. According to ISO 8601, T is a separator between date and time when they are on the same line. Z indicates UTC (zero meridian). A pointer: http://www.cl.cam.ac.uk/~mgk25/iso-time.html This is used to parse LastModified node from GCS's GET bucket XML response. Args: dt_str: A datetime str. Returns: A float of secs from unix epoch. By posix definition, epoch is midnight 1970/1/1 UTC.
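Worked example, using the docstring's own sample timestamp:

    dt_str_to_posix('2013-04-12T00:22:27.978Z')  # -> 1365726147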
26,283
def getCoords(self):
    if DEBUG_COORDS:
        # the debug format string was partially lost in extraction
        print >> sys.stderr, "getCoords(%s)" % self
    (x, y) = self.getXY()
    w = self.getWidth()
    h = self.getHeight()
    return ((x, y), (x + w, y + h))
Gets the coords of the View @return: A tuple containing the View's coordinates ((L, T), (R, B))
26,284
def cmd_queue_peaks(self):
    threshold = 1
    peaks = []
    current_peak = 0
    current_queue = 0
    current_span = 0
    first_on_queue = None
    for line in self._valid_lines:
        current_queue = line.queue_backend
        if current_queue > 0:
            current_span += 1
            if first_on_queue is None:
                first_on_queue = line.accept_date
        if current_queue == 0 and current_peak > threshold:
            # the dict-key literals were stripped in extraction; the names
            # below are placeholders
            data = {
                'peak': current_peak,
                'span': current_span,
                'first': first_on_queue,
                'last': line.accept_date,
            }
            peaks.append(data)
            current_peak = 0
            current_span = 0
            first_on_queue = None
        if current_queue > current_peak:
            current_peak = current_queue
    if current_queue > 0 and current_peak > threshold:
        data = {
            'peak': current_peak,
            'span': current_span,
            'first': first_on_queue,
            'last': line.accept_date,
        }
        peaks.append(data)
    return peaks
Generate a list of the requests peaks on the queue. A queue peak is defined by the biggest value on the backend queue on a series of log lines that are between log lines without being queued. .. warning:: Allow to configure up to which peak can be ignored. Currently set to 1.
26,285
def get_spider_stats(self):
    self.logger.debug("Gathering spider stats")
    the_dict = {}
    spider_set = set()
    total_spider_count = 0
    # the key pattern and dict-key literals were stripped in extraction;
    # the names below are placeholders following scrapy-cluster's layout
    keys = self.redis_conn.keys('stats:crawler:*:*:*')
    for key in keys:
        elements = key.split(":")
        spider = elements[3]
        if spider not in the_dict:
            the_dict[spider] = {}
            the_dict[spider]['count'] = 0
        if len(elements) == 6:
            response = elements[4]
            end = elements[5]
            if response not in the_dict[spider]:
                the_dict[spider][response] = {}
            the_dict[spider][response][end] = self._get_key_value(key, end == 'lifetime')
        elif len(elements) == 5:
            the_dict[spider]['count'] += 1
            total_spider_count += 1
            spider_set.add(spider)
        else:
            self.logger.warn("Unknown crawler stat key", {"key": key})
    the_dict['unique_spider_count'] = len(spider_set)
    the_dict['total_spider_count'] = total_spider_count
    ret_dict = {}
    ret_dict['spiders'] = the_dict
    return ret_dict
Gather spider based stats
26,286
def _get_id2gos(self, associations, **kws):
    options = AnnoOptions(self.evobj, **kws)
    assc = self.reduce_annotations(associations, options)
    return self._get_dbid2goids(assc) if options.b_geneid2gos else self._get_goid2dbids(assc)
Return given associations in a dict, id2gos
26,287
def do_build(self):
    tmp = self.explicit
    self.explicit = True
    b = super(KeyShareEntry, self).do_build()
    self.explicit = tmp
    return b
We need this hack, else 'self' would be replaced by __iter__.next().
26,288
def get_elements(self, json_string, expr):
    load_input_json = self.string_to_json(json_string)
    jsonpath_expr = parse(expr)
    value_list = []
    for match in jsonpath_expr.find(load_input_json):
        value_list.append(match.value)
    if not value_list:
        return None
    else:
        return value_list
Get list of elements from _json_string_, matching [http://goessner.net/articles/JsonPath/|JSONPath] expression. *Args:*\n _json_string_ - JSON string;\n _expr_ - JSONPath expression; *Returns:*\n List of found elements or ``None`` if no elements were found *Example:*\n | *Settings* | *Value* | | Library | JsonValidator | | Library | OperatingSystem | | *Test Cases* | *Action* | *Argument* | *Argument* | | Get json elements | ${json_example}= | OperatingSystem.Get File | ${CURDIR}${/}json_example.json | | | ${json_elements}= | Get elements | ${json_example} | $.store.book[*].author | =>\n | [u'Nigel Rees', u'Evelyn Waugh', u'Herman Melville', u'J. R. R. Tolkien']
26,289
def order_by_json_path(self, json_path, language_code=None, order='asc'):
    language_code = (language_code or self._language_code or
                     self.get_language_key(language_code))
    # the path-template literal was stripped in extraction; '{lang,path}'
    # matches the #>> operator's text-array path syntax per the docstring
    json_path = '{%s,%s}' % (language_code, json_path)
    raw_sql_expression = RawSQL("translations#>>%s", (json_path,))
    if order == 'desc':
        raw_sql_expression = raw_sql_expression.desc()
    return self.order_by(raw_sql_expression)
Orders a queryset by the value of the specified `json_path`. More about the `#>>` operator and the `json_path` arg syntax: https://www.postgresql.org/docs/current/static/functions-json.html More about Raw SQL expressions: https://docs.djangoproject.com/en/dev/ref/models/expressions/#raw-sql-expressions Usage example: MyModel.objects.language('en_us').filter(is_active=True).order_by_json_path('title')
26,290
def renderThumbnail(self, relpath=""):
    if self.thumbnail is None:
        return ""
    elif self.thumbnail is self.fullimage:
        fname = relpath + self.fullimage
        # the HTML templates below were stripped in extraction; these are
        # representative reconstructions
        return '<a href="%s">%s</a>' % (
            quote_url(fname), quote_url(os.path.basename(self.filename)))
    else:
        tname = relpath + self.thumbnail
        fname = relpath + self.fullimage
        return '<a href="%s"><img src="%s" alt="%s"/></a>' % (
            quote_url(fname), quote_url(tname),
            quote_url(os.path.basename(self.filename)))
renderThumbnail() is called to render a thumbnail of the DP (e.g. in Data Product tables).
26,291
def remote_command(function, self, *args, **kwargs):
    try:
        return function(self, *args, **kwargs)
    except RuntimeError as exception:
        error_message = str(exception)
        match = CRE_REMOTE_ERROR.match(error_message)
        if match:
            # the group-name literals were stripped in extraction; named
            # groups for the command and return codes are implied
            command_code = int(match.group('command_code'))
            return_code = int(match.group('return_code'))
            raise FirmwareError(command_code, return_code)
        match = CRE_REMOTE_COMMAND_ERROR.match(error_message)
        if match:
            command_code = int(match.group('command_code'))
            command_name = NAMES_BY_COMMAND_CODE[command_code]
            raise RuntimeError(CRE_REMOTE_COMMAND_ERROR.sub(command_name,
                                                            error_message))
        raise
Catch `RuntimeError` exceptions raised by remote control board firmware commands and re-raise as more specific `FirmwareError` exception type, which includes command code and return code.
26,292
def _eval_target_brutal(state, ip, limit):
    addrs = state.solver.eval_upto(ip, limit)
    return [(ip == addr, addr) for addr in addrs]
The traditional way of evaluating symbolic jump targets. :param state: A SimState instance. :param ip: The AST of the instruction pointer to evaluate. :param limit: The maximum number of concrete IPs. :return: A list of conditions and the corresponding concrete IPs. :rtype: list
26,293
def pop(self, key, default=NONE):
    if key in self:
        self._list_remove(key)
        return self._pop(key)
    else:
        if default is NONE:
            raise KeyError(key)
        else:
            return default
If *key* is in the dictionary, remove it and return its value, else return *default*. If *default* is not given and *key* is not in the dictionary, a KeyError is raised.
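Behavioral sketch (od stands in for an instance of this ordered-mapping class):

    od['a'] = 1
    od.pop('a')        # -> 1
    od.pop('a', None)  # -> None
    od.pop('a')        # raises KeyError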
26,294
def _default_transform_fn(self, model, content, content_type, accept):
    try:
        data = self._input_fn(content, content_type)
    except _errors.UnsupportedFormatError as e:
        return self._error_response(e, http_client.UNSUPPORTED_MEDIA_TYPE)
    prediction = self._predict_fn(data, model)
    try:
        result = self._output_fn(prediction, accept)
    except _errors.UnsupportedFormatError as e:
        return self._error_response(e, http_client.NOT_ACCEPTABLE)
    return result
Make predictions against the model and return a serialized response. This serves as the default implementation of transform_fn, used when the user has not implemented one themselves. Args: model (obj): model loaded by model_fn. content: request content. content_type (str): the request Content-Type. accept (str): accept content-type expected by the client. Returns: sagemaker_containers.beta.framework.worker.Response or tuple: the serialized response data and its content type, either as a Response object or a tuple of the form (response_data, content_type)
26,295
def render_name(self, template_name, *context, **kwargs):
    loader = self._make_loader()
    template = loader.load_name(template_name)
    return self._render_string(template, *context, **kwargs)
Render the template with the given name using the given context. See the render() docstring for more information.
26,296
def eol_distance_last(self, offset=0):
    distance = 0
    for char in reversed(self.string[:self.pos + offset]):
        if char == '\n':
            break
        else:
            distance += 1
    return distance
Return the amount of characters until the last newline.
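Example per the code (reader stands in for an instance of the containing class; values are illustrative):

    # with reader.string == 'ab\ncd' and reader.pos == 5
    reader.eol_distance_last()  # -> 2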
26,297
def _parse_package(cls, package_string):
    pkg, arch = rsplit(package_string, cls._arch_sep(package_string))
    if arch not in KNOWN_ARCHITECTURES:
        pkg, arch = (package_string, None)
    # the separator literals were stripped in extraction; NVR strings are
    # dash-separated and epochs are colon-prefixed
    pkg, release = rsplit(pkg, '-')
    name, version = rsplit(pkg, '-')
    epoch, version = version.split(':', 1) if ":" in version else ['0', version]
    # special case for names that embed a dash-separated version fragment
    # (the original prefix/suffix literals were lost in extraction)
    if name.startswith('oracleasm') and name.endswith('.el5'):
        name, version2 = name.split('-', 1)
        version = version2 + '-' + version
    return {
        'name': name,
        'version': version,
        'release': release,
        'arch': arch,
        'epoch': epoch
    }
Helper method for parsing package string. Args: package_string (str): dash separated package string such as 'bash-4.2.39-3.el7' Returns: dict: dictionary containing 'name', 'version', 'release' and 'arch' keys
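A worked parse, using the docstring's example plus an architecture suffix (assuming 'x86_64' is in KNOWN_ARCHITECTURES):

    _parse_package('bash-4.2.39-3.el7.x86_64')
    # -> {'name': 'bash', 'version': '4.2.39', 'release': '3.el7',
    #     'arch': 'x86_64', 'epoch': '0'}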
26,298
def to_str(obj):
    if isinstance(obj, str):
        return obj
    if isinstance(obj, unicode):
        # the encoding literal was stripped; UTF-8 is assumed
        return obj.encode('utf-8')
    return str(obj)
convert a object to string
26,299
def get_site(self, *args):
    num_args = len(args)
    if num_args == 1:
        site = args[0]
    elif num_args == 2:
        host_name, path_to_site = args
        path_to_site = '/' + path_to_site if not path_to_site.startswith('/') else path_to_site
        # the id template was stripped in extraction; 'host:/path:' follows
        # the Microsoft Graph site-id form for host + path lookups
        site = '{}:{}:'.format(host_name, path_to_site)
    elif num_args == 3:
        site = ','.join(args)
    else:
        raise ValueError('Incorrect number of arguments')
    # the endpoint-key literal was stripped; 'get_site' is a placeholder
    url = self.build_url(self._endpoints.get('get_site').format(id=site))
    response = self.con.get(url)
    if not response:
        return None
    data = response.json()
    return self.site_constructor(parent=self, **{self._cloud_data_key: data})
Returns a sharepoint site :param args: It accepts multiple ways of retrieving a site: get_site(host_name): the host_name: host_name e.g. 'contoso.sharepoint.com' or 'root' get_site(site_id): the site_id: a comma separated string of (host_name, site_collection_id, site_id) get_site(host_name, path_to_site): host_name e.g. 'contoso.sharepoint.com', path_to_site: a url path (with a leading slash) get_site(host_name, site_collection_id, site_id): host_name e.g. 'contoso.sharepoint.com' :rtype: Site