Dataset columns: "Unnamed: 0" (int64, values 0–389k); "code" (string, lengths 26–79.6k); "docstring" (string, lengths 1–46.9k).
def get_config_section(self, name):
    if self.config.has_section(name):
        return self.config.items(name)
    return []
Get a section of a configuration
378,701
def _sleep(current_sleep, max_sleep=_MAX_SLEEP, multiplier=_MULTIPLIER):
    actual_sleep = random.uniform(0.0, current_sleep)
    time.sleep(actual_sleep)
    return min(multiplier * current_sleep, max_sleep)
Sleep and produce a new sleep time. .. _Exponential Backoff And Jitter: https://www.awsarchitectureblog.com/2015/03/backoff.html Select a duration between zero and ``current_sleep``. It might seem counterintuitive to have so much jitter, but `Exponential Backoff And Jitter`_ argues that "full jitter" is the best strategy. Args: current_sleep (float): The current "max" for the sleep interval. max_sleep (Optional[float]): Eventual "max" sleep time. multiplier (Optional[float]): Multiplier for exponential backoff. Returns: float: Newly doubled ``current_sleep`` or ``max_sleep`` (whichever is smaller)
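A short, self-contained illustration of the full-jitter loop documented above; the retry wrapper, its bounds, and flaky_call are illustrative assumptions, not part of the source:

import random
import time

_MAX_SLEEP = 32.0
_MULTIPLIER = 2.0

def _sleep(current_sleep, max_sleep=_MAX_SLEEP, multiplier=_MULTIPLIER):
    # Sleep a random duration in [0, current_sleep) ("full jitter"),
    # then return the doubled cap, clamped to max_sleep.
    time.sleep(random.uniform(0.0, current_sleep))
    return min(multiplier * current_sleep, max_sleep)

def call_with_retries(flaky_call, attempts=5, initial_sleep=1.0):
    current_sleep = initial_sleep
    for attempt in range(attempts):
        try:
            return flaky_call()
        except ConnectionError:
            if attempt == attempts - 1:
                raise
            current_sleep = _sleep(current_sleep)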
378,702
def _max_width_formatter(string, cols, separator='\n'):
    # the separator default and several string literals were stripped in the
    # dump; the '' / ' ' / '\n' values below are best-effort restorations
    is_color = isinstance(string, ColoredString)
    if is_color:
        string_copy = string._new()
        string = string.s
    stack = tsplit(string, NEWLINES)
    for i, substring in enumerate(stack):
        stack[i] = substring.split()
    _stack = []
    for row in stack:
        _row = ['']
        _row_i = 0
        for word in row:
            if (len(_row[_row_i]) + len(word)) <= cols:
                _row[_row_i] += word
                _row[_row_i] += ' '
            elif len(word) > cols:
                # flush the current row before chunking an over-wide word
                if len(_row[_row_i]):
                    _row[_row_i] = _row[_row_i].rstrip()
                    _row.append('')
                    _row_i += 1
                chunks = schunk(word, cols)
                for i, chunk in enumerate(chunks):
                    if not (i + 1) == len(chunks):
                        _row[_row_i] += chunk
                        _row[_row_i] = _row[_row_i].rstrip()
                        _row.append('')
                        _row_i += 1
                    else:
                        _row[_row_i] += chunk
                        _row[_row_i] += ' '
            else:
                _row[_row_i] = _row[_row_i].rstrip()
                _row.append('')
                _row_i += 1
                _row[_row_i] += word
                _row[_row_i] += ' '
        else:
            _row[_row_i] = _row[_row_i].rstrip()
        _row = map(str, _row)
        _stack.append(separator.join(_row))
    _s = '\n'.join(_stack)
    if is_color:
        _s = string_copy._new(_s)
    return _s
Returns a freshly formatted string. :param string: string to be formatted :type string: basestring or clint.textui.colored.ColoredString :param cols: max width for the text to be formatted :type cols: int :param separator: separator to break rows :type separator: basestring
378,703
def xoffset(self, value):
    if self._xoffset != value and \
       isinstance(value, (int, float, long)):
        self._xoffset = value
gets/sets the xoffset
378,704
def document_delete(index, doc_type, id, hosts=None, profile=None):
    es = _get_instance(hosts, profile)
    try:
        return es.delete(index=index, doc_type=doc_type, id=id)
    except elasticsearch.exceptions.NotFoundError:
        return None
    except elasticsearch.TransportError as e:
        raise CommandExecutionError(
            "Cannot delete document {0} in index {1}, server returned code {2} with message {3}".format(
                id, index, e.status_code, e.error))
Delete a document from an index index Index name where the document resides doc_type Type of the document id Document identifier CLI example:: salt myminion elasticsearch.document_delete testindex doctype1 AUx-384m0Bug_8U80wQZ
378,705
def unsafe_peek(init):
    def peek(store, container, _stack=None):
        return init(*[store.peek(attr, container, _stack=_stack) for attr in container])
    return peek
Deserialize all the attributes available in the container and pass them in the same order as they come in the container. This is a factory function; returns the actual `peek` routine. Arguments: init: type constructor. Returns: callable: deserializer (`peek` routine).
378,706
def set_distribute_verbatim(self, distribute_verbatim=None):
    if distribute_verbatim is None:
        raise NullArgument()
    # dict keys were stripped in the dump; restored per the osid convention (assumption)
    metadata = Metadata(**settings.METADATA['distribute_verbatim'])
    if metadata.is_read_only():
        raise NoAccess()
    if self._is_valid_input(distribute_verbatim, metadata, array=False):
        self._my_map['distributeVerbatim'] = distribute_verbatim
    else:
        raise InvalidArgument()
Sets the distribution rights. :param distribute_verbatim: right to distribute verbatim copies :type distribute_verbatim: ``boolean`` :raise: ``InvalidArgument`` -- ``distribute_verbatim`` is invalid :raise: ``NoAccess`` -- authorization failure *compliance: mandatory -- This method must be implemented.*
378,707
def decode(cls, bytes, cmddict=None):
    byte = struct.unpack('B', bytes)[0]  # format string restored: a single unsigned byte
    self = cls()
    defval = self.default
    for bit, name, value0, value1, default in SeqCmdAttrs.Table:
        mask = 1 << bit
        bitset = mask & byte
        defset = mask & defval
        if bitset != defset:
            if bitset:
                self.attrs[name] = value1
            else:
                self.attrs[name] = value0
    return self
Decodes sequence command attributes from an array of bytes and returns a new SeqCmdAttrs.
378,708
def make_shell_logfiles_url(host, shell_port, _, instance_id=None):
    if not shell_port:
        return None
    if not instance_id:
        return "http://%s:%d/browse/log-files" % (host, shell_port)
    else:
        return "http://%s:%d/file/log-files/%s.log.0" % (host, shell_port, instance_id)
Make the url for log-files in heron-shell from the info stored in stmgr. If no instance_id is provided, the link will be to the dir for the whole container. If shell port is not present, it returns None.
378,709
def get_space_information(self, space_key, expand=None, callback=None):
    params = {}
    if expand:
        params["expand"] = expand
    return self._service_get_request("rest/api/space/{key}".format(key=space_key),
                                     params=params, callback=callback)
Returns information about a space. :param space_key (string): A string containing the key of the space. :param expand (string): OPTIONAL: A comma separated list of properties to expand on the space. Default: Empty. :param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns. Default: None (no callback, raw data returned). :return: The JSON data returned from the space/{spaceKey} endpoint, or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
378,710
def refresh(self):
    resp = {
        API_SECURITY_REFRESH_TOKEN_KEY: create_access_token(
            identity=get_jwt_identity(), fresh=False
        )
    }
    return self.response(200, **resp)
Security endpoint for the refresh token, so we can obtain a new token without forcing the user to login again
---
post:
  responses:
    200:
      description: Refresh Successful
      content:
        application/json:
          schema:
            type: object
            properties:
              refresh_token:
                type: string
    401:
      $ref: '#/components/responses/401'
    500:
      $ref: '#/components/responses/500'
378,711
def parseprofile(profilelog, out):
    # string literals were stripped in the dump; the sort keys follow the
    # section headers below, and the messages are best-effort restorations
    file = open(out, 'w')
    print('Parsing profile log %s' % profilelog)
    p = pstats.Stats(profilelog, stream=file)
    print()
    file.write("=== All stats:\n")
    p.strip_dirs().sort_stats(-1).print_stats()
    file.write("=== Cumulative time:\n")
    p.sort_stats('cumulative').print_stats(100)
    file.write("=== Time:\n")
    p.sort_stats('time').print_stats(100)
    file.write("=== Time + cumulative time:\n")
    p.sort_stats('time', 'cumulative').print_stats(.5)
    file.write("=== Callees:\n")
    p.print_callees()
    file.write("=== Callers:\n")
    p.print_callers()
    file.close()
    print('Profile summary written to %s' % out)
    print()
Parse a profile log and print the result on screen
378,712
def write_summary_cnts_all(self):
    cnts = self.get_cnts_levels_depths_recs(set(self.obo.values()))
    self._write_summary_cnts(cnts)
Write summary of level and depth counts for all active GO Terms.
378,713
def get_snippet(self, snippet_key=None):
    uri = '/'.join([self.api_uri, self.snippets_suffix])
    if snippet_key:
        uri = '/'.join([uri, snippet_key])
    code, data = self._req('get', uri)  # joiner and HTTP verb restored (assumptions)
    return code, data
Get all/one specific snippet by its key Args: key snippet key (default: None i.e. ALL) return (status code, snippet dict or list thereof)
378,714
def generic_find_constraint_name(table, columns, referenced, db):
    t = sa.Table(table, db.metadata, autoload=True, autoload_with=db.engine)
    for fk in t.foreign_key_constraints:
        if fk.referred_table.name == referenced and set(fk.column_keys) == columns:
            return fk.name
Utility to find a constraint name in alembic migrations
378,715
def add_default_import(cls, module: str):
    if module in cls.GATED_IMPORTS:
        cls.DEFAULT_IMPORTS.swap(lambda s: s.cons(sym.symbol(module)))
Add a gated default import to the default imports. In particular, we need to avoid importing 'basilisp.core' before we have finished macro-expanding.
378,716
def blob(self, nodeid, tag, start=0, end=0xFFFFFFFF):
    startkey = self.makekey(nodeid, tag, start)
    endkey = self.makekey(nodeid, tag, end)
    cur = self.btree.find('ge', startkey)  # comparison op restored as 'ge' (assumption)
    data = b''
    while cur.getkey() <= endkey:
        data += cur.getval()
        cur.next()
    return data
Blobs are stored in sequential nodes with increasing index values. Most blobs, like scripts, start at index 0; long names start at a specified offset.
378,717
def GetArtifactPathDependencies(rdf_artifact):
    deps = set()
    for source in rdf_artifact.sources:
        for arg, value in iteritems(source.attributes):
            paths = []
            if arg in ["path", "query"]:
                paths.append(value)
            if arg == "key_value_pairs":
                paths.extend([x["key"] for x in value])
            if arg in ["keys", "paths", "path_list", "content_regex_list"]:
                paths.extend(value)
            for path in paths:
                for match in artifact_utils.INTERPOLATED_REGEX.finditer(path):
                    deps.add(match.group()[2:-2])
    deps.update(GetArtifactParserDependencies(rdf_artifact))
    return deps
Return a set of knowledgebase path dependencies. Args: rdf_artifact: RDF artifact object. Returns: A set of strings for the required kb objects e.g. ["users.appdata", "systemroot"]
378,718
def set_permissions(self, object, replace=False):
    # stripped literals restored from boto's published cloudfront source (assumptions)
    if isinstance(self.config.origin, S3Origin):
        if self.config.origin.origin_access_identity:
            id = self.config.origin.origin_access_identity.split('/')[-1]
            oai = self.connection.get_origin_access_identity_info(id)
            policy = object.get_acl()
            if replace:
                policy.acl = ACL()
            policy.acl.add_user_grant('READ', oai.s3_user_id)
            object.set_acl(policy)
        else:
            object.set_canned_acl('public-read')
Sets the S3 ACL grants for the given object to the appropriate value based on the type of Distribution. If the Distribution is serving private content the ACL will be set to include the Origin Access Identity associated with the Distribution. If the Distribution is serving public content the content will be set up with "public-read". :type object: :class:`boto.cloudfront.object.Object` :param object: The Object whose ACL is being set :type replace: bool :param replace: If False, the Origin Access Identity will be appended to the existing ACL for the object. If True, the ACL for the object will be completely replaced with one that grants READ permission to the Origin Access Identity.
378,719
def _initURL(self, org_url=None, token_url=None, referer_url=None):
    # URL fragments and dict keys restored from the ArcGIS REST conventions (assumptions)
    if org_url is not None and org_url != '':
        if not org_url.startswith('http://') and not org_url.startswith('https://'):
            org_url = 'https://' + org_url
        self._org_url = org_url
    if not self._org_url.startswith('http://') and not self._org_url.startswith('https://'):
        self._org_url = 'https://' + self._org_url
    if self._org_url.lower().find('/sharing/rest') > -1:
        self._url = self._org_url
    else:
        self._url = self._org_url + "/sharing/rest"
    if self._url.startswith('http://'):
        self._surl = self._url.replace('http://', 'https://')
    else:
        self._surl = self._url
    if token_url is None:
        results = self._get(url=self._surl + '/info',
                            param_dict={'f': 'json'},
                            proxy_port=self._proxy_port,
                            proxy_url=self._proxy_url)
        if 'authInfo' in results and 'tokenServicesUrl' in results['authInfo']:
            self._token_url = results['authInfo']['tokenServicesUrl']
        else:
            self._token_url = self._surl + '/generateToken'
    else:
        self._token_url = token_url
    parsed_url = urlparse(self._org_url)
    self._parsed_org_url = urlunparse((parsed_url[0], parsed_url[1], "", "", "", ""))
    if referer_url is None:
        self._referer_url = parsed_url.netloc
sets proper URLs for AGOL
378,720
def count_year(year, **kwargs):
    url = gbif_baseurl + 'occurrence/counts/year'  # path restored (assumption)
    out = gbif_GET(url, {'year': year}, **kwargs)
    return out
Lists occurrence counts by year :param year: [int] year range, e.g., ``1990,2000``. Does not support ranges like ``asterisk,2010`` :return: dict Usage:: from pygbif import occurrences occurrences.count_year(year = '1990,2000')
378,721
def handle_connection_repl(client):
    # part of the body was lost in the dump; fh, junk and old_interval are
    # defined in the missing portion
    client.settimeout(None)
    setinterval(2147483647)
    try:
        try:
            client.close()
            fh.detach()
        except IOError:
            pass
        else:
            fh.close()
        del fh
        del junk
    finally:
        setinterval(old_interval)
        _LOG("Cleaned up.")
Handles connection.
378,722
def encode(data, checksum=True):
    if checksum:
        data = data + utils.hash256(data)[:4]
    v, prefix = to_long(256, lambda x: x, iter(data))
    data = from_long(v, prefix, BASE58_BASE, lambda v: BASE58_ALPHABET[v])
    return data.decode("utf8")
Convert binary to base58 using BASE58_ALPHABET.
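For reference, the checksum-then-encode flow above can be sketched standalone; this assumes BASE58_ALPHABET is the standard Bitcoin alphabet and hash256 is double SHA-256:

import hashlib

BASE58_ALPHABET = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'

def hash256(data):
    # double SHA-256, as used for base58check checksums
    return hashlib.sha256(hashlib.sha256(data).digest()).digest()

def b58check_encode(data):
    data = data + hash256(data)[:4]  # append 4-byte checksum
    n = int.from_bytes(data, 'big')
    out = b''
    while n > 0:
        n, r = divmod(n, 58)
        out = BASE58_ALPHABET[r:r + 1] + out
    # each leading zero byte is encoded as '1'
    pad = len(data) - len(data.lstrip(b'\x00'))
    return (BASE58_ALPHABET[0:1] * pad + out).decode('utf8')

print(b58check_encode(b'\x00' + b'\x01' * 20))  # base58check of a 21-byte payload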
378,723
def enrich_variants(graph: BELGraph, func: Union[None, str, Iterable[str]] = None):
    if func is None:
        func = {PROTEIN, RNA, MIRNA, GENE}
    nodes = list(get_nodes_by_function(graph, func))
    for u in nodes:
        parent = u.get_parent()
        if parent is None:
            continue
        if parent not in graph:
            graph.add_has_variant(parent, u)
Add the reference nodes for all variants of the given function. :param graph: The target BEL graph to enrich :param func: The function by which the subject of each triple is filtered. Defaults to the set of protein, rna, mirna, and gene.
378,724
def json2py(json_obj):
    # regex patterns and strptime/split formats were stripped in the dump;
    # the temporal formats below are best-effort reconstructions
    for key, value in json_obj.items():
        if type(value) not in (str, unicode):
            continue
        if re.match(r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$', value):
            value = datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
        elif re.match(r'^\d{4}-\d{2}-\d{2}$', value):
            year, month, day = map(int, value.split('-'))
            value = datetime.date(year, month, day)
        elif re.match(r'^\d{2}:\d{2}:\d{2}:\d+$', value):
            hour, minute, second, micro = map(int, value.split(':'))
            value = datetime.time(hour, minute, second, micro)
        else:
            found = False
            for decoder in _decoders:
                success, new_value = decoder(value)
                if success:
                    value = new_value
                    found = True
                    break
            if not found:
                continue
        json_obj[key] = value
    return json_obj
Converts the inputted JSON object to a python value. :param json_obj | <variant>
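A standalone distillation of the shape-based dispatch above, with a demo call; the regexes and format strings mirror the (assumed) reconstructions in the row, not verified originals:

import datetime
import re

def parse_temporal(value):
    # 'YYYY-MM-DD HH:MM:SS' -> datetime
    if re.match(r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}$', value):
        return datetime.datetime.strptime(value, '%Y-%m-%d %H:%M:%S')
    # 'YYYY-MM-DD' -> date
    if re.match(r'^\d{4}-\d{2}-\d{2}$', value):
        return datetime.date(*map(int, value.split('-')))
    # 'HH:MM:SS:UUUUUU' -> time
    if re.match(r'^\d{2}:\d{2}:\d{2}:\d+$', value):
        return datetime.time(*map(int, value.split(':')))
    return value

print(parse_temporal('2024-01-31'))           # 2024-01-31 (datetime.date)
print(parse_temporal('12:30:45:000123'))      # 12:30:45.000123 (datetime.time)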
378,725
def parse_class_names(args):
    num_class = args.num_class
    if len(args.class_names) > 0:
        if os.path.isfile(args.class_names):
            with open(args.class_names, 'r') as f:
                class_names = [l.strip() for l in f.readlines()]
        else:
            class_names = [c.strip() for c in args.class_names.split(',')]
        assert len(class_names) == num_class, str(len(class_names))
        for name in class_names:
            assert len(name) > 0
    else:
        class_names = None
    return class_names
parse # classes and class_names if applicable
378,726
def get_ips(v6=False):
    res = {}
    for iface in six.itervalues(IFACES):
        ips = []
        for ip in iface.ips:
            if v6 and ":" in ip:
                ips.append(ip)
            elif not v6 and ":" not in ip:
                ips.append(ip)
        res[iface] = ips
    return res
Returns all available IPs matching to interfaces, using the windows system. Should only be used as a WinPcapy fallback.
378,727
def traverse_commits(self) -> Generator[Commit, None, None]:
    # log message strings restored from pydriller's published source (assumptions)
    if isinstance(self._path_to_repo, str):
        self._path_to_repo = [self._path_to_repo]
    for path_repo in self._path_to_repo:
        if self._isremote(path_repo):
            tmp_folder = tempfile.TemporaryDirectory()
            path_repo = self._clone_remote_repos(tmp_folder.name, path_repo)
        git_repo = GitRepository(path_repo)
        self._sanity_check_filters(git_repo)
        self._check_timezones()
        logger.info('Analyzing git repository in %s', git_repo.path)
        if self._filepath is not None:
            self._filepath_commits = git_repo.get_commits_modified_file(self._filepath)
        for commit in git_repo.get_list_commits(self._only_in_branch, not self._reversed_order):
            logger.info('Commit #%s in %s from %s',
                        commit.hash, commit.committer_date, commit.author.name)
            if self._is_commit_filtered(commit):
                logger.info('Commit #%s filtered', commit.hash)
                continue
            yield commit
Analyze all the specified commits (all of them by default), returning a generator of commits.
378,728
def process_post_tag(self, bulk_mode, api_tag):
    tag = None
    if bulk_mode:
        tag = self.ref_data_map["tags"].get(api_tag["ID"])
    if not tag:
        tag, created = Tag.objects.get_or_create(site_id=self.site_id,
                                                 wp_id=api_tag["ID"],
                                                 defaults=self.api_object_data("tag", api_tag))
        if tag and not created:
            self.update_existing_tag(tag, api_tag)
    if tag:
        self.ref_data_map["tags"][api_tag["ID"]] = tag
    return tag
Create or update a Tag related to a post. :param bulk_mode: If True, minimize db operations by bulk creating post objects :param api_tag: the API data for the Tag :return: the Tag object
378,729
def plot(x, y, z, ax=None, **kwargs):
    # kwargs names restored from the docstring below; the contour line color is an assumption
    if ax is None:
        ax = matplotlib.pyplot.gca()
    colors = kwargs.pop('colors', matplotlib.pyplot.cm.Reds_r)
    smooth = kwargs.pop('smooth', False)
    linewidths = kwargs.pop('linewidths', 0.3)
    contour_line_levels = kwargs.pop('contour_line_levels', [1, 2, 3])
    fineness = kwargs.pop('fineness', 0.5)
    default_color_levels = numpy.arange(0, contour_line_levels[-1] + 1, fineness)
    contour_color_levels = kwargs.pop('contour_color_levels', default_color_levels)
    rasterize_contours = kwargs.pop('rasterize_contours', False)
    lines = kwargs.pop('lines', True)
    if kwargs:
        raise TypeError('plot() got unexpected keyword arguments: %r' % kwargs)
    z = numpy.sqrt(2) * scipy.special.erfinv(1 - z)
    if smooth:
        sigma = smooth * numpy.array(z.shape) / 100.0
        z = scipy.ndimage.gaussian_filter(z, sigma=sigma, order=0)
    cbar = ax.contourf(x, y, z, cmap=colors, levels=contour_color_levels)
    if rasterize_contours:
        for c in cbar.collections:
            c.set_rasterized(True)
    for c in cbar.collections:
        c.set_edgecolor("face")
    if lines:
        ax.contour(x, y, z, colors='k', linewidths=linewidths,
                   levels=contour_line_levels)
    return cbar
r""" Plot iso-probability mass function, converted to sigmas. Parameters ---------- x, y, z : numpy arrays Same as arguments to :func:`matplotlib.pyplot.contour` ax: axes object, optional :class:`matplotlib.axes._subplots.AxesSubplot` to plot the contours onto. If unsupplied, then :func:`matplotlib.pyplot.gca()` is used to get the last axis used, or create a new one. colors: color scheme, optional :class:`matplotlib.colors.LinearSegmentedColormap` Color scheme to plot with. Recommend plotting in reverse (Default: :class:`matplotlib.pyplot.cm.Reds_r`) smooth: float, optional Percentage by which to smooth the contours. (Default: no smoothing) contour_line_levels: List[float], optional Contour lines to be plotted. (Default: [1,2]) linewidths: float, optional Thickness of contour lines. (Default: 0.3) contour_color_levels: List[float], optional Contour color levels. (Default: `numpy.arange(0, contour_line_levels[-1] + 1, fineness)`) fineness: float, optional Spacing of contour color levels. (Default: 0.1) lines: bool, optional (Default: True) rasterize_contours: bool, optional Rasterize the contours while keeping the lines, text etc in vector format. Useful for reducing file size bloat and making printing easier when you have dense contours. (Default: False) Returns ------- cbar: color bar :class:`matplotlib.contour.QuadContourSet` Colors to create a global colour bar
378,730
def working_directory(self):
    _complain_ifclosed(self.closed)
    wd = self.fs.get_working_directory()
    return wd
Get the current working directory. :rtype: str :return: current working directory
378,731
def findOrLoadRenderModel(self, pchRenderModelName):
    "Purpose: Finds a render model we've already loaded or loads a new one"
    # the cache-lookup preamble was lost in the dump; reconstructed minimally
    pRenderModel = None
    for model in self.m_vecRenderModels:
        if model.getName() == pchRenderModelName:
            pRenderModel = model
            break
    if pRenderModel is None:
        error = openvr.EVRRenderModelError()
        while True:
            error, pModel = openvr.VRRenderModels().loadRenderModel_Async(pchRenderModelName)
            if error != openvr.VRRenderModelError_Loading:
                break
            threadSleep(1)
        if error != openvr.VRRenderModelError_None:
            dprintf("Unable to load render model %s - %s\n" % (
                pchRenderModelName,
                openvr.VRRenderModels().getRenderModelErrorNameFromEnum(error)))
            return None
        while True:
            error, pTexture = openvr.VRRenderModels().loadTexture_Async(pModel.contents.diffuseTextureId)
            if error != openvr.VRRenderModelError_Loading:
                break
            threadSleep(1)
        if error != openvr.VRRenderModelError_None:
            dprintf("Unable to load render texture id:%d for render model %s\n" % (
                pModel.contents.diffuseTextureId, pchRenderModelName))
            openvr.VRRenderModels().freeRenderModel(pModel)
            return None
        pRenderModel = CGLRenderModel(pchRenderModelName)
        if not pRenderModel.bInit(pModel.contents, pTexture.contents):
            dprintf("Unable to create GL model from render model %s\n" % pchRenderModelName)
            pRenderModel = None
        else:
            self.m_vecRenderModels.append(pRenderModel)
        openvr.VRRenderModels().freeRenderModel(pModel)
        openvr.VRRenderModels().freeTexture(pTexture)
    return pRenderModel
Purpose: Finds a render model we've already loaded or loads a new one
378,732
def set_font_size(self, size):
    if self.font.font_size == size:
        pass
    else:
        self.font._set_size(size)
Convenience method for just changing font size.
378,733
def login_checking_email(pending_id, ticket, response, detail_url=''):  # default literal stripped in the dump
    return LoginCheckingEmail(pending_id, ticket, response, USER_COOKIE_NAME, detail_url)
Log user in using Passwordless service :param pending_id: PendingExternalToMainUser's id :param ticket: ticket returned from Passwordless :param response: Response object from webapp2 :param detail_url: url to check ticket and user data :return: a Command that log user in when executed
378,734
def _delete_entity(self):
    if self._is_ndb():
        _NDB_KEY(self._model, self._key_name).delete()
    else:
        entity_key = db.Key.from_path(self._model.kind(), self._key_name)
        db.delete(entity_key)
Delete entity from datastore. Attempts to delete using the key_name stored on the object, whether or not the given key is in the datastore.
378,735
def _copy_each_include_files_to_include_dir(self):
    # directory names and patterns restored from the docstring (assumptions)
    src_header_dirs = ['rpmio', 'lib', 'build', 'sign']
    with Cmd.pushd():  # pushd target stripped in the dump
        src_include_dir = os.path.abspath('include')
        for header_dir in src_header_dirs:
            if not os.path.isdir(header_dir):
                message_format = "Skip not existing header directory '{0}'"
                Log.debug(message_format.format(header_dir))
                continue
            header_files = Cmd.find(header_dir, '*.h')
            for header_file in header_files:
                pattern = '^{0}/'.format(header_dir)
                (dst_header_file, subs_num) = re.subn(pattern, '', header_file)
                if subs_num == 0:
                    message = 'Unexpected header file: {0}'.format(header_file)
                    raise ValueError(message)
                dst_header_file = os.path.abspath(
                    os.path.join(src_include_dir, 'rpm', dst_header_file)
                )
                dst_dir = os.path.dirname(dst_header_file)
                if not os.path.isdir(dst_dir):
                    Cmd.mkdir_p(dst_dir)
                shutil.copyfile(header_file, dst_header_file)
Copy include header files for each directory to include directory. Copy include header files from rpm/ rpmio/*.h lib/*.h build/*.h sign/*.h to rpm/ include/ rpm/*.h . This is a status after running "make" on actual rpm build process.
378,736
def event_gen(self, timeout_s=None, yield_nones=True, filter_predicate=None,
              terminal_events=_DEFAULT_TERMINAL_EVENTS):
    self.__last_success_return = None
    last_hit_s = time.time()
    while True:
        block_duration_s = self.__get_block_duration()
        try:
            events = self.__epoll.poll(block_duration_s)
        except IOError as e:
            if e.errno != EINTR:
                raise
            if timeout_s is not None:
                time_since_event_s = time.time() - last_hit_s
                if time_since_event_s > timeout_s:
                    break
            continue
        for fd, event_type in events:
            names = self._get_event_names(event_type)
            _LOGGER.debug("Events received from epoll: {}".format(names))
            for (header, type_names, path, filename) in self._handle_inotify_event(fd):
                last_hit_s = time.time()
                e = (header, type_names, path, filename)
                for type_name in type_names:
                    if filter_predicate is not None and \
                            filter_predicate(type_name, e) is False:
                        self.__last_success_return = (type_name, e)
                        return
                    elif type_name in terminal_events:
                        raise TerminalEventException(type_name, e)
                yield e
        if timeout_s is not None:
            time_since_event_s = time.time() - last_hit_s
            if time_since_event_s > timeout_s:
                break
        if yield_nones is True:
            yield None
Yield one event after another. If `timeout_s` is provided, we'll break when no event is received for that many seconds.
378,737
def function(self, p):
    pg = p.generator
    motion_orientation = p.orientation + pi / 2.0
    new_x = p.x + p.size * pg.x
    new_y = p.y + p.size * pg.y
    image_array = pg(xdensity=p.xdensity, ydensity=p.ydensity, bounds=p.bounds,
                     x=new_x + p.speed * p.step * np.cos(motion_orientation),
                     y=new_y + p.speed * p.step * np.sin(motion_orientation),
                     orientation=p.orientation,
                     scale=pg.scale * p.scale, offset=pg.offset + p.offset)
    return image_array
Selects and returns one of the patterns in the list.
378,738
def add_event(self, event):
    for pin_function_map in self.pin_function_maps:
        if _event_matches_pin_function_map(event, pin_function_map):
            pin_settle_time = pin_function_map.settle_time
            break
    else:
        return
    threshold_time = self.last_event_time[event.pin_num] + pin_settle_time
    if event.timestamp > threshold_time:
        self.put(event)
        self.last_event_time[event.pin_num] = event.timestamp
Adds events to the queue. Will ignore events that occur before the settle time for that pin/direction. Such events are assumed to be bouncing.
378,739
def _run(self):
    def get_next_interval():
        start_time = time.time()
        start = 0 if self.eager else 1
        for count in itertools.count(start=start):
            yield max(start_time + count * self.interval - time.time(), 0)
    interval = get_next_interval()
    sleep_time = next(interval)
    while True:
        with Timeout(sleep_time, exception=False):
            self.should_stop.wait()
            break
        self.handle_timer_tick()
        self.worker_complete.wait()
        self.worker_complete.reset()
        sleep_time = next(interval)
Runs the interval loop.
378,740
def get_bidi_paired_bracket_type_property(value, is_bytes=False):
    # negation prefix and alias-table key restored (assumptions)
    obj = unidata.ascii_bidi_paired_bracket_type if is_bytes else unidata.unicode_bidi_paired_bracket_type
    if value.startswith('^'):
        negated = value[1:]
        value = '^' + unidata.unicode_alias['bidipairedbrackettype'].get(negated, negated)
    else:
        value = unidata.unicode_alias['bidipairedbrackettype'].get(value, value)
    return obj[value]
Get `BPT` property.
378,741
def to_project_config(self, with_packages=False):
    # dict keys restored from dbt_project.yml's documented field names (assumptions)
    result = deepcopy({
        'name': self.project_name,
        'version': self.version,
        'project-root': self.project_root,
        'profile': self.profile_name,
        'source-paths': self.source_paths,
        'macro-paths': self.macro_paths,
        'data-paths': self.data_paths,
        'test-paths': self.test_paths,
        'analysis-paths': self.analysis_paths,
        'docs-paths': self.docs_paths,
        'target-path': self.target_path,
        'archive-paths': self.archive_paths,
        'clean-targets': self.clean_targets,
        'log-path': self.log_path,
        'quoting': self.quoting,
        'models': self.models,
        'on-run-start': self.on_run_start,
        'on-run-end': self.on_run_end,
        'archive': self.archive,
        'seeds': self.seeds,
        'require-dbt-version': [
            v.to_version_string() for v in self.dbt_version
        ],
    })
    if with_packages:
        result.update(self.packages.serialize())
    return result
Return a dict representation of the config that could be written to disk with `yaml.safe_dump` to get this configuration. :param with_packages bool: If True, include the serialized packages file in the root. :returns dict: The serialized profile.
378,742
def _collect_paths(element):
    output = []
    path = vectors.el_to_path_vector(element)
    root = path[0]
    params = element.params if element.params else None
    match = root.find(element.getTagName(), params)
    if len(match) == 1:
        output.append(PathCall("find", 0, [element.getTagName(), params]))
    output.extend(path_patterns.neighbours_pattern(element))
    output.extend(path_patterns.predecesors_pattern(element, root))
    index_backtrack = []
    last_index_backtrack = []
    params_backtrack = []
    last_params_backtrack = []
    for el in reversed(path):
        if not el.parent:
            continue
        tag_name = el.getTagName()
        match = el.parent.wfind(tag_name).childs
        index = match.index(el)
        index_backtrack.append(PathCall("wfind", index, [tag_name]))
        last_index_backtrack.append(PathCall("wfind", index - len(match), [tag_name]))
        if el.params:
            match = el.parent.wfind(tag_name, el.params).childs
            index = match.index(el)
            params_backtrack.append(PathCall("wfind", index, [tag_name, el.params]))
            last_params_backtrack.append(PathCall("wfind", index - len(match),
                                                  [tag_name, el.params]))
        else:
            params_backtrack.append(PathCall("wfind", index, [tag_name]))
            last_params_backtrack.append(PathCall("wfind", index - len(match), [tag_name]))
    output.extend([
        Chained(reversed(params_backtrack)),
        Chained(reversed(last_params_backtrack)),
        Chained(reversed(index_backtrack)),
        Chained(reversed(last_index_backtrack)),
    ])
    return output
Collect all possible path which leads to `element`. Function returns standard path from root element to this, reverse path, which uses negative indexes for path, also some pattern matches, like "this is element, which has neighbour with id 7" and so on. Args: element (obj): HTMLElement instance. Returns: list: List of :class:`.PathCall` and :class:`.Chained` objects.
378,743
def check_version(version: str):
    code_version = parse_version(__version__)
    given_version = parse_version(version)
    check_condition(code_version[0] == given_version[0],
                    "Given release version (%s) does not match release code version (%s)"
                    % (version, __version__))
    check_condition(code_version[1] == given_version[1],
                    "Given major version (%s) does not match major code version (%s)"
                    % (version, __version__))
Checks given version against code version and determines compatibility. Throws if versions are incompatible. :param version: Given version.
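The rule above is plain "release and major components must match"; a standalone sketch with a simple tuple-based parse_version (the real helper's return shape is an assumption):

def parse_version(version: str) -> tuple:
    # 'release.major.minor' -> ('1', '18', '3')
    return tuple(version.split('.', 2))

def check_version(version: str, code_version_str: str = '1.18.3'):
    code = parse_version(code_version_str)
    given = parse_version(version)
    if code[0] != given[0] or code[1] != given[1]:
        raise RuntimeError('Incompatible versions: %s vs %s' % (version, code_version_str))

check_version('1.18.0')   # ok: release and major components match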
378,744
def find_in_line(line):
    if not line:
        return None
    for method in [find_by_ast, find_version_by_string_lib, find_version_by_regex]:
        by = method(line)
        by = validate_string(by)
        if by:
            return by
    return None
Find a version in a line. :param line: the line of text to search :return: the version string found, or None
378,745
def add_group(data_api, data_setters, group_index):
    group_type_ind = data_api.group_type_list[group_index]
    atom_count = len(data_api.group_list[group_type_ind]["atomNameList"])
    insertion_code = data_api.ins_code_list[group_index]
    data_setters.set_group_info(data_api.group_list[group_type_ind]["groupName"],
                                data_api.group_id_list[group_index],
                                insertion_code,
                                data_api.group_list[group_type_ind]["chemCompType"],
                                atom_count,
                                data_api.num_bonds,
                                data_api.group_list[group_type_ind]["singleLetterCode"],
                                data_api.sequence_index_list[group_index],
                                data_api.sec_struct_list[group_index])
    for group_atom_ind in range(atom_count):
        add_atom_data(data_api, data_setters,
                      data_api.group_list[group_type_ind]["atomNameList"],
                      data_api.group_list[group_type_ind]["elementList"],
                      data_api.group_list[group_type_ind]["formalChargeList"],
                      group_atom_ind)
        data_api.atom_counter += 1
    add_group_bonds(data_setters,
                    data_api.group_list[group_type_ind]["bondAtomList"],
                    data_api.group_list[group_type_ind]["bondOrderList"])
    return atom_count
Add the data for a whole group. :param data_api the data api from where to get the data :param data_setters the class to push the data to :param group_index the index for this group
378,746
def require_metadata():
    "Prevent improper installs without necessary metadata. See #659"
    egg_info_dir = os.path.join(here, 'setuptools.egg-info')  # dirname restored (assumption)
    if not os.path.exists(egg_info_dir):
        msg = (
            "Cannot build setuptools without metadata. "
            "Run `bootstrap.py`."
        )
        raise RuntimeError(msg)
Prevent improper installs without necessary metadata. See #659
378,747
def revert(self):
    index = self.get_stack_index()
    finfo = self.data[index]
    filename = finfo.filename
    if finfo.editor.document().isModified():
        self.msgbox = QMessageBox(
            QMessageBox.Warning,
            self.title,
            _("All changes to <b>%s</b> will be lost."
              "<br>Do you want to revert file from disk?") % osp.basename(filename),
            QMessageBox.Yes | QMessageBox.No,
            self)
        answer = self.msgbox.exec_()
        if answer != QMessageBox.Yes:
            return
    self.reload(index)
Revert file from disk
378,748
def magic_mprun(self, parameter_s=''):
    # option strings and messages were stripped in the dump; restored
    # best-effort from memory_profiler's published source
    try:
        from StringIO import StringIO
    except ImportError:
        from io import StringIO
    from distutils.version import LooseVersion
    import IPython
    ipython_version = LooseVersion(IPython.__version__)
    if ipython_version < '0.11':
        from IPython.genutils import page
        from IPython.ipstruct import Struct
        from IPython.ipapi import UsageError
    else:
        from IPython.core.page import page
        from IPython.utils.ipstruct import Struct
        from IPython.core.error import UsageError
    opts_def = Struct(T=[''], f=[])
    parameter_s = parameter_s.replace('"', r'\"').replace("'", r"\'")
    opts, arg_str = self.parse_options(parameter_s, 'rf:T:c', list_all=True)
    opts.merge(opts_def)
    global_ns = self.shell.user_global_ns
    local_ns = self.shell.user_ns
    funcs = []
    for name in opts.f:
        try:
            funcs.append(eval(name, global_ns, local_ns))
        except Exception as e:
            raise UsageError('Could not find function %r.\n%s: %s'
                             % (name, e.__class__.__name__, e))
    include_children = 'c' in opts
    profile = LineProfiler(include_children=include_children)
    for func in funcs:
        profile(func)
    # make the profiler available as the @profile builtin while running
    try:
        import builtins
    except ImportError:
        import __builtin__ as builtins
    if 'profile' in builtins.__dict__:
        had_profile = True
        old_profile = builtins.__dict__['profile']
    else:
        had_profile = False
        old_profile = None
    builtins.__dict__['profile'] = profile
    try:
        try:
            profile.runctx(arg_str, global_ns, local_ns)
            message = ''
        except SystemExit:
            message = "*** SystemExit exception caught in code being profiled."
        except KeyboardInterrupt:
            message = ("*** KeyboardInterrupt exception caught in code being "
                       "profiled.")
    finally:
        if had_profile:
            builtins.__dict__['profile'] = old_profile
    stdout_trap = StringIO()
    show_results(profile, stdout_trap)
    output = stdout_trap.getvalue()
    output = output.rstrip()
    if ipython_version < '0.11':
        page(output, screen_lines=self.shell.rc.screen_length)
    else:
        page(output)
    print(message,)
    text_file = opts.T[0]
    if text_file:
        with open(text_file, 'w') as pfile:
            pfile.write(output)
        print('\n*** Profile printout saved to text file %s. %s' % (text_file, message))
    return_value = None
    if 'r' in opts:
        return_value = profile
    return return_value
Execute a statement under the line-by-line memory profiler from the memory_profiler module. Usage: %mprun -f func1 -f func2 <statement> The given statement (which doesn't require quote marks) is run via the LineProfiler. Profiling is enabled for the functions specified by the -f options. The statistics will be shown side-by-side with the code through the pager once the statement has completed. Options: -f <function>: LineProfiler only profiles functions and methods it is told to profile. This option tells the profiler about these functions. Multiple -f options may be used. The argument may be any expression that gives a Python function or method object. However, one must be careful to avoid spaces that may confuse the option parser. Additionally, functions defined in the interpreter at the In[] prompt or via %run currently cannot be displayed. Write these functions out to a separate file and import them. One or more -f options are required to get any useful results. -T <filename>: dump the text-formatted statistics with the code side-by-side out to a text file. -r: return the LineProfiler object after it has completed profiling. -c: If present, add the memory usage of any children process to the report.
378,749
def try_read(self, address, size):
    value = 0x0
    for i in range(0, size):
        addr = address + i
        if addr in self._memory:
            value |= self._read_byte(addr) << (i * 8)
        else:
            return False, None
    return True, value
Try to read memory content at specified address. If any location was not written before, it returns a tuple (False, None). Otherwise, it returns (True, memory content).
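The little-endian byte assembly above can be demonstrated with a plain dict standing in for the sparse memory store:

def try_read(memory: dict, address: int, size: int):
    # Assemble `size` bytes little-endian; fail if any byte is unmapped.
    value = 0
    for i in range(size):
        if address + i not in memory:
            return False, None
        value |= memory[address + i] << (i * 8)
    return True, value

mem = {0x1000: 0x78, 0x1001: 0x56, 0x1002: 0x34, 0x1003: 0x12}
print(try_read(mem, 0x1000, 4))   # (True, 305419896) == (True, 0x12345678)
print(try_read(mem, 0x1002, 4))   # (False, None) -- 0x1004 is unmapped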
378,750
def release(self, *args, **kwargs):
    if not self.field.lockable:
        return
    if self.sub_lock_mode:
        return
    super(FieldLock, self).release(*args, **kwargs)
    self.already_locked_by_model = self.sub_lock_mode = False
Really release the lock only if it's not a sub-lock. Then save the sub-lock status and mark the model as unlocked.
378,751
def create(self, company, timezone, country):
    body = {
        "CompanyName": company,
        "TimeZone": timezone,
        "Country": country}
    response = self._post("/clients.json", json.dumps(body))
    self.client_id = json_to_py(response)
    return self.client_id
Creates a client.
378,752
def generate_private_key(self):
    random_string = base64.b64encode(os.urandom(4096)).decode()
    binary_data = bytes(random_string, 'utf-8')
    hash_object = hashlib.sha256(binary_data)
    message_digest_bin = hash_object.digest()
    message_digest_hex = binascii.hexlify(message_digest_bin)
    return message_digest_hex
Generates a private key based on the password. SHA-256 is a member of the SHA-2 cryptographic hash functions designed by the NSA. SHA stands for Secure Hash Algorithm. The password is converted to bytes and hashed with SHA-256. The binary output is converted to a hex representation. Args: data (str): The data to be hashed with SHA-256. Returns: bytes: The hexadecimal representation of the hashed binary data.
378,753
def remove_segments(self, segments_to_remove):
    v_ind = self.vertex_indices_in_segments(segments_to_remove)
    self.segm = {name: faces for name, faces in self.segm.iteritems()
                 if name not in segments_to_remove}
    self.remove_vertices(v_ind)
Remove the faces and vertices for given segments, keeping all others. Args: segments_to_remove: a list of segnments whose vertices will be removed
378,754
def logical_cores(self):
    try:
        return self._logical_cores()
    except Exception as e:
        from rez.utils.logging_ import print_error
        print_error("Error detecting logical core count, defaulting to 1: %s" % str(e))
        return 1
Return the number of cpu cores as reported to the os. May be different from physical_cores if, ie, intel's hyperthreading is enabled.
378,755
def _get_rename_function(mapper):
    if isinstance(mapper, (abc.Mapping, ABCSeries)):
        def f(x):
            if x in mapper:
                return mapper[x]
            else:
                return x
    else:
        f = mapper
    return f
Returns a function that will map names/labels, dependent if mapper is a dict, Series or just a function.
378,756
def get_block_info(self, block):
    url = '{0}/block/{1}'.format(self._url, block)  # path template restored (assumption)
    return self.make_request(url)
Args: block: block number (eg: 223212) block hash (eg: 0000000000000000210b10d620600dc1cc2380bb58eb2408f9767eb792ed31fa) word "last" - this will always return the latest block word "first" - this will always return the first block Returns: basic block data
378,757
def get_col_sep(self):
    if self.tab_btn.isChecked():
        return u"\t"
    elif self.ws_btn.isChecked():
        return None
    return to_text_string(self.line_edt.text())
Return the column separator
378,758
def set_value(self, value):
    if value:
        self.setCheckState(Qt.Checked)
    else:
        self.setCheckState(Qt.Unchecked)
Set value of the checkbox. Parameters ---------- value : bool value for the checkbox
378,759
def parse_DID(did, name_type=None):
    # the regex and string literals were restored from Blockstack's DID format (assumptions)
    did_pattern = '^did:stack:v0:({}{{25,35}})-([0-9]+)$'.format(OP_BASE58CHECK_CLASS)
    m = re.match(did_pattern, did)
    assert m, 'Invalid DID: {}'.format(did)
    original_address = str(m.groups()[0])
    name_index = int(m.groups()[1])
    vb = keylib.b58check.b58check_version_byte(original_address)
    name_type = None
    if vb in [SUBDOMAIN_ADDRESS_VERSION_BYTE, SUBDOMAIN_ADDRESS_MULTISIG_VERSION_BYTE]:
        name_type = 'subdomain'
        if vb == SUBDOMAIN_ADDRESS_VERSION_BYTE:
            vb = bitcoin_blockchain.version_byte
        else:
            vb = bitcoin_blockchain.multisig_version_byte
        original_address = virtualchain.address_reencode(original_address, version_byte=vb)
    else:
        name_type = 'name'
        original_address = virtualchain.address_reencode(original_address)
    return {'address': original_address, 'index': name_index, 'name_type': name_type}
Given a DID string, parse it into {'address': ..., 'index': ..., 'name_type': ...} Raise on invalid DID
378,760
def _handle_template(self, token):
    params = []
    default = 1
    self._push()
    while self._tokens:
        token = self._tokens.pop()
        if isinstance(token, tokens.TemplateParamSeparator):
            if not params:
                name = self._pop()
            param = self._handle_parameter(default)
            params.append(param)
            if not param.showkey:
                default += 1
        elif isinstance(token, tokens.TemplateClose):
            if not params:
                name = self._pop()
            return Template(name, params)
        else:
            self._write(self._handle_token(token))
    raise ParserError("_handle_template() missed a close token")
Handle a case where a template is at the head of the tokens.
378,761
def setNetworkName(self, networkName='GRL'):
    # default value, CLI commands and 'Done' sentinel restored from the
    # OpenThread THCI reference implementation (assumptions)
    print '%s call setNetworkName' % self.port
    print networkName
    try:
        cmd = 'networkname %s' % networkName
        datasetCmd = 'dataset networkname %s' % networkName
        self.hasActiveDatasetToCommit = True
        return self.__sendCommand(cmd)[0] == 'Done' and \
            self.__sendCommand(datasetCmd)[0] == 'Done'
    except Exception, e:
        ModuleHelper.WriteIntoDebugLogger("setNetworkName() Error: " + str(e))
set Thread Network name Args: networkName: the networkname string to be set Returns: True: successful to set the Thread Networkname False: fail to set the Thread Networkname
378,762
def setAttributeNS(self, namespaceURI, localName, value):
    prefix = None
    if namespaceURI:
        try:
            prefix = self.getPrefix(namespaceURI)
        except KeyError, ex:
            prefix = self.setNamespaceAttribute(prefix, namespaceURI)
    qualifiedName = localName
    if prefix:
        qualifiedName = '%s:%s' % (prefix, localName)
    self._setAttributeNS(namespaceURI, qualifiedName, value)
Keyword arguments: namespaceURI -- namespace of attribute to create, None is for attributes in no namespace. localName -- local name of new attribute value -- value of new attribute
378,763
def get_ecommerce_client(url_postfix='', site_code=None):
    # configuration key names restored (assumptions)
    ecommerce_api_root = get_configuration('ECOMMERCE_API_ROOT', site_code=site_code)
    signing_key = get_configuration('JWT_SECRET_KEY', site_code=site_code)
    issuer = get_configuration('JWT_ISSUER', site_code=site_code)
    service_username = get_configuration('ECOMMERCE_SERVICE_USERNAME', site_code=site_code)
    return EdxRestApiClient(
        ecommerce_api_root + url_postfix,
        signing_key=signing_key,
        issuer=issuer,
        username=service_username)
Get client for fetching data from ecommerce API. Arguments: site_code (str): (Optional) The SITE_OVERRIDES key to inspect for site-specific values url_postfix (str): (Optional) The URL postfix value to append to the ECOMMERCE_API_ROOT value. Returns: EdxRestApiClient object
378,764
def namespace_splitter(self, value):
    # assertion messages restored from the library's usual pattern (assumptions)
    if value is not None:
        assert type(value) is unicode, \
            "'{0}' attribute: '{1}' type is not 'unicode'!".format("namespace_splitter", value)
        assert len(value) == 1, \
            "'{0}' attribute: '{1}' has multiples characters!".format("namespace_splitter", value)
        assert not re.search(r"\w", value), \
            "'{0}' attribute: '{1}' is an alphanumeric character!".format("namespace_splitter", value)
    self.__namespace_splitter = value
Setter for **self.__namespace_splitter** attribute. :param value: Attribute value. :type value: unicode
378,765
def capture(returns, factor_returns, period=DAILY):
    return (annual_return(returns, period=period) /
            annual_return(factor_returns, period=period))
Compute capture ratio. Parameters ---------- returns : pd.Series or np.ndarray Returns of the strategy, noncumulative. - See full explanation in :func:`~empyrical.stats.cum_returns`. factor_returns : pd.Series or np.ndarray Noncumulative returns of the factor to which beta is computed. Usually a benchmark such as the market. - This is in the same style as returns. period : str, optional Defines the periodicity of the 'returns' data for purposes of annualizing. Value ignored if `annualization` parameter is specified. Defaults are:: 'monthly':12 'weekly': 52 'daily': 252 Returns ------- capture_ratio : float Note ---- See http://www.investopedia.com/terms/u/up-market-capture-ratio.asp for details.
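Capture is just a ratio of annualized returns; a standalone numeric check, assuming the usual 252-day compound annualization that the DAILY period implies:

import numpy as np

def annual_return(returns, periods_per_year=252):
    total = np.prod(1.0 + np.asarray(returns))            # cumulative growth factor
    return total ** (periods_per_year / len(returns)) - 1.0

def capture(returns, factor_returns):
    return annual_return(returns) / annual_return(factor_returns)

rng = np.random.default_rng(0)
bench = rng.normal(0.0004, 0.01, 252)
strat = 0.8 * bench                # strategy takes 80% of each benchmark move
print(capture(strat, bench))       # roughly 0.8, modulo compounding effects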
378,766
def _add_thread(self, aThread):
    dwThreadId = aThread.dwThreadId
    aThread.set_process(self)
    self.__threadDict[dwThreadId] = aThread
Private method to add a thread object to the snapshot. @type aThread: L{Thread} @param aThread: Thread object.
378,767
def backprop(self, input_data, targets, cache=None):
    if cache is not None:
        activations = cache
    else:
        activations = self.feed_forward(input_data, prediction=False)
    if activations.shape != targets.shape:
        # error message restored (assumption)
        raise ValueError('Activations shape %s does not match targets shape %s'
                         % (activations.shape, targets.shape))
    delta = substract_matrix(activations, targets)
    nan_to_zeros(delta, delta)
    df_W = linalg.dot(input_data, delta, transa='T')
    df_b = matrix_sum_out_axis(delta, 0)
    df_input = linalg.dot(delta, self.W, transb='T')
    if self.l1_penalty_weight:
        df_W += self.l1_penalty_weight * sign(self.W)
    if self.l2_penalty_weight:
        df_W += self.l2_penalty_weight * self.W
    return (df_W, df_b), df_input
Backpropagate through the logistic layer. **Parameters:** input_data : ``GPUArray`` Inpute data to compute activations for. targets : ``GPUArray`` The target values of the units. cache : list of ``GPUArray`` Cache obtained from forward pass. If the cache is provided, then the activations are not recalculated. **Returns:** gradients : tuple of ``GPUArray`` Gradients with respect to the weights and biases in the form ``(df_weights, df_biases)``. df_input : ``GPUArray`` Gradients with respect to the input.
378,768
def set_edge_label(self, edge, label):
    self.set_edge_properties(edge, label=label)
    if not self.DIRECTED:
        self.set_edge_properties((edge[1], edge[0]), label=label)
Set the label of an edge. @type edge: edge @param edge: One edge. @type label: string @param label: Edge label.
378,769
def shell(self):
    r = self.local_renderer
    if '@' in self.genv.host_string:  # separator restored (assumption)
        r.env.shell_host_string = self.genv.host_string
    else:
        r.env.shell_host_string = '{user}@{host_string}'  # template restored (assumption)
    r.env.shell_default_dir = self.genv.shell_default_dir_template
    r.env.shell_interactive_djshell_str = self.genv.interactive_shell_template
    r.run_or_local()  # the command template argument was stripped in the dump
Opens a Django focussed Python shell. Essentially the equivalent of running `manage.py shell`.
378,770
def extract_tar(url, target_dir, additional_compression="", remove_common_prefix=False,
                overwrite=False):
    try:
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        tf = tarfile.TarFile.open(fileobj=download_to_bytesio(url))
        if not os.path.exists(target_dir):
            os.makedirs(target_dir)
        common_prefix = os.path.commonprefix(tf.getnames())
        if not common_prefix.endswith('/'):
            common_prefix += "/"
        for tfile in tf.getmembers():
            if remove_common_prefix:
                tfile.name = tfile.name.replace(common_prefix, "", 1)
            if tfile.name != "":
                target_path = os.path.join(target_dir, tfile.name)
                if target_path != target_dir and os.path.exists(target_path):
                    if overwrite:
                        remove_path(target_path)
                    else:
                        continue
                tf.extract(tfile, target_dir)
    except OSError:
        e = sys.exc_info()[1]
        raise ExtractException(str(e))
    except IOError:
        e = sys.exc_info()[1]
        raise ExtractException(str(e))
extract a targz and install to the target directory
378,771
def find_bind_module(name, verbose=False):
    bindnames = get_bind_modules(verbose=verbose)
    bindfile = bindnames.get(name)
    if bindfile:
        return bindfile
    if not verbose:
        return None
    fuzzy_matches = get_close_pkgs(name, bindnames.keys())
    if fuzzy_matches:
        rows = [(x[0], bindnames[x[0]]) for x in fuzzy_matches]
        print "'%s' not found. Close matches:" % name
        print '\n'.join(columnise(rows))
    else:
        print "No matches."
    return None
Find the bind module matching the given name. Args: name (str): Name of package to find bind module for. verbose (bool): If True, print extra output. Returns: str: Filepath to bind module .py file, or None if not found.
378,772
def train(cls, new_data, old=None):
    if not len(new_data):
        return old
    if not hasattr(new_data, 'dtype'):
        new_data = np.asarray(new_data)
    if new_data.dtype.kind not in CONTINUOUS_KINDS:
        raise TypeError("Discrete value supplied to continuous scale")
    if old is not None:
        new_data = np.hstack([new_data, old])
    return min_max(new_data, na_rm=True, finite=True)
Train a continuous scale Parameters ---------- new_data : array_like New values old : array_like Old range. Most likely a tuple of length 2. Returns ------- out : tuple Limits(range) of the scale
378,773
def setPotential(self, columnIndex, potential):
    assert columnIndex < self._numColumns
    potentialSparse = numpy.where(potential > 0)[0]
    if len(potentialSparse) < self._stimulusThreshold:
        # the leading sentence of the message was stripped; restored (assumption)
        raise Exception("Potential pool is smaller than the stimulus threshold. "
                        "This is likely due to a "
                        "value of stimulusThreshold that is too large relative "
                        "to the input size.")
    self._potentialPools.replace(columnIndex, potentialSparse)
Sets the potential mapping for a given column. ``potential`` size must match the number of inputs, and must be greater than ``stimulusThreshold``. :param columnIndex: (int) column index to set potential for. :param potential: (list) value to set.
378,774
def sum(cls, iresults):
    res = object.__new__(cls)
    res.received = []
    res.sent = 0
    for iresult in iresults:
        res.received.extend(iresult.received)
        res.sent += iresult.sent
        name = iresult.name.split(' ', 1)[0]  # separator restored (assumption)
        if hasattr(res, 'name'):
            assert res.name.split(' ', 1)[0] == name, (res.name, name)
        else:
            res.name = iresult.name.split()[0]
    return res
Sum the data transfer information of a set of results
378,775
def clean_all(self):
    self.log.info('starting the ``clean_all`` method')  # log strings restored (assumptions)
    if self.fullUpdate:
        recent = False
    else:
        recent = True
    self.import_new_atlas_pointings(recent)
    self._run_bookkeeping_sql_scripts()
    self.log.info('completed the ``clean_all`` method')
    return bookkeeper
*clean and sync all the bookkeeping tables*

**Return:**
    - ``bookkeeper``

**Usage:**

.. code-block:: python

    from rockAtlas.bookkeeping import bookkeeper
    bk = bookkeeper(
        log=log,
        settings=settings,
        fullUpdate=False
    )
    bk.clean_all()
378,776
def _debug_off():
    if _os.path.exists(__debugflag__):
        _os.remove(__debugflag__)
    __loglevel__ = "ERROR"
    _LOGGER.info("debugging turned off")
    _set_debug_dict(__loglevel__)
turns off debugging by removing hidden tmp file
378,777
def federation_payment(self, fed_address, amount, asset_code='XLM', asset_issuer=None,
                       source=None, allow_http=False):
    # literals restored from py-stellar-base's published source (assumptions)
    fed_info = federation(address_or_id=fed_address, fed_type='name',
                          allow_http=allow_http)
    if not fed_info or not fed_info.get('account_id'):
        raise FederationError('Cannot determine Stellar Address to Account ID '
                              'translation via Federation server.')
    self.append_payment_op(fed_info['account_id'], amount, asset_code,
                           asset_issuer, source)
    memo_type = fed_info.get('memo_type')
    if memo_type is not None and memo_type in ('text', 'id', 'hash'):
        getattr(self, 'add_' + memo_type.lower() + '_memo')(fed_info['memo'])
Append a :class:`Payment <stellar_base.operation.Payment>` operation to the list of operations using federation on the destination address. Translates the destination stellar address to an account ID via :func:`federation <stellar_base.federation.federation>`, before creating a new payment operation via :meth:`append_payment_op`. :param str fed_address: A Stellar Address that needs to be translated into a valid account ID via federation. :param str amount: The amount of the currency to send in the payment. :param str asset_code: The asset code for the asset to send. :param str asset_issuer: The address of the issuer of the asset. :param str source: The source address of the payment. :param bool allow_http: When set to `True`, connections to insecure http protocol federation servers will be allowed. Must be set to `False` in production. Default: `False`. :return: This builder instance.
378,778
def getGraphFieldList(self, graph_name):
    graph = self._getGraph(graph_name, True)
    return graph.getFieldList()
Returns list of names of fields for graph with name graph_name. @param graph_name: Graph Name @return: List of field names for graph.
378,779
def create_for_object_attributes(item_type, faulty_attribute_name: str, hint):
    return TypeInformationRequiredError(
        "Cannot create instances of type {t}: constructor attribute '{a}' has an"
        " invalid PEP484 type hint: {h}.".format(t=str(item_type),
                                                 a=faulty_attribute_name, h=hint))
Helper method for constructor attributes :param item_type: :return:
378,780
def extractPrintSaveIntermittens():
    global g_summary_dict_intermittents
    localtz = time.tzname[0]
    for ind in range(len(g_summary_dict_all["TestName"])):
        if g_summary_dict_all["TestInfo"][ind]["FailureCount"] >= g_threshold_failure:
            addFailedTests(g_summary_dict_intermittents, g_summary_dict_all, ind)
    if len(g_summary_dict_intermittents["TestName"]) > 0:
        json.dump(g_summary_dict_intermittents, open(g_summary_dict_name, 'w'))
        with open(g_summary_csv_filename, 'w') as summaryFile:
            for ind in range(len(g_summary_dict_intermittents["TestName"])):
                testName = g_summary_dict_intermittents["TestName"][ind]
                numberFailure = g_summary_dict_intermittents["TestInfo"][ind]["FailureCount"]
                firstFailedTS = parser.parse(
                    time.ctime(min(g_summary_dict_intermittents["TestInfo"][ind]["Timestamp"]))
                    + ' ' + localtz)
                firstFailedStr = firstFailedTS.strftime("%a %b %d %H:%M:%S %Y %Z")
                recentFail = parser.parse(
                    time.ctime(max(g_summary_dict_intermittents["TestInfo"][ind]["Timestamp"]))
                    + ' ' + localtz)
                recentFailStr = recentFail.strftime("%a %b %d %H:%M:%S %Y %Z")
                eachTest = "{0}, {1}, {2}, {3}\n".format(
                    testName, recentFailStr, numberFailure,
                    g_summary_dict_intermittents["TestInfo"][ind]["TestCategory"][0])
                summaryFile.write(eachTest)
                print("Intermittent: {0}, Last failed: {1}, Failed {2} times since "
                      "{3}".format(testName, recentFailStr, numberFailure, firstFailedStr))
This function will print out the intermittents onto the screen for casual viewing. It will also print out where the giant summary dictionary is going to be stored. :return: None
378,781
def set_window_override_redirect(self, window, override_redirect):
    _libxdo.xdo_set_window_override_redirect(self._xdo, window, override_redirect)
Set the override_redirect value for a window. This generally means whether or not a window manager will manage this window. If you set it to 1, the window manager will usually not draw borders on the window, etc. If you set it to 0, the window manager will see it like a normal application window.
378,782
def setAutoRaise(self, state):
    self._autoRaise = state
    self.setMouseTracking(state)
    try:
        self.lineEdit().setVisible(not state)
    except AttributeError:
        pass
Sets whether or not this combo box should automatically raise up. :param state | <bool>
378,783
def dfs(args=None, properties=None, hadoop_conf_dir=None):
    return run_class("org.apache.hadoop.fs.FsShell", args, properties,
                     hadoop_conf_dir=hadoop_conf_dir, keep_streams=True)
Run the Hadoop file system shell. All arguments are passed to :func:`run_class`.
378,784
def make_compare(key, value, obj):
    "Map a key name to a specific comparison function"
    # separator and default comparison name restored (assumptions)
    if '__' not in key:
        key, comp = key, 'exact'
    else:
        key, comp = key.rsplit('__', 1)
    if hasattr(Compare, comp):
        return getattr(Compare, comp)(key, value, obj)
    raise AttributeError("No comparison '%s'" % comp)
Map a key name to a specific comparison function
378,785
def generate(self, *args, **kwargs):
    vars = dict(*args, **kwargs)
    try:
        for event in self.root_render_func(self.new_context(vars)):
            yield event
    except:
        exc_info = sys.exc_info()
    else:
        return
    yield self.environment.handle_exception(exc_info, True)
For very large templates it can be useful to not render the whole template at once but evaluate each statement after another and yield piece for piece. This method basically does exactly that and returns a generator that yields one item after another as unicode strings. It accepts the same arguments as :meth:`render`.
378,786
def _create_id(self):
    cursor = self._db.cursor()
    cursor.execute('INSERT INTO ids DEFAULT VALUES')  # SQL stripped in the dump; placeholder statement
    return cursor.lastrowid
Returns a freshly created DB-wide unique ID.
378,787
def xpathRegisterVariable(self, name, ns_uri, value):
    ret = libxml2mod.xmlXPathRegisterVariable(self._o, name, ns_uri, value)
    return ret
Register a variable with the XPath context
378,788
def format_number(number):
    char_list = list(str(number))
    length = len(char_list)
    if length <= 3:
        return number
    result = ''
    if length % 3 != 0:
        while len(char_list) % 3 != 0:
            c = char_list[0]
            result += c
            char_list.remove(c)
        result += ','
    i = 0
    while len(char_list) > 0:
        c = char_list[0]
        result += c
        char_list.remove(c)
        i += 1
        if i % 3 == 0:
            result += ','
    return result[0:-1] if result[-1] == ',' else result
>>> format_number(1)
1
>>> format_number(22)
22
>>> format_number(333)
333
>>> format_number(4444)
'4,444'
>>> format_number(55555)
'55,555'
>>> format_number(666666)
'666,666'
>>> format_number(7777777)
'7,777,777'
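For comparison, Python's built-in thousands separator produces the same grouping in one call:

print('{:,}'.format(7777777))  # 7,777,777
print(format(55555, ','))      # 55,555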
378,789
def image_mime_type(data):
    ...  # the function body was lost in the dump
Return the MIME type of the image data (a bytestring).
378,790
def _clean_intenum(obj):
    if isinstance(obj, dict):
        for key, value in obj.items():
            if isinstance(value, IntEnum):
                obj[key] = value.value
            elif isinstance(value, (dict, list)):
                obj[key] = _clean_intenum(value)
    elif isinstance(obj, list):
        for i, value in enumerate(obj):
            if isinstance(value, IntEnum):
                obj[i] = value.value
            elif isinstance(value, (dict, list)):
                obj[i] = _clean_intenum(value)
    return obj
Remove all IntEnum classes from a map.
378,791
def delete_record_set(self, record_set):
    if not isinstance(record_set, ResourceRecordSet):
        raise ValueError("Pass a ResourceRecordSet")
    self._deletions += (record_set,)
Append a record set to the 'deletions' for the change set. :type record_set: :class:`google.cloud.dns.resource_record_set.ResourceRecordSet` :param record_set: the record set to append. :raises: ``ValueError`` if ``record_set`` is not of the required type.
378,792
def read(self):
    with open(self.path) as f:
        d = f.read()
    return d
Read and return the contents of the file.
378,793
def get_ref(self):
    _id = self.id
    if _id is None:
        return None
    else:
        return DBRef(self.collection, _id)
Returns a `DBRef` for this object or ``None``.
378,794
def set_object_metadata(self, container, obj, metadata, clear=False,
                        extra_info=None, prefix=None):
    return container.set_object_metadata(obj, metadata, clear=clear, prefix=prefix)
Accepts a dictionary of metadata key/value pairs and updates the specified object metadata with them. If 'clear' is True, any existing metadata is deleted and only the passed metadata is retained. Otherwise, the values passed here update the object's metadata. 'extra_info; is an optional dictionary which will be populated with 'status', 'reason', and 'headers' keys from the underlying swiftclient call. By default, the standard object metadata prefix ('X-Object-Meta-') is prepended to the header name if it isn't present. For non-standard headers, you must include a non-None prefix, such as an empty string.
378,795
def save_object(self, obj):
    # filename template, log message and JSON separators restored (assumptions)
    obj.pre_save(self.jurisdiction.jurisdiction_id)
    filename = '{0}_{1}.json'.format(obj._type, obj._id).replace('/', '-')
    self.info('save %s %s as %s', obj._type, obj, filename)
    self.debug(json.dumps(OrderedDict(sorted(obj.as_dict().items())),
                          cls=utils.JSONEncoderPlus, indent=4, separators=(',', ': ')))
    self.output_names[obj._type].add(filename)
    with open(os.path.join(self.datadir, filename), 'w') as f:
        json.dump(obj.as_dict(), f, cls=utils.JSONEncoderPlus)
    try:
        obj.validate()
    except ValueError as ve:
        if self.strict_validation:
            raise ve
        else:
            self.warning(ve)
    for obj in obj._related:
        self.save_object(obj)
Save object to disk as JSON. Generally shouldn't be called directly.
378,796
def ocsp_urls(self):
    # field names and URL schemes restored from the X.509 AIA structure (assumptions)
    if not self.authority_information_access_value:
        return []
    output = []
    for entry in self.authority_information_access_value:
        if entry['access_method'].native == 'ocsp':
            location = entry['access_location']
            if location.name != 'uniform_resource_identifier':
                continue
            url = location.native
            if url.lower().startswith(('http://', 'https://', 'ldap://', 'ldaps://')):
                output.append(url)
    return output
:return: A list of zero or more unicode strings of the OCSP URLs for this cert
378,797
def translate_labels(val):
    if not isinstance(val, dict):
        if not isinstance(val, list):
            val = split(val)
        new_val = {}
        for item in val:
            if isinstance(item, dict):
                if len(item) != 1:
                    raise SaltInvocationError('Invalid label(s)')  # message restored (assumption)
                key = next(iter(item))
                val = item[key]
            else:
                try:
                    key, val = split(item, '=', 1)
                except ValueError:
                    key = item
                    val = ''
            if not isinstance(key, six.string_types):
                key = six.text_type(key)
            if not isinstance(val, six.string_types):
                val = six.text_type(val)
            new_val[key] = val
        val = new_val
    return val
Can either be a list of label names, or a list of name=value pairs. The API can accept either a list of label names or a dictionary mapping names to values, so the value we translate will be different depending on the input.
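A quick standalone approximation of that normalization, to make the accepted shapes concrete (this mirrors, but is not, the Salt helper above):

def translate_labels_demo(val):
    # accepts a comma-separated string, a list of 'name' / 'name=value'
    # strings, or single-entry dicts, and returns a name -> value mapping
    if isinstance(val, str):
        val = [v.strip() for v in val.split(',')]
    out = {}
    for item in val:
        if isinstance(item, dict):
            out.update({str(k): str(v) for k, v in item.items()})
        else:
            key, _, v = str(item).partition('=')
            out[key] = v
    return out

print(translate_labels_demo('com.example.env=prod,com.example.tier'))
# {'com.example.env': 'prod', 'com.example.tier': ''}
print(translate_labels_demo([{'owner': 'ops'}, 'region=us-east-1']))
# {'owner': 'ops', 'region': 'us-east-1'}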
378,798
def contains_field_list(self, path, name):
    try:
        self.get_field_list(path, name)
        return True
    except KeyError:
        return False
Returns True if a multi-valued field exists at the specified path, otherwise False. :param path: str or Path instance :param name: :type name: str :return: :raises ValueError: A component of path is a field name. :raises TypeError: The field name is a component of a path.
378,799
def is_user_id_available(self, user_id, note=None, loglevel=logging.DEBUG):
    shutit = self.shutit
    shutit.handle_note(note)
    # shell command and match pattern restored (assumptions)
    self.send(ShutItSendSpec(self,
                             send='cut -d: -f3 /etc/passwd | grep -w ^' + user_id + '$ | wc -l',
                             expect=self.default_expect,
                             echo=False,
                             loglevel=loglevel,
                             ignore_background=True))
    shutit.handle_note_after(note=note)
    if shutit.match_string(self.pexpect_child.before, '^([0-9]+)$') == '1':
        return False
    return True
Determine whether the specified user_id available. @param user_id: User id to be checked. @param note: See send() @type user_id: integer @rtype: boolean @return: True is the specified user id is not used yet, False if it's already been assigned to a user.
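Outside a ShutIt session, the same check can be done locally on POSIX systems with the standard pwd module, which raises KeyError for an unassigned uid:

import pwd

def is_user_id_available(user_id: int) -> bool:
    # pwd.getpwuid raises KeyError when no account owns this uid
    try:
        pwd.getpwuid(user_id)
        return False
    except KeyError:
        return True

print(is_user_id_available(0))      # False -- root
print(is_user_id_available(54321))  # likely True on most systems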