def queryEx(self, viewcls, *args, **kwargs): kwargs['itercls'] = viewcls o = super(AsyncBucket, self).query(*args, **kwargs) if not self.connected: self.connect().addCallback(lambda x: o.start()) else: o.start() return o
Query a view, with the ``viewcls`` instance receiving events of the query as they arrive. :param type viewcls: A class (derived from :class:`AsyncViewBase`) to instantiate Other arguments are passed to the standard `query` method. This functions exactly like the :meth:`~couchbase.asynchronous.AsyncBucket.query` method, except it automatically schedules operations if the connection has not yet been negotiated.
def import_name(mod_name): try: mod_obj_old = sys.modules[mod_name] except KeyError: mod_obj_old = None if mod_obj_old is not None: return mod_obj_old __import__(mod_name) mod_obj = sys.modules[mod_name] return mod_obj
Import a module by module name. @param mod_name: module name.
def login(self, username, *, token=None):
    self._username = username
    self._oauth(username, token=token)
    return self.is_authenticated
Log in to Google Music. Parameters: username (str, Optional): Your Google Music username. Used for keeping stored OAuth tokens for multiple accounts separate. device_id (str, Optional): A mobile device ID or music manager uploader ID. Default: MAC address is used. token (dict, Optional): An OAuth token compatible with ``requests-oauthlib``. Returns: bool: ``True`` if successfully authenticated, ``False`` if not.
def move(self, target, pos=None): if self.outline != target.outline: raise IntegrityError('Elements must be from the same outline!') tree_manipulation.send( sender=self.__class__, instance=self, action='move', target_node_type=None, target_node=target, pos=pos ) return super().move(target, pos)
An override of the treebeard api in order to send a signal in advance.
def createmergerequest(self, project_id, sourcebranch, targetbranch, title, target_project_id=None, assignee_id=None): data = { 'source_branch': sourcebranch, 'target_branch': targetbranch, 'title': title, 'assignee_id': assignee_id, 'target_project_id': target_project_id } request = requests.post( '{0}/{1}/merge_requests'.format(self.projects_url, project_id), data=data, headers=self.headers, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout) if request.status_code == 201: return request.json() else: return False
Create a new merge request. :param project_id: ID of the project originating the merge request :param sourcebranch: name of the branch to merge from :param targetbranch: name of the branch to merge to :param title: Title of the merge request :param target_project_id: ID of the target project (defaults to the source project) :param assignee_id: Assignee user ID :return: dict of the new merge request, or False if creation failed
def fopenat_rw(base_fd, path): return os.fdopen(openat(base_fd, path, os.O_RDWR), 'rb+')
Opens path relative to base_fd in read-write mode via openat, then wraps the descriptor with fdopen and returns the resulting file object.
def db_create(name, character_set=None, collate=None, **connection_args): if db_exists(name, **connection_args): log.info('DB \'%s\' already exists', name) return False dbc = _connect(**connection_args) if dbc is None: return False cur = dbc.cursor() s_name = quote_identifier(name) qry = 'CREATE DATABASE IF NOT EXISTS {0}'.format(s_name) args = {} if character_set is not None: qry += ' CHARACTER SET %(character_set)s' args['character_set'] = character_set if collate is not None: qry += ' COLLATE %(collate)s' args['collate'] = collate qry += ';' try: if _execute(cur, qry, args): log.info('DB \'%s\' created', name) return True except MySQLdb.OperationalError as exc: err = 'MySQL Error {0}: {1}'.format(*exc.args) __context__['mysql.error'] = err log.error(err) return False
Adds a database to the MySQL server. name The name of the database to manage character_set The character set, if left empty the MySQL default will be used collate The collation, if left empty the MySQL default will be used CLI Example: .. code-block:: bash salt '*' mysql.db_create 'dbname' salt '*' mysql.db_create 'dbname' 'utf8' 'utf8_general_ci'
def from_tree(cls, repo, *treeish, **kwargs): if len(treeish) == 0 or len(treeish) > 3: raise ValueError("Please specify between 1 and 3 treeish, got %i" % len(treeish)) arg_list = [] if len(treeish) > 1: arg_list.append("--reset") arg_list.append("--aggressive") tmp_index = tempfile.mktemp('', '', repo.git_dir) arg_list.append("--index-output=%s" % tmp_index) arg_list.extend(treeish) index_handler = TemporaryFileSwap(join_path_native(repo.git_dir, 'index')) try: repo.git.read_tree(*arg_list, **kwargs) index = cls(repo, tmp_index) index.entries del(index_handler) finally: if osp.exists(tmp_index): os.remove(tmp_index) return index
Merge the given treeish revisions into a new index which is returned. The original index will remain unaltered :param repo: The repository treeish are located in. :param treeish: One, two or three Tree Objects, Commits or 40 byte hexshas. The result changes according to the amount of trees. If 1 Tree is given, it will just be read into a new index If 2 Trees are given, they will be merged into a new index using a two way merge algorithm. Tree 1 is the 'current' tree, tree 2 is the 'other' one. It behaves like a fast-forward. If 3 Trees are given, a 3-way merge will be performed with the first tree being the common ancestor of tree 2 and tree 3. Tree 2 is the 'current' tree, tree 3 is the 'other' one :param kwargs: Additional arguments passed to git-read-tree :return: New IndexFile instance. It will point to a temporary index location which does not exist anymore. If you intend to write such a merged Index, supply an alternate file_path to its 'write' method. :note: In the three-way merge case, --aggressive will be specified to automatically resolve more cases in a commonly correct manner. Specify trivial=True as kwarg to override that. As the underlying git-read-tree command takes into account the current index, it will be temporarily moved out of the way to assure there are no unsuspected interferences.
def create(self, Name, Subject, HtmlBody=None, TextBody=None, Alias=None): assert TextBody or HtmlBody, "Provide either email TextBody or HtmlBody or both" data = {"Name": Name, "Subject": Subject, "HtmlBody": HtmlBody, "TextBody": TextBody, "Alias": Alias} return self._init_instance(self.call("POST", "/templates", data=data))
Creates a template. :param Name: Name of template :param Subject: The content to use for the Subject when this template is used to send email. :param HtmlBody: The content to use for the HtmlBody when this template is used to send email. :param TextBody: The content to use for the TextBody when this template is used to send email. :param Alias: An optional alias for the template. :return: The newly created template.
def _process_change(self, server_description): td_old = self._description if self._publish_server: old_server_description = td_old._server_descriptions[ server_description.address] self._events.put(( self._listeners.publish_server_description_changed, (old_server_description, server_description, server_description.address, self._topology_id))) self._description = updated_topology_description( self._description, server_description) self._update_servers() self._receive_cluster_time_no_lock(server_description.cluster_time) if self._publish_tp: self._events.put(( self._listeners.publish_topology_description_changed, (td_old, self._description, self._topology_id))) self._condition.notify_all()
Process a new ServerDescription on an opened topology. Hold the lock when calling this.
def stop(self): yield from self._stop_ubridge() if self._nvram_watcher: self._nvram_watcher.close() self._nvram_watcher = None if self._telnet_server: self._telnet_server.close() self._telnet_server = None if self.is_running(): self._terminate_process_iou() if self._iou_process.returncode is None: try: yield from gns3server.utils.asyncio.wait_for_process_termination(self._iou_process, timeout=3) except asyncio.TimeoutError: if self._iou_process.returncode is None: log.warning("IOU process {} is still running... killing it".format(self._iou_process.pid)) try: self._iou_process.kill() except ProcessLookupError: pass self._iou_process = None self._started = False self.save_configs()
Stops the IOU process.
def mul(name, num, minimum=0, maximum=0, ref=None): return calc( name=name, num=num, oper='mul', minimum=minimum, maximum=maximum, ref=ref )
Multiplies together the ``num`` most recent values. Requires a list. USAGE: .. code-block:: yaml foo: calc.mul: - name: myregentry - num: 5
def get_activity_comments(self, activity_id, markdown=False, limit=None): result_fetcher = functools.partial(self.protocol.get, '/activities/{id}/comments', id=activity_id, markdown=int(markdown)) return BatchedResultsIterator(entity=model.ActivityComment, bind_client=self, result_fetcher=result_fetcher, limit=limit)
Gets the comments for an activity. http://strava.github.io/api/v3/comments/#list :param activity_id: The activity for which to fetch comments. :type activity_id: int :param markdown: Whether to include markdown in comments (default is false, i.e. markdown is filtered out). :type markdown: bool :param limit: Max rows to return (default unlimited). :type limit: int :return: An iterator of :class:`stravalib.model.ActivityComment` objects. :rtype: :class:`BatchedResultsIterator`
def destroy_list(self, list_id): return List(tweepy_list_to_json(self._client.destroy_list(list_id=list_id)))
Destroy a list :param list_id: list ID number :return: The destroyed list object :rtype: :class:`~responsebot.models.List`
def set_stripe_api_version(version=None, validate=True):
    version = version or get_stripe_api_version()
    if validate:
        valid = validate_stripe_api_version(version)
        if not valid:
            raise ValueError("Bad stripe API version: {}".format(version))
    stripe.api_version = version
Set the desired API version to use for Stripe requests. :param version: The version to set for the Stripe API. :type version: ``str`` :param validate: If True, validate the value of the specified version. :type validate: ``bool``
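A minimal usage sketch; the version string below is a placeholder, not a verified Stripe release:

# Validate the placeholder version string, then assign it to stripe.api_version.
set_stripe_api_version("2019-05-16")
# With no arguments, the version returned by get_stripe_api_version() is used.
set_stripe_api_version()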
def which(path, jail=None, chroot=None, root=None, origin=False, quiet=False): opts = '' if quiet: opts += 'q' if origin: opts += 'o' cmd = _pkg(jail, chroot, root) cmd.append('which') if opts: cmd.append('-' + opts) cmd.append(path) return __salt__['cmd.run']( cmd, output_loglevel='trace', python_shell=False )
Displays which package installed a specific file CLI Example: .. code-block:: bash salt '*' pkg.which <file name> jail Perform the check in the specified jail CLI Example: .. code-block:: bash salt '*' pkg.which <file name> jail=<jail name or id> chroot Perform the check in the specified chroot (ignored if ``jail`` is specified) root Perform the check in the specified root (ignored if ``jail`` is specified) CLI Example: .. code-block:: bash salt '*' pkg.which <file name> chroot=/path/to/chroot origin Shows the origin of the package instead of name-version. CLI Example: .. code-block:: bash salt '*' pkg.which <file name> origin=True quiet Quiet output. CLI Example: .. code-block:: bash salt '*' pkg.which <file name> quiet=True
def set_shutter_level(self, level=0.0):
    data = {"channelIndex": 1, "deviceId": self.id, "shutterLevel": level}
    return self._restCall("device/control/setShutterLevel",
                          body=json.dumps(data))
sets the shutter level Args: level(float): the new level of the shutter. 0.0 = open, 1.0 = closed Returns: the result of the _restCall
def fq_merge(R1, R2):
    # Cycle 1..4 to track the position within each four-line FASTQ record.
    c = itertools.cycle([1, 2, 3, 4])
    for r1, r2 in zip(R1, R2):
        n = next(c)
        if n == 1:
            pair = [[], []]
        pair[0].append(r1.strip())
        pair[1].append(r2.strip())
        if n == 4:
            yield pair
Merge two separate FASTQ files into read pairs, yielding one (forward, reverse) pair of four-line records at a time.
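A usage sketch; the file names are hypothetical and the inputs can be any iterables of lines:

with open("sample_R1.fastq") as R1, open("sample_R2.fastq") as R2:
    for fwd_lines, rev_lines in fq_merge(R1, R2):
        # Each element is a 4-line FASTQ record: [header, sequence, '+', quality].
        print(fwd_lines[0], rev_lines[0])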
def cut_from_chain(sciobj_model): if _is_head(sciobj_model): old_pid = sciobj_model.obsoletes.did _cut_head_from_chain(sciobj_model) elif _is_tail(sciobj_model): old_pid = sciobj_model.obsoleted_by.did _cut_tail_from_chain(sciobj_model) else: old_pid = sciobj_model.obsoleted_by.did _cut_embedded_from_chain(sciobj_model) _update_sid_to_last_existing_pid_map(old_pid)
Remove an object from a revision chain. The object can be at any location in the chain, including the head or tail. Preconditions: - The object with the pid is verified to exist and to be a member of a revision chain. E.g., with: d1_gmn.app.views.asserts.is_existing_object(pid) d1_gmn.app.views.asserts.is_in_revision_chain(pid) Postconditions: - The given object is a standalone object with empty obsoletes, obsoletedBy and seriesId fields. - The previously adjacent objects in the chain are adjusted to close any gap that was created or to remove a dangling reference at the head or tail. - If the object was the last object in the chain and the chain has a SID, the SID reference is shifted over to the new last object in the chain.
def getTriples(pointing):
    sql = "SELECT id FROM triples t join triple_members m ON t.id=m.triple"
    sql += " join bucket.exposure e on e.expnum=m.expnum "
    sql += " WHERE pointing=%s group by id order by e.expnum "
    cfeps.execute(sql, (pointing, ))
    return cfeps.fetchall()
Get all triples of a specified pointing ID. The default is to return the complete list of triples.
def _from_python_type(self, obj, field, pytype): json_schema = { 'title': field.attribute or field.name, } for key, val in TYPE_MAP[pytype].items(): json_schema[key] = val if field.dump_only: json_schema['readonly'] = True if field.default is not missing: json_schema['default'] = field.default metadata = field.metadata.get('metadata', {}) metadata.update(field.metadata) for md_key, md_val in metadata.items(): if md_key == 'metadata': continue json_schema[md_key] = md_val if isinstance(field, fields.List): json_schema['items'] = self._get_schema_for_field( obj, field.container ) return json_schema
Get schema definition from python type.
def load_model(model_name, epoch_num, data_shapes, label_shapes, label_names, gpus=''): sym, arg_params, aux_params = mx.model.load_checkpoint(model_name, epoch_num) mod = create_module(sym, data_shapes, label_shapes, label_names, gpus) mod.set_params( arg_params=arg_params, aux_params=aux_params, allow_missing=True ) return mod
Returns a module loaded with the provided model. Parameters ---------- model_name: str Prefix of the MXNet model name as stored on the local directory. epoch_num : int Epoch number of model we would like to load. data_shapes: list of tuples. List of tuples where each tuple is a pair of input variable name and its shape. label_shapes: list of (str, tuple) Typically is ``data_iter.provide_label``. label_names: list of str Name of the output labels in the MXNet symbolic graph. gpus: str Comma separated string of gpu ids on which inferences are executed. E.g. 3,5,6 would refer to GPUs 3, 5 and 6. If empty, we use CPU. Returns ------- MXNet module
def prttex_summary_cnts_all(self, prt=sys.stdout): cnts = self.get_cnts_levels_depths_recs(set(self.obo.values())) self._prttex_summary_cnts(prt, cnts)
Print LaTeX format summary of level and depth counts for all active GO Terms.
def createUsageReport(self, reportname, queries, metadata, since="LAST_DAY", fromValue=None, toValue=None, aggregationInterval=None ): url = self._url + "/add" params = { "f" : "json", "usagereport": { "reportname" : reportname, "since" : since, "metadata" : metadata} } if isinstance(queries, dict): params["usagereport"]["queries"] = [queries] elif isinstance(queries, list): params["usagereport"]["queries"] = queries if aggregationInterval is not None: params["usagereport"]['aggregationInterval'] = aggregationInterval if since.lower() == "custom": params["usagereport"]['to'] = toValue params["usagereport"]['from'] = fromValue res = self._post(url=url, param_dict=params, securityHandler=self._securityHandler, proxy_port=self._proxy_port, proxy_url=self._proxy_url) self.__init() return res
Creates a new usage report. A usage report is created by submitting a JSON representation of the usage report to this operation. Inputs: reportname - the unique name of the report since - the time duration of the report. The supported values are: LAST_DAY, LAST_WEEK, LAST_MONTH, LAST_YEAR, CUSTOM LAST_DAY represents a time range spanning the previous 24 hours. LAST_WEEK represents a time range spanning the previous 7 days. LAST_MONTH represents a time range spanning the previous 30 days. LAST_YEAR represents a time range spanning the previous 365 days. CUSTOM represents a time range that is specified using the from and to parameters. fromValue - optional value - The timestamp (milliseconds since UNIX epoch, namely January 1, 1970, 00:00:00 GMT) for the beginning period of the report. Only valid when since is CUSTOM toValue - optional value - The timestamp (milliseconds since UNIX epoch, namely January 1, 1970, 00:00:00 GMT) for the ending period of the report.Only valid when since is CUSTOM. aggregationInterval - Optional. Aggregation interval in minutes. Server metrics are aggregated and returned for time slices aggregated using the specified aggregation interval. The time range for the report, specified using the since parameter (and from and to when since is CUSTOM) is split into multiple slices, each covering an aggregation interval. Server metrics are then aggregated for each time slice and returned as data points in the report data. When the aggregationInterval is not specified, the following defaults are used: LAST_DAY: 30 minutes LAST_WEEK: 4 hours LAST_MONTH: 24 hours LAST_YEAR: 1 week CUSTOM: 30 minutes up to 1 day, 4 hours up to 1 week, 1 day up to 30 days, and 1 week for longer periods. If the samplingInterval specified in Usage Reports Settings is more than the aggregationInterval, the samplingInterval is used instead. queries - A list of queries for which to generate the report. You need to specify the list as an array of JSON objects representing the queries. Each query specifies the list of metrics to be queries for a given set of resourceURIs. The queries parameter has the following sub-parameters: resourceURIs - Comma separated list of resource URIs for which to report metrics. Specifies services or folders for which to gather metrics. The resourceURI is formatted as below: services/ - Entire Site services/Folder/ - Folder within a Site. Reports metrics aggregated across all services within that Folder and Sub-Folders. services/Folder/ServiceName.ServiceType - Service in a specified folder, for example: services/Map_bv_999.MapServer. services/ServiceName.ServiceType - Service in the root folder, for example: Map_bv_999.MapServer. metrics - Comma separated list of metrics to be reported. Supported metrics are: RequestCount - the number of requests received RequestsFailed - the number of requests that failed RequestsTimedOut - the number of requests that timed out RequestMaxResponseTime - the maximum response time RequestAvgResponseTime - the average response time ServiceActiveInstances - the maximum number of active (running) service instances sampled at 1 minute intervals, for a specified service metadata - Can be any JSON Object. Typically used for storing presentation tier data for the usage report, such as report title, colors, line-styles, etc. Also used to denote visibility in ArcGIS Server Manager for reports created with the Administrator Directory. 
To make any report created in the Administrator Directory visible to Manager, include "managerReport":true in the metadata JSON object. When this value is not set (default), reports are not visible in Manager. This behavior can be extended to any client that wants to interact with the Administrator Directory. Any user-created value will need to be processed by the client. Example: >>> queryObj = [{ "resourceURIs": ["services/Map_bv_999.MapServer"], "metrics": ["RequestCount"] }] >>> obj.createReport( reportname="SampleReport", queries=queryObj, metadata="This could be any String or JSON Object.", since="LAST_DAY" )
def wsgi(self, environ, start_response): request = Request(environ) ctx = Context(request) try: try: response = self(request, ctx) ctx._run_callbacks('finalize', (request, response)) response = response.conditional_to(request) except HTTPException as e: response = e.response except Exception: self.handle_error(request, ctx) response = InternalServerError().response response.add_callback(lambda: ctx._run_callbacks('close')) return response(environ, start_response) finally: ctx._run_callbacks('teardown', log_errors=True)
Implements the mapper's WSGI interface.
def save_boolean_setting(self, key, check_box): set_setting(key, check_box.isChecked(), qsettings=self.settings)
Save boolean setting according to check_box state. :param key: Key to retrieve setting value. :type key: str :param check_box: Check box to show and set the setting. :type check_box: PyQt5.QtWidgets.QCheckBox.QCheckBox
def do_create_tool_item(self): proxy = SpinToolItem(*self._args_for_toolitem) self.connect_proxy(proxy) return proxy
This is called by the UIManager when it is time to instantiate the proxy
def set_locs(self, locs): 'Sets the locations of the ticks' _check_implicitly_registered() self.locs = locs (vmin, vmax) = vi = tuple(self.axis.get_view_interval()) if vi != self.plot_obj.view_interval: self.plot_obj.date_axis_info = None self.plot_obj.view_interval = vi if vmax < vmin: (vmin, vmax) = (vmax, vmin) self._set_default_format(vmin, vmax)
Sets the locations of the ticks
def location(args): fastafile = args.fastafile pwmfile = args.pwmfile lwidth = args.width if not lwidth: f = Fasta(fastafile) lwidth = len(f.items()[0][1]) f = None jobs = [] motifs = pwmfile_to_motifs(pwmfile) ids = [motif.id for motif in motifs] if args.ids: ids = args.ids.split(",") n_cpus = int(MotifConfig().get_default_params()["ncpus"]) pool = Pool(processes=n_cpus, maxtasksperchild=1000) for motif in motifs: if motif.id in ids: outfile = os.path.join("%s_histogram" % motif.id) jobs.append( pool.apply_async( motif_localization, (fastafile,motif,lwidth,outfile, args.cutoff) )) for job in jobs: job.get()
Creates a histogram of motif locations. Parameters ---------- args : argparse object Command line arguments.
def getFileObjects(self): files = {'project-file': self, 'mapping-table-file': self.mapTableFile, 'channel-input-file': self.channelInputFile, 'precipitation-file': self.precipFile, 'storm-pipe-network-file': self.stormPipeNetworkFile, 'hmet-file': self.hmetFile, 'nwsrfs-file': self.nwsrfsFile, 'orographic-gage-file': self.orographicGageFile, 'grid-pipe-file': self.gridPipeFile, 'grid-stream-file': self.gridStreamFile, 'time-series-file': self.timeSeriesFiles, 'projection-file': self.projectionFile, 'replace-parameters-file': self.replaceParamFile, 'replace-value-file': self.replaceValFile, 'output-location-file': self.outputLocationFiles, 'maps': self.maps, 'link-node-datasets-file': self.linkNodeDatasets} return files
Retrieve a dictionary of file objects. This is a utility method that can be used to programmatically access the GsshaPy file objects. Use this method in conjunction with the getFileKeys method to access only files that have been read into the database. Returns: dict: Dictionary with human readable keys and values of GsshaPy file object instances. Files that have not been read into the database will have a value of None.
def convert_to_match_query(ir_blocks): output_block = ir_blocks[-1] if not isinstance(output_block, ConstructResult): raise AssertionError(u'Expected last IR block to be ConstructResult, found: ' u'{} {}'.format(output_block, ir_blocks)) ir_except_output = ir_blocks[:-1] folds, ir_except_output_and_folds = extract_folds_from_ir_blocks(ir_except_output) global_operation_ir_blocks_tuple = _extract_global_operations(ir_except_output_and_folds) global_operation_blocks, pruned_ir_blocks = global_operation_ir_blocks_tuple if len(global_operation_blocks) > 1: raise AssertionError(u'Received IR blocks with multiple global operation blocks. Only one ' u'is allowed: {} {}'.format(global_operation_blocks, ir_blocks)) if len(global_operation_blocks) == 1: if not isinstance(global_operation_blocks[0], Filter): raise AssertionError(u'Received non-Filter global operation block. {}' .format(global_operation_blocks[0])) where_block = global_operation_blocks[0] else: where_block = None match_steps = _split_ir_into_match_steps(pruned_ir_blocks) match_traversals = _split_match_steps_into_match_traversals(match_steps) return MatchQuery( match_traversals=match_traversals, folds=folds, output_block=output_block, where_block=where_block, )
Convert the list of IR blocks into a MatchQuery object, for easier manipulation.
def AddPorts(self,ports): for port in ports: if 'port_to' in port: self.ports.append(Port(self,port['protocol'],port['port'],port['port_to'])) else: self.ports.append(Port(self,port['protocol'],port['port'])) return(self.Update())
Create one or more port access policies. Include a list of dicts with protocol, port, and port_to (optional - for range) keys. >>> clc.v2.Server("WA1BTDIX01").PublicIPs().public_ips[0] .AddPorts([{'protocol': 'TCP', 'port': '80' }, {'protocol': 'UDP', 'port': '10000', 'port_to': '15000'}]).WaitUntilComplete() 0
def remove_all_cts_records_by(file_name, crypto_idfp):
    db = XonoticDB.load_path(file_name)
    db.remove_all_cts_records_by(crypto_idfp)
    db.save(file_name)
Remove all CTS records set by the player with the given crypto_idfp.
def CreateNetworkConnectivityTauDEMTree(network_connectivity_tree_file, out_csv_file): stream_id_array = [] next_down_id_array = [] with open_csv(network_connectivity_tree_file, "r") as csvfile: for row in csvfile: split_row = row.split() stream_id_array.append(split_row[0].strip()) next_down_id_array.append(split_row[3].strip()) stream_id_array = np.array(stream_id_array, dtype=np.int32) next_down_id_array = np.array(next_down_id_array, dtype=np.int32) StreamIDNextDownIDToConnectivity(stream_id_array, next_down_id_array, out_csv_file)
Creates Network Connectivity input CSV file for RAPID based on the TauDEM network connectivity tree file
def get_region_for_chip(x, y, level=3):
    shift = 6 - 2 * level
    # Which of the 16 sub-blocks within the chunk contains this chip.
    bit = ((x >> shift) & 3) + 4 * ((y >> shift) & 3)
    # Round the co-ordinates down to the origin of the chunk at this level.
    mask = 0xffff ^ ((4 << shift) - 1)
    nx = x & mask
    ny = y & mask
    # Pack the chunk origin, level and sub-block selection bit into one word.
    region = (nx << 24) | (ny << 16) | (level << 16) | (1 << bit)
    return region
Get the region word for the given chip co-ordinates. Parameters ---------- x : int x co-ordinate y : int y co-ordinate level : int Level of region to build. 0 is the most coarse and 3 is the finest. When 3 is used the specified region will ONLY select the given chip, for other regions surrounding chips will also be selected. Returns ------- int A 32-bit value representing the co-ordinates of the chunk of SpiNNaker chips that should be selected and the blocks within this chunk that are selected. As long as bits (31:16) are the same these values may be OR-ed together to increase the number of sub-blocks selected.
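A worked example, computed directly from the code above (level 3 selects exactly one chip):

region = get_region_for_chip(0, 0, level=3)
assert region == 0x00030001               # level 3, chunk origin (0, 0), bit 0 set
assert get_region_for_chip(1, 2, level=3) == 0x00030200  # bit 1 + 4*2 = 9 set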
def _run(self): for node in self.node.relatives: launch_node_task(node) for node in self.node.relatives: self.wait_and_join(node.task) if self.node.parent: while not self.node.parent.task.siblings_permission: time.sleep(self._polling_time) self.has_started = True self.main() self.siblings_permission = True for node in self.node.siblings: launch_node_task(node) for node in self.node.siblings: self.wait_and_join(node.task) self.finished_at = time.time() self.scheduler.notify_execution(self) self.has_finished = True
Run the task respecting dependencies
def print_item_callback(item): print('&listen [{}, {}={}]'.format( item.get('cmd', ''), item.get('id', ''), item.get('data', '')))
Callback used by &listen to print an item.
def crypto_box_keypair():
    pk = ffi.new("unsigned char[]", crypto_box_PUBLICKEYBYTES)
    sk = ffi.new("unsigned char[]", crypto_box_SECRETKEYBYTES)

    rc = lib.crypto_box_keypair(pk, sk)
    ensure(rc == 0, 'Unexpected library error', raising=exc.RuntimeError)

    return (
        ffi.buffer(pk, crypto_box_PUBLICKEYBYTES)[:],
        ffi.buffer(sk, crypto_box_SECRETKEYBYTES)[:],
    )
Returns a randomly generated public and secret key. :rtype: (bytes(public_key), bytes(secret_key))
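A minimal usage sketch; both returned values are raw bytes whose lengths are fixed by the size constants used in the code above:

public_key, secret_key = crypto_box_keypair()
assert len(public_key) == crypto_box_PUBLICKEYBYTES
assert len(secret_key) == crypto_box_SECRETKEYBYTES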
def _get_property_values_with_defaults(self, classname, property_values): final_values = self.get_default_property_values(classname) final_values.update(property_values) return final_values
Return the property values for the class, with default values applied where needed.
def _sanity_check_registered_locations_parent_locations(query_metadata_table): for location, location_info in query_metadata_table.registered_locations: if (location != query_metadata_table.root_location and not query_metadata_table.root_location.is_revisited_at(location)): if location_info.parent_location is None: raise AssertionError(u'Found a location that is not the root location of the query ' u'or a revisit of the root, but does not have a parent: ' u'{} {}'.format(location, location_info)) if location_info.parent_location is not None: query_metadata_table.get_location_info(location_info.parent_location)
Assert that all registered locations' parent locations are also registered.
def log(msg, level=0): red = '\033[91m' endc = '\033[0m' cfg = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'stdout': { 'format': '[%(levelname)s]: %(asctime)s - %(message)s', 'datefmt': '%x %X' }, 'stderr': { 'format': red + '[%(levelname)s]: %(asctime)s - %(message)s' + endc, 'datefmt': '%x %X' } }, 'handlers': { 'stdout': { 'class': 'logging.StreamHandler', 'level': 'DEBUG', 'formatter': 'stdout' }, 'stderr': { 'class': 'logging.StreamHandler', 'level': 'ERROR', 'formatter': 'stderr' } }, 'loggers': { 'info': { 'handlers': ['stdout'], 'level': 'INFO', 'propagate': True }, 'error': { 'handlers': ['stderr'], 'level': 'ERROR', 'propagate': False } } } dictConfig(cfg) lg = 'info' if level == 0 else 'error' lvl = 20 if level == 0 else 40 logger = logging.getLogger(lg) logger.log(lvl, msg)
Logs a message to the console, with an optional level parameter Args: - msg (str): message to send to console - level (int): log level; 0 for info, 1 for error (default = 0)
def _cutout(x, n_holes:uniform_int=1, length:uniform_int=40):
    "Cut out `n_holes` number of square holes of size `length` in image at random locations."
    h, w = x.shape[1:]
    for n in range(n_holes):
        h_y = np.random.randint(0, h)
        h_x = np.random.randint(0, w)
        y1 = int(np.clip(h_y - length / 2, 0, h))
        y2 = int(np.clip(h_y + length / 2, 0, h))
        x1 = int(np.clip(h_x - length / 2, 0, w))
        x2 = int(np.clip(h_x + length / 2, 0, w))
        x[:, y1:y2, x1:x2] = 0
    return x
Cut out `n_holes` number of square holes of size `length` in image at random locations.
def describe(self): return OrderedDict([ (name, field.describe()) for name, field in self.fields.items() ])
Describe all serialized fields. It returns dictionary of all fields description defined for this serializer using their own ``describe()`` methods with respect to order in which they are defined as class attributes. Returns: OrderedDict: serializer description
def endpoint_create(service, publicurl=None, internalurl=None, adminurl=None, region=None, profile=None, url=None, interface=None, **connection_args): kstone = auth(profile, **connection_args) keystone_service = service_get(name=service, profile=profile, **connection_args) if not keystone_service or 'Error' in keystone_service: return {'Error': 'Could not find the specified service'} if _OS_IDENTITY_API_VERSION > 2: kstone.endpoints.create(service=keystone_service[service]['id'], region_id=region, url=url, interface=interface) else: kstone.endpoints.create(region=region, service_id=keystone_service[service]['id'], publicurl=publicurl, adminurl=adminurl, internalurl=internalurl) return endpoint_get(service, region, profile, interface, **connection_args)
Create an endpoint for an Openstack service CLI Examples: .. code-block:: bash salt 'v2' keystone.endpoint_create nova 'http://public/url' 'http://internal/url' 'http://adminurl/url' region salt 'v3' keystone.endpoint_create nova url='http://public/url' interface='public' region='RegionOne'
def ScanForVolumeSystem(self, source_path_spec): if source_path_spec.type_indicator == definitions.TYPE_INDICATOR_VSHADOW: return None if source_path_spec.IsVolumeSystemRoot(): return source_path_spec if source_path_spec.type_indicator == ( definitions.TYPE_INDICATOR_APFS_CONTAINER): return None try: type_indicators = analyzer.Analyzer.GetVolumeSystemTypeIndicators( source_path_spec, resolver_context=self._resolver_context) except (IOError, RuntimeError) as exception: raise errors.BackEndError(( 'Unable to process source path specification with error: ' '{0!s}').format(exception)) if not type_indicators: return None if len(type_indicators) > 1: raise errors.BackEndError( 'Unsupported source found more than one volume system types.') if (type_indicators[0] == definitions.TYPE_INDICATOR_TSK_PARTITION and source_path_spec.type_indicator in [ definitions.TYPE_INDICATOR_TSK_PARTITION]): return None if type_indicators[0] in definitions.VOLUME_SYSTEM_TYPE_INDICATORS: return path_spec_factory.Factory.NewPathSpec( type_indicators[0], location='/', parent=source_path_spec) return path_spec_factory.Factory.NewPathSpec( type_indicators[0], parent=source_path_spec)
Scans the path specification for a supported volume system format. Args: source_path_spec (PathSpec): source path specification. Returns: PathSpec: volume system path specification or None if no supported volume system type was found. Raises: BackEndError: if the source cannot be scanned or more than one volume system type is found.
def AddMethod(self, interface, name, in_sig, out_sig, code): if not interface: interface = self.interface n_args = len(dbus.Signature(in_sig)) method = lambda self, *args, **kwargs: DBusMockObject.mock_method( self, interface, name, in_sig, *args, **kwargs) dbus_method = dbus.service.method(interface, out_signature=out_sig)(method) dbus_method.__name__ = str(name) dbus_method._dbus_in_signature = in_sig dbus_method._dbus_args = ['arg%i' % i for i in range(1, n_args + 1)] if interface == self.interface: setattr(self.__class__, name, dbus_method) self.methods.setdefault(interface, {})[str(name)] = (in_sig, out_sig, code, dbus_method)
Add a method to this object interface: D-Bus interface to add this to. For convenience you can specify '' here to add the method to the object's main interface (as specified on construction). name: Name of the method in_sig: Signature of input arguments; for example "ias" for a method that takes an int32 and a string array as arguments; see http://dbus.freedesktop.org/doc/dbus-specification.html#message-protocol-signatures out_sig: Signature of output arguments; for example "s" for a method that returns a string; use '' for methods that do not return anything. code: Python 3 code to run in the method call; you have access to the arguments through the "args" list, and can set the return value by assigning a value to the "ret" variable. You can also read the global "objects" variable, which is a dictionary mapping object paths to DBusMockObject instances. For keeping state across method calls, you are free to use normal Python members of the "self" object, which will be persistent for the whole mock's life time. E. g. you can have a method with "self.my_state = True", and another method that returns it with "ret = self.my_state". When specifying '', the method will not do anything (except logging) and return None.
def decrypt_seal(self, data: bytes) -> bytes:
    curve25519_public_key = libnacl.crypto_sign_ed25519_pk_to_curve25519(self.vk)
    curve25519_secret_key = libnacl.crypto_sign_ed25519_sk_to_curve25519(self.sk)
    return libnacl.crypto_box_seal_open(data, curve25519_public_key,
                                        curve25519_secret_key)
Decrypt bytes data with a curve25519 version of the ed25519 key pair :param data: Encrypted data :return:
def get_context_arguments(self): cargs = {} for context in self.__context_stack: cargs.update(context.context_arguments) return cargs
Return a dictionary containing the current context arguments.
def _get_toc_reference(app, node, toc, docname): if isinstance(node, nodes.section) and isinstance(node.parent, nodes.document): ref_id = docname toc_reference = _find_toc_node(toc, ref_id, nodes.section) elif isinstance(node, nodes.section): ref_id = node.attributes["ids"][0] toc_reference = _find_toc_node(toc, ref_id, nodes.section) else: try: ref_id = node.children[0].attributes["ids"][0] toc_reference = _find_toc_node(toc, ref_id, addnodes.desc) except (KeyError, IndexError) as e: LOGGER.warning("Invalid desc node: %s" % e) toc_reference = None return toc_reference
Logic that maps a specific node to its part of the toctree. It takes a specific incoming ``node`` and returns the actual TOC Tree node for that reference.
def shutdown(self): inputQueue = self.inputQueue self.inputQueue = None for i in range(self.numWorkers): inputQueue.put(None) for thread in self.workerThreads: thread.join() BatchSystemSupport.workerCleanup(self.workerCleanupInfo)
Cleanly terminate worker threads. Adds one sentinel to the input queue per worker, then joins all worker threads.
def _calf(self, spec): self.prepare(spec) self.compile(spec) self.assemble(spec) self.link(spec) self.finalize(spec)
The main call, assuming the base spec is prepared. Also, no advices will be triggered.
def reorient_wf(name='ReorientWorkflow'): workflow = pe.Workflow(name=name) inputnode = pe.Node(niu.IdentityInterface(fields=['in_file']), name='inputnode') outputnode = pe.Node(niu.IdentityInterface( fields=['out_file']), name='outputnode') deoblique = pe.Node(afni.Refit(deoblique=True), name='deoblique') reorient = pe.Node(afni.Resample( orientation='RPI', outputtype='NIFTI_GZ'), name='reorient') workflow.connect([ (inputnode, deoblique, [('in_file', 'in_file')]), (deoblique, reorient, [('out_file', 'in_file')]), (reorient, outputnode, [('out_file', 'out_file')]) ]) return workflow
A workflow to reorient images to 'RPI' orientation
def convert(self): if self.downloaded is False: raise serror("Track not downloaded, can't convert file..") filetype = magic.from_file(self.filepath, mime=True) if filetype == "audio/mpeg": print("File is already in mp3 format. Skipping convert.") return False rootpath = os.path.dirname(os.path.dirname(self.filepath)) backupdir = rootpath + "/backups/" + self.get("username") if not os.path.exists(backupdir): os.makedirs(backupdir) backupfile = "%s/%s%s" % ( backupdir, self.gen_filename(), self.get_file_extension(self.filepath)) newfile = "%s.mp3" % self.filename_without_extension() os.rename(self.filepath, backupfile) self.filepath = newfile print("Converting to %s.." % newfile) song = AudioSegment.from_file(backupfile) return song.export(newfile, format="mp3")
Convert the file to mp3 format.
def readFromFile(self, filename): s = dict(np.load(filename)) try: self.coeffs = s['coeffs'][()] except KeyError: self.coeffs = s try: self.opts = s['opts'][()] except KeyError: pass return self.coeffs
read the distortion coeffs from file
def dollars_to_math(source):
    s = "\n".join(source)
    if s.find("$") == -1:
        return
    global _data
    _data = {}

    def repl(matchobj):
        global _data
        s = matchobj.group(0)
        t = "___XXX_REPL_%d___" % len(_data)
        _data[t] = s
        return t

    # Protect $...$ spans that already sit inside curly braces.
    s = re.sub(r"({[^{}$]*\$[^{}$]*\$[^{}]*})", repl, s)
    dollars = re.compile(r"(?<!\$)(?<!\\)\$([^\$]+?)\$")
    slashdollar = re.compile(r"\\\$")
    s = dollars.sub(r":math:`\1`", s)
    s = slashdollar.sub(r"$", s)
    # Restore the protected spans.
    for r in _data:
        s = s.replace(r, _data[r])
    source[:] = [s]
r""" Replace dollar signs with backticks. More precisely, do a regular expression search. Replace a plain dollar sign ($) by a backtick (`). Replace an escaped dollar sign (\$) by a dollar sign ($). Don't change a dollar sign preceded or followed by a backtick (`$ or $`), because of strings like "``$HOME``". Don't make any changes on lines starting with spaces, because those are indented and hence part of a block of code or examples. This also doesn't replaces dollar signs enclosed in curly braces, to avoid nested math environments, such as :: $f(n) = 0 \text{ if $n$ is prime}$ Thus the above line would get changed to `f(n) = 0 \text{ if $n$ is prime}`
def _merge_map(key, values, partial): proto = kv_pb.KeyValues() proto.set_key(key) proto.value_list().extend(values) yield proto.Encode()
A map function used in merge phase. Stores (key, values) into KeyValues proto and yields its serialization. Args: key: values key. values: values themselves. partial: True if more values for this key will follow. False otherwise. Yields: The proto.
def get_distributed_seismicity_source_nodes(source): source_nodes = [] source_nodes.append( Node("magScaleRel", text=source.magnitude_scaling_relationship.__class__.__name__)) source_nodes.append( Node("ruptAspectRatio", text=source.rupture_aspect_ratio)) source_nodes.append(obj_to_node(source.mfd)) source_nodes.append( build_nodal_plane_dist(source.nodal_plane_distribution)) source_nodes.append( build_hypo_depth_dist(source.hypocenter_distribution)) return source_nodes
Returns list of nodes of attributes common to all distributed seismicity source classes :param source: Seismic source as instance of :class: `openquake.hazardlib.source.area.AreaSource` or :class: `openquake.hazardlib.source.point.PointSource` :returns: List of instances of :class:`openquake.baselib.node.Node`
def classname(self): cls = javabridge.call(self.jobject, "getClass", "()Ljava/lang/Class;") return javabridge.call(cls, "getName", "()Ljava/lang/String;")
Returns the Java classname in dot-notation. :return: the Java classname :rtype: str
def find_module(self, name): defmodule = lib.EnvFindDefmodule(self._env, name.encode()) if defmodule == ffi.NULL: raise LookupError("Module '%s' not found" % name) return Module(self._env, defmodule)
Find the Module by its name.
def StrikeDip(n, e, u):
    r2d = 180 / np.pi
    # Ensure the normal vector points upwards.
    if u < 0:
        n = -n
        e = -e
        u = -u
    strike = np.arctan2(e, n) * r2d
    strike = strike - 90
    # Normalise strike to the range [0, 360).
    while strike >= 360:
        strike = strike - 360
    while strike < 0:
        strike = strike + 360
    x = np.sqrt(np.power(n, 2) + np.power(e, 2))
    dip = np.arctan2(x, u) * r2d
    return (strike, dip)
Finds strike and dip of plane given normal vector having components n, e, and u. Adapted from MATLAB script `bb.m <http://www.ceri.memphis.edu/people/olboyd/Software/Software.html>`_ written by Andy Michael and Oliver Boyd.
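A worked example: a plane whose normal points due east (n=0, e=1, u=0) is vertical and strikes north, so the function should return roughly (0, 90):

strike, dip = StrikeDip(n=0.0, e=1.0, u=0.0)
assert abs(strike - 0.0) < 1e-9 and abs(dip - 90.0) < 1e-9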
def synergy_to_datetime(time_qualifier, timeperiod): if time_qualifier == QUALIFIER_HOURLY: date_format = SYNERGY_HOURLY_PATTERN elif time_qualifier == QUALIFIER_DAILY: date_format = SYNERGY_DAILY_PATTERN elif time_qualifier == QUALIFIER_MONTHLY: date_format = SYNERGY_MONTHLY_PATTERN elif time_qualifier == QUALIFIER_YEARLY: date_format = SYNERGY_YEARLY_PATTERN elif time_qualifier == QUALIFIER_REAL_TIME: date_format = SYNERGY_SESSION_PATTERN else: raise ValueError('unknown time qualifier: {0}'.format(time_qualifier)) return datetime.strptime(timeperiod, date_format).replace(tzinfo=None)
Receives a timeperiod in Synergy format (e.g. YYYYMMDDHH) and converts it to a UTC _naive_ datetime.
def verify_login(user, password=None, **connection_args): connection_args['connection_user'] = user connection_args['connection_pass'] = password dbc = _connect(**connection_args) if dbc is None: if 'mysql.error' in __context__: del __context__['mysql.error'] return False return True
Attempt to login using the provided credentials. If successful, return true. Otherwise, return False. CLI Example: .. code-block:: bash salt '*' mysql.verify_login root password
def state_counts(gamma, T, out=None): return np.sum(gamma[0:T], axis=0, out=out)
Sum the probabilities of being in state i up to time T Parameters ---------- gamma : ndarray((T,N), dtype = float) gamma[t,i] is the probability at time t to be in state i ! T : int number of time steps Returns ------- count : numpy.array shape (N) count[i] is the summed probability to be in state i ! See Also -------- state_probabilities : to calculate `gamma`
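A usage sketch with a toy gamma array of shape (T, N) = (3, 2); the result is the column sum over the first T rows:

import numpy as np
gamma = np.array([[0.7, 0.3],
                  [0.4, 0.6],
                  [0.9, 0.1]])
counts = state_counts(gamma, T=3)
assert np.allclose(counts, [2.0, 1.0])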
def moments(self): moment1 = statstools.calc_mean_time(self.delays, self.coefs) moment2 = statstools.calc_mean_time_deviation( self.delays, self.coefs, moment1) return numpy.array([moment1, moment2])
The first two time delay weighted statistical moments of the MA coefficients.
def is_tagged(required_tags, has_tags):
    if not required_tags and not has_tags:
        return True
    elif not required_tags:
        return False
    found_tags = []
    for tag in required_tags:
        if tag in has_tags:
            found_tags.append(tag)
    return len(found_tags) == len(required_tags)
Checks whether all required tags are present in the given tags.
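A few worked examples, following the branches of the function above; the tag values are illustrative only:

assert is_tagged(["prod", "web"], ["prod", "web", "eu-west-1"])   # all required tags present
assert not is_tagged(["prod", "db"], ["prod", "web"])             # "db" missing
assert is_tagged([], [])                                          # nothing required, nothing present
assert not is_tagged([], ["prod"])                                # nothing required, but tags exist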
def dist(self, other): dx = self.x - other.x dy = self.y - other.y return math.sqrt(dx**2 + dy**2)
Distance to some other point.
def _check_not_empty(string): string = string.strip() if len(string) == 0: message = 'The string should not be empty' raise pp.ParseException(message)
Checks that the string is not empty. If it is empty an exception is raised, stopping the validation. This is used for compulsory alphanumeric fields. :param string: the field value
def activate(lang=None):
    if lang is None:
        lang = locale.getlocale()[0]
    tr = gettext.translation("argparse", os.path.join(locpath, "locale"),
                             [lang], fallback=True)
    argparse._ = tr.gettext
    argparse.ngettext = tr.ngettext
Activate a translation for lang. If lang is None, then the language of locale.getlocale() is used. If the translation file does not exist, the original messages will be used.
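A minimal usage sketch, assuming a compiled "argparse" catalogue for the chosen language exists under locpath/locale (otherwise the fallback keeps the original messages):

activate("de")   # switch argparse's built-in messages to German
activate()       # use the current locale reported by locale.getlocale()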
def getOutputName(self,name): val = self.outputNames[name] if self.inmemory: val = self.virtualOutputs[val] return val
Return the name of the file or PyFITS object associated with that name, depending on the setting of self.inmemory.
def delete_policy(self, pol_id): if pol_id not in self.policies: LOG.error("Invalid policy %s", pol_id) return del self.policies[pol_id] self.policy_cnt -= 1
Deletes the policy from the local dictionary.
def _get_system(model_folder): model_description_file = os.path.join(model_folder, "info.yml") if not os.path.isfile(model_description_file): logging.error("You are probably not in the folder of a model, because " "%s is not a file. (-m argument)", model_description_file) sys.exit(-1) with open(model_description_file, 'r') as ymlfile: model_desc = yaml.load(ymlfile) feature_desc = _get_description(model_desc) preprocessing_desc = _get_description(feature_desc) return (preprocessing_desc, feature_desc, model_desc)
Return the preprocessing description, the feature description and the model description.
def elapsed(self): dt = 0 for ss in self.starts_and_stops[:-1]: dt += (ss['stop'] - ss['start']).total_seconds() ss = self.starts_and_stops[-1] if ss['stop']: dt += (ss['stop'] - ss['start']).total_seconds() else: dt += (doublethink.utcnow() - ss['start']).total_seconds() return dt
Returns elapsed crawl time as a float in seconds. This metric includes all the time that a site was in active rotation, including any time it spent waiting for its turn to be brozzled. In contrast `Site.active_brozzling_time` only counts time when a brozzler worker claimed the site and was actively brozzling it.
def smartread(path):
    with open(path, "rb") as f:
        content = f.read()
        result = chardet.detect(content)
        return content.decode(result["encoding"])
Read text from file, automatically detect encoding. ``chardet`` required.
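A usage sketch; the file path is hypothetical:

text = smartread("legacy_export.csv")  # bytes decoded with the encoding chardet detects
print(text[:80])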
def powerDown(self, powerup, interface=None): if interface is None: for interface, priority in powerup._getPowerupInterfaces(): self.powerDown(powerup, interface) else: for cable in self.store.query(_PowerupConnector, AND(_PowerupConnector.item == self, _PowerupConnector.interface == unicode(qual(interface)), _PowerupConnector.powerup == powerup)): cable.deleteFromStore() return raise ValueError("Not powered up for %r with %r" % (interface, powerup))
Remove a powerup. If no interface is specified, and the type of the object being installed has a "powerupInterfaces" attribute (containing either a sequence of interfaces, or a sequence of (interface, priority) tuples), the target will be powered down with this object on those interfaces. If this object has a "__getPowerupInterfaces__" method, it will be called with an iterable of (interface, priority) tuples. The iterable of (interface, priority) tuples it returns will then be uninstalled. (Note particularly that if powerups are added to or removed from the collection described above between calls to powerUp and powerDown, more or fewer powerups may be removed than were installed.)
def remove_file_data(file_id, silent=True): try: f = FileInstance.get(file_id) if not f.writable: return f.delete() db.session.commit() f.storage().delete() except IntegrityError: if not silent: raise
Remove file instance and associated data. :param file_id: The :class:`invenio_files_rest.models.FileInstance` ID. :param silent: If ``True``, suppress any IntegrityError raised during removal. (Default: ``True``) :raises sqlalchemy.exc.IntegrityError: Raised if the database removal goes wrong and silent is set to ``False``.
def validate_properties(self): for name, property_type in self.property_types.items(): value = getattr(self, name) if property_type.supports_intrinsics and self._is_intrinsic_function(value): continue if value is None: if property_type.required: raise InvalidResourceException( self.logical_id, "Missing required property '{property_name}'.".format(property_name=name)) elif not property_type.validate(value, should_raise=False): raise InvalidResourceException( self.logical_id, "Type of property '{property_name}' is invalid.".format(property_name=name))
Validates that the required properties for this Resource have been populated, and that all properties have valid values. :returns: True if all properties are valid :rtype: bool :raises TypeError: if any properties are invalid
def shape(self): if self._shape is None: self._populate_from_rasterio_object(read_image=False) return self._shape
Raster shape.
def getInfoMutator(self): if self._infoMutator: return self._infoMutator infoItems = [] for sourceDescriptor in self.sources: if sourceDescriptor.layerName is not None: continue loc = Location(sourceDescriptor.location) sourceFont = self.fonts[sourceDescriptor.name] if sourceFont is None: continue if hasattr(sourceFont.info, "toMathInfo"): infoItems.append((loc, sourceFont.info.toMathInfo())) else: infoItems.append((loc, self.mathInfoClass(sourceFont.info))) bias, self._infoMutator = self.getVariationModel(infoItems, axes=self.serializedAxes, bias=self.newDefaultLocation()) return self._infoMutator
Returns an info mutator.
def read_login(collector, image, **kwargs): docker_api = collector.configuration["harpoon"].docker_api collector.configuration["authentication"].login(docker_api, image, is_pushing=False, global_docker=True)
Login to a docker registry with read permissions
def get_provider_links(self): if not bool(self._my_map['providerLinkIds']): raise errors.IllegalState('no providerLinkIds') mgr = self._get_provider_manager('RESOURCE') if not mgr.supports_resource_lookup(): raise errors.OperationFailed('Resource does not support Resource lookup') lookup_session = mgr.get_resource_lookup_session(proxy=getattr(self, "_proxy", None)) lookup_session.use_federated_bin_view() return lookup_session.get_resources_by_ids(self.get_provider_link_ids())
Gets the ``Resources`` representing the source of this asset in order from the most recent provider to the originating source. return: (osid.resource.ResourceList) - the provider chain raise: OperationFailed - unable to complete request *compliance: mandatory -- This method must be implemented.*
def match_serializers(self, serializers, default_media_type): return self._match_serializers_by_query_arg(serializers) or self.\ _match_serializers_by_accept_headers(serializers, default_media_type)
Choose serializer for a given request based on query arg or headers. Checks if query arg `format` (by default) is present and tries to match the serializer based on the arg value, by resolving the mimetype mapped to the arg value. Otherwise, chooses the serializer by retrieving the best quality `Accept` headers and matching its value (mimetype). :param serializers: Dictionary of serializers. :param default_media_type: The default media type. :returns: Best matching serializer based on `format` query arg first, then client `Accept` headers or None if no matching serializer.
def _get_flux_bounds(self, r_id, model, flux_limits, equation): if r_id not in flux_limits or flux_limits[r_id][0] is None: if equation.direction == Direction.Forward: lower = 0 else: lower = -model.default_flux_limit else: lower = flux_limits[r_id][0] if r_id not in flux_limits or flux_limits[r_id][1] is None: if equation.direction == Direction.Reverse: upper = 0 else: upper = model.default_flux_limit else: upper = flux_limits[r_id][1] if lower % 1 == 0: lower = int(lower) if upper % 1 == 0: upper = int(upper) return text_type(lower), text_type(upper)
Read reaction's limits to set up strings for limits in the output file.
def canonicalize(parsed_op): assert 'op' in parsed_op assert len(parsed_op['op']) == 2 if parsed_op['op'][1] == TRANSFER_KEEP_DATA: parsed_op['keep_data'] = True elif parsed_op['op'][1] == TRANSFER_REMOVE_DATA: parsed_op['keep_data'] = False else: raise ValueError("Invalid op '{}'".format(parsed_op['op'])) return parsed_op
Get the "canonical form" of this operation, putting it into a form where it can be serialized to form a consensus hash. This method is meant to preserve compatibility across blockstackd releases. For NAME_TRANSFER, this means: * add 'keep_data' flag
def delete_cookie(self, key, **kwargs):
    kwargs['max_age'] = -1
    kwargs['expires'] = 0
    self.set_cookie(key, '', **kwargs)
Delete a cookie. Be sure to use the same `domain` and `path` parameters as used to create the cookie.
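A hedged usage sketch, assuming `response` is an object exposing this cookie API; the cookie name and domain are hypothetical:

# Internally this sets an empty value with max_age=-1 and expires=0,
# which instructs the browser to discard the cookie.
response.delete_cookie("session_id", domain=".example.com", path="/")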
def compose(self, sources, client=None): client = self._require_client(client) query_params = {} if self.user_project is not None: query_params["userProject"] = self.user_project request = { "sourceObjects": [{"name": source.name} for source in sources], "destination": self._properties.copy(), } api_response = client._connection.api_request( method="POST", path=self.path + "/compose", query_params=query_params, data=request, _target_object=self, ) self._set_properties(api_response)
Concatenate source blobs into this one. If :attr:`user_project` is set on the bucket, bills the API request to that project. :type sources: list of :class:`Blob` :param sources: blobs whose contents will be composed into this blob. :type client: :class:`~google.cloud.storage.client.Client` or ``NoneType`` :param client: Optional. The client to use. If not passed, falls back to the ``client`` stored on the blob's bucket.
def filter_records(self, records): for record in records: try: filtered = self.filter_record(record) assert (filtered) if filtered.seq == record.seq: self.passed_unchanged += 1 else: self.passed_changed += 1 yield filtered except FailedFilter as e: self.failed += 1 v = e.value if self.listener: self.listener( 'failed_filter', record, filter_name=self.name, value=v)
Apply the filter to records
def fixPoint(self, plotterPoint, canvasPoint): 'adjust visibleBox.xymin so that canvasPoint is plotted at plotterPoint' self.visibleBox.xmin = canvasPoint.x - self.canvasW(plotterPoint.x-self.plotviewBox.xmin) self.visibleBox.ymin = canvasPoint.y - self.canvasH(plotterPoint.y-self.plotviewBox.ymin) self.refresh()
adjust visibleBox.xymin so that canvasPoint is plotted at plotterPoint
def _skip_frame(self): self._get_line() num_atoms = int(self._get_line()) if self.num_atoms is not None and self.num_atoms != num_atoms: raise ValueError("The number of atoms must be the same over the entire file.") for i in range(num_atoms+1): self._get_line()
Skip one frame
def _parameterize_string(raw): parts = [] s_index = 0 for match in _PARAMETER_PATTERN.finditer(raw): parts.append(raw[s_index:match.start()]) parts.append({u"Ref": match.group(1)}) s_index = match.end() if not parts: return GenericHelperFn(raw) parts.append(raw[s_index:]) return GenericHelperFn({u"Fn::Join": [u"", parts]})
Substitute placeholders in a string using CloudFormation references Args: raw (`str`): String to be processed. Byte strings are not supported; decode them before passing them to this function. Returns: `str` | :class:`troposphere.GenericHelperFn`: An expression with placeholders from the input replaced, suitable to be passed to Troposphere to be included in CloudFormation template. This will be the input string without modification if no substitutions are found, and a composition of CloudFormation calls otherwise.
def getresponse(self): if self.__response and self.__response.isclosed(): self.__response = None if self.__state != _CS_REQ_SENT or self.__response: raise ResponseNotReady(self.__state) if self.debuglevel > 0: response = self.response_class(self.sock, self.debuglevel, method=self._method) else: response = self.response_class(self.sock, method=self._method) response.begin() assert response.will_close != _UNKNOWN self.__state = _CS_IDLE if response.will_close: self.close() else: self.__response = response return response
Get the response from the server. If the HTTPConnection is in the correct state, returns an instance of HTTPResponse or of whatever object is returned by the response_class variable. If a request has not been sent or if a previous response has not been handled, ResponseNotReady is raised. If the HTTP response indicates that the connection should be closed, then it will be closed before the response is returned. When the connection is closed, the underlying socket is closed.
def default_channel_ops(nqubits): for gates in cartesian_product(TOMOGRAPHY_GATES.values(), repeat=nqubits): yield qt.tensor(*gates)
Generate the tomographic pre- and post-rotations of any number of qubits as qutip operators. :param int nqubits: The number of qubits to perform tomography on. :return: Qutip object corresponding to the tomographic rotation. :rtype: Qobj
def deserialize_basic(self, attr, data_type): if isinstance(attr, ET.Element): attr = attr.text if not attr: if data_type == "str": return '' else: return None if data_type == 'bool': if attr in [True, False, 1, 0]: return bool(attr) elif isinstance(attr, basestring): if attr.lower() in ['true', '1']: return True elif attr.lower() in ['false', '0']: return False raise TypeError("Invalid boolean value: {}".format(attr)) if data_type == 'str': return self.deserialize_unicode(attr) return eval(data_type)(attr)
Deserialize basic builtin data type from string. Will attempt to convert to str, int, float and bool. This function will also accept '1', '0', 'true' and 'false' as valid bool values. :param str attr: response string to be deserialized. :param str data_type: deserialization data type. :rtype: str, int, float or bool :raises: TypeError if string format is not valid.
def unwrap(self): if self.algorithm == 'rsa': return self['private_key'].parsed if self.algorithm == 'dsa': params = self['private_key_algorithm']['parameters'] return DSAPrivateKey({ 'version': 0, 'p': params['p'], 'q': params['q'], 'g': params['g'], 'public_key': self.public_key, 'private_key': self['private_key'].parsed, }) if self.algorithm == 'ec': output = self['private_key'].parsed output['parameters'] = self['private_key_algorithm']['parameters'] output['public_key'] = self.public_key return output
Unwraps the private key into an RSAPrivateKey, DSAPrivateKey or ECPrivateKey object :return: An RSAPrivateKey, DSAPrivateKey or ECPrivateKey object
def GetAPFSVolumeByPathSpec(self, path_spec): volume_index = apfs_helper.APFSContainerPathSpecGetVolumeIndex(path_spec) if volume_index is None: return None return self._fsapfs_container.get_volume(volume_index)
Retrieves an APFS volume for a path specification. Args: path_spec (PathSpec): path specification. Returns: pyfsapfs.volume: an APFS volume or None if not available.
def selection_r(acquisition_function, samples_y_aggregation, x_bounds, x_types, regressor_gp, num_starting_points=100, minimize_constraints_fun=None): minimize_starting_points = [lib_data.rand(x_bounds, x_types) \ for i in range(0, num_starting_points)] outputs = selection(acquisition_function, samples_y_aggregation, x_bounds, x_types, regressor_gp, minimize_starting_points, minimize_constraints_fun=minimize_constraints_fun) return outputs
Select a value using randomly generated starting points.
def _get_vlanid(self, context): segment = context.bottom_bound_segment if segment and self.check_segment(segment): return segment.get(api.SEGMENTATION_ID)
Returns vlan_id associated with a bound VLAN segment.
def get(self, id=None, name=None): if not (id is None) ^ (name is None): raise ValueError("Either id or name must be set (but not both!)") if id is not None: return super(TaskQueueManager, self).get(id=id) return self.list(filters={"name": name})[0]
Get a task queue. Either the id xor the name of the task type must be specified. Args: id (int, optional): The id of the task type to get. name (str, optional): The name of the task type to get. Returns: :class:`saltant.models.task_queue.TaskQueue`: A task queue model instance representing the task queue requested. Raises: ValueError: Neither id nor name were set *or* both id and name were set.
def __walk_rec(self, top, rec): if not rec or os.path.islink(top) or not os.path.isdir(top): yield top else: for root, dirs, files in os.walk(top): yield root
Yields each subdirectory of top, doesn't follow symlinks. If rec is false, only yield top. @param top: root directory. @type top: string @param rec: recursive flag. @type rec: bool @return: path of one subdirectory. @rtype: string
def intSize(self, obj):
    if obj < 0:
        # Negative integers are always stored in 8 bytes.
        return 8
    elif obj <= 0xFF:
        return 1
    elif obj <= 0xFFFF:
        return 2
    elif obj <= 0xFFFFFFFF:
        return 4
    elif obj <= 0x7FFFFFFFFFFFFFFF:
        return 8
    elif obj <= 0xffffffffffffffff:
        return 16
    else:
        raise InvalidPlistException("Core Foundation can't handle integers with size greater than 8 bytes.")
Returns the number of bytes necessary to store the given integer.
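A few worked examples following the branches above; `writer` stands in for a hypothetical instance of the plist writer class that defines this method:

assert writer.intSize(200) == 1       # fits in one byte
assert writer.intSize(70000) == 4     # above 0xFFFF, below 0xFFFFFFFF
assert writer.intSize(-1) == 8        # negatives always take 8 bytes
assert writer.intSize(2**63) == 16    # above 2**63 - 1 falls back to 16 bytes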
def _final_frame_length(header, final_frame_bytes): final_frame_length = 4 final_frame_length += 4 final_frame_length += header.algorithm.iv_len final_frame_length += 4 final_frame_length += final_frame_bytes final_frame_length += header.algorithm.auth_len return final_frame_length
Calculates the length of a final ciphertext frame, given a complete header and the number of bytes of ciphertext in the final frame. :param header: Complete message header object :type header: aws_encryption_sdk.structures.MessageHeader :param int final_frame_bytes: Bytes of ciphertext in the final frame :rtype: int