Dataset preview columns: "Unnamed: 0" (int64, values 0–389k), "code" (string, lengths 26–79.6k), "docstring" (string, lengths 1–46.9k).
377,300
def generate_phase_2(phase_1, dim=40):
    phase_2 = []
    for i in range(dim):
        indices = [numpy.random.randint(0, dim) for i in range(4)]
        phase_2.append(numpy.prod([phase_1[i] for i in indices]))
    return phase_2
The second step in creating datapoints in the Poirazi & Mel model. This takes a phase 1 vector, and creates a phase 2 vector where each point is the product of four elements of the phase 1 vector, randomly drawn with replacement.
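A minimal usage sketch of the function above (not part of the dataset entry); it assumes numpy is imported and uses a length-40 vector of random ±1 values as a hypothetical phase-1 input.

import numpy

phase_1 = numpy.random.choice([-1, 1], size=40)  # hypothetical phase-1 vector
phase_2 = generate_phase_2(phase_1, dim=40)
print(len(phase_2))  # 40 entries, each a product of four randomly drawn phase-1 elements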
377,301
def create_endpoints_csv_file(self, timeout=-1):
    uri = "{}/endpoints/".format(self.data["uri"])
    return self._helper.do_post(uri, {}, timeout, None)
Creates an endpoints CSV file for a SAN. Args: timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation in OneView, just stops waiting for its completion. Returns: dict: Endpoint CSV File Response.
377,302
def add_pic(self, id_, name, desc, rId, x, y, cx, cy): pic = CT_Picture.new_pic(id_, name, desc, rId, x, y, cx, cy) self.insert_element_before(pic, ) return pic
Append a ``<p:pic>`` shape to the group/shapetree having properties as specified in call.
377,303
def getModelIDFromParamsHash(self, paramsHash): entryIdx = self. _paramsHashToIndexes.get(paramsHash, None) if entryIdx is not None: return self._allResults[entryIdx][] else: return None
Return the modelID of the model with the given paramsHash, or None if not found. Parameters: --------------------------------------------------------------------- paramsHash: paramsHash to look for retval: modelId, or None if not found
377,304
def add_ip_address(self, ip_address, sync=True): LOGGER.debug("OSInstance.add_ip_address") if not sync: self.ip_address_2_add.append(ip_address) else: if ip_address.id is None: ip_address.save() if self.id is not None and ip_address.id is not None: params = { : self.id, : ip_address.id } args = {: , : , : params} response = OSInstanceService.requester.call(args) if response.rc != 0: LOGGER.warning( + self.name + + str(response.response_content) + + str(response.error_message) + " (" + str(response.rc) + ")" ) else: self.ip_address_ids.append(ip_address.id) ip_address.ipa_os_instance_id = self.id else: LOGGER.warning( + self.name + + ip_address.ipAddress + )
Add an IP address to this OS instance. :param ip_address: the IP address to add to this OS instance :param sync: if sync=True (default), synchronize with the Ariane server. If sync=False, add the IP address object to the list to be added on the next save(). :return:
377,305
def _maximization_step(X, posteriors):
    sum_post_proba = np.sum(posteriors, axis=0)
    prior_proba = sum_post_proba / (sum_post_proba.sum() + Epsilon)
    means = np.dot(posteriors.T, X) / (sum_post_proba[:, np.newaxis] + Epsilon)
    n_components = posteriors.shape[1]
    n_features = X.shape[1]
    covars = np.empty(shape=(n_components, n_features, n_features), dtype=float)
    for i in range(n_components):
        post_i = posteriors[:, i]
        mean_i = means[i]
        diff_i = X - mean_i
        with np.errstate(under='ignore'):  # suppress harmless underflow warnings
            covar_i = np.dot(post_i * diff_i.T, diff_i) / (post_i.sum() + Epsilon)
        covars[i] = covar_i + Lambda * np.eye(n_features)
    _validate_params(prior_proba, means, covars)
    return prior_proba, means, covars
Update class parameters as below: priors: P(w_i) = sum_x P(w_i|x), then normalized to lie in [0, 1]; class means: center_w_i = sum_x P(w_i|x)*x / sum_x P(w_i|x)
377,306
def set_stylesheet(self, subreddit, stylesheet): subreddit = six.text_type(subreddit) data = {: subreddit, : stylesheet, : } self.evict(self.config[].format(subreddit=subreddit)) return self.request_json(self.config[], data=data)
Set stylesheet for the given subreddit. :returns: The json response from the server.
377,307
def keys(self): if self._keys is None: self._keys = KeyList(self._version, fleet_sid=self._solution[], ) return self._keys
Access the keys :returns: twilio.rest.preview.deployed_devices.fleet.key.KeyList :rtype: twilio.rest.preview.deployed_devices.fleet.key.KeyList
377,308
def validate_basic_smoother_resid(): x, y = sort_data(*smoother_friedman82.build_sample_smoother_problem_friedman82()) plt.figure() for span in smoother.DEFAULT_SPANS: my_smoother = smoother.perform_smooth(x, y, span) _friedman_smooth, resids = run_friedman_smooth(x, y, span) plt.plot(x, my_smoother.cross_validated_residual, , label=.format(span)) plt.plot(x, resids, , label=.format(span)) finish_plot()
Compare residuals.
377,309
def read_field_report(path, data_flag="*DATA", meta_data_flag="*METADATA"):
    text = open(path).read()
    mdpos = text.find(meta_data_flag)
    dpos = text.find(data_flag)
    mdata = io.StringIO("\n".join(text[mdpos:dpos].split("\n")[1:]))
    data = io.StringIO("\n".join(text[dpos:].split("\n")[1:]))
    data = pd.read_csv(data, index_col=0)
    data = data.groupby(data.index).mean()
    mdata = pd.read_csv(mdata, sep="=", header=None, index_col=0)[1]
    mdata = mdata.to_dict()
    out = {}
    out["step_num"] = int(mdata["step_num"])
    out["step_label"] = mdata["step_label"]
    out["frame"] = int(mdata["frame"])
    out["frame_value"] = float(mdata["frame_value"])
    out["part"] = mdata["instance"]
    position_map = {"NODAL": "node",
                    "ELEMENT_CENTROID": "element",
                    "WHOLE_ELEMENT": "element"}
    out["position"] = position_map[mdata["position"]]
    out["label"] = mdata["label"]
    out["data"] = data
    field_class = getattr(argiope.mesh, mdata["argiope_class"])
    return field_class(**out)
Reads a field output report.
377,310
def matrices_compliance(dsm, complete_mediation_matrix): matrix = dsm.data rows_dep_matrix = len(matrix) cols_dep_matrix = len(matrix[0]) rows_med_matrix = len(complete_mediation_matrix) cols_med_matrix = len(complete_mediation_matrix[0]) if (rows_dep_matrix != rows_med_matrix or cols_dep_matrix != cols_med_matrix): raise DesignStructureMatrixError( ) discrepancy_found = False message = [] for i in range(0, rows_dep_matrix): for j in range(0, cols_dep_matrix): if ((complete_mediation_matrix[i][j] == 0 and matrix[i][j] > 0) or (complete_mediation_matrix[i][j] == 1 and matrix[i][j] < 1)): discrepancy_found = True message.append( % ( i, j, dsm.entities[i], dsm.entities[j], matrix[i][j], complete_mediation_matrix[i][j])) message = .join(message) return not discrepancy_found, message
Check if matrix and its mediation matrix are compliant. Args: dsm (:class:`DesignStructureMatrix`): the DSM to check. complete_mediation_matrix (list of list of int): 2-dim array Returns: bool: True if compliant, else False
377,311
def from_mult_iters(cls, name=None, idx=None, **kwargs):
    if not name:
        name = 'table'
    lengths = [len(v) for v in kwargs.values()]
    if len(set(lengths)) != 1:
        raise ValueError('Iterables must all be the same length')
    if not idx:
        raise ValueError('Must provide "idx" key')
    index = kwargs.pop(idx)
    vega_vals = []
    for k, v in sorted(kwargs.items()):
        for idx, val in zip(index, v):
            value = {}
            value['idx'] = idx
            value['col'] = k
            value['val'] = val
            vega_vals.append(value)
    return cls(name, values=vega_vals)
Load values from multiple iters Parameters ---------- name : string, default None Name of the data set. If None (default), the name will be set to ``'table'``. idx: string, default None Iterable to use for the data index **kwargs : dict of iterables The ``values`` field will contain dictionaries with keys for each of the iterables provided. For example, d = Data.from_iters(idx='x', x=[0, 1, 5], y=(10, 20, 30)) would result in ``d`` having a ``values`` field with [{'idx': 0, 'col': 'y', 'val': 10}, {'idx': 1, 'col': 'y', 'val': 20} If the iterables are not the same length, then ValueError is raised.
377,312
def asDateTime(self): text = str(self) if text.endswith(): tzinfo = TimeMixIn.UTC text = text[:-1] elif in text or in text: if in text: text, plusminus, tz = string.partition(text, ) else: text, plusminus, tz = string.partition(text, ) if self._shortTZ and len(tz) == 2: tz += if len(tz) != 4: raise error.PyAsn1Error( % tz) try: minutes = int(tz[:2]) * 60 + int(tz[2:]) if plusminus == : minutes *= -1 except ValueError: raise error.PyAsn1Error( % self) tzinfo = TimeMixIn.FixedOffset(minutes, ) else: tzinfo = None if in text or in text: if in text: text, _, ms = string.partition(text, ) else: text, _, ms = string.partition(text, ) try: ms = int(ms) * 1000 except ValueError: raise error.PyAsn1Error( % self) else: ms = 0 if self._optionalMinutes and len(text) - self._yearsDigits == 6: text += elif len(text) - self._yearsDigits == 8: text += try: dt = dateandtime.strptime(text, self._yearsDigits == 4 and or ) except ValueError: raise error.PyAsn1Error( % self) return dt.replace(microsecond=ms, tzinfo=tzinfo)
Create :py:class:`datetime.datetime` object from a |ASN.1| object. Returns ------- : new instance of :py:class:`datetime.datetime` object
377,313
def extract_schemas_from_file(source_path):
    logging.info("Extracting schemas from %s", source_path)
    try:
        with open(source_path, 'r') as source_file:
            source = source_file.read()
    except (FileNotFoundError, PermissionError) as e:
        logging.error("Cannot extract schemas: %s", e.strerror)
    else:
        try:
            schemas = extract_schemas_from_source(source, source_path)
        except SyntaxError as e:
            logging.error("Cannot extract schemas: %s", str(e))
        else:
            logging.info(
                "Extracted %d %s", len(schemas),
                "schema" if len(schemas) == 1 else "schemas")
            return schemas
Extract schemas from 'source_path'. :returns: a list of ViewSchema objects on success, None if no schemas could be extracted.
377,314
def local_bind_hosts(self): self._check_is_started() return [_server.local_host for _server in self._server_list if _server.local_host is not None]
Return a list containing the IP addresses listening for the tunnels
377,315
def fill_from_simbad (self, ident, debug=False): info = get_simbad_astrometry_info (ident, debug=debug) posref = for k, v in six.iteritems (info): if in v: continue if k == : self.ra = float (v) * D2R elif k == : self.dec = float (v) * D2R elif k == : a = v.split () self.pos_u_maj = float (a[0]) * A2R * 1e-3 self.pos_u_min = float (a[1]) * A2R * 1e-3 self.pos_u_pa = float (a[2]) * D2R elif k == : posref = v elif k == : self.promo_ra = float (v) elif k == : self.promo_dec = float (v) elif k == : a = v.split () self.promo_u_maj = float (a[0]) self.promo_u_min = float (a[1]) self.promo_u_pa = float (a[2]) * D2R elif k == : self.parallax = float (v) elif k == : self.u_parallax = float (v) elif k == : self.vradial = float (v) elif k == : self.u_vradial = float (v) if self.ra is None: raise Exception ( % ident) if self.u_parallax == 0: self.u_parallax = None if self.u_vradial == 0: self.u_vradial = None if posref == : self.pos_epoch = get_2mass_epoch (self.ra, self.dec, debug) return self
Fill in astrometric information using the Simbad web service. This uses the CDS Simbad web service to look up astrometric information for the source name *ident* and fills in attributes appropriately. Values from Simbad are not always reliable. Returns *self*.
377,316
def __calculate_order(self, node_dict): if len(node_dict.keys()) != len(set(node_dict.keys())): raise DependencyTreeException("Duplicate Keys Exist in node dictionary!") valid_order = [node for node, dependencies in node_dict.items() if len(dependencies) == 0] remaining_nodes = [node for node in node_dict.keys() if node not in valid_order] while len(remaining_nodes) > 0: node_added = False for node in remaining_nodes: dependencies = [d for d in node_dict[node] if d not in valid_order] if len(dependencies) == 0: valid_order.append(node) remaining_nodes.remove(node) node_added = True if not node_added: if invalid_dependency not in remaining_nodes: raise DependencyTreeException( "Missing dependency! One or more of ({dependency}) are missing for {dependant}.".format( dependant=invalid_node, dependency=invalid_dependency)) else: raise DependencyTreeException("The dependency %s is cyclic or dependent on a cyclic dependency" % invalid_dependency) return valid_order
Determine a valid ordering of the nodes in which a node is not called before all of its dependencies. Raise an error if there is a cycle or nodes are missing.
377,317
def dict_sort(d, k): return sorted(d.copy(), key=lambda i: i[k])
Sort a dictionary list by key :param d: dictionary list :param k: key :return: sorted dictionary list
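A small usage sketch for the function above: sorting a list of dictionaries by the "age" key.

people = [{"name": "bea", "age": 31}, {"name": "al", "age": 29}]
print(dict_sort(people, "age"))
# [{'name': 'al', 'age': 29}, {'name': 'bea', 'age': 31}]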
377,318
def remove_backup(name): * if name not in list_backups(): log.debug(, name) return True ps_cmd = [, , "".format(name)] cmd_ret = _srvmgr(ps_cmd) if cmd_ret[] != 0: msg = \ .format(name, cmd_ret[]) raise CommandExecutionError(msg) return name not in list_backups()
Remove an IIS Configuration backup from the System. .. versionadded:: 2017.7.0 Args: name (str): The name of the backup to remove Returns: bool: True if successful, otherwise False CLI Example: .. code-block:: bash salt '*' win_iis.remove_backup backup_20170209
377,319
def to_segwizard(segs, target, header=True, coltype=LIGOTimeGPS): if isinstance(target, string_types): with open(target, ) as fobj: return to_segwizard(segs, fobj, header=header, coltype=coltype) if header: print(, file=target) for i, seg in enumerate(segs): a = coltype(seg[0]) b = coltype(seg[1]) c = float(b - a) print( .join(map(str, (i, a, b, c))), file=target, )
Write the given `SegmentList` to a file in SegWizard format. Parameters ---------- segs : :class:`~gwpy.segments.SegmentList` The list of segments to write. target : `file`, `str` An open file, or file path, to which to write. header : `bool`, optional Print a column header into the file, default: `True`. coltype : `type`, optional The numerical type in which to cast times before printing. Notes ----- This method is adapted from original code written by Kipp Cannon and distributed under GPLv3.
377,320
def map(func, items, pool_size=10):
    with OrderedPool(func, pool_size) as pool:
        for count, item in enumerate(items):
            pool.put(item)
        for i in xrange(count + 1):
            yield pool.get()
a parallelized work-alike to the built-in ``map`` function this function works by creating an :class:`OrderedPool` and placing all the arguments in :meth:`put<OrderedPool.put>` calls, then yielding items produced by the pool's :meth:`get<OrderedPool.get>` method. :param func: the mapper function to use :type func: function :param items: the items to use as the mapper's arguments :type items: iterable :param pool_size: the number of workers for the pool -- this amounts to the concurrency with which the map is accomplished (default 10) :type pool_size: int :returns: a lazy iterator (like python3's map or python2's itertools.imap) over the results of the mapping
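A usage sketch, assuming the OrderedPool class referenced above is importable; square is just an illustrative mapper function.

def square(x):
    return x * x

# Results are yielded lazily and in input order, computed by pool_size workers.
for result in map(square, range(5), pool_size=3):
    print(result)  # 0, 1, 4, 9, 16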
377,321
def cmd(send, msg, args): session = args[] parser = arguments.ArgParser(args[]) parser.add_argument(, action=) parser.add_argument(, nargs=) parser.add_argument(, nargs=, type=int, default=0) parser.add_argument(, nargs=) group = parser.add_mutually_exclusive_group() group.add_argument(, action=) group.add_argument(, action=) group.add_argument(, , type=int) group.add_argument(, type=int) group.add_argument(, nargs=) if not msg: send(do_get_quote(session)) return try: cmdargs = parser.parse_args(msg) except arguments.ArgumentException as e: send(str(e)) return if cmdargs.add: if args[] == : send("You want everybody to know about your witty sayings, right?") else: if cmdargs.nick is None: send() elif not cmdargs.quote: send() else: isadmin = args[](args[]) or not args[][][] approved = cmdargs.approve or not args[][][] do_add_quote(cmdargs.nick, " ".join(cmdargs.quote), session, isadmin, approved, send, args) elif cmdargs.list: send(do_list_quotes(session, args[][][])) elif cmdargs.delete: send(do_delete_quote(args, session, cmdargs.delete)) elif cmdargs.edit: if args[](args[]): send(do_update_quote(session, cmdargs.edit, cmdargs.nick, cmdargs.quote)) else: send("You arenconfigcorenickregexInvalid nick %s.' % msg) else: send(get_quotes_nick(session, msg))
Handles quotes. Syntax: {command} <number|nick>, !quote --add <quote> --nick <nick> (--approve), !quote --list, !quote --delete <number>, !quote --edit <number> <quote> --nick <nick> !quote --search (--offset <num>) <number>
377,322
def _convert_vpathlist(input_obj):
    vpl = pgmagick.VPathList()
    for obj in input_obj:
        obj = pgmagick.PathMovetoAbs(pgmagick.Coordinate(obj[0], obj[1]))
        vpl.append(obj)
    return vpl
convert from 'list' or 'tuple' object to pgmagick.VPathList. :type input_obj: list or tuple
377,323
def _ReloadArtifacts(self):
    self._artifacts = {}
    self._LoadArtifactsFromFiles(self._sources.GetAllFiles())
    self.ReloadDatastoreArtifacts()
Load artifacts from all sources.
377,324
def aggregate(self, key, aggregate, start=None, end=None, namespace=None, percentile=None): return self.make_context(key=key, aggregate=aggregate, start=start, end=end, namespace=namespace, percentile=percentile).aggregate()
Get an aggregate of all gauge data stored in the specified date range
377,325
def reprovision(vm, image, key=): vm** ret = {} if key not in [, , ]: ret[] = return ret vm = lookup(.format(key, vm), one=True) if in vm: return vm if image not in __salt__[](): ret[] = .format(image) return ret cmd = six.text_type().format( uuid=salt.utils.stringutils.to_unicode(vm), image=_quote_args(salt.utils.json.dumps({: image})) ) res = __salt__[](cmd, python_shell=True) retcode = res[] if retcode != 0: ret[] = res[] if in res else _exit_status(retcode) return ret return True
Reprovision a vm vm : string vm to be reprovisioned image : string uuid of new image key : string [uuid|alias|hostname] value type of 'vm' parameter CLI Example: .. code-block:: bash salt '*' vmadm.reprovision 186da9ab-7392-4f55-91a5-b8f1fe770543 c02a2044-c1bd-11e4-bd8c-dfc1db8b0182 salt '*' vmadm.reprovision nacl c02a2044-c1bd-11e4-bd8c-dfc1db8b0182 key=alias
377,326
def refresh_access_token(self, refresh_token):
    request = self._get_request()
    response = request.post(self.OAUTH_TOKEN_URL, {
        "grant_type": "refresh_token",
        "refresh_token": refresh_token
    })
    self.auth = HSAccessTokenAuth.from_response(response)
    return self.auth.access_token
Refreshes the current access token. Gets a new access token, updates client auth and returns it. Args: refresh_token (str): Refresh token to use Returns: The new access token
377,327
def ms_bot_framework(self) -> list: ms_bf_controls = [control.ms_bot_framework() for control in self.controls] return ms_bf_controls
Returns list of MS Bot Framework compatible states of the RichMessage instance nested controls. Returns: ms_bf_controls: MS Bot Framework representation of RichMessage instance nested controls.
377,328
def _get_file_handler(package_data_spec, data_files_spec):
    class FileHandler(BaseCommand):
        def run(self):
            package_data = self.distribution.package_data
            package_spec = package_data_spec or dict()
            for (key, patterns) in package_spec.items():
                package_data[key] = _get_package_data(key, patterns)
            self.distribution.data_files = _get_data_files(
                data_files_spec, self.distribution.data_files
            )
    return FileHandler
Get a package_data and data_files handler command.
377,329
def write_collided_alias(collided_alias_dict): open_mode = if os.path.exists(GLOBAL_COLLIDED_ALIAS_PATH) else with open(GLOBAL_COLLIDED_ALIAS_PATH, open_mode) as collided_alias_file: collided_alias_file.truncate() collided_alias_file.write(json.dumps(collided_alias_dict))
Write the collided aliases string into the collided alias file.
377,330
def setParams(self, minSupport=0.3, minConfidence=0.8, itemsCol="items", predictionCol="prediction", numPartitions=None): kwargs = self._input_kwargs return self._set(**kwargs)
setParams(self, minSupport=0.3, minConfidence=0.8, itemsCol="items", \ predictionCol="prediction", numPartitions=None)
377,331
def decompress(data, format=FORMAT_AUTO, memlimit=None, filters=None):
    results = []
    while True:
        decomp = LZMADecompressor(format, memlimit, filters)
        try:
            res = decomp.decompress(data)
        except LZMAError:
            if results:
                break  # leftover data is not a valid stream; ignore it
            else:
                raise  # error on the first iteration; bail out
        results.append(res)
        if not decomp.eof:
            raise LZMAError("Compressed data ended before the "
                            "end-of-stream marker was reached")
        data = decomp.unused_data
        if not data:
            break
    return b"".join(results)
Decompress a block of data. Refer to LZMADecompressor's docstring for a description of the optional arguments *format*, *check* and *filters*. For incremental decompression, use a LZMADecompressor object instead.
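A usage sketch for the backport function above; the standard-library lzma module is used here only to produce a valid compressed test input.

import lzma

compressed = lzma.compress(b"hello world")
assert decompress(compressed) == b"hello world"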
377,332
def store(self, moments):
    if len(self.storage) == self.nsave:  # merge into the tail if storage is full
        self.storage[-1].combine(moments, mean_free=self.remove_mean)
    else:  # append otherwise
        self.storage.append(moments)
    # merge tail entries while possible
    while self._can_merge_tail():
        M = self.storage.pop()
        self.storage[-1].combine(M, mean_free=self.remove_mean)
Store object X with weight w
377,333
def get_new_project_name(self, project_name): timestamp_str = datetime.datetime.utcnow().strftime() return "{} {}".format(project_name, timestamp_str)
Return a unique project name for the copy. :param project_name: str: name of project we will copy :return: str
377,334
def addAttachment(self, oid, file_path): if self.hasAttachments == True: attachURL = self._url + "/%s/addAttachment" % oid params = {:} parsed = urlparse.urlparse(attachURL) files = {: file_path} res = self._post(url=attachURL, param_dict=params, files=files, securityHandler=self._securityHandler, proxy_port=self._proxy_port, proxy_url=self._proxy_url) return self._unicode_convert(res) else: return "Attachments are not supported for this feature service."
Adds an attachment to a feature service Input: oid - string - OBJECTID value to add attachment to file_path - string - path to file Output: JSON Response
377,335
def get_cgroup_item(self, key):
    value = _lxc.Container.get_cgroup_item(self, key)
    if value is False:
        return False
    else:
        return value.rstrip("\n")
Returns the value for a given cgroup entry. A list is returned when multiple values are set.
377,336
def get_resource_agent_assignment_session_for_bin(self, bin_id):
    if not self.supports_resource_agent_assignment():
        raise errors.Unimplemented()
    return sessions.ResourceAgentAssignmentSession(bin_id, runtime=self._runtime)
Gets a resource agent session for the given bin. arg: bin_id (osid.id.Id): the ``Id`` of the bin return: (osid.resource.ResourceAgentAssignmentSession) - a ``ResourceAgentAssignmentSession`` raise: NotFound - ``bin_id`` not found raise: NullArgument - ``bin_id`` is ``null`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_resource_agent_assignment()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_resource_agent_assignment()`` and ``supports_visible_federation()`` are ``true``.*
377,337
def _ensure_coroutine_function(func):
    if asyncio.iscoroutinefunction(func):
        return func
    else:
        @asyncio.coroutine
        def coroutine_function(evt):
            func(evt)
            yield
        return coroutine_function
Return a coroutine function. func: either a coroutine function or a regular function Note a coroutine function is not a coroutine!
377,338
def run(self, server=None, host=None, port=None, enable_pretty_logging=True):
    if enable_pretty_logging:
        from werobot.logger import enable_pretty_logging
        enable_pretty_logging(self.logger)
    if server is None:
        server = self.config["SERVER"]
    if host is None:
        host = self.config["HOST"]
    if port is None:
        port = self.config["PORT"]
    try:
        self.wsgi.run(server=server, host=host, port=port)
    except KeyboardInterrupt:
        exit(0)
运行 WeRoBot。 :param server: 传递给 Bottle 框架 run 方法的参数,详情见\ `bottle 文档 <https://bottlepy.org/docs/dev/deployment.html#switching-the-server-backend>`_ :param host: 运行时绑定的主机地址 :param port: 运行时绑定的主机端口 :param enable_pretty_logging: 是否开启 log 的输出格式优化
377,339
def QA_fetch_get_stock_block(ip=None, port=None): ip, port = get_mainmarket_ip(ip, port) api = TdxHq_API() with api.connect(ip, port): data = pd.concat([api.to_df(api.get_and_parse_block_info("block_gn.dat")).assign(type=), api.to_df(api.get_and_parse_block_info( "block.dat")).assign(type=), api.to_df(api.get_and_parse_block_info( "block_zs.dat")).assign(type=), api.to_df(api.get_and_parse_block_info("block_fg.dat")).assign(type=)]) if len(data) > 10: return data.assign(source=).drop([, ], axis=1).set_index(, drop=False, inplace=False).drop_duplicates() else: QA_util_log_info()
板块数据
377,340
def MaskSolveSlow(A, b, w=5, progress=True, niter=None):
    N = b.shape[0]
    if niter is None:
        niter = N - w + 1
    X = np.empty((niter, N - w))
    for n in prange(niter):
        mask = np.arange(n, n + w)
        An = np.delete(np.delete(A, mask, axis=0), mask, axis=1)
        Un = cholesky(An)
        bn = np.delete(b, mask)
        X[n] = cho_solve((Un, False), bn)
    return X
Identical to `MaskSolve`, but computes the solution the brute-force way.
377,341
def snip(tag="",start=-2,write_date=True): import IPython i = IPython.get_ipython() last_history = i.history_manager.get_range(start=start,stop=start+1,output=True) with open("ipython_history.py",) as output_file: for l in last_history: global _session_description output_file.write(+(*80)+) if _session_description != "": output_file.write(+_lines_as_comments(_session_description)+) if tag != "": output_file.write(_lines_as_comments(tag)+) if write_date: import datetime output_file.write(+datetime.datetime.now().isoformat()+) output_file.write(+str(l[1])++l[2][0]) _last_inputs.append(l[2][0]) _tagged_inputs[tag] = _tagged_inputs.get(tag,[]) _tagged_inputs[tag].append(l[2][0]) output_file.write(+str(l[1])++_lines_as_comments(repr(l[2][1])))
This function records a previously execute notebook cell into a file (default: ipython_history.py) a tag can be added to sort the cell `start` defines which cell in the history to record. Default is -2, ie. the one executed previously to the current one.
377,342
def remove_class(cls, *args):
    for cls2 in args:
        try:
            del cls.classes[cls2.__name__]
        except KeyError:
            pass
Remove classes from the group. Parameters ---------- *args : `type` Classes to remove.
377,343
def asn1_generaltime_to_seconds(timestr):
    res = None
    timeformat = "%Y%m%d%H%M%S"
    try:
        # "Z" suffix form (UTC); suffix reconstructed from the docstring
        res = datetime.strptime(timestr, timeformat + "Z")
    except ValueError:
        try:
            # "+hhmm"/"-hhmm" offset form; suffix reconstructed from the docstring
            res = datetime.strptime(timestr, timeformat + "%z")
        except ValueError:
            pass
    return res
The given string has one of the following formats YYYYMMDDhhmmssZ YYYYMMDDhhmmss+hhmm YYYYMMDDhhmmss-hhmm @return: a datetime object or None on error
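A brief usage sketch of the parser above, assuming the elided format suffixes are the "Z" and "%z" variants described in the docstring (Python 3 strptime handles the offset form):

print(asn1_generaltime_to_seconds("20231231235959Z"))      # datetime(2023, 12, 31, 23, 59, 59)
print(asn1_generaltime_to_seconds("20231231235959+0100"))  # offset-aware datetime
print(asn1_generaltime_to_seconds("not-a-timestamp"))      # None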
377,344
def set_remote_config(experiment_config, port, config_file_name): request_data = dict() request_data[] = experiment_config[] if request_data[]: for i in range(len(request_data[])): if isinstance(request_data[][i].get(), int): request_data[][i][] = str(request_data[][i].get()) response = rest_put(cluster_metadata_url(port), json.dumps(request_data), REST_TIME_OUT) err_message = if not response or not check_response(response): if response is not None: err_message = response.text _, stderr_full_path = get_log_path(config_file_name) with open(stderr_full_path, ) as fout: fout.write(json.dumps(json.loads(err_message), indent=4, sort_keys=True, separators=(, ))) return False, err_message result, message = setNNIManagerIp(experiment_config, port, config_file_name) if not result: return result, message return set_trial_config(experiment_config, port, config_file_name), err_message
Call setClusterMetadata to pass trial
377,345
def get_ranking(self, alt):
    if self.alts_to_ranks is None:
        raise ValueError("Aggregate ranking must be created first")
    try:
        rank = self.alts_to_ranks[alt]
        return rank
    except KeyError:
        raise KeyError("No alternative \"{}\" found in ".format(str(alt)) +
                       "the aggregate ranking")
Description: Returns the ranking of a given alternative in the computed aggregate ranking. An error is thrown if the alternative does not exist. The ranking is the index in the aggregate ranking, which is 0-indexed. Parameters: alt: the key that represents an alternative
377,346
def SaltAndPepper(p=0, per_channel=False, name=None, deterministic=False, random_state=None):
    if name is None:
        name = "Unnamed%s" % (ia.caller_name(),)
    return ReplaceElementwise(
        mask=p,
        replacement=iap.Beta(0.5, 0.5) * 255,
        per_channel=per_channel,
        name=name,
        deterministic=deterministic,
        random_state=random_state
    )
Adds salt and pepper noise to an image, i.e. some white-ish and black-ish pixels. dtype support:: See ``imgaug.augmenters.arithmetic.ReplaceElementwise``. Parameters ---------- p : float or tuple of float or list of float or imgaug.parameters.StochasticParameter, optional Probability of changing a pixel to salt/pepper noise. * If a float, then that value will be used for all images as the probability. * If a tuple ``(a, b)``, then a probability will be sampled per image from the range ``a <= x <= b``. * If a list, then a random value will be sampled from that list per image. * If a StochasticParameter, then this parameter will be used as the *mask*, i.e. it is expected to contain values between 0.0 and 1.0, where 1.0 means that salt/pepper is to be added at that location. per_channel : bool or float, optional Whether to use the same value for all channels (False) or to sample a new value for each channel (True). If this value is a float ``p``, then for ``p`` percent of all images `per_channel` will be treated as True, otherwise as False. name : None or str, optional See :func:`imgaug.augmenters.meta.Augmenter.__init__`. deterministic : bool, optional See :func:`imgaug.augmenters.meta.Augmenter.__init__`. random_state : None or int or numpy.random.RandomState, optional See :func:`imgaug.augmenters.meta.Augmenter.__init__`. Examples -------- >>> aug = iaa.SaltAndPepper(0.05) Replaces 5 percent of all pixels with salt/pepper.
377,347
def next_page(self):
    for link in self.links:
        if link.next:
            return self._load(link.next)
    raise PaginationError()
Fetches next result set. :return: VolumeCollection object.
377,348
def _is_not_archived(sysmeta_pyxb): if _is_archived(sysmeta_pyxb): raise d1_common.types.exceptions.InvalidSystemMetadata( 0, .format( d1_common.xml.get_req_val(sysmeta_pyxb.identifier) ), identifier=d1_common.xml.get_req_val(sysmeta_pyxb.identifier), )
Assert that ``sysmeta_pyxb`` does not have have the archived flag set.
377,349
def write_event(self, *args): num_args = len(args) if not (1 <= num_args <= 3): raise ValueError() if num_args == 3: time, code, text = args elif num_args == 1: code = args[0] time = text = None elif isinstance(args[0], (datetime.time, datetime.datetime)): time, code = args text = None else: code, text = args time = None if time is None: time = datetime.datetime.utcnow() if not patterns.THREE_LETTER_CODE.match(code): raise ValueError() record = self.format_time(time) record += code if text: record += text self.write_record(, record)
Write an event record:: writer.write_event(datetime.time(12, 34, 56), 'PEV') # -> B123456PEV writer.write_event(datetime.time(12, 34, 56), 'PEV', 'Some Text') # -> B123456PEVSome Text writer.write_event('PEV') # uses utcnow() # -> B121503PEV :param time: UTC time of the fix record (default: :meth:`~datetime.datetime.utcnow`) :param code: event type as three-letter-code :param text: additional text describing the event (optional)
377,350
def needs_distribute_ready(self):
    alive = [c for c in self.connections() if c.alive()]
    if any(c.ready <= (c.last_ready_sent * 0.25) for c in alive):
        return True
Determine whether or not we need to redistribute the ready state
377,351
def tear_down(self): if not self.browser_config.get(): self.warning_log("Skipping terminate") return self.info_log("Tearing down") if self.browser_config.get().lower() == : self.execute_command("shutdown -h now", username=) elif self.browser_config.get().lower() == : self.session.console.power_down()
Tear down the virtual box machine
377,352
def Sens_m_sample(poly, dist, samples, rule="R"):
    dim = len(dist)
    generator = Saltelli(dist, samples, poly, rule=rule)
    zeros = [0]*dim
    ones = [1]*dim
    index = [0]*dim
    variance = numpy.var(generator[zeros], -1)
    matrix_0 = generator[zeros]
    matrix_1 = generator[ones]
    mean = .5*(numpy.mean(matrix_1) + numpy.mean(matrix_0))
    matrix_0 -= mean
    matrix_1 -= mean
    out = [
        numpy.mean(matrix_1*((generator[index]-mean)-matrix_0), -1) /
        numpy.where(variance, variance, 1)
        for index in numpy.eye(dim, dtype=bool)
    ]
    return numpy.array(out)
First order sensitivity indices estimated using Saltelli's method. Args: poly (chaospy.Poly): If provided, evaluated samples through polynomials before returned. dist (chaopy.Dist): distribution to sample from. samples (int): The number of samples to draw for each matrix. rule (str): Scheme for generating random samples. Return: (numpy.ndarray): array with `shape == (len(dist), len(poly))` where `sens[dim][pol]` is the first sensitivity index for distribution dimensions `dim` and polynomial index `pol`. Examples: >>> dist = chaospy.Iid(chaospy.Uniform(), 2) >>> poly = chaospy.basis(2, 2, dim=2) >>> print(poly) [q0^2, q0q1, q1^2] >>> print(numpy.around(Sens_m_sample(poly, dist, 10000, rule="M"), 4)) [[0.008 0.0026 0. ] [0. 0.6464 2.1321]]
377,353
def today(boo): tod = datetime.strptime(datetime.today().date().isoformat().replace(, ), ) if boo: return int(str(tod).replace(, )[:8]) else: return str(tod)[:10]
Return today's date as either a String or a Number, as specified by the User. Args: boo: if true, function returns Number (20151230); if false, returns String ("2015-12-30") Returns: either a Number or a string, dependent upon the user's input
377,354
def maximum(self, node):
    temp_node = node
    while temp_node.right is not None:
        temp_node = temp_node.right
    return temp_node
Find the maximum node in the subtree rooted at the given node. :param node: :return: max node
377,355
def _get_values(self, data_blob, dtype_enum, shape_string):
    buf = np.frombuffer(data_blob, dtype=tf.DType(dtype_enum).as_numpy_dtype)
    return buf.reshape([int(i) for i in shape_string.split(',')]).tolist()
Obtains values for histogram data given blob and dtype enum. Args: data_blob: The blob obtained from the database. dtype_enum: The enum representing the dtype. shape_string: A comma-separated string of numbers denoting shape. Returns: The histogram values as a list served to the frontend.
377,356
def user(self, obj, with_user_activity=False, follow_flag=None, **kwargs): q = Q() qs = self.public() if not obj: return qs.none() check(obj) if with_user_activity: q = q | Q( actor_content_type=ContentType.objects.get_for_model(obj), actor_object_id=obj.pk ) follows = apps.get_model(, ).objects.filter(user=obj) if follow_flag: follows = follows.filter(flag=follow_flag) content_types = ContentType.objects.filter( pk__in=follows.values() ) if not (content_types.exists() or with_user_activity): return qs.none() for content_type in content_types: object_ids = follows.filter(content_type=content_type) q = q | Q( actor_content_type=content_type, actor_object_id__in=object_ids.values() ) | Q( target_content_type=content_type, target_object_id__in=object_ids.filter( actor_only=False).values() ) | Q( action_object_content_type=content_type, action_object_object_id__in=object_ids.filter( actor_only=False).values() ) return qs.filter(q, **kwargs)
Create a stream of the most recent actions by objects that the user is following.
377,357
def add_parameter_dd(self, dag_tag, node_dict): if isinstance(node_dict, defaultdict) or isinstance(node_dict, dict): node_tag = etree.SubElement(dag_tag, , attrib={: next(iter(node_dict.keys()))}) edge_dict = next(iter(node_dict.values())) for edge in sorted(edge_dict.keys(), key=tuple): edge_tag = etree.SubElement(node_tag, , attrib={: edge}) value = edge_dict.get(edge) if isinstance(value, six.string_types): terminal_tag = etree.SubElement(edge_tag, ) terminal_tag.text = value elif in value: if in value: etree.SubElement(edge_tag, , attrib={: value[], : value[], : value[]}) elif in value: etree.SubElement(edge_tag, , attrib={: value[], : value[]}) else: etree.SubElement(edge_tag, , attrib={: value[], : value[]}) else: self.add_parameter_dd(edge_tag, value)
helper function for adding parameters in condition Parameters --------------- dag_tag: etree SubElement the DAG tag is contained in this subelement node_dict: dictionary the decision diagram dictionary Return --------------- None
377,358
def format_extension(self):
    for extension in reversed(self.extensions):
        compiler = self.environment.compilers.get(extension)
        if not compiler and self.environment.mimetypes.get(extension):
            return extension
The format extension of asset. Example:: >>> attrs = AssetAttributes(environment, 'js/models.js.coffee') >>> attrs.format_extension '.js' >>> attrs = AssetAttributes(environment, 'js/lib/external.min.js.coffee') >>> attrs.format_extension '.js'
377,359
def setParts( self, parts ): self.setText(self.separator().join(map(str, parts)))
Sets the path for this edit widget by providing the parts to the path. :param parts | [<str>, ..]
377,360
def parse_string_descriptor(string_desc): if not isinstance(string_desc, str): string_desc = str(string_desc) if not string_desc.endswith(): string_desc += parsed = get_streamer_parser().parseString(string_desc)[0] realtime = in parsed broadcast = in parsed encrypted = in parsed and parsed[] == signed = in parsed and parsed[] == auto = not in parsed with_other = None if in parsed: with_other = parsed[] auto = False dest = SlotIdentifier.FromString() if in parsed: dest = parsed[] selector = parsed[] if realtime and (encrypted or signed): raise SensorGraphSemanticError("Realtime streamers cannot be either signed or encrypted") if broadcast and (encrypted or signed): raise SensorGraphSemanticError("Broadcast streamers cannot be either signed or encrypted") report_type = if broadcast else dest = dest selector = selector if realtime or broadcast: report_format = u elif signed: report_format = u elif encrypted: raise SensorGraphSemanticError("Encrypted streamers are not yet supported") else: report_format = u return DataStreamer(selector, dest, report_format, auto, report_type=report_type, with_other=with_other)
Parse a string descriptor of a streamer into a DataStreamer object. Args: string_desc (str): The string descriptor that we wish to parse. Returns: DataStreamer: A DataStreamer object representing the streamer.
377,361
def process(self, index=None): print "Starting slope calculation round" self.process_twi(index, do_edges=False, skip_uca_twi=True) print "Starting self-area calculation round" self.process_twi(index, do_edges=False) i = self.tile_edge.find_best_candidate(self.elev_source_files) print "Starting edge resolution round: ", count = 0 i_old = -1 same_count = 0 while i is not None and same_count < 3: count += 1 print * 10 print count, % (i_old, i) self.process_twi(i, do_edges=True) i_old = i i = self.tile_edge.find_best_candidate(self.elev_source_files) if i_old == i: same_count += 1 else: same_count = 0 print *79 print print *79 return self
This will completely process a directory of elevation tiles (as supplied in the constructor). Both phases of the calculation, the single tile and edge resolution phases are run. Parameters ----------- index : int/slice (optional) Default None - processes all tiles in a directory. See :py:func:`process_twi` for additional options.
377,362
def models_to_table(obj, params=True): r if not hasattr(obj, ): raise Exception() row = + *4 + + *22 + + *18 + + *26 + fmt = lines = [] lines.append(row) lines.append(fmt.format(, , , , , , , , )) lines.append(row.replace(, )) for i, item in enumerate(obj.models.keys()): prop = item if len(prop) > 20: prop = item[:17] + "..." temp = obj.models[item].copy() model = str(temp.pop()).split()[1] lines.append(fmt.format(, str(i+1), , prop, , , , model, )) lines.append(row) if params: for param in temp.keys(): p1 = param if len(p1) > 16: p1 = p1[:14] + p2 = str(temp[param]) if len(p2) > 24: p2 = p2[:21] + lines.append(fmt.format(, , , , , p1, , p2, )) lines.append(row) return .join(lines)
r""" Converts a ModelsDict object to a ReST compatible table Parameters ---------- obj : OpenPNM object Any object that has a ``models`` attribute params : boolean Indicates whether or not to include a list of parameter values in the table. Set to False for just a list of models, and True for a more verbose table with all parameter values.
377,363
def van_image_enc_2d(x, first_depth, reuse=False, hparams=None): with tf.variable_scope(, reuse=reuse): enc_history = [x] enc = tf.layers.conv2d( x, first_depth, 3, padding=, activation=tf.nn.relu, strides=1) enc = tf.contrib.layers.layer_norm(enc) enc = tf.layers.conv2d( enc, first_depth, 3, padding=, activation=tf.nn.relu, strides=1) enc = tf.nn.max_pool(enc, [1, 2, 2, 1], [1, 2, 2, 1], ) enc = tf.nn.dropout(enc, hparams.van_keep_prob) enc = tf.contrib.layers.layer_norm(enc) enc_history.append(enc) enc = tf.layers.conv2d( enc, first_depth * 2, 3, padding=, activation=tf.nn.relu, strides=1) enc = tf.layers.conv2d( enc, first_depth * 2, 3, padding=, activation=tf.nn.relu, strides=1) enc = tf.nn.max_pool(enc, [1, 2, 2, 1], [1, 2, 2, 1], ) enc = tf.nn.dropout(enc, hparams.van_keep_prob) enc = tf.contrib.layers.layer_norm(enc) enc_history.append(enc) enc = tf.layers.conv2d( enc, first_depth * 4, 3, padding=, activation=tf.nn.relu, strides=1) enc = tf.layers.conv2d( enc, first_depth * 4, 3, padding=, activation=tf.nn.relu, strides=1) enc = tf.layers.conv2d( enc, first_depth * 4, 3, padding=, activation=tf.nn.relu, strides=1) enc = tf.nn.max_pool(enc, [1, 2, 2, 1], [1, 2, 2, 1], ) return enc, enc_history
The image encoder for the VAN. Similar architecture as Ruben's paper (http://proceedings.mlr.press/v70/villegas17a/villegas17a.pdf). Args: x: The image to encode. first_depth: The depth of the first layer. Depth is increased in subsequent layers. reuse: To reuse in variable scope or not. hparams: The python hparams. Returns: The encoded image.
377,364
def adjust_frame(self, pos, absolute_pos):
    if not self.curframe:
        Mmsg.errmsg(self, "No stack.")
        return
    if absolute_pos:
        if pos >= 0:
            pos = len(self.stack)-pos-1
        else:
            pos = -pos-1
    else:
        pos += self.curindex
    if pos < 0:
        Mmsg.errmsg(self, "Adjusting would put us beyond the oldest frame.")
        return
    elif pos >= len(self.stack):
        Mmsg.errmsg(self, "Adjusting would put us beyond the newest frame.")
        return
    self.curindex = pos
    self.curframe = self.stack[self.curindex][0]
    self.print_location()
    self.list_lineno = None
    return
Adjust stack frame by pos positions. If absolute_pos then pos is an absolute number. Otherwise it is a relative number. A negative number indexes from the other end.
377,365
def _lookup_online(word):
    URL = "https://www.diki.pl/{word}"
    HEADERS = {
        "User-Agent": (
            "Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; "
            "Trident/7.0; rv:11.0) like Gecko"
        )
    }
    logger.debug("Looking up online: %s", word)
    quoted_word = urllib.parse.quote(word)
    req = urllib.request.Request(URL.format(word=quoted_word), headers=HEADERS)
    with urllib.request.urlopen(req) as response:
        html_string = response.read().decode()
    return html.unescape(html_string)
Look up word on diki.pl. Parameters ---------- word : str Word too look up. Returns ------- str website HTML content.
377,366
def isclose(a, b, rel_tol=1e-9, abs_tol=0.0):
    if rel_tol < 0.0 or abs_tol < 0.0:
        raise ValueError('Negative tolerances')
    if ((a.real == b.real) and (a.imag == b.imag)):
        return True
    if (isinf(a.real) or isinf(a.imag) or
            isinf(b.real) or isinf(b.imag)):
        return False
    diff = abs(a - b)
    return (((diff <= rel_tol*abs(b)) or (diff <= rel_tol*abs(a))) or
            (diff <= abs_tol))
Pure python and therefore slow version of the standard library isclose. Works on older versions of python though! Hasn't been unit tested, but has been tested. manual unit testing: from math import isclose as isclose2 from random import uniform for i in range(10000000): a = uniform(-1, 1) b = uniform(-1, 1) rel_tol = uniform(0, 1) abs_tol = uniform(0, .001) ans1 = isclose(a, b, rel_tol, abs_tol) ans2 = isclose2(a, b, rel_tol=rel_tol, abs_tol=abs_tol) try: assert ans1 == ans2 except: print(a, b, rel_tol, abs_tol)
377,367
def create_runtime(self, env, runtime_context ): any_path_okay = self.builder.get_requirement("DockerRequirement")[1] \ or False runtime = [u"singularity", u"--quiet", u"exec", u"--contain", u"--pid", u"--ipc"] if _singularity_supports_userns(): runtime.append(u"--userns") runtime.append(u"--bind") runtime.append(u"{}:{}:rw".format( docker_windows_path_adjust(os.path.realpath(self.outdir)), self.builder.outdir)) runtime.append(u"--bind") tmpdir = "/tmp" runtime.append(u"{}:{}:rw".format( docker_windows_path_adjust(os.path.realpath(self.tmpdir)), tmpdir)) self.add_volumes(self.pathmapper, runtime, any_path_okay=True, secret_store=runtime_context.secret_store, tmpdir_prefix=runtime_context.tmpdir_prefix) if self.generatemapper is not None: self.add_volumes( self.generatemapper, runtime, any_path_okay=any_path_okay, secret_store=runtime_context.secret_store, tmpdir_prefix=runtime_context.tmpdir_prefix) runtime.append(u"--pwd") runtime.append(u"%s" % (docker_windows_path_adjust(self.builder.outdir))) if runtime_context.custom_net: raise UnsupportedRequirement( "Singularity implementation does not support custom networking") elif runtime_context.disable_net: runtime.append(u"--net") env["SINGULARITYENV_TMPDIR"] = tmpdir env["SINGULARITYENV_HOME"] = self.builder.outdir for name, value in self.environment.items(): env["SINGULARITYENV_{}".format(name)] = str(value) return (runtime, None)
Returns the Singularity runtime list of commands and options.
377,368
def _iter_enum_member_values(eid, bitmask):
    value = idaapi.get_first_enum_member(eid, bitmask)
    yield value
    while value != DEFMASK:
        value = idaapi.get_next_enum_member(eid, value, bitmask)
        yield value
Iterate member values with given bitmask inside the enum Note that `DEFMASK` can either indicate end-of-values or a valid value. Iterate serials to tell apart.
377,369
def info(self, message, payload=None):
    if payload:
        self.log(event=message, payload=payload)
    else:
        self.log(event=message)
    return self
DEPRECATED
377,370
def rendered(self):
    expressions = {k: v for (k, v) in self.expressions.items() if v is not None}
    if self.refs.attr_names:
        expressions["ExpressionAttributeNames"] = self.refs.attr_names
    if self.refs.attr_values:
        expressions["ExpressionAttributeValues"] = self.refs.attr_values
    return expressions
The rendered wire format for all conditions that have been rendered. Rendered conditions are never cleared. A new :class:`~bloop.conditions.ConditionRenderer` should be used for each operation.
377,371
def windowed_run_events(da, window, dim='time'):
    d = rle(da, dim=dim)
    out = (d >= window).sum(dim=dim)
    return out
Return the number of runs of a minimum length. Parameters ---------- da: N-dimensional Xarray data array (boolean) Input data array window : int Minimum run length. dim : Xarray dimension (default = 'time') Dimension along which to calculate consecutive run Returns ------- out : N-dimensional xarray data array (int) Number of distinct runs of a minimum length.
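An illustrative call, assuming xarray and numpy are available and that the rle helper used above is defined; the toy series below has two runs of True, only one of which reaches length 3.

import numpy as np
import xarray as xr

da = xr.DataArray(np.array([1, 1, 1, 0, 1, 1, 0], dtype=bool), dims="time")
print(int(windowed_run_events(da, window=3)))  # 1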
377,372
def post_unpack_merkleblock(d, f):
    level_widths = []
    count = d["total_transactions"]
    while count > 1:
        level_widths.append(count)
        count += 1
        count //= 2
    level_widths.append(1)
    level_widths.reverse()
    tx_acc = []
    flags = d["flags"]
    hashes = list(reversed(d["hashes"]))
    left_hash, flag_index = _recurse(level_widths, 0, 0, hashes, flags, 0, tx_acc)
    if len(hashes) > 0:
        raise ValueError("extra hashes: %s" % hashes)
    idx, r = divmod(flag_index-1, 8)
    if idx != len(flags) - 1:
        raise ValueError("not enough flags consumed")
    if flags[idx] > (1 << (r+1))-1:
        raise ValueError("unconsumed 1 flag bits set")
    if left_hash != d["header"].merkle_root:
        raise ValueError(
            "merkle root %s does not match calculated hash %s" % (
                b2h_rev(d["header"].merkle_root), b2h_rev(left_hash)))
    d["tx_hashes"] = tx_acc
    return d
A post-processing "post_unpack" to merkleblock messages. It validates the merkle proofs (throwing an exception if there's an error), and returns the list of transaction hashes in "tx_hashes". The transactions are supposed to be sent immediately after the merkleblock message.
377,373
def set_figure(self, figure, handle=None):
    self.figure = figure
    self.bkimage = None
    self._push_handle = handle
    wd = figure.plot_width
    ht = figure.plot_height
    self.configure_window(wd, ht)
    doc = curdoc()
    doc.add_periodic_callback(self.timer_cb, 50)
    self.logger.info("figure set")
Call this with the Bokeh figure object.
377,374
def get_feeds(self, project=None, feed_role=None, include_deleted_upstreams=None): route_values = {} if project is not None: route_values[] = self._serialize.url(, project, ) query_parameters = {} if feed_role is not None: query_parameters[] = self._serialize.query(, feed_role, ) if include_deleted_upstreams is not None: query_parameters[] = self._serialize.query(, include_deleted_upstreams, ) response = self._send(http_method=, location_id=, version=, route_values=route_values, query_parameters=query_parameters) return self._deserialize(, self._unwrap_collection(response))
GetFeeds. [Preview API] Get all feeds in an account where you have the provided role access. :param str project: Project ID or project name :param str feed_role: Filter by this role, either Administrator(4), Contributor(3), or Reader(2) level permissions. :param bool include_deleted_upstreams: Include upstreams that have been deleted in the response. :rtype: [Feed]
377,375
def fix_addresses(start=None, end=None):
    if start in (None, idaapi.BADADDR):
        start = idaapi.cvar.inf.minEA
    if end in (None, idaapi.BADADDR):
        end = idaapi.cvar.inf.maxEA
    return start, end
Set missing addresses to start and end of IDB. Take a start and end addresses. If an address is None or `BADADDR`, return start or end addresses of the IDB instead. Args start: Start EA. Use `None` to get IDB start. end: End EA. Use `None` to get IDB end. Returns: (start, end)
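A hedged usage sketch; this only runs inside an IDA Python session where idaapi is available.

# Full IDB range:
start, end = fix_addresses()
# Explicit start, with the IDB maximum as the end:
start, end = fix_addresses(start=0x401000)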
377,376
def resolve_path_from_base(path_to_resolve, base_path): return os.path.abspath( os.path.join( base_path, os.path.expanduser(path_to_resolve)))
If path_to_resolve is a relative path, create an absolute path with base_path as the base. If path_to_resolve is an absolute path or a user path (~), just resolve it to an absolute path and return.
377,377
def get_list(self): field = self.FIELD_NAME query = ElasticQuery.get_agg(field=field, date_field=self.FIELD_DATE, start=self.start, end=self.end, filters=self.esfilters) logger.debug("Metric: (%s); Query: %s", self.name, self.id, query) res = self.get_metrics_data(query) list_ = {field: [], "value": []} for bucket in res[][str(ElasticQuery.AGGREGATION_ID)][]: list_[field].append(bucket[]) list_[].append(bucket[]) return list_
Extract from a DSL aggregated response the values for each bucket :return: a list with the values in a DSL aggregated response
377,378
def essays(self):
    for essay_name in self.dest_user.profile.essays.essay_names:
        setattr(self.dest_user.profile.essays, essay_name,
                getattr(self.source_profile.essays, essay_name))
Copy essays from the source profile to the destination profile.
377,379
def adjgraph(args): import pygraphviz as pgv from jcvi.utils.iter import pairwise from jcvi.formats.base import SetFile p = OptionParser(adjgraph.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) infile, subgraph = args subgraph = SetFile(subgraph) subgraph = set(x.strip("-") for x in subgraph) G = pgv.AGraph(strict=False) SG = pgv.AGraph(strict=False) palette = ("green", "magenta", "tomato", "peachpuff") fp = open(infile) genome_id = -1 key = 0 for row in fp: if row.strip() == "": continue atoms = row.split() tag = atoms[0] if tag in ("ChrNumber", "chr"): continue if tag == "genome": genome_id += 1 gcolor = palette[genome_id] continue nodeseq = [] for p in atoms: np = p.strip("-") nodeL, nodeR = np + "L", np + "R" if p[0] == "-": nodeseq += [nodeR, nodeL] else: nodeseq += [nodeL, nodeR] for a, b in pairwise(nodeseq): G.add_edge(a, b, key, color=gcolor) key += 1 na, nb = a[:-1], b[:-1] if na not in subgraph and nb not in subgraph: continue SG.add_edge(a, b, key, color=gcolor) G.graph_attr.update(dpi="300") fw = open("graph.dot", "w") G.write(fw) fw.close() fw = open("subgraph.dot", "w") SG.write(fw) fw.close()
%prog adjgraph adjacency.txt subgraph.txt Construct adjacency graph for graphviz. The file may look like sample below. The lines with numbers are chromosomes with gene order information. genome 0 chr 0 -1 -13 -16 3 4 -6126 -5 17 -6 7 18 5357 8 -5358 5359 -9 -10 -11 5362 5360 chr 1 138 6133 -5387 144 -6132 -139 140 141 146 -147 6134 145 -170 -142 -143
377,380
def _vpcs_path(self):
    search_path = self._manager.config.get_section_config("VPCS").get("vpcs_path", "vpcs")
    path = shutil.which(search_path)
    if not path:
        return search_path
    return path
Returns the VPCS executable path. :returns: path to VPCS
377,381
def show_vcs_output_vcs_nodes_vcs_node_info_node_switchname(self, **kwargs): config = ET.Element("config") show_vcs = ET.Element("show_vcs") config = show_vcs output = ET.SubElement(show_vcs, "output") vcs_nodes = ET.SubElement(output, "vcs-nodes") vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info") node_switchname = ET.SubElement(vcs_node_info, "node-switchname") node_switchname.text = kwargs.pop() callback = kwargs.pop(, self._callback) return callback(config)
Auto Generated Code
377,382
def tag(**tags):
    transaction = execution_context.get_transaction()
    if not transaction:
        error_logger.warning("Ignored tags %s. No transaction currently active.", ", ".join(tags.keys()))
    else:
        transaction.tag(**tags)
Tags current transaction. Both key and value of the tag should be strings.
377,383
def intake_path_dirs(path):
    if isinstance(path, (list, tuple)):
        return path
    import re
    # Split on ";" on Windows, and on ":" (but not "://") elsewhere.
    pattern = re.compile(";" if os.name == 'nt' else r"(?<!:):(?![:/])")
    return pattern.split(path)
Return a list of directories from the intake path. If a string, perhaps taken from an environment variable, then the list of paths will be split on the character ":" for posix of ";" for windows. Protocol indicators ("protocol://") will be ignored.
377,384
def toProtocolElement(self):
    ret = protocol.ReferenceSet()
    ret.assembly_id = pb.string(self.getAssemblyId())
    ret.description = pb.string(self.getDescription())
    ret.id = self.getId()
    ret.is_derived = self.getIsDerived()
    ret.md5checksum = self.getMd5Checksum()
    if self.getSpecies():
        term = protocol.fromJson(
            json.dumps(self.getSpecies()), protocol.OntologyTerm)
        ret.species.term_id = term.term_id
        ret.species.term = term.term
    ret.source_accessions.extend(self.getSourceAccessions())
    ret.source_uri = pb.string(self.getSourceUri())
    ret.name = self.getLocalId()
    self.serializeAttributes(ret)
    return ret
Returns the GA4GH protocol representation of this ReferenceSet.
377,385
def send_email(sender, pw, to, subject, content, files=None, service='163'):  # default assumed from the docstring's recommendation
    se = EmailSender(from_=sender, pw=pw, service=service)
    se.send_email(to=to, subject=subject, content=content, files=files)
    se.quit()
send email, recommended use 163 mailbox service, as it is tested. :param sender: str email address of sender :param pw: str password for sender :param to: str email addressee :param subject: str subject of email :param content: str content of email :param files: list path list of attachments :param service: str smtp server address, optional is ['163', 'qq'] :return: None
377,386
def order_modified_volume(self, volume_id, new_size=None, new_iops=None, new_tier_level=None): mask_items = [ , , , , , , , , ] block_mask = .join(mask_items) volume = self.get_block_volume_details(volume_id, mask=block_mask) order = storage_utils.prepare_modify_order_object( self, volume, new_iops, new_tier_level, new_size ) return self.client.call(, , order)
Places an order for modifying an existing block volume. :param volume_id: The ID of the volume to be modified :param new_size: The new size/capacity for the volume :param new_iops: The new IOPS for the volume :param new_tier_level: The new tier level for the volume :return: Returns a SoftLayer_Container_Product_Order_Receipt
377,387
def unregister_callback(callback_id):
    global _callbacks
    obj = _callbacks.pop(callback_id, None)
    threads = []
    if obj is not None:
        t, quit = obj
        quit.set()
        threads.append(t)
    for t in threads:
        t.join()
unregister a callback registration
377,388
def show_image(img:Image, ax:plt.Axes=None, figsize:tuple=(3,3), hide_axis:bool=True, cmap:str=, alpha:float=None, **kwargs)->plt.Axes: "Display `Image` in notebook." if ax is None: fig,ax = plt.subplots(figsize=figsize) ax.imshow(image2np(img.data), cmap=cmap, alpha=alpha, **kwargs) if hide_axis: ax.axis() return ax
Display `Image` in notebook.
377,389
def accept(self, visitor: "BaseVisitor[ResultT]") -> ResultT:
    if visitor.begin_game() is not SKIP:
        for tagname, tagvalue in self.headers.items():
            visitor.visit_header(tagname, tagvalue)
        if visitor.end_headers() is not SKIP:
            board = self.board()
            visitor.visit_board(board)
            if self.comment:
                visitor.visit_comment(self.comment)
            if self.variations:
                self.variations[0].accept(visitor, _parent_board=board)
        visitor.visit_result(self.headers.get("Result", "*"))
    visitor.end_game()
    return visitor.result()
Traverses the game in PGN order using the given *visitor*. Returns the *visitor* result.
377,390
def _scan_response(self):
    voltage = struct.pack("<H", int(self.voltage*256))
    reading = struct.pack("<HLLL", 0xFFFF, 0, 0, 0)
    response = voltage + reading
    return response
Create scan response data.
377,391
def get_secret(key, *args, **kwargs): env_value = os.environ.get(key.replace(, ).upper()) if not env_value: return _get_secret_from_vault(key, *args, **kwargs) return env_value
Retrieves a secret.
377,392
def rentry_exists_on_disk(self, name):
    rentry_exists = self.entry_exists_on_disk(name)
    if not rentry_exists:
        norm_name = _my_normcase(name)
        for rdir in self.get_all_rdirs():
            try:
                node = rdir.entries[norm_name]
                if node:
                    rentry_exists = True
                    break
            except KeyError:
                if rdir.entry_exists_on_disk(name):
                    rentry_exists = True
                    break
    return rentry_exists
Searches through the file/dir entries of the current *and* all its remote directories (repos), and returns True if a physical entry with the given name could be found. The local directory (self) gets searched first, so repositories take a lower precedence regarding the searching order. @see entry_exists_on_disk
377,393
def token_submit(self, token_id, json_data={}): uri = % token_id post_body = json.dumps(json_data) resp, body = self.post(uri, post_body) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body)
Submits a given token, along with optional data
377,394
def _get_ldflags(): if sys.platform == : prefix = getattr(sys, , sys.prefix) libs = os.path.join(prefix, str()) return str().format(libs, *sys.version_info[:2]) cc = subprocess.check_output((, , )).decode().strip() with _tmpdir() as tmpdir: testf = os.path.join(tmpdir, ) with io.open(testf, ) as f: f.write() for lflag in LFLAGS: try: subprocess.check_call((cc, testf, lflag), cwd=tmpdir) return lflag except subprocess.CalledProcessError: pass else: return LFLAG_GCC
Determine the correct link flags. This attempts dummy compiles similar to how autotools does feature detection.
377,395
def has_port_by_name(self, port_name):
    with self._mutex:
        if self.get_port_by_name(port_name):
            return True
        return False
Check if this component has a port by the given name.
377,396
def uninitialize_ui(self):
    LOGGER.debug("> Uninitializing Component ui.".format(self.__class__.__name__))
    self.Port_spinBox.valueChanged.disconnect(self.__Port_spinBox__valueChanged)
    self.Autostart_TCP_Server_checkBox.stateChanged.disconnect(
        self.__Autostart_TCP_Server_checkBox__stateChanged)
    self.Start_TCP_Server_pushButton.clicked.disconnect(self.__Start_TCP_Server_pushButton__clicked)
    self.Stop_TCP_Server_pushButton.clicked.disconnect(self.__Stop_TCP_Server_pushButton__clicked)
    self.initialized_ui = False
    return True
Uninitializes the Component ui. :return: Method success. :rtype: bool
377,397
def is_github_task(task): return any(( task.get(, {}).get(, ).startswith(), is_github_url(task.get(, {}).get(, )), ))
Determine if a task is related to GitHub. This function currently looks into the ``schedulerId``, ``extra.tasks_for``, and ``metadata.source``. Args: task (dict): the task definition to check. Returns: bool: True if a piece of data refers to GitHub
377,398
def list_dir(self, filter_fn=None):  # return annotation lost in extraction; returns a tuple of Path objects
    path = str(self)
    items = os.listdir(path)
    if filter_fn is not None:
        items = filter(filter_fn, items)
    return tuple(Path(path_join((path, item))) for item in items)
* the `self` Path object is assumed to be a directory :param filter_fn: a `None` object or a predicative function `str -> bool` which will be applied on the filename/directory in `self` directory. :return: a tuple of Path objects each of which represents a file/directory in `self` directory. If the filter_fn is not None, each item in return tuple whose filename/directory name doesn't match the `filter_fn` will filtered. e.g: - Dir1 - File.py - File.pyi - File.pyx Dir1.list_dir(lambda path: '.py' in path) => [<Path object of File1.py>] Dir1.list_dir(lambda path: print(path)) IO: File.py File.pyi File.pyx => []
377,399
def meanOmega(self, dangle, oned=False, tdisrupt=None, approx=True,
              higherorder=None):
    if higherorder is None:
        higherorder = self._higherorderTrack
    if tdisrupt is None:
        tdisrupt = self._tdisrupt
    if approx:
        num = self._meanOmega_num_approx(dangle, tdisrupt,
                                         higherorder=higherorder)
    else:
        num = integrate.quad(
            lambda T: (T/(1-T*T)*numpy.sqrt(self._sortedSigOEig[2])
                       + self._meandO)
            * numpy.sqrt(self._sortedSigOEig[2])
            * (1+T*T)/(1-T*T)**2.
            * self.pOparapar(T/(1-T*T)*numpy.sqrt(self._sortedSigOEig[2])
                             + self._meandO, dangle),
            -1., 1.)[0]
    denom = self._density_par(dangle, tdisrupt=tdisrupt, approx=approx,
                              higherorder=higherorder)
    dO1D = num/denom
    if oned:
        return dO1D
    else:
        return self._progenitor_Omega \
            + dO1D*self._dsigomeanProgDirection*self._sigMeanSign
NAME: meanOmega PURPOSE: calculate the mean frequency as a function of angle, assuming a uniform time distribution up to a maximum time INPUT: dangle - angle offset oned= (False) if True, return the 1D offset from the progenitor (along the direction of disruption) approx= (True) if True, compute the mean Omega by direct integration of the spline representation higherorder= (object-wide default higherorderTrack) if True, include higher-order spline terms in the approximate computation OUTPUT: mean Omega HISTORY: 2015-11-17 - Written - Bovy (UofT)