16,900
def getfile(self, project_id, file_path, ref):
    # String literals below were stripped in extraction; the parameter keys
    # and the endpoint path are reconstructed from the docstring and the
    # GitLab repository-files API.
    data = {'file_path': file_path, 'ref': ref}
    request = requests.get(
        '{0}/{1}/repository/files'.format(self.projects_url, project_id),
        headers=self.headers, data=data, verify=self.verify_ssl,
        auth=self.auth, timeout=self.timeout)
    if request.status_code == 200:
        return request.json()
    else:
        return False
Allows you to receive information about a file in the repository, such as name, size and content. Note that file content is Base64 encoded. :param project_id: project_id :param file_path: Full path to file. Ex. lib/class.rb :param ref: The name of branch, tag or commit :return:
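A minimal usage sketch for the method above. The client class name, its constructor arguments and the 'content' response key are assumptions; the Base64 decoding step follows directly from the docstring.

import base64

gl = Gitlab('https://gitlab.example.com', token='...')  # hypothetical client setup
result = gl.getfile(project_id=42, file_path='lib/class.rb', ref='master')
if result:
    # The API returns the file content Base64 encoded, per the docstring.
    text = base64.b64decode(result['content']).decode('utf-8')
    print(text)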
16,901
def getUserAuthorizations(self, login, user):
    self.send_getUserAuthorizations(login, user)
    return self.recv_getUserAuthorizations()
Parameters: - login - user
16,902
def execute(options):
    # NOTE: the option and sub-task key strings were stripped in extraction;
    # the names below are assumptions inferred from the surrounding code.
    package_name = options['package_name']
    source_directory = options['source_directory']
    if options['upstream'] is True:
        upstream = True
    else:
        upstream = False
    sub_tasks = {'tags': options['tags'],
                 'branches': options['branches'],
                 'notes': options['notes']}
    if sub_tasks == {'tags': False, 'branches': False, 'notes': False}:
        sub_tasks = {'tags': True, 'branches': True, 'notes': True}
    credentials = create_credentials(credentials_file=options['credentials_file'],
                                     service_email=options['service_email'],
                                     service_key=options['service_key'])
    command = SyncCommand(
        package_name, source_directory, upstream, credentials, **sub_tasks)
    command.execute()
Execute the tool with the given options.
16,903
def apply_grad_zmat_tensor(grad_C, construction_table, cart_dist):
    if (construction_table.index != cart_dist.index).any():
        message = "construction_table and cart_dist must use the same index"
        raise ValueError(message)
    # Column and dtype literals were stripped in extraction; they are
    # reconstructed from chemcoord's Zmat column conventions.
    X_dist = cart_dist.loc[:, ['x', 'y', 'z']].values.T
    C_dist = np.tensordot(grad_C, X_dist, axes=([3, 2], [0, 1])).T
    if C_dist.dtype == np.dtype('i8'):
        C_dist = C_dist.astype('f8')
    try:
        C_dist[:, [1, 2]] = np.rad2deg(C_dist[:, [1, 2]])
    except AttributeError:
        C_dist[:, [1, 2]] = sympy.deg(C_dist[:, [1, 2]])
    from chemcoord.internal_coordinates.zmat_class_main import Zmat
    cols = ['atom', 'b', 'bond', 'a', 'angle', 'd', 'dihedral']
    dtypes = ['O', 'i8', 'f8', 'i8', 'f8', 'i8', 'f8']
    new = pd.DataFrame(data=np.zeros((len(construction_table), 7)),
                       index=cart_dist.index, columns=cols, dtype='f8')
    new = new.astype(dict(zip(cols, dtypes)))
    new.loc[:, ['b', 'a', 'd']] = construction_table
    new.loc[:, 'atom'] = cart_dist.loc[:, 'atom']
    new.loc[:, ['bond', 'angle', 'dihedral']] = C_dist
    return Zmat(new, _metadata={'last_movement': cart_dist})
Apply the gradient for transformation to Zmatrix space onto cart_dist. Args: grad_C (:class:`numpy.ndarray`): A ``(3, n, n, 3)`` array. The mathematical details of the index layout are explained in :meth:`~chemcoord.Cartesian.get_grad_zmat()`. construction_table (pandas.DataFrame): Explained in :meth:`~chemcoord.Cartesian.get_construction_table()`. cart_dist (:class:`~chemcoord.Cartesian`): Distortions in cartesian space. Returns: :class:`Zmat`: Distortions in Zmatrix space.
16,904
def load_csv_data(resource_name):
    # The package name and resource path were stripped in extraction; the
    # __name__ package and 'data/{}.csv' layout below are assumptions.
    data_bytes = pkgutil.get_data(__name__, 'data/{}.csv'.format(resource_name))
    if data_bytes is None:
        raise ValueError("No data resource found with name {}".format(resource_name))
    else:
        data = data_bytes.decode('utf-8')
        reader = csv.reader(data.splitlines())
        next(reader, None)  # skip the header row
        return [row[0] for row in reader]
Loads first column of specified CSV file from package data.
16,905
def encode_dataset_coordinates(dataset):
    non_dim_coord_names = set(dataset.coords) - set(dataset.dims)
    return _encode_coordinates(dataset._variables, dataset.attrs,
                               non_dim_coord_names=non_dim_coord_names)
Encode coordinates on the given dataset object into variable specific and global attributes. When possible, this is done according to CF conventions. Parameters ---------- dataset : Dataset Object to encode. Returns ------- variables : dict attrs : dict
16,906
def partition_read(
        self, session, table, key_set, transaction=None, index=None,
        columns=None, partition_options=None,
        retry=google.api_core.gapic_v1.method.DEFAULT,
        timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None):
    if "partition_read" not in self._inner_api_calls:
        self._inner_api_calls["partition_read"] = \
            google.api_core.gapic_v1.method.wrap_method(
                self.transport.partition_read,
                default_retry=self._method_configs["PartitionRead"].retry,
                default_timeout=self._method_configs["PartitionRead"].timeout,
                client_info=self._client_info,
            )
    request = spanner_pb2.PartitionReadRequest(
        session=session, table=table, key_set=key_set,
        transaction=transaction, index=index, columns=columns,
        partition_options=partition_options,
    )
    if metadata is None:
        metadata = []
    metadata = list(metadata)
    try:
        routing_header = [("session", session)]
    except AttributeError:
        pass
    else:
        routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
            routing_header)
        metadata.append(routing_metadata)
    return self._inner_api_calls["partition_read"](
        request, retry=retry, timeout=timeout, metadata=metadata)
Creates a set of partition tokens that can be used to execute a read operation in parallel. Each of the returned partition tokens can be used by ``StreamingRead`` to specify a subset of the read result to read. The same session and read-only transaction must be used by the PartitionReadRequest used to create the partition tokens and the ReadRequests that use the partition tokens. There are no ordering guarantees on rows returned among the returned partition tokens, or even within each individual StreamingRead call issued with a partition\_token. Partition tokens become invalid when the session used to create them is deleted, is idle for too long, begins a new transaction, or becomes too old. When any of these happen, it is not possible to resume the read, and the whole operation must be restarted from the beginning. Example: >>> from google.cloud import spanner_v1 >>> >>> client = spanner_v1.SpannerClient() >>> >>> session = client.session_path('[PROJECT]', '[INSTANCE]', '[DATABASE]', '[SESSION]') >>> >>> # TODO: Initialize `table`: >>> table = '' >>> >>> # TODO: Initialize `key_set`: >>> key_set = {} >>> >>> response = client.partition_read(session, table, key_set) Args: session (str): Required. The session used to create the partitions. table (str): Required. The name of the table in the database to be read. key_set (Union[dict, ~google.cloud.spanner_v1.types.KeySet]): Required. ``key_set`` identifies the rows to be yielded. ``key_set`` names the primary keys of the rows in ``table`` to be yielded, unless ``index`` is present. If ``index`` is present, then ``key_set`` instead names index keys in ``index``. It is not an error for the ``key_set`` to name rows that do not exist in the database. Read yields nothing for nonexistent rows. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.KeySet` transaction (Union[dict, ~google.cloud.spanner_v1.types.TransactionSelector]): Read only snapshot transactions are supported, read/write and single use transactions are not. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.TransactionSelector` index (str): If non-empty, the name of an index on ``table``. This index is used instead of the table primary key when interpreting ``key_set`` and sorting result rows. See ``key_set`` for further information. columns (list[str]): The columns of ``table`` to be returned for each row matching this request. partition_options (Union[dict, ~google.cloud.spanner_v1.types.PartitionOptions]): Additional options that affect how many partitions are created. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.PartitionOptions` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.spanner_v1.types.PartitionResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
16,907
def write(self):
    writer = csv.writer(self.file)
    for f, b in zip(self.gb.result["forward"], self.gb.result["backward"]):
        f = f._asdict()
        b = b._asdict()
        if not self.check_same(f, b):
            raise AssertionError()
        args_info = ", ".join(["{}: {}".format(k, v) for k, v in f["args_info"]])
        out = [f["parameter_scope"], f["function_name"], f["inputs_shape"],
               args_info, f["mean_time"], b["mean_time"], f["n_run"], b["n_run"]]
        writer.writerow(out)
    writer.writerow([])
    writer.writerow(["forward all", self.gb.result["forward_all"]])
    writer.writerow(["forward_all_n_run", self.gb.result["n_run_forward_all"]])
    writer.writerow([])
    writer.writerow(["backward all", self.gb.result["backward_all"]])
    writer.writerow(["backward_all_n_run", self.gb.result["n_run_backward_all"]])
    if set(self.gb.result.keys()) >= {"training", "n_run_training"}:
        writer.writerow([])
        writer.writerow(["training(forward + backward + update)",
                         self.gb.result["training"]])
        writer.writerow(["training_n_run", self.gb.result["n_run_training"]])
Write result to the file. The output file is specified by ``file``.
16,908
def get_knownGene_hg19(self):
    if self._knownGene_hg19 is None:
        self._knownGene_hg19 = self._load_knownGene(self._get_path_knownGene_hg19())
    return self._knownGene_hg19
Get UCSC knownGene table for Build 37. Returns ------- pandas.DataFrame knownGene table if loading was successful, else None
16,909
def Stichlmair_dry(Vg, rhog, mug, voidage, specific_area, C1, C2, C3, H=1.):
    dp = 6*(1 - voidage)/specific_area
    Re = Vg*rhog*dp/mug
    f0 = C1/Re + C2/Re**0.5 + C3
    return 3/4.*f0*(1 - voidage)/voidage**4.65*rhog*H/dp*Vg**2
Calculates dry pressure drop across a packed column, using the Stichlmair [1]_ correlation. Uses three regressed constants for each type of packing, and voidage and specific area. Pressure drop is given by:

.. math::
    \Delta P_{dry} = \frac{3}{4} f_0 \frac{1-\epsilon}{\epsilon^{4.65}} \rho_G \frac{H}{d_p}V_g^2

.. math::
    f_0 = \frac{C_1}{Re_g} + \frac{C_2}{Re_g^{0.5}} + C_3

.. math::
    d_p = \frac{6(1-\epsilon)}{a}

Parameters
----------
Vg : float
    Superficial velocity of gas, Q/A [m/s]
rhog : float
    Density of gas [kg/m^3]
mug : float
    Viscosity of gas [Pa*s]
voidage : float
    Voidage of bed of packing material []
specific_area : float
    Specific area of the packing material [m^2/m^3]
C1 : float
    Packing-specific constant []
C2 : float
    Packing-specific constant []
C3 : float
    Packing-specific constant []
H : float, optional
    Height of packing [m]

Returns
-------
dP_dry : float
    Pressure drop across dry packing [Pa]

Notes
-----
This model is used by most process simulation tools. If H is not provided, it defaults to 1.

Examples
--------
>>> Stichlmair_dry(Vg=0.4, rhog=5., mug=5E-5, voidage=0.68,
... specific_area=260., C1=32., C2=7, C3=1)
236.80904286559885

References
----------
.. [1] Stichlmair, J., J. L. Bravo, and J. R. Fair. "General Model for Prediction of Pressure Drop and Capacity of Countercurrent Gas/liquid Packed Columns." Gas Separation & Purification 3, no. 1 (March 1989): 19-28. doi:10.1016/0950-4214(89)80016-7.
16,910
def send_message(self, subject=None, text=None, markdown=None, message_dict=None):
    message = FiestaMessage(self.api, self, subject, text, markdown, message_dict)
    return message.send()
Helper function to send a message to a group
16,911
def getTraceCombosByIds(self, trace_ids, adjust):
    self.send_getTraceCombosByIds(trace_ids, adjust)
    return self.recv_getTraceCombosByIds()
Not content with just one of traces, summaries or timelines? Want it all? This is the method for you. Parameters: - trace_ids - adjust
16,912
def _make_annulus_path(patch_inner, patch_outer):
    import matplotlib.path as mpath
    path_inner = patch_inner.get_path()
    transform_inner = patch_inner.get_transform()
    path_inner = transform_inner.transform_path(path_inner)
    path_outer = patch_outer.get_path()
    transform_outer = patch_outer.get_transform()
    path_outer = transform_outer.transform_path(path_outer)
    verts_inner = path_inner.vertices[:-1][::-1]
    verts_inner = np.concatenate((verts_inner, [verts_inner[-1]]))
    verts = np.vstack((path_outer.vertices, verts_inner))
    codes = np.hstack((path_outer.codes, path_inner.codes))
    return mpath.Path(verts, codes)
Defines a matplotlib annulus path from two patches. This preserves the cubic Bezier curves (CURVE4) of the aperture paths. This is borrowed from photutils aperture.
16,913
def cli(env, package_keyname, keyword, category):
    table = formatting.Table(COLUMNS)
    manager = ordering.OrderingManager(env.client)
    # Filter and item key strings were stripped in extraction; they are
    # reconstructed from SoftLayer's item-list command.
    _filter = {'items': {}}
    if keyword:
        _filter['items']['description'] = {'operation': '*= %s' % keyword}
    if category:
        _filter['items']['categories'] = {'categoryCode': {'operation': '_= %s' % category}}
    items = manager.list_items(package_keyname, filter=_filter)
    sorted_items = sort_items(items)
    categories = sorted_items.keys()
    for catname in sorted(categories):
        for item in sorted_items[catname]:
            table.add_row([catname, item['keyName'], item['description'],
                           get_price(item)])
    env.fout(table)
List package items used for ordering. The item keyNames listed can be used with `slcli order place` to specify the items that are being ordered in the package. .. Note:: Items with a numbered category, like disk0 or gpu0, can be included multiple times in an order to match how many of the item you want to order. :: # List all items in the VSI package slcli order item-list CLOUD_SERVER # List Ubuntu OSes from the os category of the Bare Metal package slcli order item-list BARE_METAL_SERVER --category os --keyword ubuntu
16,914
def absent(name, user, enc='ssh-rsa', comment='', source='', options=None,
           config='.ssh/authorized_keys', fingerprint_hash_type=None):
    # Reconstructed from the surviving fragments (ret keys, regexes and the
    # ssh.* execution-module calls); minor details are assumptions.
    ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    if __opts__['test']:
        ret['result'], ret['comment'] = _absent_test(
            user, name, enc, comment, options or [], source, config,
            fingerprint_hash_type)
        return ret
    if source != '':
        # Extract the key(s) from the source file
        key = __salt__['cp.get_file_str'](source, saltenv=__env__)
        filehasoptions = False
        # check if this is of the form: options, key, comment
        sshre = re.compile(r'^(ssh\-|ecds).*')
        key = key.rstrip().split('\n')
        for keyline in key:
            filehasoptions = sshre.match(keyline)
            if not filehasoptions:
                ret['comment'] = __salt__['ssh.rm_auth_key_from_file'](
                    user, source, config, saltenv=__env__,
                    fingerprint_hash_type=fingerprint_hash_type)
            else:
                # split the keyline to get the key itself
                keyline = keyline.split(' ')
                ret['comment'] = __salt__['ssh.rm_auth_key'](
                    user, keyline[1], config=config,
                    fingerprint_hash_type=fingerprint_hash_type)
    else:
        # Get just the key portion of ``name``
        sshre = re.compile(r'^(.*?)\s?((?:ssh\-|ecds)[\w-]+\s.+)$')
        fullkey = sshre.search(name)
        if not fullkey:
            key_and_comment = name.split(None, 1)
            name = key_and_comment[0]
            if len(key_and_comment) == 2:
                comment = key_and_comment[1]
        else:
            if fullkey.group(1):
                options = fullkey.group(1).split(',')
            comps = fullkey.group(2).split()
            enc = comps[0]
            name = comps[1]
            if len(comps) == 3:
                comment = comps[2]
        ret['comment'] = __salt__['ssh.rm_auth_key'](
            user, name, config=config,
            fingerprint_hash_type=fingerprint_hash_type)
    if ret['comment'] == 'User authorized keys file not present':
        ret['result'] = False
        return ret
    elif ret['comment'] == 'Key removed':
        ret['changes'][name] = 'Removed'
    return ret
Verifies that the specified SSH key is absent name The SSH key to manage user The user who owns the SSH authorized keys file to modify enc Defines what type of key is being used; can be ed25519, ecdsa, ssh-rsa or ssh-dss comment The comment to be placed with the SSH public key options The options passed to the key, pass a list object source The source file for the key(s). Can contain any number of public keys, in standard "authorized_keys" format. If this is set, comment, enc and options will be ignored. .. versionadded:: 2015.8.0 config The location of the authorized keys file relative to the user's home directory, defaults to ".ssh/authorized_keys". Token expansion %u and %h for username and home path supported. fingerprint_hash_type The public key fingerprint hash type that the public key fingerprint was originally hashed with. This defaults to ``sha256`` if not specified. .. versionadded:: 2016.11.7
16,915
def load_dic28():
    # File and column name literals were stripped in extraction; the names
    # below are assumptions inferred from the two-graph structure.
    dataset_path = _load('dic28')
    X = _load_csv(dataset_path, 'data')
    y = X.pop('label').values
    graph1 = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph1.gml')))
    graph2 = nx.Graph(nx.read_gml(os.path.join(dataset_path, 'graph2.gml')))
    graph = graph1.copy()
    graph.add_nodes_from(graph2.nodes(data=True))
    graph.add_edges_from(graph2.edges)
    graph.add_edges_from(X[['graph1', 'graph2']].values)
    graphs = {
        'graph1': graph1,
        'graph2': graph2,
    }
    return Dataset(load_dic28.__doc__, X, y, accuracy_score,
                   stratify=True, graph=graph, graphs=graphs)
DIC28 Dataset from Pajek. This network represents connections among English words in a dictionary. It was generated from Knuth's dictionary. Two words are connected by an edge if we can reach one from the other by - changing a single character (e. g., work - word) - adding / removing a single character (e. g., ever - fever). There exist 52,652 words (vertices in a network) having 2 up to 8 characters in the dictionary. The obtained network has 89038 edges.
16,916
def mouse(table, day=None):
    where = (("day", day),) if day else ()
    events = db.fetch(table, where=where, order="day")
    for e in events:
        e["dt"] = datetime.datetime.fromtimestamp(e["stamp"])
    stats, positions, events = stats_mouse(events, table)
    days, input = db.fetch("counts", order="day", type=table), "mouse"
    return bottle.template("heatmap.tpl", locals(), conf=conf)
Handler for showing mouse statistics for specified type and day.
16,917
def set_domain_workgroup(workgroup):
    if six.PY2:
        workgroup = _to_unicode(workgroup)
    with salt.utils.winapi.Com():
        conn = wmi.WMI()
        comp = conn.Win32_ComputerSystem()[0]
        res = comp.JoinDomainOrWorkgroup(Name=workgroup.upper())
    return True if not res[0] else False
Set the domain or workgroup the computer belongs to. .. versionadded:: 2019.2.0 Returns: bool: ``True`` if successful, otherwise ``False`` CLI Example: .. code-block:: bash salt 'minion-id' system.set_domain_workgroup LOCAL
16,918
def values(self):
    "Returns all values this object can return via keys."
    return tuple(set(self.new.values()).union(self.old.values()))
Returns all values this object can return via keys.
16,919
def get_task_instances(self, state=None, session=None):
    from airflow.models.taskinstance import TaskInstance
    tis = session.query(TaskInstance).filter(
        TaskInstance.dag_id == self.dag_id,
        TaskInstance.execution_date == self.execution_date,
    )
    if state:
        if isinstance(state, six.string_types):
            tis = tis.filter(TaskInstance.state == state)
        else:
            if None in state:
                tis = tis.filter(
                    or_(TaskInstance.state.in_(state),
                        TaskInstance.state.is_(None))
                )
            else:
                tis = tis.filter(TaskInstance.state.in_(state))
    if self.dag and self.dag.partial:
        tis = tis.filter(TaskInstance.task_id.in_(self.dag.task_ids))
    return tis.all()
Returns the task instances for this dag run
16,920
def write_bel_namespace(self, file: TextIO, use_names: bool = False) -> None:
    if not self.is_populated():
        self.populate()
    if use_names and not self.has_names:
        raise ValueError
    # progress description strings reconstructed approximately
    values = (
        self._get_namespace_name_to_encoding(desc='writing names')
        if use_names else
        self._get_namespace_identifier_to_encoding(desc='writing identifiers')
    )
    write_namespace(
        namespace_name=self._get_namespace_name(),
        namespace_keyword=self._get_namespace_keyword(),
        namespace_query_url=self.identifiers_url,
        values=values,
        file=file,
    )
Write as a BEL namespace file.
16,921
def _clear_screen():
    if platform.system() == "Windows":
        tmp = os.system('cls')
    else:
        tmp = os.system('clear')
    return True
http://stackoverflow.com/questions/18937058/python-clear-screen-in-shell
16,922
def create_prefetch(self, addresses):
    with self._lock:
        for add in addresses:
            self._state[add] = _ContextFuture(address=add, wait_for_tree=True)
Create futures needed before starting the process of reading the address's value from the merkle tree. Args: addresses (list of str): addresses in the txn's inputs that aren't in any base context (or any in the chain).
16,923
def get_float_relative(strings: Sequence[str], prefix1: str, delta: int,
                       prefix2: str,
                       ignoreleadingcolon: bool = False) -> Optional[float]:
    return get_float_raw(get_string_relative(
        strings, prefix1, delta, prefix2,
        ignoreleadingcolon=ignoreleadingcolon))
Fetches a float parameter via :func:`get_string_relative`.
16,924
async def send_message(self, message, *, end=False):
    if not self._send_request_done:
        await self.send_request()
    if end and self._end_done:
        raise ProtocolError()
    with self._wrapper:
        message, = await self._dispatch.send_message(message)
        await send_message(self._stream, self._codec, message,
                           self._send_type, end=end)
        self._send_message_count += 1
        if end:
            self._end_done = True
Coroutine to send message to the server. If client sends UNARY request, then you should call this coroutine only once. If client sends STREAM request, then you can call this coroutine as many times as you need. .. warning:: It is important to finally end stream from the client-side when you finished sending messages. You can do this in two ways: - specify ``end=True`` argument while sending last message - and last DATA frame will include END_STREAM flag; - call :py:meth:`end` coroutine after sending last message - and extra HEADERS frame with END_STREAM flag will be sent. First approach is preferred, because it doesn't require sending additional HTTP/2 frame.
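A sketch of driving this coroutine from a STREAM request, assuming a grpclib-style stream object; the chunk objects are illustrative.

async def upload(stream, chunks):
    # Mark the last message with end=True so the final DATA frame carries
    # the END_STREAM flag (the preferred approach described above).
    for i, chunk in enumerate(chunks):
        await stream.send_message(chunk, end=(i == len(chunks) - 1))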
16,925
def get_candidate_config(self, merge=False, formal=False):
    command = "show configuration"
    if merge:
        command += " merge"
    if formal:
        command += " formal"
    response = self._execute_config_show(command)
    match = re.search(".*(!! IOS XR Configuration.*)$", response, re.DOTALL)
    if match is not None:
        response = match.group(1)
    return response
Retrieve the configuration loaded as candidate config in your configuration session. :param merge: Merge candidate config with running config to return the complete configuration including all changes :param formal: Return configuration in IOS-XR formal config format
16,926
def assert_raises_errno(exception, errno, msg_fmt="{msg}"):
    def check_errno(exc):
        if errno != exc.errno:
            msg = "wrong errno: {!r} != {!r}".format(errno, exc.errno)
            fail(
                msg_fmt.format(
                    msg=msg,
                    exc_type=exception,
                    exc_name=exception.__name__,
                    expected_errno=errno,
                    actual_errno=exc.errno,
                )
            )
    context = AssertRaisesErrnoContext(exception, errno, msg_fmt)
    context.add_test(check_errno)
    return context
Fail unless an exception with a specific errno is raised with the context. >>> with assert_raises_errno(OSError, 42): ... raise OSError(42, "OS Error") ... >>> with assert_raises_errno(OSError, 44): ... raise OSError(17, "OS Error") ... Traceback (most recent call last): ... AssertionError: wrong errno: 44 != 17 The following msg_fmt arguments are supported: * msg - the default error message * exc_type - exception type that is expected * exc_name - expected exception type name * expected_errno - errno that is expected * actual_errno - raised errno or None if no matching exception was raised
16,927
def typevalue(self, key, value):
    def listconvert(value):
        try:
            return ast.literal_eval(value)
        except (SyntaxError, ValueError):
            if "," in value:
                return [x.strip() for x in value.split(",")]
            else:
                return value

    default = self.get(key)
    if inspect.isclass(default):
        t = default
    else:
        t = type(default)
    if t == bool:
        t = LayeredConfig.boolconvert
    elif t == list:
        t = listconvert
    elif t == date:
        t = LayeredConfig.dateconvert
    elif t == datetime:
        t = LayeredConfig.datetimeconvert
    return t(value)
Given a parameter identified by ``key`` and an untyped string, convert that string to the type that our version of key has.
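The inner listconvert helper leans on ast.literal_eval and only falls back to comma splitting when the string is not a valid Python literal; a standalone re-run of that logic:

import ast

def listconvert(value):
    try:
        return ast.literal_eval(value)          # "[1, 2, 3]" -> [1, 2, 3]
    except (SyntaxError, ValueError):
        if "," in value:
            return [x.strip() for x in value.split(",")]  # "a, b" -> ['a', 'b']
        return value                            # plain strings pass through

print(listconvert("[1, 2, 3]"))   # [1, 2, 3]
print(listconvert("a, b, c"))     # ['a', 'b', 'c']
print(listconvert("hello"))       # hello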
16,928
def inform(self, reading):
    try:
        self._inform_callback(self._sensor, reading)
    except Exception:
        # log message text reconstructed approximately
        log.exception('Unhandled exception trying to inform {!r} for sensor '
                      '{!r} of type {!r}'.format(
                          reading, self._sensor.name, self._sensor.type))
Inform strategy creator of the sensor status.
16,929
def files_read(self, path, offset=0, count=None, **kwargs):
    opts = {"offset": offset}
    if count is not None:
        opts["count"] = count
    kwargs.setdefault("opts", opts)
    args = (path,)
    # endpoint name reconstructed from the MFS read operation
    return self._client.request('/files/read', args, **kwargs)
Reads a file stored in the MFS. .. code-block:: python >>> c.files_read("/bla/file") b'hi' Parameters ---------- path : str Filepath within the MFS offset : int Byte offset at which to begin reading at count : int Maximum number of bytes to read Returns ------- str : MFS file contents
16,930
def compare(testsuite, gold, select='i-id i-input mrs'):
    from delphin.mrs import simplemrs, compare as mrs_compare
    if not isinstance(testsuite, itsdb.TestSuite):
        if isinstance(testsuite, itsdb.ItsdbProfile):
            testsuite = testsuite.root
        testsuite = itsdb.TestSuite(testsuite)
    if not isinstance(gold, itsdb.TestSuite):
        if isinstance(gold, itsdb.ItsdbProfile):
            gold = gold.root
        gold = itsdb.TestSuite(gold)
    # query/result key strings reconstructed from the docstring
    queryobj = tsql.inspect_query('select ' + select)
    if len(queryobj['projection']) != 3:
        raise ValueError('select does not return 3 fields: ' + select)
    input_select = '{} {}'.format(queryobj['projection'][0],
                                  queryobj['projection'][1])
    i_inputs = dict(tsql.select(input_select, testsuite))
    matched_rows = itsdb.match_rows(
        tsql.select(select, testsuite),
        tsql.select(select, gold), 0)
    for (key, testrows, goldrows) in matched_rows:
        (test_unique, shared, gold_unique) = mrs_compare.compare_bags(
            [simplemrs.loads_one(row[2]) for row in testrows],
            [simplemrs.loads_one(row[2]) for row in goldrows])
        yield {'id': key, 'input': i_inputs[key], 'test': test_unique,
               'shared': shared, 'gold': gold_unique}
Compare two [incr tsdb()] profiles. Args: testsuite (str, TestSuite): path to the test [incr tsdb()] testsuite or a :class:`TestSuite` object gold (str, TestSuite): path to the gold [incr tsdb()] testsuite or a :class:`TestSuite` object select: TSQL query to select (id, input, mrs) triples (default: `i-id i-input mrs`) Yields: dict: Comparison results as:: {"id": "item identifier", "input": "input sentence", "test": number_of_unique_results_in_test, "shared": number_of_shared_results, "gold": number_of_unique_results_in_gold}
16,931
def find_path(name, path=None, exact=False):
    path = os.environ.get('PATH', os.defpath) if path is None else path
    dpaths = path.split(os.pathsep) if isinstance(path, six.string_types) else path
    candidates = (join(dpath, name) for dpath in dpaths)
    if exact:
        if WIN32:
            # on Windows an executable may carry any extension in PATHEXT
            pathext = [''] + os.environ.get('PATHEXT', '').split(os.pathsep)
            candidates = (p + ext for p in candidates for ext in pathext)
        candidates = filter(exists, candidates)
    else:
        import glob
        candidates = it.chain.from_iterable(
            glob.glob(pattern) for pattern in candidates)
    return candidates
Search for a file or directory on your local filesystem by name (file must be in a directory specified in a PATH environment variable) Args: name (PathLike or str): file name to match. If exact is False this may be a glob pattern path (str or Iterable[PathLike]): list of directories to search either specified as an os.pathsep separated string or a list of directories. Defaults to environment PATH. exact (bool): if True, only returns exact matches. Default False. Notes: For recursive behavior set `path=(d for d, _, _ in os.walk('.'))`, where '.' might be replaced by the root directory of interest. Example: >>> list(find_path('ping', exact=True)) >>> list(find_path('bin')) >>> list(find_path('*cc*')) >>> list(find_path('cmake*')) Example: >>> import ubelt as ub >>> from os.path import dirname >>> path = dirname(dirname(ub.util_platform.__file__)) >>> res = sorted(find_path('ubelt/util_*.py', path=path)) >>> assert len(res) >= 10 >>> res = sorted(find_path('ubelt/util_platform.py', path=path, exact=True)) >>> print(res) >>> assert len(res) == 1
16,932
def lrange(self, key, start, stop):
    redis_list = self._get_list(key, 'LRANGE')
    start, stop = self._translate_range(len(redis_list), start, stop)
    return redis_list[start:stop + 1]
Emulate lrange.
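The emulation converts Redis's inclusive stop index into Python's exclusive slice bound, hence the +1; a quick check of that off-by-one handling:

redis_list = ['a', 'b', 'c', 'd']
start, stop = 0, 2                   # LRANGE key 0 2 is inclusive of index 2
print(redis_list[start:stop + 1])    # ['a', 'b', 'c']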
16,933
def make_exttrig_file(cp, ifos, sci_seg, out_dir):
    xmldoc = ligolw.Document()
    xmldoc.appendChild(ligolw.LIGO_LW())
    tbl = lsctables.New(lsctables.ExtTriggersTable)
    cols = tbl.validcolumns
    xmldoc.childNodes[-1].appendChild(tbl)
    row = tbl.appendRow()
    setattr(row, "event_ra", float(cp.get("workflow", "ra")))
    setattr(row, "event_dec", float(cp.get("workflow", "dec")))
    setattr(row, "start_time", int(cp.get("workflow", "trigger-time")))
    setattr(row, "event_number_grb", str(cp.get("workflow", "trigger-name")))
    # ligolw column type names below were stripped in extraction and are
    # reconstructed from the standard valid column types.
    for entry in cols.keys():
        if not hasattr(row, entry):
            if cols[entry] in ['real_4', 'real_8']:
                setattr(row, entry, 0.)
            elif cols[entry] == 'int_4s':
                setattr(row, entry, 0)
            elif cols[entry] == 'lstring':
                setattr(row, entry, '')
            elif entry == 'process_id':
                row.process_id = ilwd.ilwdchar("external_trigger:process_id:0")
            elif entry == 'event_id':
                row.event_id = ilwd.ilwdchar("external_trigger:event_id:0")
            else:
                print("Column %s not recognized" % (entry), file=sys.stderr)
                raise ValueError
    xml_file_name = "triggerGRB%s.xml" % str(cp.get("workflow", "trigger-name"))
    xml_file_path = os.path.join(out_dir, xml_file_name)
    utils.write_filename(xmldoc, xml_file_path)
    xml_file_url = urlparse.urljoin("file:", urllib.pathname2url(xml_file_path))
    xml_file = File(ifos, xml_file_name, sci_seg, file_url=xml_file_url)
    xml_file.PFN(xml_file_url, site="local")
    return xml_file
Make an ExtTrig xml file containing information on the external trigger Parameters ---------- cp : pycbc.workflow.configuration.WorkflowConfigParser object The parsed configuration options of a pycbc.workflow.core.Workflow. ifos : str String containing the analysis interferometer IDs. sci_seg : ligo.segments.segment The science segment for the analysis run. out_dir : str The output directory, destination for xml file. Returns ------- xml_file : pycbc.workflow.File object The xml file with external trigger information.
16,934
def to_google(self, type, label, issuer, counter=None):
    # warning message text reconstructed from the docstring
    warnings.warn('to_google is deprecated, use to_uri instead',
                  DeprecationWarning)
    return self.to_uri(type, label, issuer, counter)
Generate the otpauth protocal string for Google Authenticator. .. deprecated:: 0.2.0 Use :func:`to_uri` instead.
16,935
def get_bestnr(self, index=4.0, nhigh=3.0, null_snr_threshold=4.25,
               null_grad_thresh=20., null_grad_val=1./5.):
    bestnr = self.get_new_snr(index=index, nhigh=nhigh, column="chisq")
    if len(self.get_ifos()) < 3:
        return bestnr
    if self.snr > null_grad_thresh:
        null_snr_threshold += (self.snr - null_grad_thresh) * null_grad_val
    if self.get_null_snr() > null_snr_threshold:
        bestnr /= 1 + self.get_null_snr() - null_snr_threshold
    return bestnr
Return the BestNR statistic for this row.
16,936
def as_bel(self) -> str:
    # 'pmod' literal and separator reconstructed from BEL pmod() syntax
    return 'pmod({}{})'.format(
        str(self[IDENTIFIER]),
        ''.join(', {}'.format(self[x]) for x in PMOD_ORDER[2:] if x in self)
    )
Return this protein modification variant as a BEL string.
16,937
def numSegments(self, cell=None):
    if cell is not None:
        return len(self._cells[cell]._segments)
    return self._nextFlatIdx - len(self._freeFlatIdxs)
Returns the number of segments. :param cell: (int) Optional parameter to get the number of segments on a cell. :returns: (int) Number of segments on all cells if cell is not specified, or on the specified cell
16,938
def patch_namespaced_custom_object(self, group, version, namespace, plural, name, body, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.patch_namespaced_custom_object_with_http_info(
            group, version, namespace, plural, name, body, **kwargs)
    else:
        (data) = self.patch_namespaced_custom_object_with_http_info(
            group, version, namespace, plural, name, body, **kwargs)
        return data
patch the specified namespace scoped custom object This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.patch_namespaced_custom_object(group, version, namespace, plural, name, body, async_req=True) >>> result = thread.get() :param async_req bool :param str group: the custom resource's group (required) :param str version: the custom resource's version (required) :param str namespace: The custom resource's namespace (required) :param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required) :param str name: the custom object's name (required) :param object body: The JSON schema of the Resource to patch. (required) :return: object If the method is called asynchronously, returns the request thread.
16,939
def _delete_reminders_from_list(self, listName):
    self.log.info('starting the ``_delete_reminders_from_list`` method')
    # The AppleScript body was stripped in extraction; this reconstruction
    # is an assumption based on the method's purpose.
    applescript = '''
        tell application "Reminders"
            delete every reminder of list "%(listName)s"
        end tell''' % locals()
    cmd = "\n".join(["osascript << EOT", applescript, "EOT"])
    p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
    stdout, stderr = p.communicate()
    if len(stderr):
        self.log.error(stderr)
        sys.exit(0)
    self.log.info('completed the ``_delete_reminders_from_list`` method')
    return None
* delete reminders from list* **Key Arguments:** - ``listName`` -- the name of the reminders list
16,940
def get_image(self, component_info=None, data=None, component_position=None):
    components = []
    append_components = components.append
    for _ in range(component_info.image_count):
        component_position, image_info = QRTPacket._get_exact(
            RTImage, data, component_position
        )
        append_components((image_info, data[component_position:-1]))
    return components
Get image.
16,941
def render(self, rect, data):
    size = self.get_minimum_size(data)
    extra_width = rect.w - size.x
    extra_height = rect.h - size.y
    if self.scaling_col is None or not 0 <= self.scaling_col < self.cols:
        width_per_col = extra_width / float(self.cols)
        col_widths = [width + width_per_col for width in self.col_widths]
    else:
        col_widths = self.col_widths[:]
        col_widths[self.scaling_col] += extra_width
    if self.scaling_row is None or not 0 <= self.scaling_row < self.rows:
        height_per_row = extra_height / float(self.rows)
        row_heights = [height + height_per_row for height in self.row_heights]
    else:
        row_heights = self.row_heights[:]
        row_heights[self.scaling_row] += extra_height
    col_xs = []
    last_x = rect.left + self.outside_margin
    for width in col_widths:
        col_xs.append((last_x, last_x + width))
        last_x += width + self.margin
    row_ys = []
    last_y = rect.top - self.outside_margin
    for height in row_heights:
        row_ys.append((last_y, last_y - height))
        last_y -= height + self.margin
    for col, row, cols, rows, element in self.elements:
        x_start = col_xs[col][0]
        y_start = row_ys[row][0]
        x_end = col_xs[col + cols - 1][1]
        y_end = row_ys[row + rows - 1][1]
        element.render(datatypes.Rectangle(
            x_start, y_end, x_end - x_start, y_start - y_end
        ), data)

    def _get_value(array, index, sign):
        if index <= 0:
            return array[0][0] - self.outside_margin * sign
        elif index >= len(array):
            return array[-1][1] + self.outside_margin * sign
        else:
            return (array[index - 1][1] + array[index][0]) * 0.5

    for start_col, start_row, end_col, end_row, width, color in self.rules:
        x_start = _get_value(col_xs, start_col, 1)
        y_start = _get_value(row_ys, start_row, -1)
        x_end = _get_value(col_xs, end_col, 1)
        y_end = _get_value(row_ys, end_row, -1)
        # The canvas key was stripped in extraction; 'output' is an assumption.
        data['output'].line(
            x_start, y_start, x_end, y_end,
            stroke=color, stroke_width=width
        )
Draws the cells in grid.
16,942
def password_option(*param_decls, **attrs):
    def decorator(f):
        attrs.setdefault('prompt', True)
        attrs.setdefault('confirmation_prompt', True)
        attrs.setdefault('hide_input', True)
        return option(*(param_decls or ('--password',)), **attrs)(f)
    return decorator
Shortcut for password prompts. This is equivalent to decorating a function with :func:`option` with the following parameters:: @click.command() @click.option('--password', prompt=True, confirmation_prompt=True, hide_input=True) def changeadmin(password): pass
16,943
def _zip_files(files, root):
    zip_data = StringIO()
    with ZipFile(zip_data, 'w', ZIP_DEFLATED) as zip_file:
        for fname in files:
            zip_file.write(os.path.join(root, fname), fname)
        for zip_entry in zip_file.filelist:
            perms = (zip_entry.external_attr & ZIP_PERMS_MASK) >> 16
            if perms & stat.S_IXUSR != 0:
                new_perms = 0o755
            else:
                new_perms = 0o644
            if new_perms != perms:
                logger.debug("lambda: fixing perms: %s: %o => %o",
                             zip_entry.filename, perms, new_perms)
                new_attr = ((zip_entry.external_attr & ~ZIP_PERMS_MASK) |
                            (new_perms << 16))
                zip_entry.external_attr = new_attr
    contents = zip_data.getvalue()
    zip_data.close()
    content_hash = _calculate_hash(files, root)
    return contents, content_hash
Generates a ZIP file in-memory from a list of files. Files will be stored in the archive with relative names, and have their UNIX permissions forced to 755 or 644 (depending on whether they are user-executable in the source filesystem). Args: files (list[str]): file names to add to the archive, relative to ``root``. root (str): base directory to retrieve files from. Returns: str: content of the ZIP file as a byte string. str: A calculated hash of all the files.
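The permission fix-up works because ZipInfo.external_attr keeps the UNIX mode in its upper 16 bits. A small demonstration of the masking logic, with ZIP_PERMS_MASK assumed to cover the permission bits:

import stat

ZIP_PERMS_MASK = 0o7777 << 16   # assumed definition: permission bits of external_attr

for mode in (0o775, 0o664):
    external_attr = mode << 16
    perms = (external_attr & ZIP_PERMS_MASK) >> 16
    new_perms = 0o755 if perms & stat.S_IXUSR else 0o644
    print(oct(perms), '->', oct(new_perms))   # 0o775 -> 0o755, 0o664 -> 0o644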
16,944
def key_binding(self, keydef, mode='U'):
    # the default value of ``mode`` was stripped in extraction; 'U' is an
    # assumption
    def register(fun):
        fun.mpv_key_bindings = getattr(fun, 'mpv_key_bindings', []) + [keydef]

        def unregister_all():
            for keydef in fun.mpv_key_bindings:
                self.unregister_key_binding(keydef)
        fun.unregister_mpv_key_bindings = unregister_all

        self.register_key_binding(keydef, fun, mode)
        return fun
    return register
Function decorator to register a low-level key binding. The callback function signature is ``fun(key_state, key_name)`` where ``key_state`` is either ``'U'`` for "key up" or ``'D'`` for "key down". The keydef format is: ``[Shift+][Ctrl+][Alt+][Meta+]<key>`` where ``<key>`` is either the literal character the key produces (ASCII or Unicode character), or a symbolic name (as printed by ``mpv --input-keylist``). To unregister the callback function, you can call its ``unregister_mpv_key_bindings`` attribute:: player = mpv.MPV() @player.key_binding('Q') def binding(state, name): print('blep') binding.unregister_mpv_key_bindings() WARNING: For a single keydef only a single callback/command can be registered at the same time. If you register a binding multiple times older bindings will be overwritten and there is a possibility of references leaking. So don't do that. BIG FAT WARNING: mpv's key binding mechanism is pretty powerful. This means, you essentially get arbitrary code execution through key bindings. This interface makes some limited effort to sanitize the keydef given in the first parameter, but YOU SHOULD NOT RELY ON THIS FOR SECURITY. If your input comes from config files, this is completely fine--but, if you are about to pass untrusted input into this parameter, better double-check whether this is secure in your case.
16,945
def send(self, message, *args, **kwargs):
    self._messages.put((message, args, kwargs), False)
Sends provided message to all listeners. Message is only added to queue and will be processed on next tick. :param Message message: Message to send.
16,946
def get_resource_url(self):
    name = self.__class__.resource_name
    url = self.__class__.rest_base_url()
    return "%s/%s" % (url, name)
Get the resource's complete URL.
16,947
def start(self, listen_ip=LISTEN_IP, listen_port=0):
    coro = self.loop.create_datagram_endpoint(
        lambda: self, local_addr=(listen_ip, listen_port))
    self.task = self.loop.create_task(coro)
    return self.task
Start discovery task.
16,948
def argmin(self):
    return tuple(centres[index] for centres, index in
                 zip(self.centres(),
                     numpy.unravel_index(self.array.argmin(),
                                         self.array.shape)))
Return the co-ordinates of the bin centre containing the minimum value. Same as numpy.argmin(), converting the indexes to bin co-ordinates.
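A self-contained check of the same unravel-and-lookup pattern with plain NumPy and hypothetical bin centres:

import numpy

array = numpy.array([[5., 1.],
                     [3., 4.]])
centres = ([10., 20.], [0.5, 1.5])        # hypothetical bin centres per axis
index = numpy.unravel_index(array.argmin(), array.shape)   # (0, 1)
print(tuple(c[i] for c, i in zip(centres, index)))         # (10.0, 1.5)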
16,949
def __fetch_crate_owner_user(self, crate_id):
    # attribute name reconstructed from the crates.io '/owner_user' endpoint
    raw_owner_user = self.client.crate_attribute(crate_id, 'owner_user')
    owner_user = json.loads(raw_owner_user)
    return owner_user
Get crate user owners
16,950
def create_from_string(self, string, context=EMPTY_CONTEXT, *args, **kwargs):
    if not PY2 and not isinstance(string, bytes):
        raise TypeError("string should be an instance of bytes in Python 3")
    io = StringIO(string)
    instance = self.create_from_stream(io, context, *args, **kwargs)
    io.close()
    return instance
Deserializes a new instance from a string. This is a convenience method that creates a StringIO object and calls create_instance_from_stream().
16,951
def execute_cql_query(self, query, compression):
    self._seqid += 1
    d = self._reqs[self._seqid] = defer.Deferred()
    self.send_execute_cql_query(query, compression)
    return d
Executes a CQL (Cassandra Query Language) statement and returns a CqlResult containing the results. Parameters: - query - compression
16,952
def _DeserializeAttributeContainer(self, container_type, serialized_data):
    if not serialized_data:
        return None
    if self._serializers_profiler:
        self._serializers_profiler.StartTiming(container_type)
    try:
        serialized_string = serialized_data.decode('utf-8')
    except UnicodeDecodeError as exception:
        # error message text reconstructed approximately
        raise IOError('Unable to decode serialized data: {0!s}'.format(
            exception))
    attribute_container = self._serializer.ReadSerialized(serialized_string)
    if self._serializers_profiler:
        self._serializers_profiler.StopTiming(container_type)
    return attribute_container
Deserializes an attribute container. Args: container_type (str): attribute container type. serialized_data (bytes): serialized attribute container data. Returns: AttributeContainer: attribute container or None. Raises: IOError: if the serialized data cannot be decoded. OSError: if the serialized data cannot be decoded.
16,953
def label_from_lists(self, train_labels:Iterator, valid_labels:Iterator,
                     label_cls:Callable=None, **kwargs)->'LabelLists':
    "Use the labels in `train_labels` and `valid_labels` to label the data. `label_cls` will overwrite the default."
    label_cls = self.train.get_label_cls(train_labels, label_cls)
    self.train = self.train._label_list(x=self.train,
                                        y=label_cls(train_labels, **kwargs))
    self.valid = self.valid._label_list(x=self.valid,
                                        y=self.train.y.new(valid_labels, **kwargs))
    self.__class__ = LabelLists
    self.process()
    return self
Use the labels in `train_labels` and `valid_labels` to label the data. `label_cls` will overwrite the default.
16,954
def stack(recs, fields=None):
    if fields is None:
        fields = list(set.intersection(
            *[set(rec.dtype.names) for rec in recs]))
    if set(fields) == set(recs[0].dtype.names):
        fields = list(recs[0].dtype.names)
    return np.hstack([rec[fields] for rec in recs])
Stack common fields in multiple record arrays (concatenate them). Parameters ---------- recs : list List of NumPy record arrays fields : list of strings, optional (default=None) The list of fields to include in the stacked array. If None, then include the fields in common to all the record arrays. Returns ------- rec : NumPy record array The stacked array.
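For instance, with two toy record arrays that share all their fields, the helper reduces to a plain concatenation:

import numpy as np

a = np.array([(1, 2.0)], dtype=[('x', int), ('y', float)])
b = np.array([(3, 4.0)], dtype=[('x', int), ('y', float)])
print(stack([a, b]))                      # [(1, 2.) (3, 4.)]
print(stack([a, b], fields=['x', 'y']))   # explicit field selection, same result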
16,955
def computeISI(spikeTrains):
    zeroCount = 0
    isi = []
    cells = 0
    for i in range(np.shape(spikeTrains)[0]):
        if cells > 0 and cells % 250 == 0:
            print str(cells) + " cells processed"
        for j in range(np.shape(spikeTrains)[1]):
            if spikeTrains[i][j] == 0:
                zeroCount += 1
            elif zeroCount > 0:
                isi.append(zeroCount)
                zeroCount = 0
        zeroCount = 0
        cells += 1
    print "**All cells processed**"
    return isi
Estimates the inter-spike interval from a spike train matrix. @param spikeTrains (array) matrix of spike trains @return isi (array) matrix with the inter-spike interval obtained from the spike train. Each entry in this matrix represents the number of time-steps in-between 2 spikes as the algorithm scans the spike train matrix.
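To make the bookkeeping concrete, a pure-Python rerun of the same scan on a toy two-cell matrix:

spikeTrains = [[1, 0, 0, 1, 0, 1],
               [1, 0, 0, 0, 1, 0]]
isi = []
for row in spikeTrains:
    gap = 0
    for v in row:
        if v == 0:
            gap += 1
        elif gap > 0:
            isi.append(gap)
            gap = 0
    gap = 0     # a gap still open at the row's end is never closed by a spike
print(isi)      # [2, 1, 3]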
16,956
def button_clicked(self, button):
    if button is self.idx_ok:
        chans = self.get_channels()
        group = self.one_grp
        cycle = self.get_cycles()
        stage = self.idx_stage.selectedItems()
        params = {k: v.get_value() for k, v in self.index.items()}
        name = self.name.get_value()
        # NOTE: the parameter keys, status-bar messages and dict keys below
        # were stripped in extraction; the names used here are assumptions.
        if None in [params['f1'], params['f2']]:
            self.parent.statusBar().showMessage(
                'Specify bandpass frequencies')
            return
        if params['max_dur'] is None:
            self.parent.statusBar().showMessage('Specify maximum duration')
            return
        elif params['max_dur'] >= 30:
            self.parent.statusBar().showMessage(
                'Maximum duration must be below 30 seconds')
            return
        if stage == []:
            stage = None
        else:
            stage = [x.text() for x in self.idx_stage.selectedItems()]
        chan_full = None
        reject_artf = False
        if params['reject'] == 'channel-specific':
            chan_full = [i + ' (' + self.idx_group.currentText() + ')'
                         for i in chans]
            chans = None
            reject_artf = True
        elif params['reject'] == 'from all channels':
            reject_artf = True
        data = fetch(self.parent.info.dataset, self.parent.notes.annot,
                     cat=(1, 1, 1, 0), stage=stage, cycle=cycle,
                     chan_full=chan_full, min_dur=params['min_dur'],
                     reject_epoch=params['reject_epoch'],
                     reject_artf=reject_artf)
        if not data.segments:
            msg = 'No valid signal found.'
            error_dialog = QErrorMessage(self)
            error_dialog.setWindowTitle('Error fetching data')
            error_dialog.showMessage(msg)
            return
        ding = data.read_data(chans, group['ref_chan'], group['name'],
                              parent=self)
        if not ding:
            self.parent.statusBar().showMessage('Process interrupted.')
            return
        data = data[0]['data']
        if params['filt']:
            low_cut = params['f1']
            high_cut = params['f2']
            data = filter_(data, axis='time', low_cut=low_cut,
                           high_cut=high_cut)
        if params['merge']:
            data = math(data, operator_name='mean', axis='chan')
        self.parent.notes.detect_events(data, self.method, params, label=name)
        self.accept()
    if button is self.idx_cancel:
        self.reject()
Action when button was clicked. Parameters ---------- button : instance of QPushButton which button was pressed
16,957
def telegram(): if not exists(, msg=): run() run() run() with warn_only(): run() run() else: print() run(, msg="\nCreate executable :")
Install Telegram desktop client for linux (x64). More infos: https://telegram.org https://desktop.telegram.org/
16,958
def connect_text(instance, prop, widget):
    def update_prop():
        val = widget.text()
        setattr(instance, prop, val)

    def update_widget(val):
        if hasattr(widget, 'editingFinished'):
            widget.blockSignals(True)
            widget.setText(val)
            widget.blockSignals(False)
            widget.editingFinished.emit()
        else:
            widget.setText(val)

    add_callback(instance, prop, update_widget)
    try:
        widget.editingFinished.connect(update_prop)
    except AttributeError:
        pass
    update_widget(getattr(instance, prop))
Connect a string callback property with a Qt widget containing text. Parameters ---------- instance : object The class instance that the callback property is attached to prop : str The name of the callback property widget : QtWidget The Qt widget to connect. This should implement the ``setText`` and ``text`` methods as well as, optionally, the ``editingFinished`` signal.
16,959
def do_WhoIsRequest(self, apdu):
    if _debug:
        WhoIsIAmServices._debug("do_WhoIsRequest %r", apdu)
    if not self.localDevice:
        if _debug:
            WhoIsIAmServices._debug(" - no local device")
        return
    low_limit = apdu.deviceInstanceRangeLowLimit
    high_limit = apdu.deviceInstanceRangeHighLimit
    if low_limit is not None:
        if high_limit is None:
            raise MissingRequiredParameter("deviceInstanceRangeHighLimit required")
        if (low_limit < 0) or (low_limit > 4194303):
            raise ParameterOutOfRange("deviceInstanceRangeLowLimit out of range")
    if high_limit is not None:
        if low_limit is None:
            raise MissingRequiredParameter("deviceInstanceRangeLowLimit required")
        if (high_limit < 0) or (high_limit > 4194303):
            raise ParameterOutOfRange("deviceInstanceRangeHighLimit out of range")
    if low_limit is not None:
        if self.localDevice.objectIdentifier[1] < low_limit:
            return
    if high_limit is not None:
        if self.localDevice.objectIdentifier[1] > high_limit:
            return
    self.i_am(address=apdu.pduSource)
Respond to a Who-Is request.
16,960
def get_admin_url(obj, page=None):
    if obj is None:
        return None
    if page is None:
        page = "change"
    if page not in ADMIN_ALL_PAGES:
        raise ValueError("Invalid page name {}. Available pages are: {}.".format(
            page, ADMIN_ALL_PAGES))
    app_label = obj.__class__._meta.app_label
    object_name = obj.__class__._meta.object_name.lower()
    if page in ADMIN_GLOBAL_PAGES:
        url_name = page
    else:
        url_name = "{}_{}_{}".format(app_label, object_name, page)
    if page == "app_list":
        url_args = (app_label,)
    elif page == "view_on_site":
        content_type = ContentType.objects.get_for_model(obj.__class__)
        url_args = (content_type, obj._get_pk_val())
    elif page in ADMIN_DETAIL_PAGES:
        url_args = (obj._get_pk_val(),)
    else:
        url_args = None
    return reverse("admin:{}".format(url_name), args=url_args)
Return the URL to admin pages for this object.
16,961
async def start(self, remoteParameters):
    assert self._state == State.NEW
    assert len(remoteParameters.fingerprints)
    # Role names, the keying-material extractor label and the log message
    # texts below were stripped in extraction; they are reconstructed
    # approximately (the extractor label is fixed by RFC 5764).
    if self.transport.role == 'controlling':
        self._role = 'server'
        lib.SSL_set_accept_state(self.ssl)
    else:
        self._role = 'client'
        lib.SSL_set_connect_state(self.ssl)
    self._set_state(State.CONNECTING)
    try:
        while not self.encrypted:
            result = lib.SSL_do_handshake(self.ssl)
            await self._write_ssl()
            if result > 0:
                self.encrypted = True
                break
            error = lib.SSL_get_error(self.ssl, result)
            if error == lib.SSL_ERROR_WANT_READ:
                await self._recv_next()
            else:
                self.__log_debug('DTLS handshake failed (error %d)', error)
                for info in get_error_queue():
                    self.__log_debug('error queue: %s', ' '.join(info))
                self._set_state(State.FAILED)
                return
    except ConnectionError:
        self.__log_debug('DTLS handshake failed (connection error)')
        self._set_state(State.FAILED)
        return
    # check the remote certificate fingerprint
    x509 = lib.SSL_get_peer_certificate(self.ssl)
    remote_fingerprint = certificate_digest(x509)
    fingerprint_is_valid = False
    for f in remoteParameters.fingerprints:
        if f.algorithm.lower() == 'sha-256' and \
                f.value.lower() == remote_fingerprint.lower():
            fingerprint_is_valid = True
            break
    if not fingerprint_is_valid:
        self.__log_debug('DTLS handshake failed (fingerprint mismatch)')
        self._set_state(State.FAILED)
        return
    # generate the SRTP keying material
    buf = ffi.new('unsigned char[]', 2 * (SRTP_KEY_LEN + SRTP_SALT_LEN))
    extractor = b'EXTRACTOR-dtls_srtp'
    _openssl_assert(lib.SSL_export_keying_material(
        self.ssl, buf, len(buf), extractor, len(extractor), ffi.NULL, 0, 0) == 1)
    view = ffi.buffer(buf)
    if self._role == 'server':
        srtp_tx_key = get_srtp_key_salt(view, 1)
        srtp_rx_key = get_srtp_key_salt(view, 0)
    else:
        srtp_tx_key = get_srtp_key_salt(view, 0)
        srtp_rx_key = get_srtp_key_salt(view, 1)
    rx_policy = Policy(key=srtp_rx_key, ssrc_type=Policy.SSRC_ANY_INBOUND)
    rx_policy.allow_repeat_tx = True
    rx_policy.window_size = 1024
    self._rx_srtp = Session(rx_policy)
    tx_policy = Policy(key=srtp_tx_key, ssrc_type=Policy.SSRC_ANY_OUTBOUND)
    tx_policy.allow_repeat_tx = True
    tx_policy.window_size = 1024
    self._tx_srtp = Session(tx_policy)
    self.__log_debug('DTLS handshake complete')
    self._set_state(State.CONNECTED)
    self._task = asyncio.ensure_future(self.__run())
Start DTLS transport negotiation with the parameters of the remote DTLS transport. :param: remoteParameters: An :class:`RTCDtlsParameters`.
16,962
def qteImportModule(self, fileName: str):
    path, name = os.path.split(fileName)
    name, ext = os.path.splitext(name)
    if path == '':
        path = sys.path
    else:
        path = [path]
    try:
        fp, pathname, desc = imp.find_module(name, path)
    except ImportError:
        # error message text reconstructed approximately
        msg = 'Could not find module <b>{}</b>.'.format(fileName)
        self.qteLogger.error(msg)
        return None
    try:
        mod = imp.load_module(name, fp, pathname, desc)
        return mod
    except ImportError:
        msg = 'Could not import module <b>{}</b>.'.format(fileName)
        self.qteLogger.error(msg)
        return None
    finally:
        if fp:
            fp.close()
Import ``fileName`` at run-time. If ``fileName`` has no path prefix then it must be in the standard Python module path. Relative path names are possible. |Args| * ``fileName`` (**str**): file name (with full path) of module to import. |Returns| * **module**: the imported Python module, or **None** if an error occurred. |Raises| * **None**
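The imp machinery used above is deprecated in modern Python; an equivalent run-time import can be sketched with importlib.util (a swapped-in technique, not what qteImportModule itself uses):

import importlib.util

def import_module_from_file(file_name):
    spec = importlib.util.spec_from_file_location('plugin', file_name)
    if spec is None:
        return None
    mod = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(mod)    # executes the module body
    return mod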
16,963
def unindent(self):
    cursor = self.textCursor()
    if not cursor.hasSelection():
        cursor.movePosition(QTextCursor.StartOfBlock)
        line = foundations.strings.to_string(
            self.document().findBlockByNumber(cursor.blockNumber()).text())
        indent_marker = re.match(r"({0})".format(self.__indent_marker), line)
        if indent_marker:
            foundations.common.repeat(cursor.deleteChar,
                                      len(indent_marker.group(1)))
    else:
        block = self.document().findBlock(cursor.selectionStart())
        while True:
            block_cursor = self.textCursor()
            block_cursor.setPosition(block.position())
            indent_marker = re.match(r"({0})".format(self.__indent_marker),
                                     block.text())
            if indent_marker:
                foundations.common.repeat(block_cursor.deleteChar,
                                          len(indent_marker.group(1)))
            if block.contains(cursor.selectionEnd()):
                break
            block = block.next()
    return True
Unindents the document text under cursor. :return: Method success. :rtype: bool
16,964
async def request_resource(self, type: Type[T_Resource],
                           name: str = 'default') -> T_Resource:
    value = self.get_resource(type, name)
    if value is not None:
        return value
    signals = [ctx.resource_added for ctx in self.context_chain]
    await wait_event(
        signals,
        lambda event: event.resource_name == name and type in event.resource_types)
    return self.require_resource(type, name)
Look up a resource in the chain of contexts. This is like :meth:`get_resource` except that if the resource is not already available, it will wait for one to become available. :param type: type of the requested resource :param name: name of the requested resource :return: the requested resource
16,965
def _run_module_as_main(mod_name, alter_argv=True):
    try:
        if alter_argv or mod_name != "__main__":
            mod_name, loader, code, fname = _get_module_details(mod_name)
        else:
            mod_name, loader, code, fname = _get_main_module_details()
    except ImportError as exc:
        msg = "%s: %s" % (sys.executable, str(exc))
        sys.exit(msg)
    pkg_name = mod_name.rpartition('.')[0]
    main_globals = sys.modules["__main__"].__dict__
    if alter_argv:
        sys.argv[0] = fname
    return _run_code(code, main_globals, None,
                     "__main__", fname, loader, pkg_name)
Runs the designated module in the __main__ namespace Note that the executed module will have full access to the __main__ namespace. If this is not desirable, the run_module() function should be used to run the module code in a fresh namespace. At the very least, these variables in __main__ will be overwritten: __name__ __file__ __loader__ __package__
16,966
def delete(path, dryrun=False, recursive=True, verbose=None, print_exists=True,
           ignore_errors=True):
    # print message texts reconstructed approximately
    if verbose is None:
        verbose = VERBOSE
        if not QUIET:
            verbose = 1
    if verbose > 0:
        print('[util_path] Deleting path=%r' % path)
    exists_flag = exists(path)
    link_flag = islink(path)
    if not exists_flag and not link_flag:
        if print_exists and verbose:
            print('..does not exist!')
        flag = False
    else:
        rmargs = dict(verbose=verbose > 1, ignore_errors=ignore_errors,
                      dryrun=dryrun)
        if islink(path):
            os.unlink(path)
            flag = True
        elif isdir(path):
            flag = remove_files_in_dir(path, recursive=recursive, **rmargs)
            flag = flag and remove_dirs(path, **rmargs)
        elif isfile(path):
            flag = remove_file(path, **rmargs)
        else:
            raise ValueError('Unknown type of path=%r' % (path,))
    if verbose > 0:
        print('[util_path] Finished deleting path=%r' % path)
    return flag
Removes a file, directory, or symlink
16,967
def retrieveVals(self):
    ntpinfo = NTPinfo()
    stats = ntpinfo.getPeerStats()
    if stats:
        # graph and field names reconstructed approximately
        if self.hasGraph('ntp_peer_stratum'):
            self.setGraphVal('ntp_peer_stratum', 'stratum',
                             stats.get('stratum'))
        if self.hasGraph('ntp_peer_stats'):
            self.setGraphVal('ntp_peer_stats', 'offset', stats.get('offset'))
            self.setGraphVal('ntp_peer_stats', 'delay', stats.get('delay'))
            self.setGraphVal('ntp_peer_stats', 'jitter', stats.get('jitter'))
Retrieve values for graphs.
16,968
def Serialize(self, val, info):
    self._Serialize(val, info, self.defaultNS)
Serialize an object
16,969
def is_compatible(self):
    # option key reconstructed from the docstring
    for pattern in OPTIONS['compatible_patterns']:
        if fnmatch(self.package.lower(), pattern):
            return True
    return False
Check if package name is matched by compatible_patterns
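fnmatch applies shell-style wildcards, so a pattern list behaves like this (the patterns here are hypothetical):

from fnmatch import fnmatch

compatible_patterns = ['django-*', 'requests']
for package in ('Django-Extensions', 'requests', 'flask'):
    ok = any(fnmatch(package.lower(), p) for p in compatible_patterns)
    print(package, ok)    # True, True, False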
16,970
def SetAuth(self, style, user=None, password=None):
    self.auth_style, self.auth_user, self.auth_pass = style, user, password
    return self
Change auth style, return object to user.
16,971
def get_storage_conn(storage_account=None, storage_key=None, opts=None):
    if opts is None:
        opts = {}
    if not storage_account:
        storage_account = opts.get('storage_account', None)
    if not storage_key:
        storage_key = opts.get('storage_key', None)
    return azure.storage.BlobService(storage_account, storage_key)
.. versionadded:: 2015.8.0 Return a storage_conn object for the storage account
16,972
def absdir(path):
    if not os.path.isabs(path):
        path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(), path)))
    if path is None or not os.path.isdir(path):
        return None
    return path
Return absolute, normalized path to directory, if it exists; None otherwise.
16,973
def get_update_object(self, form):
    pk = form.cleaned_data['pk']
    queryset = self.get_queryset()
    try:
        obj = queryset.get(pk=pk)
    except queryset.model.DoesNotExist:
        obj = None
    return obj
Retrieves the target object based on the update form's ``pk`` and the table's queryset.
16,974
def cached_property(prop):
    def cache_wrapper(self):
        if not hasattr(self, "_cache"):
            self._cache = {}
        if prop.__name__ not in self._cache:
            return_value = prop(self)
            if isgenerator(return_value):
                return_value = tuple(return_value)
            self._cache[prop.__name__] = return_value
        return self._cache[prop.__name__]
    return property(cache_wrapper)
A replacement for the property decorator that will only compute the attribute's value on the first call and serve a cached copy from then on.
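Usage mirrors the built-in property decorator: the first access computes the value, later accesses are served from the instance's _cache dict:

class Report:
    @cached_property
    def totals(self):
        print('computing...')
        return sum(range(5))

r = Report()
print(r.totals)   # prints 'computing...' then 15
print(r.totals)   # cache hit: prints only 15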
16,975
def run_cmd(cmd, out=os.path.devnull, err=os.path.devnull):
    logger.debug(' '.join(cmd))
    with open(out, 'w') as hout:
        proc = subprocess.Popen(cmd, stdout=hout, stderr=subprocess.PIPE)
        err_msg = proc.communicate()[1].decode()
    with open(err, 'w') as herr:
        herr.write(str(err_msg))
    # error message text reconstructed approximately
    msg = 'Error running command {}: {}'.format(' '.join(cmd), err_msg)
    if proc.returncode != 0:
        logger.error(msg)
        raise RuntimeError(msg)
Runs an external command :param list cmd: Command to run. :param str out: Output file :param str err: Error file :raises: RuntimeError
16,976
def dynacRepresentation(self):
    details = [
        self.energyDefnFlag.val,
        self.energy.val,
        self.phase.val,
        self.x.val,
        self.y.val,
        self.radius.val,
    ]
    # The Dynac card name was stripped in extraction; 'REJECT' (Dynac's
    # aperture/rejection card) is an assumption.
    return ['REJECT', [details]]
Return the Pynac representation of this Set4DAperture instance.
16,977
def _make_datablock(self):
    section_ids = sorted(self.sections)
    id_to_insert_id = {}
    row_count = 0
    for section_id in section_ids:
        row_count += len(self.sections[section_id].points)
        id_to_insert_id[section_id] = row_count - 1
    datablock = np.empty((row_count, COLS.COL_COUNT), dtype=np.float)
    datablock[:, COLS.ID] = np.arange(len(datablock))
    datablock[:, COLS.P] = datablock[:, COLS.ID] - 1
    sections = []
    insert_index = 0
    for id_ in section_ids:
        sec = self.sections[id_]
        points, section_type, parent_id = sec.points, sec.section_type, sec.parent_id
        idx = slice(insert_index, insert_index + len(points))
        datablock[idx, COLS.XYZR] = points
        datablock[idx, COLS.TYPE] = section_type
        datablock[idx.start, COLS.P] = id_to_insert_id.get(parent_id, ROOT_ID)
        sections.append(DataBlockSection(idx, section_type, parent_id))
        insert_index = idx.stop
    return datablock, sections
Make a data_block and sections list as required by DataWrapper
16,978
async def create_turn_endpoint(protocol_factory, server_addr, username, password,
                               lifetime=600, ssl=False, transport='udp'):
    loop = asyncio.get_event_loop()
    if transport == 'tcp':
        _, inner_protocol = await loop.create_connection(
            lambda: TurnClientTcpProtocol(server_addr, username=username,
                                          password=password, lifetime=lifetime),
            host=server_addr[0], port=server_addr[1], ssl=ssl)
    else:
        _, inner_protocol = await loop.create_datagram_endpoint(
            lambda: TurnClientUdpProtocol(server_addr, username=username,
                                          password=password, lifetime=lifetime),
            remote_addr=server_addr)
    protocol = protocol_factory()
    transport = TurnTransport(protocol, inner_protocol)
    await transport._connect()
    return transport, protocol
Create datagram connection relayed over TURN.
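A sketch of driving the coroutine with an asyncio.DatagramProtocol-style factory; the server address and credentials are placeholders, and the sendto call assumes the returned TurnTransport exposes a datagram-transport interface:

import asyncio

class EchoProtocol(asyncio.DatagramProtocol):
    def datagram_received(self, data, addr):
        print('relayed from %s: %r' % (addr, data))

async def main():
    transport, protocol = await create_turn_endpoint(
        EchoProtocol,
        server_addr=('turn.example.com', 3478),  # placeholder server
        username='user', password='secret',
        transport='udp')
    transport.sendto(b'ping', ('203.0.113.1', 40000))

asyncio.run(main())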
16,979
def copyMakeBorder(src, top, bot, left, right,
                   border_type=cv2.BORDER_CONSTANT, value=0):
    hdl = NDArrayHandle()
    check_call(_LIB.MXCVcopyMakeBorder(src.handle,
                                       ctypes.c_int(top), ctypes.c_int(bot),
                                       ctypes.c_int(left), ctypes.c_int(right),
                                       ctypes.c_int(border_type),
                                       ctypes.c_double(value),
                                       ctypes.byref(hdl)))
    return mx.nd.NDArray(hdl)
Pad image border Wrapper for cv2.copyMakeBorder that uses mx.nd.NDArray Parameters ---------- src : NDArray Image in (width, height, channels). Others are the same with cv2.copyMakeBorder Returns ------- img : NDArray padded image
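A usage sketch with mxnet, assuming an image already loaded as an NDArray in HWC layout; a 10-pixel constant (black) border is added on every side:

import mxnet as mx
import cv2

img = mx.nd.zeros((100, 100, 3), dtype='uint8')  # stand-in image
padded = copyMakeBorder(img, 10, 10, 10, 10,
                        border_type=cv2.BORDER_CONSTANT, value=0)
print(padded.shape)  # (120, 120, 3)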
16,980
def _set_cdn_defaults(self):
    if self._cdn_enabled is FAULT:
        self._cdn_enabled = False
    self._cdn_uri = None
    self._cdn_ttl = DEFAULT_CDN_TTL
    self._cdn_ssl_uri = None
    self._cdn_streaming_uri = None
    self._cdn_ios_uri = None
    self._cdn_log_retention = False
Sets all the CDN-related attributes to default values.
16,981
def get_epithet_index():
    _dict = {}
    for k, v in AUTHOR_EPITHET.items():
        _dict[k] = set(v)
    return _dict
Return dict of epithets (key) to a set of all author ids of that epithet (value).
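A lookup sketch, assuming AUTHOR_EPITHET maps epithet strings to iterables of author ids; the 'Comici' key is an assumed example, not verified against the catalogue:

index = get_epithet_index()
comic_authors = index.get('Comici', set())  # set of author ids, or empty
print(len(comic_authors))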
16,982
def map_seqprop_resnums_to_seqprop_resnums(self, resnums, seqprop1, seqprop2):
    resnums = ssbio.utils.force_list(resnums)
    alignment = self._get_seqprop_to_seqprop_alignment(seqprop1=seqprop1,
                                                       seqprop2=seqprop2)
    mapped = ssbio.protein.sequence.utils.alignment.map_resnum_a_to_resnum_b(
        resnums=resnums, a_aln=alignment[0], b_aln=alignment[1])
    return mapped
Map a residue number in any SeqProp to another SeqProp using the pairwise alignment information. Args: resnums (int, list): Residue numbers in seqprop1 seqprop1 (SeqProp): SeqProp object the resnums match to seqprop2 (SeqProp): SeqProp object you want to map the resnums to Returns: dict: Mapping of seqprop1 residue numbers to seqprop2 residue numbers. If mappings don't exist in this dictionary, that means the residue number cannot be mapped according to alignment!
16,983
def create_java_executor(self, dist=None):
    dist = dist or self.dist
    if self.execution_strategy == self.NAILGUN:
        classpath = os.pathsep.join(self.tool_classpath())
        return NailgunExecutor(self._identity,
                               self._executor_workdir,
                               classpath,
                               dist,
                               startup_timeout=self.get_options().nailgun_subprocess_startup_timeout,
                               connect_timeout=self.get_options().nailgun_timeout_seconds,
                               connect_attempts=self.get_options().nailgun_connect_attempts)
    else:
        return SubprocessExecutor(dist)
Create java executor that uses this task's ng daemon, if allowed. Call only in execute() or later. TODO: Enforce this.
16,984
def register_text_type(content_type, default_encoding, dumper, loader):
    content_type = headers.parse_content_type(content_type)
    content_type.parameters.clear()
    key = str(content_type)
    _content_types[key] = content_type
    handler = _content_handlers.setdefault(key, _ContentHandler(key))
    handler.dict_to_string = dumper
    handler.string_to_dict = loader
    handler.default_encoding = default_encoding or handler.default_encoding
Register handling for a text-based content type. :param str content_type: content type to register the hooks for :param str default_encoding: encoding to use if none is present in the request :param dumper: called to encode a dictionary into a string. Calling convention: ``dumper(obj_dict).encode(encoding) -> bytes`` :param loader: called to decode a string into a dictionary. Calling convention: ``loader(obj_bytes.decode(encoding)) -> dict`` The decoding of a text content body takes into account decoding the binary request body into a string before calling the underlying dump/load routines.
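A registration sketch using the standard json module, which matches both calling conventions; the module-level registries come from the code above:

import json

register_text_type('application/json', 'utf-8',
                   dumper=json.dumps,   # dict -> str
                   loader=json.loads)   # str -> dict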
16,985
def MAPGenoToTrans(parsedGTF, feature):
    GenTransMap = parsedGTF[parsedGTF["feature"] == feature]

    def getExonsPositions(df):
        start = int(df["start"])
        stop = int(df["end"])
        strand = df["strand"]
        r = list(range(start, stop + 1))  # list() needed so .sort() works
        if strand == "-":
            r.sort(reverse=True)
        r = [str(s) for s in r]
        return ",".join(r)

    GenTransMap["feature_bases"] = GenTransMap.apply(getExonsPositions, axis=1)
    GenTransMap = GenTransMap.sort_values(by=["transcript_id", "exon_number"],
                                          ascending=True)

    def CombineExons(df):
        return pd.Series(dict(feature_bases=','.join(df['feature_bases'])))

    GenTransMap = GenTransMap.groupby("transcript_id").apply(CombineExons)
    GenTransMap = GenTransMap.to_dict().get("feature_bases")
    return GenTransMap
Gets all positions of all bases in an exon :param df: a Pandas dataframe with 'start', 'end', and 'strand' information for each entry. df must contain ['seqname','feature','start','end','strand','frame','gene_id','transcript_id','exon_id','exon_number'] :param feature: feature upon which to generate the map, e.g. 'exon' or 'transcript' :returns: a string with the comma separated positions of all bases in the exon
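A toy input sketch: a two-exon transcript on the minus strand, showing that per-base positions run high-to-low on that strand:

import pandas as pd

gtf = pd.DataFrame({
    'seqname': ['chr1', 'chr1'], 'feature': ['exon', 'exon'],
    'start': [10, 1], 'end': [12, 3], 'strand': ['-', '-'],
    'frame': ['.', '.'], 'gene_id': ['g1', 'g1'],
    'transcript_id': ['t1', 't1'], 'exon_id': ['e1', 'e2'],
    'exon_number': [1, 2],
})
print(MAPGenoToTrans(gtf, 'exon'))
# {'t1': '12,11,10,3,2,1'}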
16,986
def count(self, filter=None, session=None, **kwargs):
    warnings.warn("count is deprecated. Use estimated_document_count or "
                  "count_documents instead. Please note that $where must "
                  "be replaced by $expr, $near must be replaced by "
                  "$geoWithin with $center, and $nearSphere must be "
                  "replaced by $geoWithin with $centerSphere",
                  DeprecationWarning, stacklevel=2)
    cmd = SON([("count", self.__name)])
    if filter is not None:
        if "query" in kwargs:
            raise ConfigurationError("can't pass both filter and query")
        kwargs["query"] = filter
    # the span between the filter check and the final call was garbled in
    # extraction; the collation handling below follows pymongo's source
    # (intermediate hint-normalization lines may be elided)
    collation = validate_collation_or_none(kwargs.pop('collation', None))
    cmd.update(kwargs)
    return self._count(cmd, collation, session)
**DEPRECATED** - Get the number of documents in this collection. The :meth:`count` method is deprecated and **not** supported in a transaction. Please use :meth:`count_documents` or :meth:`estimated_document_count` instead. All optional count parameters should be passed as keyword arguments to this method. Valid options include: - `skip` (int): The number of matching documents to skip before returning results. - `limit` (int): The maximum number of documents to count. A limit of 0 (the default) is equivalent to setting no limit. - `maxTimeMS` (int): The maximum amount of time to allow the count command to run, in milliseconds. - `collation` (optional): An instance of :class:`~pymongo.collation.Collation`. This option is only supported on MongoDB 3.4 and above. - `hint` (string or list of tuples): The index to use. Specify either the index name as a string or the index specification as a list of tuples (e.g. [('a', pymongo.ASCENDING), ('b', pymongo.ASCENDING)]). The :meth:`count` method obeys the :attr:`read_preference` of this :class:`Collection`. .. note:: When migrating from :meth:`count` to :meth:`count_documents` the following query operators must be replaced: +-------------+-------------------------------------+ | Operator | Replacement | +=============+=====================================+ | $where | `$expr`_ | +-------------+-------------------------------------+ | $near | `$geoWithin`_ with `$center`_ | +-------------+-------------------------------------+ | $nearSphere | `$geoWithin`_ with `$centerSphere`_ | +-------------+-------------------------------------+ $expr requires MongoDB 3.6+ :Parameters: - `filter` (optional): A query document that selects which documents to count in the collection. - `session` (optional): a :class:`~pymongo.client_session.ClientSession`. - `**kwargs` (optional): See list of options above. .. versionchanged:: 3.7 Deprecated. .. versionchanged:: 3.6 Added ``session`` parameter. .. versionchanged:: 3.4 Support the `collation` option. .. _$expr: https://docs.mongodb.com/manual/reference/operator/query/expr/ .. _$geoWithin: https://docs.mongodb.com/manual/reference/operator/query/geoWithin/ .. _$center: https://docs.mongodb.com/manual/reference/operator/query/center/#op._S_center .. _$centerSphere: https://docs.mongodb.com/manual/reference/operator/query/centerSphere/#op._S_centerSphere
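A migration sketch in the spirit of the deprecation notice, assuming a collection object `coll`:

# deprecated:
n = coll.count({'status': 'active'})

# preferred replacements:
n = coll.count_documents({'status': 'active'})
approx = coll.estimated_document_count()

# $where must become $expr when migrating (MongoDB 3.6+):
n = coll.count_documents({'$expr': {'$gt': ['$spent', '$budget']}})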
16,987
def solve(self, b_any, b, check_finite=True, p=None):
    if self.schur_solver is None and self.A_any_solver is None:
        assert ((b is None) or (b.shape[0] == 0)) and \
               ((b_any is None) or (b_any.shape[0] == 0)), "shape mismatch"
        return b, b_any
    elif self.schur_solver is None:
        assert (b is None) or (b.shape[0] == 0), "shape mismatch"
        solution_any = self.A_any_solver.solve(b=b_any, p=p)
        return b, solution_any
    elif self.A_any_solver is None:
        assert (b_any is None) or (b_any.shape[0] == 0), "shape mismatch"
        solution = self.schur_solver.solve(b=b, check_finite=check_finite)
        return solution, b_any
    else:
        assert p is None, "p is not None"
        cross_term = np.tensordot(self.DinvC, b_any, axes=([0, 1], [0, 1]))
        solution = self.schur_solver.solve(b=(b - cross_term),
                                           check_finite=check_finite)
        solution_any = self.A_any_solver.solve(b=b_any,
                                               check_finite=check_finite, p=p)
        solution_any -= self.DinvC.dot(solution)
        return solution, solution_any
Solve ``A \ b``, i.e. the linear system ``A x = b``, splitting the right-hand side between the Schur-complement block (``b``) and the ``A_any`` block (``b_any``).
16,988
def visit(self, node):
    for child in node:
        yield child
        for subchild in self.visit(child):
            yield subchild
Returns a generator that walks all children recursively.
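A self-contained sketch with a minimal iterable node type; the walk yields children depth-first in document order (the Node and Walker classes are illustrative stand-ins):

class Node(list):
    def __init__(self, name, children=()):
        super(Node, self).__init__(children)
        self.name = name

class Walker(object):
    visit = visit  # reuse the generator above as a method

tree = Node('root', [Node('a', [Node('a1')]), Node('b')])
print([n.name for n in Walker().visit(tree)])  # ['a', 'a1', 'b']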
16,989
def setup_size(self, width, height):
    self._iconw = max(0, width - 7)
    self._iconh = max(0, height - 6)
    self.update_all_buttons()
Set the width and height for one cell in the tooltip. This is indirectly accomplished by setting the icon sizes for the buttons. :param width: the width of one cell, min. is 7 -> icon width = 0 :type width: int :param height: the height of one cell, min. is 6 -> icon height = 0 :type height: int :returns: None :rtype: None :raises: None
16,990
def writelines(self, lines, fmt):
    if isinstance(fmt, basestring):
        fmt = [fmt] * len(lines)
    for f, line in zip(fmt, lines):
        self.writeline(f, line, self.endian)
Write ``lines`` with the given format(s) ``fmt``; a single format string is applied to every line.
16,991
def resolve_variable(provided_variable, blueprint_name):
    value = None
    if provided_variable:
        if not provided_variable.resolved:
            raise UnresolvedVariable(blueprint_name, provided_variable)
        value = provided_variable.value
    return value
Resolve a provided variable value against the variable definition. This acts as a subset of resolve_variable logic in the base module, leaving out everything that doesn't apply to CFN parameters. Args: provided_variable (:class:`stacker.variables.Variable`): The variable value provided to the blueprint. blueprint_name (str): The name of the blueprint that the variable is being applied to. Returns: object: The resolved variable string value. Raises: UnresolvedVariable: Raised when the provided variable is not already resolved.
16,992
def xpathCompareValues(self, inf, strict):
    ret = libxml2mod.xmlXPathCompareValues(self._o, inf, strict)
    return ret
Implement the compare operation on XPath objects: @arg1 < @arg2 (inf=1, strict=1), @arg1 <= @arg2 (inf=1, strict=0), @arg1 > @arg2 (inf=0, strict=1), @arg1 >= @arg2 (inf=0, strict=0). When neither object to be compared is a node-set and the operator is <=, <, >=, or >, then the objects are compared by converting both objects to numbers and comparing the numbers according to IEEE 754. The < comparison will be true if and only if the first number is less than the second number. The <= comparison will be true if and only if the first number is less than or equal to the second number. The > comparison will be true if and only if the first number is greater than the second number. The >= comparison will be true if and only if the first number is greater than or equal to the second number.
16,993
def get_runtime_vars(varset, experiment, token):
    url = ''
    if experiment in varset:
        variables = dict()

        # participant-specific variables take precedence
        if token in varset[experiment]:
            for k, v in varset[experiment][token].items():
                variables[k] = v

        # "*" holds global variables that apply to every participant
        if "*" in varset[experiment]:
            for k, v in varset[experiment]['*'].items():
                if k not in variables:
                    variables[k] = v

        varlist = ["%s=%s" % (k, v) for k, v in variables.items()]
        url = '?' + '&'.join(varlist)

    # debug message text reconstructed; the original literal was lost
    bot.debug('Parsed url %s' % url)
    return url
get_runtime_vars will return the urlparsed string of one or more runtime variables. If none are present, an empty string is returned. Parameters ========== varset: the variable set, a dictionary lookup with exp_id, token, vars experiment: the exp_id to look up token: the participant id (or token) that must be defined. Returns ======= url: the variable portion of the url to be passed to experiment, e.g., '?words=at the thing&color=red&globalname=globalvalue'
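A sketch of the lookup order, assuming the module-level `bot` logger is available: token-specific variables win over the "*" globals, and remaining globals are appended:

varset = {
    'tower-of-london': {
        '123': {'color': 'red'},
        '*': {'color': 'blue', 'words': 'at the thing'},
    }
}
print(get_runtime_vars(varset, 'tower-of-london', '123'))
# '?color=red&words=at the thing' (key order follows dict insertion)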
16,994
def password_reset_email_handler(notification):
    # NOTE: the subject literal and the settings key below were lost in
    # extraction; these values are best-effort placeholders, not verified
    base_subject = _('{domain} password reset').format(
        domain=notification.site.domain)
    subject = getattr(settings, 'PASSWORD_RESET_SUBJECT', base_subject)
    notification.email_subject = subject
    email_handler(notification, password_reset_email_context)
Password reset email handler.
16,995
def _make_result(cls, values, now, timezone):
    date = None
    time = None

    if Component.MONTH in values:
        year = cls._year_from_2digits(values.get(Component.YEAR, now.year), now.year)
        month = values[Component.MONTH]
        day = values.get(Component.DAY, 1)
        try:
            date = datetime.date(year, month, day)
        except ValueError:
            return None

    if (Component.HOUR in values and Component.MINUTE in values) \
            or Component.HOUR_AND_MINUTE in values:
        if Component.HOUR_AND_MINUTE in values:
            combined = values[Component.HOUR_AND_MINUTE]
            hour = combined // 100
            minute = combined - (hour * 100)
            second = 0
            nano = 0
        else:
            hour = values[Component.HOUR]
            minute = values[Component.MINUTE]
            second = values.get(Component.SECOND, 0)
            nano = values.get(Component.NANO, 0)

        if hour < 12 and values.get(Component.AM_PM) == cls.PM:
            hour += 12
        elif hour == 12 and values.get(Component.AM_PM) == cls.AM:
            hour -= 12

        try:
            time = datetime.time(hour, minute, second, microsecond=nano // 1000)
        except ValueError:
            return None

    if Component.OFFSET in values:
        timezone = pytz.FixedOffset(values[Component.OFFSET] // 60)

    if date is not None and time is not None:
        return timezone.localize(datetime.datetime.combine(date, time))
    elif date is not None:
        return date
    elif time is not None:
        return time
    else:
        return None
Makes a date or datetime or time object from a map of component values :param values: the component values :param now: the current now :param timezone: the current timezone :return: the date, datetime, time or none if values are invalid
16,996
def parse_string(xml):
    string = ""
    dom = XML(xml)
    for sentence in dom(XML_SENTENCE):
        _anchors.clear()
        _attachments.clear()
        language = sentence.get(XML_LANGUAGE, "en")
        format = sentence.get(XML_TOKEN, [WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA])
        format = not isinstance(format, basestring) and format \
                 or format.replace(" ", "").split(",")
        tokens = []
        for chunk in sentence:
            tokens.extend(_parse_tokens(chunk, format))
        if ANCHOR in format:
            A, P, a, i = _anchors, _attachments, 1, format.index(ANCHOR)
            for id in sorted(A.keys()):
                for token in A[id]:
                    token[i] += "-" + "-".join(["A" + str(a + p) for p in range(len(P[id]))])
                    token[i] = token[i].strip("O-")
                for p, pnp in enumerate(P[id]):
                    for token in pnp:
                        token[i] += "-" + "P" + str(a + p)
                        token[i] = token[i].strip("O-")
                a += len(P[id])
        tokens = ["/".join([tag for tag in token]) for token in tokens]
        tokens = " ".join(tokens)
        string += tokens + "\n"
    try:
        if MBSP:
            from mbsp import TokenString
        return TokenString(string.strip(), tags=format, language=language)
    except:
        return TaggedString(string.strip(), tags=format, language=language)
Returns a slash-formatted string from the given XML representation. The return value is a TokenString (for MBSP) or TaggedString (for Pattern).
16,997
def activations(self):
    activation = lib.EnvGetNextActivation(self._env, ffi.NULL)
    while activation != ffi.NULL:
        yield Activation(self._env, activation)
        activation = lib.EnvGetNextActivation(self._env, activation)
Iterate over the Activations in the Agenda.
16,998
def set_own_module(self, path):
    # 'log' matches the parameter name; the event-handler name was lost in
    # extraction and 'legion_reset' is an assumed placeholder
    log = self._params.get('log', self._discard)
    self._name = path
    self.module_add(event_target(self, 'legion_reset', key=path, log=log), path)
This is provided so the calling process can arrange for processing to be stopped and a LegionReset exception raised when any part of the program's own module tree changes.
16,999
def show(self):
    self.iren.Initialize()
    self.ren_win.SetSize(800, 800)
    self.ren_win.SetWindowName(self.title)
    self.ren_win.Render()
    self.iren.Start()
Display the visualizer.