Columns: "Unnamed: 0" (int64, 0 – 389k), "code" (string, lengths 26 – 79.6k), "docstring" (string, lengths 1 – 46.9k)
10,100
def Scroll_up(self, n, dl=0):
    self.Delay(dl)
    self.mouse.scroll(vertical=n)
Scroll the mouse wheel up n times.
10,101
def update_portal(self, portal_obj):
    headers = {'User-Agent': self.user_agent()}
    headers.update(self.headers())
    r = requests.put(
        self.portals_url() + '/' + self.portal_id(),
        data=json.dumps(portal_obj),
        headers=headers,
        auth=self.auth())
    if HTTP_STATUS.OK == r.status_code:
        return r.json()
    else:
        print("update_portal: Something went wrong: <{0}>: {1}".format(
            r.status_code, r.reason))
        r.raise_for_status()
Implements the Update device Portals API. This function is extremely dangerous. The portal object you pass in will completely overwrite the portal. http://docs.exosite.com/portals/#update-portal
10,102
def merge_single_qubit_gates_into_phased_x_z(
        circuit: circuits.Circuit, atol: float = 1e-8) -> None:
    def synth(qubit: ops.Qid, matrix: np.ndarray) -> List[ops.Operation]:
        out_gates = decompositions.single_qubit_matrix_to_phased_x_z(matrix, atol)
        return [gate(qubit) for gate in out_gates]

    MergeSingleQubitGates(synthesizer=synth).optimize_circuit(circuit)
Canonicalizes runs of single-qubit rotations in a circuit. Specifically, any run of non-parameterized circuits will be replaced by an optional PhasedX operation followed by an optional Z operation. Args: circuit: The circuit to rewrite. This value is mutated in-place. atol: Absolute tolerance to angle error. Larger values allow more negligible gates to be dropped, smaller values increase accuracy.
10,103
def transform(self):
    # Returns np.inf when either file is missing, otherwise a bool.
    if self.dcmf1 is None or self.dcmf2 is None:
        return np.inf
    for field_name in self.field_weights:
        if (str(getattr(self.dcmf1, field_name, '')) !=
                str(getattr(self.dcmf2, field_name, ''))):
            return False
    return True
Check the field values in self.dcmf1 and self.dcmf2 and returns True if all the field values are the same, False otherwise. Returns ------- bool
10,104
def GetMetadataLegacy(client, token=None): if isinstance(client, rdfvalue.RDFURN): client_fd = aff4.FACTORY.Open(client, mode="r", token=token) else: client_fd = client metadata = ExportedMetadata() metadata.client_urn = client_fd.urn metadata.client_age = client_fd.urn.age metadata.hostname = utils.SmartUnicode( client_fd.Get(client_fd.Schema.HOSTNAME, "")) metadata.os = utils.SmartUnicode(client_fd.Get(client_fd.Schema.SYSTEM, "")) metadata.uname = utils.SmartUnicode(client_fd.Get(client_fd.Schema.UNAME, "")) metadata.os_release = utils.SmartUnicode( client_fd.Get(client_fd.Schema.OS_RELEASE, "")) metadata.os_version = utils.SmartUnicode( client_fd.Get(client_fd.Schema.OS_VERSION, "")) kb = client_fd.Get(client_fd.Schema.KNOWLEDGE_BASE) usernames = "" if kb: usernames = [user.username for user in kb.users] or "" metadata.usernames = utils.SmartUnicode(usernames) metadata.mac_address = utils.SmartUnicode( client_fd.Get(client_fd.Schema.MAC_ADDRESS, "")) system_labels = set() user_labels = set() for l in client_fd.GetLabels(): if l.owner == "GRR": system_labels.add(l.name) else: user_labels.add(l.name) metadata.labels = ",".join(sorted(system_labels | user_labels)) metadata.system_labels = ",".join(sorted(system_labels)) metadata.user_labels = ",".join(sorted(user_labels)) metadata.hardware_info = client_fd.Get(client_fd.Schema.HARDWARE_INFO) metadata.kernel_version = client_fd.Get(client_fd.Schema.KERNEL) return metadata
Builds ExportedMetadata object for a given client id. Note: This is a legacy aff4-only implementation. TODO(user): deprecate as soon as REL_DB migration is done. Args: client: RDFURN of a client or VFSGRRClient object itself. token: Security token. Returns: ExportedMetadata object with metadata of the client.
10,105
def get_injuries_by_team(self, season, week, team_id):
    result = self._method_call("Injuries/{season}/{week}/{team_id}", "stats",
                               season=season, week=week, team_id=team_id)
    return result
Injuries by week and team
10,106
def get_cluster(self, label):
    for cluster in self._clusters:
        if label == cluster['label']:
            return self._get_connection(cluster)
    raise AttributeError('no cluster with label %s in the config' % label)
Returns a connection to a mongo-clusters. Args: label (string): the label of a cluster. Returns: A connection to the cluster labeld with label. Raises: AttributeError: there is no cluster with the given label in the config
10,107
def calcFontScaling(self):
    self.ypx = self.figure.get_size_inches()[1] * self.figure.dpi
    self.xpx = self.figure.get_size_inches()[0] * self.figure.dpi
    self.fontSize = self.vertSize * (self.ypx / 2.0)
    self.leftPos = self.axes.get_xlim()[0]
    self.rightPos = self.axes.get_xlim()[1]
Calculates the current font size and left position for the current window.
10,108
async def probe_message(self, _message, context):
    client_id = context.user_data
    await self.probe(client_id)
Handle a probe message. See :meth:`AbstractDeviceAdapter.probe`.
10,109
def issubset(self, other):
    return (set(self).issubset(set(other)) and
            set(self.iter_links()).issubset(set(other.iter_links())) and
            all(set(self[chip]).issubset(other[chip]) and
                all(self[chip][r] <= other[chip][r] for r in self[chip])
                for chip in self))
Test whether the resources available in this machine description are a (non-strict) subset of those available in another machine. .. note:: This test being False does not imply that this machine is a superset of the other machine; machines may have disjoint resources.
10,110
def key_exists(self, namespace, key): return namespace in self.__data and key in self.__data[namespace]
Checks a namespace for the existence of a specific key Args: namespace (str): Namespace to check in key (str): Name of the key to check for Returns: `True` if key exists in the namespace, else `False`
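A minimal standalone sketch of the same membership test, using a hypothetical `data` dict in place of the instance's private store:

    # Hypothetical namespace store shaped like the one key_exists() checks.
    data = {"app": {"token": "abc123"}}

    def key_exists(namespace, key):
        # Same two-step membership test as the method above.
        return namespace in data and key in data[namespace]

    print(key_exists("app", "token"))    # True
    print(key_exists("app", "missing"))  # False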
10,111
def execute(self, shell=True):
    process = Popen(self.command, stdout=PIPE, stderr=PIPE, shell=shell)
    self.output, self.errors = process.communicate()
Executes the command set on the class. Args: shell (boolean): Set True if the command is a shell command. Default: True
10,112
def remove(item):
    if os.path.isdir(item):
        shutil.rmtree(item)
    else:
        os.remove(item)
Delete item, whether it's a file, a folder, or a folder full of other files and folders.
10,113
def scale_degree_to_bitmap(scale_degree, modulo=False, length=BITMAP_LENGTH):
    sign = 1
    if scale_degree.startswith("*"):
        sign = -1
        scale_degree = scale_degree.strip("*")
    edit_map = [0] * length
    sd_idx = scale_degree_to_semitone(scale_degree)
    if sd_idx < length or modulo:
        edit_map[sd_idx % length] = sign
    return np.array(edit_map)
Create a bitmap representation of a scale degree. Note that values in the bitmap may be negative, indicating that the semitone is to be removed. Parameters ---------- scale_degree : str Spelling of a relative scale degree, e.g. 'b3', '7', '#5' modulo : bool, default=False If a scale degree exceeds the length of the bit-vector, modulo the scale degree back into the bit-vector; otherwise it is discarded. length : int, default=12 Length of the bit-vector to produce Returns ------- bitmap : np.ndarray, in [-1, 0, 1], len=`length` Bitmap representation of this scale degree.
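For intuition, a small self-contained sketch of the bitmap produced for a degree that maps to semitone index 3 (e.g. 'b3'), assuming a 12-slot vector; the semitone index is hard-coded because scale_degree_to_semitone is not shown here.

    import numpy as np

    length, sd_idx, sign = 12, 3, 1   # assumed: 'b3' -> semitone 3, no '*' prefix
    edit_map = [0] * length
    if sd_idx < length:
        edit_map[sd_idx % length] = sign
    print(np.array(edit_map))  # [0 0 0 1 0 0 0 0 0 0 0 0]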
10,114
def triplify(self, data, parent=None):
    if data is None:
        return
    if self.is_object:
        for res in self._triplify_object(data, parent):
            yield res
    elif self.is_array:
        for item in data:
            for res in self.items.triplify(item, parent):
                yield res
    else:
        type_name = typecast.name(data)
        obj = typecast.stringify(type_name, data)
        if obj is not None:
            obj = obj.strip()
        yield (parent, self.predicate, obj, type_name)
Recursively generate statements from the data supplied.
10,115
def randrange(seq):
    seq = seq.copy()
    choose = rng().choice
    remove = seq.remove
    for x in range(len(seq)):
        y = choose(seq)
        remove(y)
        yield y
Yields random values from @seq until @seq is empty
10,116
def next_partname(self, template):
    partnames = {part.partname for part in self.iter_parts()}
    for n in range(1, len(partnames) + 2):
        candidate_partname = template % n
        if candidate_partname not in partnames:
            return PackURI(candidate_partname)
Return a |PackURI| instance representing partname matching *template*. The returned part-name has the next available numeric suffix to distinguish it from other parts of its type. *template* is a printf (%)-style template string containing a single replacement item, a '%d' to be used to insert the integer portion of the partname. Example: "/word/header%d.xml"
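A short sketch of the suffix search with a hypothetical set of existing partnames:

    partnames = {"/word/header1.xml", "/word/header2.xml"}  # hypothetical existing parts
    template = "/word/header%d.xml"
    for n in range(1, len(partnames) + 2):
        candidate = template % n
        if candidate not in partnames:
            print(candidate)  # /word/header3.xml -- first free numeric suffix
            break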
10,117
def main(): parser = argparse.ArgumentParser(description="An interface to CarbonBlack environments") parser.add_argument(, , choices=auth.CredentialStore("response").get_profiles(), help=-t production\) parser.add_argument(, , type=str, help=production\, default=) subparsers = parser.add_subparsers(dest=) cbinterface_commands = [ , , , , , ] parser_vx = subparsers.add_parser(, help="search cbsandbox for processes in vxstream report, show detections") parser_vx.add_argument(, help=) parser_vx.add_argument(, , action=, help=) parser_usb = subparsers.add_parser(, help="Show recent removable drive activity on the sensor") parser_usb.add_argument(, help=) parser_usb.add_argument(, , action=, help=) parser_proc = subparsers.add_parser(, help="analyze a process GUID. for more") parser_proc.add_argument(, help="the process GUID to analyze") parser_proc.add_argument(, action=, help="Warn before printing large executions") parser_proc.add_argument(, , action=, help="walk and analyze the process tree") parser_proc.add_argument(, , action=, help="print details on the process ancestry") parser_proc.add_argument(, , action=, help="show binary and process information") parser_proc.add_argument(,, action=, help="only print process children event details") parser_proc.add_argument(, , action=, help="print network connections") parser_proc.add_argument(, , action=, help="print file modifications") parser_proc.add_argument(, , action=, help="print registry modifications") parser_proc.add_argument(, , action=, help="print unsigned modloads") parser_proc.add_argument(, , action=, help="print modloads") parser_proc.add_argument(, , action=, help="print crossprocs") parser_proc.add_argument(, action=, help="Don--jsonstore_trueoutput process summary in json--segment-limitstorestop processing events into json after this many process segmentsprocess_namechildproc_nameusernameparent_namepathhostnameparent_pidcomms_ipprocess_md5startgroupinterface_ipmodload_countchildproc_countcmdlineregmod_countprocess_pidparent_idos_typerocessblock_countcrossproc_countnetconn_countparent_md5host_typelast_updatefilemod_countqueryquery -hqueryd like to execute") parser_query.add_argument(, , action=, help="Only return processes with events after given date/time stamp\ (server’s clock). Format: eastern time") parser_query.add_argument(, , action=, help="Set the maximum last update time. 
Format: eastern time") parser_query.add_argument(, action=, choices=facet_args, help=) parser_query.add_argument(, action=, help="Don-lh--logon-historystore_truecollectperform LR collection tasks on a hostsensor-f--filepathstorecollect file-c--command-execstorecommand to execute-p--process-liststore_trueshow processes running on sensor-m--memdumpstoreALLMEM?dump memory on a specific process-id-lr--regkeypathstoreList all registry values from the specified registry key.-r--regkeyvaluestoreReturns the associated value of the specified registry key.-i--infostore_trueprint sensor information-gst--get-taskstore_trueget scheduled tasks or specifc task-mc--multi-collectstorepath to ini file listing files and regs to collectremediateremediate a hostsensor-i--isolatetoggle host isolationstore_true-f--remediation-filepathhelp-dst--delete-scheduled-task-kpname--kill-process-name-kpid--kill-pid-df--delete-file-dr--delete-regkey-dd--delete-directoryremediatehelpremediate-h-h All VxStream related stuff may be removed in a future version vxdetectvxstreamenvtypeproduction,envtype,querycollectremediateenumerate_usbt found in any environments".format(args.sensor)) return 0 elif len(cb_results) > 1: LOGGER.error("A sensor by hostname {} was found in multiple environments".format(args.sensor)) for r in cb_results: print("Results:") print("Profile {}: {} (SID:{})".format(r[1],r[0].hostname,r[0].id)) return 1 results = cb_results[0] profile = results[1] sensor = results[0] if args.command == : enumerate_usb(sensor, args.start_time) config = {} try: default_profile = auth.default_profile default_profile[] = None config = auth.CredentialStore("response").get_credentials(profile=profile) except: pass if args.command == : hyper_lr = hyperLiveResponse(sensor) if args.info: print(hyper_lr) return True lr_session = hyper_lr.go_live() if args.multi_collect: filepaths = regpaths = full_collect = None config = ConfigParser() config.read(args.multi_collect) try: filepaths = config.items("files") except: filepaths = [] try: regpaths = config.items("registry_paths") except: regpaths = [] try: full_collect = config.get(, ) except: pass if regpaths is not None: for regpath in regpaths: if isinstance(regpath, tuple): regpath = regpath[1] print("~ Trying to get {}".format(regpath)) try: result = lr_session.get_registry_value(regpath) if result: localfname = args.sensor + + result[] + ".txt" with open(localfname,) as f: f.write(bytes(result[], )) print("\t+ Data written to: {}".format(localfname)) except Exception as e: print("[!] Error: {}".format(str(e))) if filepaths is not None: for filepath in filepaths: try: hyper_lr.getFile_with_timeout(filepath[1]) except Exception as e: print("[!] 
Error: {}".format(str(e))) if full_collect == : return False return True elif args.filepath: hyper_lr.getFile_with_timeout(args.filepath) elif args.process_list: hyper_lr.print_processes() elif args.memdump: config = ConfigParser() config.read(CONFIG_PATH) cb_compress = config[].getboolean() custom_compress = config[].getboolean() custom_compress_file = config[][] auto_collect_mem = config[].getboolean() lerc_collect_mem = config[].getboolean() path_to_procdump = config[][] if args.memdump == "ALLMEM": return hyper_lr.dump_sensor_memory(cb_compress=cb_compress, custom_compress=custom_compress, custom_compress_file=custom_compress_file, auto_collect_result=auto_collect_mem) else: return hyper_lr.dump_process_memory(args.memdump, path_to_procdump=path_to_procdump) elif args.command_exec: print("executing on {}".format(args.command_exec, args.sensor)) result = lr_session.create_process(args.command_exec, wait_timeout=60, wait_for_output=True) print("\n-------------------------") result = result.decode() print(result + "\n-------------------------") print() elif args.regkeypath: print("\n\t{}".format(args.regkeypath)) results = lr_session.list_registry_keys(args.regkeypath) for result in results: print("\t-------------------------") print("\tName: {}".format(result[])) print("\tType: {}".format(result[])) print("\tData: {}".format(result[])) print() elif args.regkeyvalue: print("\n\t{}".format(args.regkeyvalue)) result = lr_session.get_registry_value(args.regkeyvalue) print("\t-------------------------") print("\tName: {}".format(result[])) print("\tType: {}".format(result[])) print("\tData: {}".format(result[])) print() elif args.get_task: return hyper_lr.get_scheduled_tasks() else: if config[]: result = hyper_lr.get_lerc_status() if not result or result == or result == : if not hyper_lr.deploy_lerc(config[]): LOGGER.warn("LERC deployment failed") else: LOGGER.info("{} environment is not configrued for LERC deployment".format(profile)) return LR_collection(hyper_lr, args) if args.command == : return Remediation(sensor, args) process_tree = None if args.command == : proc = proc_search_environments(profiles, args.process) if not proc: return 1 sp = SuperProcess(proc) if args.proc_info: print(sp) elif args.walk_tree: sp.walk_process_tree() print() print(sp.process_tree) for process in sp.process_tree: if process.is_suppressed: print("+ [DATA SUPPRESSED] {} (PID:{}) - {}".format(process.name, process.pid, process.id)) continue print("+ {} (PID:{}) - {}".format(process.name, process.pid, process.id)) if args.filemods: process.print_filemods() args.no_analysis = True if args.netconns: process.print_netconns() args.no_analysis = True if args.regmods: process.print_regmods() args.no_analysis = True if args.unsigned_modloads: process.print_unsigned_modloads() args.no_analysis = True if args.modloads: process.print_modloads() args.no_analysis = True if args.crossprocs: process.print_crossprocs() args.no_analysis = True if args.walk_parents: sp.show_ancestry() args.no_analysis = True if args.no_analysis != True: if args.json: if args.segment_limit: print(process.events_to_json(segment_limit=args.segment_limit)) else: print(process.events_to_json()) else: process.default_print() else: print() print(sp.process_tree) if args.walk_parents: sp.show_ancestry() args.no_analysis = True if args.filemods: sp.print_filemods() args.no_analysis = True if args.netconns: sp.print_netconns() args.no_analysis = True if args.regmods: sp.print_regmods() args.no_analysis = True if args.unsigned_modloads: 
sp.print_unsigned_modloads() args.no_analysis = True if args.modloads: sp.print_modloads() args.no_analysis = True if args.crossprocs: sp.print_crossprocs() args.no_analysis = True if args.show_children: sp.print_child_events() args.no_analysis = True if args.no_analysis != True: if args.json: if args.segment_limit: print(sp.events_to_json(segment_limit=args.segment_limit)) else: print(sp.events_to_json()) else: sp.default_print() print() return True
All VxStream related stuff may be removed in a future version
10,118
def edit_config_input_target_config_target_running_running(self, **kwargs):
    config = ET.Element("config")
    edit_config = ET.Element("edit_config")
    config = edit_config
    input = ET.SubElement(edit_config, "input")
    target = ET.SubElement(input, "target")
    config_target = ET.SubElement(target, "config-target")
    running = ET.SubElement(config_target, "running")
    running = ET.SubElement(running, "running")
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
10,119
def checkCache(fnm, strip=0, upx=0): if ((not strip and not upx and not is_darwin and not is_win) or fnm.lower().endswith(".manifest")): return fnm if strip: strip = 1 else: strip = 0 if upx: upx = 1 else: upx = 0 cachedir = os.path.join(CONFIGDIR, % (strip, upx)) if not os.path.exists(cachedir): os.makedirs(cachedir) cacheindexfn = os.path.join(cachedir, "index.dat") if os.path.exists(cacheindexfn): cache_index = _load_data(cacheindexfn) else: cache_index = {} try: res = winmanifest.GetManifestResources(os.path.abspath(cachedfile)) except winresource.pywintypes.error, e: if e.args[0] == winresource.ERROR_BAD_EXE_FORMAT: pass else: logger.error(os.path.abspath(cachedfile)) raise else: if winmanifest.RT_MANIFEST in res and len(res[winmanifest.RT_MANIFEST]): for name in res[winmanifest.RT_MANIFEST]: for language in res[winmanifest.RT_MANIFEST][name]: try: manifest = winmanifest.Manifest() manifest.filename = ":".join([cachedfile, str(winmanifest.RT_MANIFEST), str(name), str(language)]) manifest.parse_string(res[winmanifest.RT_MANIFEST][name][language], False) except Exception, exc: logger.error("Cannot parse manifest resource %s, " "%s from", name, language) logger.error(cachedfile) logger.exception(exc) else: olen = len(manifest.dependentAssemblies) _depNames = set([dep.name for dep in manifest.dependentAssemblies]) for pydep in pyasm: if not pydep.name in _depNames: logger.info("Adding %r to dependent " "assemblies of %r", pydep.name, cachedfile) manifest.dependentAssemblies.append(pydep) _depNames.update(pydep.name) if len(manifest.dependentAssemblies) > olen: try: manifest.update_resources(os.path.abspath(cachedfile), [name], [language]) except Exception, e: logger.error(os.path.abspath(cachedfile)) raise if cmd: try: compat.exec_command(*cmd) except OSError, e: raise SystemExit("Execution failed: %s" % e) cache_index[basenm] = digest _save_data(cacheindexfn, cache_index) if is_darwin: dylib.mac_set_relative_dylib_deps(cachedfile) return cachedfile
Cache prevents preprocessing binary files again and again.
10,120
def reset_defaults(self):
    self.save_login.setChecked(False)
    self.save_password.setChecked(False)
    self.save_url.setChecked(False)
    set_setting(GEONODE_USER, '')
    set_setting(GEONODE_PASSWORD, '')
    set_setting(GEONODE_URL, '')
    self.login.setText('')
    self.password.setText('')
    self.url.setText('')
Reset login and password in QgsSettings.
10,121
def run_process(command, environ): log.info(, command, environ) env = dict(os.environ) env.update(environ) try: p = subprocess.Popen(args=command, env=env) except OSError as e: raise OSError( % (command, e)) log.debug(, p.pid) ret = p.wait() log.debug(, p.pid, ret) return ret
Run the specified process and wait until it finishes. Use environ dict for environment variables.
10,122
def make_unique_script_attr(attributes):
    filtered_attr = []
    script_list = []
    for attr in attributes:
        if attr.Usage != TransactionAttributeUsage.Script:
            filtered_attr.append(attr)
        else:
            data = attr.Data
            if isinstance(data, UInt160):
                data = attr.Data.ToArray()
            if data not in script_list:
                script_list.append(data)
                filtered_attr.append(attr)
    return filtered_attr
Filter out duplicate `Script` TransactionAttributeUsage types. Args: attributes: a list of TransactionAttribute's Returns: list:
10,123
def _validate_indexers( self, indexers: Mapping, ) -> List[Tuple[Any, Union[slice, Variable]]]: from .dataarray import DataArray invalid = [k for k in indexers if k not in self.dims] if invalid: raise ValueError("dimensions %r do not exist" % invalid) indexers_list = [] for k, v in indexers.items(): if isinstance(v, slice): indexers_list.append((k, v)) continue if isinstance(v, Variable): pass elif isinstance(v, DataArray): v = v.variable elif isinstance(v, tuple): v = as_variable(v) elif isinstance(v, Dataset): raise TypeError() elif isinstance(v, Sequence) and len(v) == 0: v = IndexVariable((k, ), np.zeros((0,), dtype=)) else: v = np.asarray(v) if v.dtype.kind == or v.dtype.kind == : index = self.indexes[k] if isinstance(index, pd.DatetimeIndex): v = v.astype() elif isinstance(index, xr.CFTimeIndex): v = _parse_array_of_cftime_strings(v, index.date_type) if v.ndim == 0: v = Variable((), v) elif v.ndim == 1: v = IndexVariable((k,), v) else: raise IndexError( "Unlabeled multi-dimensional array cannot be " "used for indexing: {}".format(k)) if v.ndim == 1: v = v.to_index_variable() indexers_list.append((k, v)) return indexers_list
Here we make sure + indexer has a valid keys + indexer is in a valid data type + string indexers are cast to the appropriate date type if the associated index is a DatetimeIndex or CFTimeIndex
10,124
def rateServiceTypeInResult(discoveryResponse):
    if discoveryResponse is None:
        return 0
    serviceType = discoveryResponse.service
    if serviceType.startswith("urn:dslforum-org:device"):
        return 11
    if serviceType.startswith("urn:dslforum-org:service"):
        return 10
    if serviceType.startswith("urn:dslforum-org:"):
        return 9
    if serviceType.startswith("urn:schemas-upnp-org:device"):
        return 8
    if serviceType.startswith("urn:schemas-upnp-org:service"):
        return 7
    if serviceType.startswith("urn:schemas-upnp-org:"):
        return 6
    if serviceType.startswith("urn:schemas-"):
        return 5
    if serviceType.startswith("urn:"):
        return 4
    if serviceType.startswith("upnp:rootdevice"):
        return 3
    if serviceType.startswith("uuid:"):
        return 2
    return 1
Gives a quality rating for a given service type in a result, higher is better. Several UpnP devices reply to a discovery request with multiple responses with different service type announcements. To find the most specific one we need to be able rate the service types against each other. Usually this is an internal method and just exported for convenience reasons. :param DiscoveryResponse discoveryResponse: the response to rate :return: a rating of the quality of the given service type :rtype: int
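A small illustration of how the ratings order competing announcements; the service-type strings are hypothetical, but the scores follow the prefix rules above:

    ratings = {
        "urn:dslforum-org:service:WANIPConnection:1": 10,
        "urn:schemas-upnp-org:device:InternetGatewayDevice:1": 8,
        "upnp:rootdevice": 3,
        "uuid:2fac1234-31f8-11b4-a222-08002b34c003": 2,
    }
    best = max(ratings, key=ratings.get)
    print(best)  # the dslforum service announcement is the most specific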
10,125
async def create_link_secret(self, label: str) -> None: LOGGER.debug(, label) if not self.handle: LOGGER.debug(, self.name) raise WalletState(.format(self.name)) try: await anoncreds.prover_create_master_secret(self.handle, label) await self._write_link_secret_label(label) except IndyError as x_indy: if x_indy.error_code == ErrorCode.AnoncredsMasterSecretDuplicateNameError: LOGGER.warning( , self.name) await self._write_link_secret_label(label) else: LOGGER.debug( , self.name, x_indy.error_code) raise LOGGER.debug()
Create link secret (a.k.a. master secret) used in proofs by HolderProver, if the current link secret does not already correspond to the input link secret label. Raise WalletState if wallet is closed, or any other IndyError causing failure to set link secret in wallet. :param label: label for link secret; indy-sdk uses label to generate link secret
10,126
def export_disks(self, standalone=True, dst_dir=None, compress=False,
                 collect_only=False, with_threads=True, *args, **kwargs):
    return self.provider.export_disks(standalone, dst_dir, compress,
                                      collect_only, with_threads,
                                      *args, **kwargs)
Thin method that just uses the provider
10,127
def user_invite(self, username, email, roles): uri = data = { "username": username, "email": email, "roles": list(set(roles)) } post_body = json.dumps(data) resp, body = self.post(uri, body=post_body) self.expected_success(200, resp.status) body = json.loads(body) return rest_client.ResponseBody(resp, body)
Invite a user to the tenant.
10,128
def remove_tar_files(file_list):
    for f in file_list:
        if file_exists(f) and f.endswith('.tar'):
            os.remove(f)
Public function that removes temporary tar archive files in a local directory
10,129
def get_GET_array(request, var_name, fail_silently=True):
    vals = request.GET.getlist(var_name)
    if not vals:
        if fail_silently:
            return []
        else:
            raise Exception(_("No array called '%(var_name)s' in GET variables")
                            % {'var_name': var_name})
    return vals
Returns the GET array's contents for the specified variable.
10,130
def get_stock_codes(self, cached=True, as_json=False): url = self.stocks_csv_url req = Request(url, None, self.headers) res_dict = {} if cached is not True or self.__CODECACHE__ is None: res = self.opener.open(req) if res is not None: res = byte_adaptor(res) for line in res.read().split(): if line != and re.search(, line): (code, name) = line.split()[0:2] res_dict[code] = name else: raise Exception() self.__CODECACHE__ = res_dict return self.render_response(self.__CODECACHE__, as_json)
returns a dictionary with key as stock code and value as stock name. It also implements cache functionality and hits the server only if user insists or cache is empty :return: dict
10,131
def add_attachment(self, attachment): log = logging.getLogger(self.cls_logger + ) if not isinstance(attachment, SlackAttachment): msg = log.error(msg) raise ValueError(msg) self.attachments.append(attachment.attachment) log.debug(.format(a=attachment))
Adds an attachment to the SlackMessage payload This public method adds a slack message to the attachment list. :param attachment: SlackAttachment object :return: None
10,132
def create_channel(cls, address="spanner.googleapis.com:443", credentials=None):
    grpc_gcp_config = grpc_gcp.api_config_from_text_pb(
        pkg_resources.resource_string(__name__, _SPANNER_GRPC_CONFIG))
    options = [(grpc_gcp.API_CONFIG_CHANNEL_ARG, grpc_gcp_config)]
    return google.api_core.grpc_helpers.create_channel(
        address, credentials=credentials, scopes=cls._OAUTH_SCOPES)
Create and return a gRPC channel object. Args: address (str): The host for the channel to use. credentials (~.Credentials): The authorization credentials to attach to requests. These credentials identify this application to the service. If none are specified, the client will attempt to ascertain the credentials from the environment. Returns: grpc.Channel: A gRPC channel object.
10,133
def replace(self, record_id, fields, typecast=False):
    record_url = self.record_url(record_id)
    return self._put(record_url, json_data={"fields": fields, "typecast": typecast})
Replaces a record by its record id. All Fields are updated to match the new ``fields`` provided. If a field is not included in ``fields``, its value will be set to null. To update only selected fields, use :any:`update`. >>> record = airtable.match('Seat Number', '22A') >>> fields = {'PassangerName': 'Mike', 'Passport': 'YASD232-23'} >>> airtable.replace(record['id'], fields) Args: record_id(``str``): Id of Record to update fields(``dict``): Fields to replace with. Must be dictionary with Column names as Key. typecast(``boolean``): Automatic data conversion from string values. Returns: record (``dict``): New record
10,134
def _get_all_headers(self, method, endpoint, request_bytes, custom_headers):
    headers = self._get_default_headers()
    headers.update(custom_headers)
    if self._api_context.token is not None:
        headers[self.HEADER_AUTHENTICATION] = self._api_context.token
        headers[self.HEADER_SIGNATURE] = security.sign_request(
            self._api_context.installation_context.private_key_client,
            method, endpoint, request_bytes, headers)
    return headers
:type method: str :type endpoint: str :type request_bytes: bytes :type custom_headers: dict[str, str] :rtype: dict[str, str]
10,135
def add_user_role(self, user, role):
    self.project_service.set_auth(self._token_project)
    self.project_service.add_user_role(user, role)
Add role to given user. Args: user (string): User name. role (string): Role to assign. Raises: requests.HTTPError on failure.
10,136
def get_namespace(self, key):
    namespace = self.shared_namespaces.get(key)
    if namespace:
        return namespace
    ns = SharedNamespace(self.manager)
    self.shared_namespaces[key] = ns
    return ns
Returns a :class:`~bang.util.SharedNamespace` for the given :attr:`key`. These are used by :class:`~bang.deployers.deployer.Deployer` objects of the same ``deployer_class`` to coordinate control over multiple deployed instances of like resources. E.g. With 5 clones of an application server, 5 :class:`~bang.deployers.deployer.Deployer` objects in separate, concurrent processes will use the same shared namespace to ensure that each object/process controls a distinct server. :param str key: Unique ID for the namespace. :class:`~bang.deployers.deployer.Deployer` objects that call :meth:`get_namespace` with the same :attr:`key` will receive the same :class:`~bang.util.SharedNamespace` object.
10,137
def _save_nb(nb_name): display(Javascript()) display(Javascript()) print(, end=) if _wait_for_save(nb_name): print("Saved .".format(nb_name)) else: logging.warning( "Could not save your notebook (timed out waiting for " "IPython save). Make sure your notebook is saved " "and export again." )
Attempts to save notebook. If unsuccessful, shows a warning.
10,138
def transform(self, X, lenscale=None):
    N, d = X.shape
    lenscale = self._check_dim(d, lenscale)
    return expit(cdist(X / lenscale, self.C / lenscale, ))
r""" Apply the sigmoid basis function to X. Parameters ---------- X: ndarray (N, d) array of observations where N is the number of samples, and d is the dimensionality of X. lenscale: float the length scale (scalar) of the RBFs to apply to X. If not input, this uses the value of the initial length scale. Returns ------- ndarray: of shape (N, D) where D is number of centres.
10,139
def _opt_to_args(cls, opt, val):
    no_value = (
        "alloptions", "all-logs", "batch", "build", "debug", "experimental",
        "list-plugins", "list-presets", "list-profiles", "noreport", "quiet",
        "verify"
    )
    count = ("verbose",)
    if opt in no_value:
        return ["--%s" % opt]
    if opt in count:
        return ["--%s" % opt for d in range(0, int(val))]
    return ["--" + opt + "=" + val]
Convert a named option and optional value to command line argument notation, correctly handling options that take no value or that have special representations (e.g. verify and verbose).
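A standalone sketch of the three conversion forms (no-value flag, counted flag, key=value); the option tuples are abbreviated here for brevity:

    no_value = ("quiet", "batch")   # abbreviated; the real tuple is longer
    count = ("verbose",)

    def opt_to_args(opt, val):
        if opt in no_value:
            return ["--%s" % opt]
        if opt in count:
            return ["--%s" % opt for _ in range(int(val))]
        return ["--" + opt + "=" + val]

    print(opt_to_args("quiet", "1"))      # ['--quiet']
    print(opt_to_args("verbose", "3"))    # ['--verbose', '--verbose', '--verbose']
    print(opt_to_args("profile", "web"))  # ['--profile=web']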
10,140
def control_valve_noise_g_2011(m, P1, P2, T1, rho, gamma, MW, Kv, d, Di, t_pipe, Fd, FL, FLP=None, FP=None, rho_pipe=7800.0, c_pipe=5000.0, P_air=101325.0, rho_air=1.2, c_air=343.0, An=-3.8, Stp=0.2, T2=None, beta=0.93): r k = gamma C = Kv_to_Cv(Kv) N14 = 4.6E-3 N16 = 4.89E4 fs = 1.0 P_air_std = 101325.0 if T2 is None: T2 = T1 x = (P1 - P2)/P1 FL_term = FLP/FP if FP is not None else FL P_vc = P1*(1.0 - x/FL_term**2) x_vcc = 1.0 - (2.0/(k + 1.0))**(k/(k - 1.0)) xc = FL_term**2*x_vcc alpha = (1.0 - x_vcc)/(1.0 - xc) xB = 1.0 - 1.0/alpha*(1.0/k)**((k/(k - 1.0))) xCE = 1.0 - 1.0/(22.0*alpha) den = (rho2*c2 + 2.0*pi*t_pipe*fi*rho_pipe*eta_s)/(415.0*Gy) + 1.0 TL_fi = 10.0*log10(8.25E-7*(c2/(t_pipe*fi))**2*Gx/den*P_air_ratio) - dTL if M2 > 0.3: term = LpiSf else: term = Lpif L_pe1m_fi = term + TL_fi - 10.0*log10((Di + 2.0*t_pipe + 2.0)/(Di + 2.0*t_pipe)) L_pe1m_fis.append(L_pe1m_fi) LpAe1m_sum += 10.0**(0.1*(L_pe1m_fi + A_weight)) LpAe1m = 10.0*log10(LpAe1m_sum) return LpAe1m
r'''Calculates the sound made by a gas flowing through a control valve according to the standard IEC 60534-8-3 (2011) [1]_. Parameters ---------- m : float Mass flow rate of gas through the control valve, [kg/s] P1 : float Inlet pressure of the gas before valves and reducers [Pa] P2 : float Outlet pressure of the gas after valves and reducers [Pa] T1 : float Inlet gas temperature, [K] rho : float Density of the gas at the inlet [kg/m^3] gamma : float Specific heat capacity ratio [-] MW : float Molecular weight of the gas [g/mol] Kv : float Metric Kv valve flow coefficient (flow rate of water at a pressure drop of 1 bar) [m^3/hr] d : float Diameter of the valve [m] Di : float Internal diameter of the pipe before and after the valve [m] t_pipe : float Wall thickness of the pipe after the valve, [m] Fd : float Valve style modifier (0.1 to 1; varies tremendously depending on the type of valve and position; do not use the default at all!) [-] FL : float Liquid pressure recovery factor of a control valve without attached fittings (normally 0.8-0.9 at full open and decreasing as opened further to below 0.5; use default very cautiously!) [-] FLP : float, optional Combined liquid pressure recovery factor with piping geometry factor, for a control valve with attached fittings [-] FP : float, optional Piping geometry factor [-] rho_pipe : float, optional Density of the pipe wall material at flowing conditions, [kg/m^3] c_pipe : float, optional Speed of sound of the pipe wall material at flowing conditions, [m/s] P_air : float, optional Pressure of the air surrounding the valve and pipe wall, [Pa] rho_air : float, optional Density of the air surrounding the valve and pipe wall, [kg/m^3] c_air : float, optional Speed of sound of the air surrounding the valve and pipe wall, [m/s] An : float, optional Valve correction factor for acoustic efficiency Stp : float, optional Strouhal number at the peak `fp`; between 0.1 and 0.3 typically, [-] T2 : float, optional Outlet gas temperature; assumed `T1` if not provided (a PH flash should be used to obtain this if possible), [K] beta : float, optional Valve outlet / expander inlet contraction coefficient, [-] Returns ------- LpAe1m : float A weighted sound pressure level 1 m from the pipe wall, 1 m distance dowstream of the valve (at reference sound pressure level 2E-5), [dBA] Notes ----- For formulas see [1]_. This takes on the order of 100 us to compute. For values of `An`, see [1]_. This model was checked against six examples in [1]_; they match to all given decimals. Several additional formulas are given for multihole trim valves, control valves with two or more fixed area stages, and multipath, multistage trim valves. Examples -------- >>> control_valve_noise_g_2011(m=2.22, P1=1E6, P2=7.2E5, T1=450, rho=5.3, ... gamma=1.22, MW=19.8, Kv=77.85, d=0.1, Di=0.2031, FL=None, FLP=0.792, ... FP=0.98, Fd=0.296, t_pipe=0.008, rho_pipe=8000.0, c_pipe=5000.0, ... rho_air=1.293, c_air=343.0, An=-3.8, Stp=0.2) 91.67702674629604 References ---------- .. [1] IEC 60534-8-3 : Industrial-Process Control Valves - Part 8-3: Noise Considerations - Control Valve Aerodynamic Noise Prediction Method."
10,141
def filter(self, func=None, **query): if callable(func): result = filter(func, self) result.insert(0, self.default_columns) return TableFu(result, **self.options) else: result = self for column, value in query.items(): result = result.filter(lambda r: r[column] == value) return result
Tables can be filtered in one of two ways: - Simple keyword arguments return rows where values match *exactly* - Pass in a function and return rows where that function evaluates to True In either case, a new TableFu instance is returned
10,142
def getElements(self, zero_based=True, pared=False): points = self._points[:] elements = self._elements[:] offset = 0 if not zero_based: offset = 1 np = None if pared: np = NodePare() np.addPoints(points) np.parePoints() if pared or not zero_based: modified_elements = [] for element in elements: modified_element = [index + offset if np is None else np.getParedIndex(index) + offset for index in element] modified_elements.append(modified_element) elements = modified_elements return elements
Get the elements of the mesh as a list of point index list. :param zero_based: use zero based index of points if true otherwise use 1-based index of points. :param pared: use the pared down list of points :return: A list of point index lists
10,143
def get_bond(iface):
    path = os.path.join(_DEB_NETWORK_CONF_FILES, '{0}'.format(iface))
    return _read_file(path)
Return the content of a bond script CLI Example: .. code-block:: bash salt '*' ip.get_bond bond0
10,144
def update(self, date_expiry=values.unset, ttl=values.unset, mode=values.unset,
           status=values.unset, participants=values.unset):
    return self._proxy.update(
        date_expiry=date_expiry,
        ttl=ttl,
        mode=mode,
        status=status,
        participants=participants,
    )
Update the SessionInstance :param datetime date_expiry: The ISO 8601 date when the Session should expire :param unicode ttl: When the session will expire :param SessionInstance.Mode mode: The Mode of the Session :param SessionInstance.Status status: The new status of the resource :param dict participants: The Participant objects to include in the session :returns: Updated SessionInstance :rtype: twilio.rest.proxy.v1.service.session.SessionInstance
10,145
def dok15_s(k15):
    A, B = design(15)
    sbar = np.dot(B, k15)
    t = (sbar[0] + sbar[1] + sbar[2])
    bulk = old_div(t, 3.)
    Kbar = np.dot(A, sbar)
    dels = k15 - Kbar
    dels, sbar = old_div(dels, t), old_div(sbar, t)
    So = sum(dels**2)
    sigma = np.sqrt(old_div(So, 9.))
    return sbar, sigma, bulk
calculates least-squares matrix for 15 measurements from Jelinek [1976]
10,146
def register(self, type):
    def _decorator(func):
        if isinstance(type, tuple):
            for t in type:
                self.func_registry[t] = func
        else:
            self.func_registry[type] = func
        return func
    return _decorator
Registers a custom formatting function with ub.repr2
10,147
def apply_rotation_scheme(self, backups_by_frequency, most_recent_backup): if not self.rotation_scheme: raise ValueError("Refusing to use empty rotation scheme! (all backups would be deleted)") for frequency, backups in backups_by_frequency.items(): if frequency not in self.rotation_scheme: backups.clear() else: for period, backups_in_period in backups.items(): index = -1 if self.prefer_recent else 0 selected_backup = sorted(backups_in_period)[index] backups[period] = [selected_backup] retention_period = self.rotation_scheme[frequency] if retention_period != : if self.strict: minimum_date = most_recent_backup - SUPPORTED_FREQUENCIES[frequency] * retention_period for period, backups_in_period in list(backups.items()): for backup in backups_in_period: if backup.timestamp < minimum_date: backups_in_period.remove(backup) if not backups_in_period: backups.pop(period) items_to_preserve = sorted(backups.items())[-retention_period:] backups_by_frequency[frequency] = dict(items_to_preserve)
Apply the user defined rotation scheme to the result of :func:`group_backups()`. :param backups_by_frequency: A :class:`dict` in the format generated by :func:`group_backups()`. :param most_recent_backup: The :class:`~datetime.datetime` of the most recent backup. :raises: :exc:`~exceptions.ValueError` when the rotation scheme dictionary is empty (this would cause all backups to be deleted). .. note:: This method mutates the given data structure by removing all backups that should be removed to apply the user defined rotation scheme.
10,148
def ExportMigrations(): from django.db.migrations.executor import MigrationExecutor if in connections and ( type(connections[]) == DatabaseWrapper): return for alias in connections.databases: executor = MigrationExecutor(connections[alias]) ExportMigrationsForDatabase(alias, executor)
Exports counts of unapplied migrations. This is meant to be called during app startup, ideally by django_prometheus.apps.AppConfig.
10,149
def wake_lock_size(self):
    output = self.adb_shell(WAKE_LOCK_SIZE_CMD)
    if not output:
        return None
    return int(output.split("=")[1].strip())
Get the size of the current wake lock.
10,150
def increment_slug(s):
    # Split on hyphens and bump the trailing counter, per the docstring:
    # "my-test" -> "my-test-2", "my-test-2" -> "my-test-3", and so on.
    slug_parts = s.split('-')
    try:
        slug_parts[-1] = str(int(slug_parts[-1]) + 1)
    except ValueError:
        slug_parts.append('2')
    return '-'.join(slug_parts)
Generate next slug for a series. Some docstore types will use slugs (see above) as document ids. To support unique ids, we'll serialize them as follows: TestUserA/my-test TestUserA/my-test-2 TestUserA/my-test-3 ...
10,151
def main(): table_data = [ [, ], ] table = SingleTable(table_data) max_width = table.column_max_width(1) wrapped_string = .join(wrap(LONG_STRING, max_width)) table.table_data[0][1] = wrapped_string print(table.table)
Main function.
10,152
def get_tag(self, tagname, tagidx):
    return '%s=%s' % (tagname, decode(getattr(self, tagname)[tagidx]))
:returns: the tag associated to the given tagname and tag index
10,153
def _GetBytes(partition_key):
    if isinstance(partition_key, six.string_types):
        return bytearray(partition_key, encoding='utf-8')
    else:
        raise ValueError("Unsupported " + str(type(partition_key)) + " for partitionKey.")
Gets the bytes representing the value of the partition key.
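The same check in isolation, assuming a UTF-8 encoding for string keys:

    def get_bytes(partition_key):
        # str keys become UTF-8 bytearrays; anything else is rejected.
        if isinstance(partition_key, str):
            return bytearray(partition_key, encoding='utf-8')
        raise ValueError("Unsupported %s for partitionKey." % type(partition_key))

    print(get_bytes("tenant-42"))  # bytearray(b'tenant-42')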
10,154
def print_version(ctx, value):
    if not value:
        return
    import pkg_resources
    version = None
    try:
        version = pkg_resources.get_distribution('sandman').version
    finally:
        del pkg_resources
    click.echo(version)
    ctx.exit()
Print the current version of sandman and exit.
10,155
def bust_self(self, obj):
    if self.func.__name__ in obj.__dict__:
        delattr(obj, self.func.__name__)
Remove the value that is being stored on `obj` for this :class:`.cached_property` object. :param obj: The instance on which to bust the cache.
10,156
def load_template_source(self, template_name, template_dirs=None): raise TemplateDoesNotExist(template_name)
Template loader that loads templates from zipped modules.
10,157
def has_ended(self): assessment_offered = self.get_assessment_offered() now = DateTime.utcnow() else: return False
Tests if this assessment has ended. return: (boolean) - ``true`` if the assessment has ended, ``false`` otherwise *compliance: mandatory -- This method must be implemented.*
10,158
def p_expr_number(p):
    "number : NUMBER"
    p[0] = node.number(p[1], lineno=p.lineno(1), lexpos=p.lexpos(1))
number : NUMBER
10,159
def config(self):
    if self._config:
        return self._config
    else:
        self._config = p_config.ProsperConfig(self.config_path)
        return self._config
uses "global config" for cfg
10,160
def _SnakeCaseToCamelCase(path_name):
    result = []
    after_underscore = False
    for c in path_name:
        if c.isupper():
            raise Error('Path name {0} must not contain uppercase '
                        'letters.'.format(path_name))
        if after_underscore:
            if c.islower():
                result.append(c.upper())
                after_underscore = False
            else:
                raise Error('The character after a "_" must be a lowercase '
                            'letter in path name {0}.'.format(path_name))
        elif c == '_':
            after_underscore = True
        else:
            result += c
    if after_underscore:
        raise Error('Trailing "_" in path name {0}.'.format(path_name))
    return ''.join(result)
Converts a path name from snake_case to camelCase.
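A simplified sketch of the conversion itself (validation omitted), to show the expected output shape:

    def snake_to_camel(name):
        # Keep the first segment, capitalise the first letter of each later segment.
        head, *rest = name.split('_')
        return head + ''.join(part[:1].upper() + part[1:] for part in rest)

    print(snake_to_camel("field_mask_path"))  # fieldMaskPath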
10,161
def Rsky(self): return np.sqrt(self.position.x**2 + self.position.y**2)
Projected sky separation of stars
10,162
def bbox(self):
    if not hasattr(self, '_bbox'):
        self._bbox = extract_bbox(self)
    return self._bbox
(left, top, right, bottom) tuple.
10,163
def do_array(self, parent=None, ident=0): log_debug("[array]", ident) _, classdesc = self._read_and_exec_opcode( ident=ident + 1, expect=( self.TC_CLASSDESC, self.TC_PROXYCLASSDESC, self.TC_NULL, self.TC_REFERENCE, ), ) array = JavaArray(classdesc) self._add_reference(array, ident) (size,) = self._readStruct(">i") log_debug("size: {0}".format(size), ident) type_char = classdesc.name[0] assert type_char == self.TYPE_ARRAY type_char = classdesc.name[1] if type_char == self.TYPE_OBJECT or type_char == self.TYPE_ARRAY: for _ in range(size): _, res = self._read_and_exec_opcode(ident=ident + 1) log_debug("Object value: {0}".format(res), ident) array.append(res) elif type_char == self.TYPE_BYTE: array = JavaByteArray(self.object_stream.read(size), classdesc) elif self.use_numpy_arrays: import numpy array = numpy.fromfile( self.object_stream, dtype=JavaObjectConstants.NUMPY_TYPE_MAP[type_char], count=size, ) else: for _ in range(size): res = self._read_value(type_char, ident) log_debug("Native value: {0}".format(repr(res)), ident) array.append(res) return array
Handles a TC_ARRAY opcode :param parent: :param ident: Log indentation level :return: A list of deserialized objects
10,164
def users(self):
    result = self.db.read("", {"q": "ls"})
    if result is None or result.json() is None:
        return []
    users = []
    for u in result.json():
        usr = self(u["name"])
        usr.metadata = u
        users.append(usr)
    return users
Returns the list of users in the database
10,165
def _default_commands(self): commands = [c() for c in find_commands(Command)] for ep in pkg_resources.iter_entry_points( group="enaml_native_command"): c = ep.load() if not issubclass(c, Command): print("Warning: entry point {} did not return a valid enaml " "cli command! This command will be ignored!".format( ep.name)) commands.append(c()) return commands
Build the list of CLI commands by finding subclasses of the Command class Also allows commands to be installed using the "enaml_native_command" entry point. This entry point should return a Command subclass
10,166
def expand_branch_name(self, name): if not name: return self.default_revision unambiguous_name = prefix + name logger.debug("Branch name %r matches remote branch %r.", name, unambiguous_name) return unambiguous_name logger.debug("Failed to expand branch name %r.", name) return name
Expand branch names to their unambiguous form. :param name: The name of a local or remote branch (a string). :returns: The unambiguous form of the branch name (a string). This internal method is used by methods like :func:`find_revision_id()` and :func:`find_revision_number()` to detect and expand remote branch names into their unambiguous form which is accepted by commands like ``git rev-parse`` and ``git rev-list --count``.
10,167
def forceValue(self, newVal, noteEdited=False):
    if newVal is None:
        newVal = ""
    self.choice.set(newVal)
    if noteEdited:
        self.widgetEdited(val=newVal, skipDups=False)
Force-set a parameter entry to the given value
10,168
def list_connections(self, status=None): if status is None: status = response, status_code = self.__pod__.Connection.get_v1_connection_list( sessionToken=self.__session__, status=status ).result() self.logger.debug( % (status_code, response)) return status_code, response
list connections
10,169
def get_forced_variation(self, experiment, user_id): forced_variations = experiment.forcedVariations if forced_variations and user_id in forced_variations: variation_key = forced_variations.get(user_id) variation = self.config.get_variation_from_key(experiment.key, variation_key) if variation: self.logger.info( % (user_id, variation_key)) return variation return None
Determine if a user is forced into a variation for the given experiment and return that variation. Args: experiment: Object representing the experiment for which user is to be bucketed. user_id: ID for the user. Returns: Variation in which the user with ID user_id is forced into. None if no variation.
10,170
def _next_server(self):
    if self.options["dont_randomize"]:
        server = self._server_pool.pop(0)
        self._server_pool.append(server)
    else:
        shuffle(self._server_pool)
    s = None
    for server in self._server_pool:
        if self.options["max_reconnect_attempts"] > 0 and (
                server.reconnects > self.options["max_reconnect_attempts"]):
            continue
        else:
            s = server
    return s
Chooses next available server to connect.
10,171
def substring(ctx, full, start, length):
    full = next(string_arg(ctx, full), '')
    start = int(next(to_number(start)))
    length = int(next(to_number(length)))
    yield full[start-1:start-1+length]
Yields one string
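The slice mirrors XPath's 1-based substring() semantics; a quick standalone check:

    full, start, length = "hello world", 7, 5   # XPath positions start at 1
    print(full[start - 1:start - 1 + length])   # world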
10,172
def get_solvers(self, refresh=False, order_by=, **filters): def covers_op(prop, val): if not isinstance(prop, (list, tuple)) or not len(prop) == 2: raise ValueError("2-element list/tuple range required for LHS value") llo, lhi = min(prop), max(prop) if isinstance(val, (list, tuple)) and len(val) == 2: rlo, rhi = min(val), max(val) return llo <= rlo and lhi >= rhi else: return llo <= val <= lhi def within_op(prop, val): try: return covers_op(val, prop) except ValueError: raise ValueError("2-element list/tuple range required for RHS value") def _set(iterable): first = next(iter(iterable)) if isinstance(first, list): return set(tuple(x) for x in iterable) return set(iterable) def with_valid_lhs(op): @wraps(op) def _wrapper(prop, val): if prop is None: return False return op(prop, val) return _wrapper ops = { : with_valid_lhs(operator.lt), : with_valid_lhs(operator.le), : with_valid_lhs(operator.gt), : with_valid_lhs(operator.ge), : operator.eq, : lambda prop, val: prop is not None if val else prop is None, : with_valid_lhs(lambda prop, val: re.match("^{}$".format(val), prop)), : with_valid_lhs(covers_op), : with_valid_lhs(within_op), : lambda prop, val: prop in val, : with_valid_lhs(lambda prop, val: val in prop), : with_valid_lhs(lambda prop, val: _set(prop).issubset(_set(val))), : with_valid_lhs(lambda prop, val: _set(prop).issuperset(_set(val))), } def predicate(solver, query, val): assert len(query) >= 1 potential_path, potential_op_name = query[:-1], query[-1] if potential_op_name in ops: op_name = potential_op_name else: op_name = None potential_path = query path = .join(potential_path) if path in solver.derived_properties: op = ops[op_name or ] return op(getattr(solver, path), val) elif pluck(solver.parameters, path, None) is not None: op = ops[op_name or ] return op(pluck(solver.parameters, path), val) elif pluck(solver.properties, path, None) is not None: op = ops[op_name or ] return op(pluck(solver.properties, path), val) else: op = ops[op_name or ] return op(None, val) sort_reverse = False if not order_by: sort_key = None elif isinstance(order_by, six.string_types): if order_by[0] == : sort_reverse = True order_by = order_by[1:] if not order_by: sort_key = None else: sort_key = lambda solver: pluck(solver, order_by, None) elif callable(order_by): sort_key = order_by else: raise TypeError("expected string or callable for ") filters.setdefault(, True) predicates = [] for lhs, val in filters.items(): query = lhs.split() predicates.append(partial(predicate, query=query, val=val)) _LOGGER.debug("Filtering solvers with predicates=%r", predicates) query = dict(refresh_=refresh) if in filters: query[] = filters[] if in filters: query[] = filters[] solvers = self._fetch_solvers(**query) solvers = [s for s in solvers if all(p(s) for p in predicates)] if sort_key is not None: solvers_with_keys = [(sort_key(solver), solver) for solver in solvers] solvers_with_invalid_keys = [(key, solver) for key, solver in solvers_with_keys if key is None] solvers_with_valid_keys = [(key, solver) for key, solver in solvers_with_keys if key is not None] solvers_with_valid_keys.sort(key=operator.itemgetter(0)) solvers = [solver for key, solver in chain(solvers_with_valid_keys, solvers_with_invalid_keys)] if sort_reverse: solvers.reverse() return solvers
Return a filtered list of solvers handled by this client.

Args:
    refresh (bool, default=False):
        Force refresh of cached list of solvers/properties.

    order_by (callable/str/None, default='avg_load'):
        Solver sorting key function (or :class:`Solver` attribute/item dot-separated path).
        By default, solvers are sorted by average load. To explicitly not sort the solvers
        (and use the API-returned order), set ``order_by=None``.

        Signature of the `key` `callable` is::

            key :: (Solver s, Ord k) => s -> k

        Basic structure of the `key` string path is::

            "-"? (attr|item) ( "." (attr|item) )*

        For example, to use solver property named ``max_anneal_schedule_points``,
        available in ``Solver.properties`` dict, you can either specify a callable `key`::

            key=lambda solver: solver.properties['max_anneal_schedule_points']

        or, you can use a short string path based key::

            key='properties.max_anneal_schedule_points'

        Solver derived properties, available as :class:`Solver` properties, can also be
        used (e.g. ``num_active_qubits``, ``online``, ``avg_load``, etc).

        Ascending sort order is implied, unless the key string path starts with ``-``,
        in which case descending sort is used.

        Note: the sort used for ordering solvers by `key` is **stable**, meaning that if
        multiple solvers have the same value for the key, their relative order is
        preserved, and effectively they are in the same order as returned by the API.

        Note: solvers with ``None`` for key appear last in the list of solvers. When
        providing a key callable, ensure all values returned are of the same type
        (particularly in Python 3). For solvers with undefined key value, return ``None``.

    **filters:
        See `Filtering forms` and `Operators` below.

Solver filters are defined, similarly to Django QuerySet filters, with keyword arguments
of form `<key1>__...__<keyN>[__<operator>]=<value>`. Each `<operator>` is a predicate
(boolean) function that acts on two arguments: value of feature `<name>` (described with
keys path `<key1.key2...keyN>`) and the required `<value>`.

Feature `<name>` can be:

1) a derived solver property, available as an identically named :class:`Solver`'s
   property (`name`, `qpu`, `software`, `online`, `num_active_qubits`, `avg_load`)
2) a solver parameter, available in :obj:`Solver.parameters`
3) a solver property, available in :obj:`Solver.properties`
4) a path describing a property in nested dictionaries

Filtering forms are:

* <derived_property>__<operator> (object <value>)
* <derived_property> (bool)

  This form ensures the value of solver's property bound to `derived_property`, after
  applying `operator`, equals the `value`. The default operator is `eq`.

  For example::

      >>> client.get_solvers(avg_load__gt=0.5)

  but also::

      >>> client.get_solvers(online=True)
      >>> # identical to:
      >>> client.get_solvers(online__eq=True)

* <parameter>__<operator> (object <value>)
* <parameter> (bool)

  This form ensures that the solver supports `parameter`. General operator form can be
  used but usually does not make sense for parameters, since values are human-readable
  descriptions. The default operator is `available`.

  Example::

      >>> client.get_solvers(flux_biases=True)
      >>> # identical to:
      >>> client.get_solvers(flux_biases__available=True)

* <property>__<operator> (object <value>)
* <property> (bool)

  This form ensures the value of the solver's `property`, after applying `operator`,
  equals the right-hand side `value`. The default operator is `eq`.

Note: if a non-existent parameter/property name/key is given, the default operator is `eq`.

Operators are:

* `available` (<name>: str, <value>: bool): Test availability of <name> feature.
* `eq`, `lt`, `lte`, `gt`, `gte` (<name>: str, <value>: any): Standard relational
  operators that compare feature <name> value with <value>.
* `regex` (<name>: str, <value>: str): Test regular expression matching feature value.
* `covers` (<name>: str, <value>: single value or range expressed as 2-tuple/list):
  Test feature <name> value (which should be a *range*) covers a given value or a subrange.
* `within` (<name>: str, <value>: range expressed as 2-tuple/list): Test feature <name>
  value (which can be a *single value* or a *range*) is within a given range.
* `in` (<name>: str, <value>: container type): Test feature <name> value is *in* <value>
  container.
* `contains` (<name>: str, <value>: any): Test feature <name> value (container type)
  *contains* <value>.
* `issubset` (<name>: str, <value>: container type): Test feature <name> value
  (container type) is a subset of <value>.
* `issuperset` (<name>: str, <value>: container type): Test feature <name> value
  (container type) is a superset of <value>.

Derived properties are:

* `name` (str): Solver name/id.
* `qpu` (bool): Is solver QPU based?
* `software` (bool): Is solver software based?
* `online` (bool, default=True): Is solver online?
* `num_active_qubits` (int): Number of active qubits. Less than or equal to `num_qubits`.
* `avg_load` (float): Solver's average load (similar to Unix load average).

Common solver parameters are:

* `flux_biases`: Should solver accept flux biases?
* `anneal_schedule`: Should solver accept anneal schedule?

Common solver properties are:

* `num_qubits` (int): Number of qubits available.
* `vfyc` (bool): Should solver work on "virtual full-yield chip"?
* `max_anneal_schedule_points` (int): Piecewise linear annealing schedule points.
* `h_range` ([int,int]), `j_range` ([int,int]): Biases/couplings values range.
* `num_reads_range` ([int,int]): Range of allowed values for `num_reads` parameter.

Returns:
    list[Solver]: List of all solvers that satisfy the conditions.

Note:
    Client subclasses (e.g. :class:`dwave.cloud.qpu.Client` or
    :class:`dwave.cloud.sw.Client`) already filter solvers by resource type, so for
    `qpu` and `software` filters to have effect, call :meth:`.get_solvers` on base
    class :class:`~dwave.cloud.client.Client`.

Examples::

    client.get_solvers(
        num_qubits__gt=2000,                # we need more than 2000 qubits
        num_qubits__lt=4000,                # ... but fewer than 4000 qubits
        num_qubits__within=(2000, 4000),    # an alternative to the previous two lines
        num_active_qubits=1089,             # we want a particular number of active qubits
        vfyc=True,                          # we require a fully yielded Chimera
        vfyc__in=[False, None],             # inverse of the previous filter
        vfyc__available=False,              # we want solvers that do not advertise the vfyc property
        anneal_schedule=True,               # we need support for custom anneal schedule
        max_anneal_schedule_points__gte=4,  # we need at least 4 points for our anneal schedule
        num_reads_range__covers=1000,       # our solver must support returning 1000 reads
        extended_j_range__covers=[-2, 2],   # we need extended J range to contain subrange [-2,2]
        couplings__contains=[0, 128],       # coupling (edge between) qubits (0,128) must exist
        couplings__issuperset=[[0,128], [0,4]],  # two couplings required: (0,128) and (0,4)
        qubits__issuperset={0, 4, 215},     # qubits 0, 4 and 215 must exist
        supported_problem_types__issubset={'ising', 'qubo'},  # require Ising, QUBO or both to be supported
        name='DW_2000Q_3',                  # full solver name/ID match
        name__regex='.*2000.*',             # partial/regex-based solver name match
        chip_id__regex='DW_.*',             # chip ID prefix must be DW_
        topology__type__eq="chimera"        # topology.type must be chimera
    )
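For ordering, a brief additional sketch (the solver inventory is hypothetical)::

    >>> # descending sort by a derived property; the '-' prefix flips the order
    >>> client.get_solvers(online=True, order_by='-num_active_qubits')
    >>> # equivalent intent with a callable key (ascending by average load)
    >>> client.get_solvers(order_by=lambda solver: solver.avg_load)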
10,173
def atan(x): if isinstance(x, UncertainFunction): mcpts = np.arctan(x._mcpts) return UncertainFunction(mcpts) else: return np.arctan(x)
Inverse tangent
10,174
def run_cmd(cmd, echo=False, fail_silently=False, **kwargs):
    out, err = None, None

    if echo:
        cmd_str = cmd if isinstance(cmd, string_types) else ' '.join(cmd)
        kwargs['stdout'], kwargs['stderr'] = sys.stdout, sys.stderr
        print_message('$ {0}'.format(cmd_str))
    else:
        # hide command output by redirecting it to temporary streams
        out, err = get_temp_streams()
        kwargs['stdout'], kwargs['stderr'] = out, err

    try:
        retcode = subprocess.call(cmd, **kwargs)
    except subprocess.CalledProcessError as exc:
        if fail_silently:
            return False
        print_error(str(exc) if IS_PY3 else unicode(exc))
        retcode = exc.returncode
    finally:
        if out:
            out.close()
        if err:
            err.close()

    if retcode and echo and not fail_silently:
        print_error('Command {0!r} returned non-zero exit status {1}'.format(
            cmd_str, retcode))

    return retcode
r"""Call given command with ``subprocess.call`` function. :param cmd: Command to run. :type cmd: tuple or str :param echo: If enabled show command to call and its output in STDOUT, otherwise hide all output. By default: False :param fail_silently: Do not raise exception on error. By default: False :param \*\*kwargs: Additional keyword arguments to be passed to ``subprocess.call`` function. STDOUT and STDERR streams would be setup inside of function to ensure hiding command output in case of disabling ``echo``.
10,175
def from_json(cls, data):
    assert 'header' in data, 'Required key "header" is missing!'
    assert 'values' in data, 'Required key "values" is missing!'
    assert 'datetimes' in data, 'Required key "datetimes" is missing!'
    collection = cls(Header.from_json(data['header']),
                     data['values'],
                     [DateTime.from_json(dat) for dat in data['datetimes']])
    if 'validated_a_period' in data:
        collection._validated_a_period = data['validated_a_period']
    return collection
Create a Data Collection from a dictionary. Args: { "header": A Ladybug Header, "values": An array of values, "datetimes": An array of datetimes, "validated_a_period": Boolean for whether header analysis_period is valid }
10,176
def ensure_directory(path): dirname = os.path.dirname(path) if not os.path.exists(dirname): os.makedirs(dirname)
Ensure directory exists for a given file path.
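For instance (the path is purely illustrative)::

    >>> ensure_directory('/tmp/reports/2024/summary.csv')   # creates /tmp/reports/2024 if missing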
10,177
def extras_to_string(extras): if isinstance(extras, six.string_types): if extras.startswith("["): return extras else: extras = [extras] if not extras: return "" return "[{0}]".format(",".join(sorted(set(extras))))
Turn a list of extras into a string
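Tracing the function above, a couple of illustrative calls::

    >>> extras_to_string(['socks', 'security', 'socks'])
    '[security,socks]'
    >>> extras_to_string('[security]')
    '[security]'
    >>> extras_to_string([])
    ''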
10,178
def convert_body_to_unicode(resp):
    if type(resp) is not dict:
        # some recorded bodies are plain strings/bytes rather than dicts
        return _convert_string_to_unicode(resp)
    else:
        body = resp.get('body')
        if body is not None:
            try:
                body['string'] = _convert_string_to_unicode(body['string'])
            except (KeyError, TypeError, AttributeError):
                # the body wasn't a dict (or lacked the expected key),
                # so convert it wholesale
                resp['body'] = _convert_string_to_unicode(body)
    return resp
If the request's or response's body is bytes, decode it to a string (for Python 3 support)
10,179
def process_word(word: str, to_lower: bool = False, append_case: Optional[str] = None) -> Tuple[str]: if all(x.isupper() for x in word) and len(word) > 1: uppercase = "<ALL_UPPER>" elif word[0].isupper(): uppercase = "<FIRST_UPPER>" else: uppercase = None if to_lower: word = word.lower() if word.isdigit(): answer = ["<DIGIT>"] elif word.startswith("http://") or word.startswith("www."): answer = ["<HTTP>"] else: answer = list(word) if to_lower and uppercase is not None: if append_case == "first": answer = [uppercase] + answer elif append_case == "last": answer = answer + [uppercase] return tuple(answer)
Converts word to a tuple of symbols, optionally converts it to lowercase and adds capitalization label. Args: word: input word to_lower: whether to lowercase append_case: whether to add case mark ('<FIRST_UPPER>' for first capital and '<ALL_UPPER>' for all caps) Returns: a preprocessed word
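Tracing the code above, a couple of illustrative calls::

    >>> process_word('Moscow', to_lower=True, append_case='first')
    ('<FIRST_UPPER>', 'm', 'o', 's', 'c', 'o', 'w')
    >>> process_word('2019')
    ('<DIGIT>',)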
10,180
def template_sunmoon(self, **kwargs):
    kwargs_copy = self.base_dict.copy()
    kwargs_copy.update(**kwargs)
    kwargs_copy['dataset'] = kwargs.get('dataset', self.dataset(**kwargs))
    kwargs_copy['component'] = kwargs.get('component',
                                          self.component(**kwargs))
    self._replace_none(kwargs_copy)
    localpath = NameFactory.templatesunmoon_format.format(**kwargs_copy)
    if kwargs.get('fullpath', False):
        return self.fullpath(localpath=localpath)
    return localpath
return the file name for sun or moon template files
10,181
def _preserve_settings(method: T.Callable) -> T.Callable: @functools.wraps(method) def _wrapper( old: "ObservableProperty", handler: T.Callable ) -> "ObservableProperty": new = method(old, handler) new.event = old.event new.observable = old.observable return new return _wrapper
Decorator that ensures ObservableProperty-specific attributes are kept when using methods to change deleter, getter or setter.
10,182
def run(self): self.setup() if self.detach: self.econtext.detach() try: return self._run() finally: self.revert()
Set up the process environment in preparation for running an Ansible module. This monkey-patches the Ansible libraries in various places to prevent it from trying to kill the process on completion, and to prevent it from reading sys.stdin. :returns: Module result dictionary.
10,183
def path_to_text(self, path):
    rsrcmgr = PDFResourceManager()
    retstr = StringIO()
    codec = 'utf-8'
    laparams = LAParams()
    device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
    fp = open(path, 'rb')
    interpreter = PDFPageInterpreter(rsrcmgr, device)
    password = ""
    maxpages = 0
    caching = True
    pagenos = set()
    pages_data = PDFPage.get_pages(
        fp,
        pagenos,
        maxpages=maxpages,
        password=password,
        caching=caching,
        check_extractable=True
    )
    for page in pages_data:
        interpreter.process_page(page)
    text = retstr.getvalue()
    text = text.replace("\n", "")
    fp.close()
    device.close()
    retstr.close()
    return text
Transform local PDF file to string. Args: path: path to PDF file. Returns: string.
10,184
def dropwhile(self, func=None): func = _make_callable(func) return Collection(dropwhile(func, self._items))
Return a new Collection with the first few items removed.

Parameters:
  func : function(Node) -> bool

Returns:
  A new Collection that drops items from the front while bool(func(item)) == True,
  keeping everything from the first item where bool(func(item)) == False onward.
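For instance, assuming ``Collection`` wraps a plain sequence and exposes it via
``_items``::

    >>> kept = Collection([1, 2, 5, 1]).dropwhile(lambda x: x < 5)
    >>> list(kept._items)   # everything from the first item where the predicate is falsy
    [5, 1]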
10,185
def apply_tfa_magseries(lcfile, timecol, magcol, errcol, templateinfo,
                        mintemplatedist_arcmin=10.0,
                        lcformat='hat-sql',
                        lcformatdir=None,
                        interp='nearest',
                        sigclip=5.0):

    try:
        formatinfo = get_lcformat(lcformat, use_lcformat_dir=lcformatdir)
        if formatinfo:
            (dfileglob, readerfunc, dtimecols, dmagcols, derrcols,
             magsarefluxes, normfunc) = formatinfo
        else:
            LOGERROR("can't figure out the light curve format")
            return None
    except Exception:
        LOGEXCEPTION("can't figure out the light curve format")
        return None

    # load the template info from a pickle if a path was given
    if isinstance(templateinfo, str) and os.path.exists(templateinfo):
        with open(templateinfo, 'rb') as infd:
            templateinfo = pickle.load(infd)

    lcdict = readerfunc(lcfile)
    if ((isinstance(lcdict, (tuple, list))) and isinstance(lcdict[0], dict)):
        lcdict = lcdict[0]

    objectid = lcdict['objectid']

    # template ensemble magnitude series for this magcol
    tmagseries = templateinfo[magcol]['template_magseries'][::]

    # if the target object is itself in the template ensemble, flag it for removal
    if objectid in templateinfo[magcol]['template_objects']:
        LOGWARNING('object %s found in the TFA template ensemble, '
                   'removing it from the ensemble' % objectid)
        templateind = templateinfo[magcol]['template_objects'] == objectid

    # outdict holds the TFA-corrected time series (its computation is not shown
    # in this excerpt)
    lcdict['tfa'] = outdict

    outfile = os.path.join(
        os.path.dirname(lcfile),
        '%s-tfa-%s-pklc.pkl' % (squeeze(objectid).replace(' ', '-'), magcol)
    )
    with open(outfile, 'wb') as outfd:
        pickle.dump(lcdict, outfd, pickle.HIGHEST_PROTOCOL)

    return outfile
This applies the TFA correction to an LC given TFA template information.

Parameters
----------
lcfile : str
    This is the light curve file to apply the TFA correction to.

timecol,magcol,errcol : str
    These are the column keys in the lcdict for the LC file to apply the TFA
    correction to.

templateinfo : dict or str
    This is either the dict produced by `tfa_templates_lclist` or the pickle produced
    by the same function.

mintemplatedist_arcmin : float
    This sets the minimum distance required from the target object for objects in the
    TFA template ensemble. Objects closer than this distance will be removed from the
    ensemble.

lcformat : str
    This is the `formatkey` associated with your light curve format, which you
    previously passed in to the `lcproc.register_lcformat` function. This will be used
    to look up how to find and read the light curves specified in `basedir` or
    `use_list_of_filenames`.

lcformatdir : str or None
    If this is provided, gives the path to a directory where you've stored your
    lcformat description JSONs, other than the usual directories lcproc knows to
    search for them in. Use this along with `lcformat` to specify an LC format JSON
    file that's not currently registered with lcproc.

interp : str
    This is passed to scipy.interpolate.interp1d as the kind of interpolation to use
    when reforming this light curve to the timebase of the TFA templates.

sigclip : float or sequence of two floats or None
    This is the sigma clip to apply to this light curve before running TFA on it.

Returns
-------
str
    This returns the filename of the light curve file generated after TFA application.
    This is a pickle (that can be read by `lcproc.read_pklc`) in the same directory as
    `lcfile`. The `magcol` will be encoded in the filename, so each `magcol` in
    `lcfile` gets its own output file.
10,186
def delete_expired_requests():
    # a request is expired once its expiry date lies in the past
    InclusionRequest.query.filter(
        InclusionRequest.expiry_date < datetime.utcnow()).delete()
    db.session.commit()
Delete expired inclusion requests.
10,187
def get_user(self, username="~"): url = self._build_url("users/%s/" % username, _prepend_namespace=False) response = self._get(url) check_response(response) return response
get info about user (if no user specified, use the one initiating request) :param username: str, name of user to get info about, default="~" :return: dict
10,188
def from_name(cls, name):
    result = cls.list({'items_per_page': 500})
    webaccs = {}
    for webacc in result:
        webaccs[webacc['name']] = webacc['id']
    return webaccs.get(name)
Retrieve the webacc id associated with a webacc name.
10,189
def switch_opt(default, shortname, help_msg): return ConfOpt(bool(default), True, shortname, dict(action=internal.Switch), True, help_msg, None)
Define a switchable ConfOpt.

This creates a boolean option. If you use it in your CLI, it can be switched on and off
by prepending + or - to its name: +opt / -opt.

Args:
    default (bool): the default value of the switch option.
    shortname (str): short name of the option, no shortname will be used if it is set
        to None.
    help_msg (str): short description of the option.

Returns:
    :class:`~loam.manager.ConfOpt`: a configuration option with the given properties.
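A small sketch (the option name is hypothetical)::

    >>> verbose = switch_opt(False, 'v', 'print detailed progress information')
    >>> # on the command line, the option can then be toggled with +verbose / -verbose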
10,190
def set_working_dir(self, working_dir):
    # quote the path so that spaces in the working directory are preserved
    yield from self.send('hypervisor working_dir "{}"'.format(working_dir))
    self._working_dir = working_dir
    log.debug("Working directory set to {}".format(self._working_dir))
Sets the working directory for this hypervisor. :param working_dir: path to the working directory
10,191
def run(cls, raw_data):
    logger.debug("{}.ReceivedFromKafka: {}".format(cls.__name__, raw_data))
    try:
        kmsg = cls._onmessage(cls.TRANSPORT.loads(raw_data))
    except Exception as exc:
        logger.error(
            "{}.ImportError: Failed to load data from kafka: {}".format(
                cls.__name__, exc
            ),
            extra=dict(kafka_raw_data=raw_data)
        )
        return Result.from_exception(exc)

    try:
        cls.start_processing(kmsg)
        if kmsg.entrypoint not in cls.ENTRYPOINTS:
            raise ValidationError(
                "Entrypoint '{}' not registered".format(kmsg.entrypoint),
                extra=dict(
                    uuid=kmsg.uuid,
                    entrypoint=kmsg.entrypoint,
                    allowed=list(cls.ENTRYPOINTS.keys())
                )
            )
        result = cls.ENTRYPOINTS[kmsg.entrypoint].from_Message(kmsg).execute()
    except Exception as exc:
        result = Result.from_exception(exc, kmsg.uuid)
    finally:
        cls.stop_processing()

    if result and result.retcode < 300:
        return cls._onsuccess(kmsg=kmsg, result=result)
    else:
        return cls._onerror(kmsg=kmsg, result=result)
Consume a raw Kafka record: deserialize it with the class transport, dispatch it to the registered entrypoint, and return the resulting Result (an error Result if loading or execution fails).
10,192
def user_show(self, id, **kwargs):
    api_path = "/api/v2/users/{id}.json"
    api_path = api_path.format(id=id)
    return self.call(api_path, **kwargs)
https://developer.zendesk.com/rest_api/docs/core/users#show-user
10,193
def parse(cls, xml_path): parser = etree.XMLParser(target=cls.xml_parse()) return etree.parse(xml_path, parser)
Parses an xml_path with the inherited xml parser.

:param xml_path: path to the XML document to parse
:return: the parsed ``etree`` document tree
10,194
def stop_plugins(watcher_plugin, health_plugin): logging.debug("Stopping health-check monitor...") health_plugin.stop() logging.debug("Stopping config change observer...") watcher_plugin.stop()
Stops all plugins.
10,195
def wind_speed_hub(self, weather_df):
    if self.power_plant.hub_height in weather_df['wind_speed']:
        wind_speed_hub = weather_df['wind_speed'][self.power_plant.hub_height]
    elif self.wind_speed_model == 'logarithmic':
        logging.debug('Calculating wind speed using logarithmic wind profile.')
        closest_height = weather_df['wind_speed'].columns[
            min(range(len(weather_df['wind_speed'].columns)),
                key=lambda i: abs(weather_df['wind_speed'].columns[i] -
                                  self.power_plant.hub_height))]
        wind_speed_hub = wind_speed.logarithmic_profile(
            weather_df['wind_speed'][closest_height], closest_height,
            self.power_plant.hub_height,
            weather_df['roughness_length'].iloc[:, 0],
            self.obstacle_height)
    elif self.wind_speed_model == 'hellman':
        logging.debug('Calculating wind speed using hellman equation.')
        closest_height = weather_df['wind_speed'].columns[
            min(range(len(weather_df['wind_speed'].columns)),
                key=lambda i: abs(weather_df['wind_speed'].columns[i] -
                                  self.power_plant.hub_height))]
        wind_speed_hub = wind_speed.hellman(
            weather_df['wind_speed'][closest_height], closest_height,
            self.power_plant.hub_height,
            weather_df['roughness_length'].iloc[:, 0],
            self.hellman_exp)
    elif self.wind_speed_model == 'interpolation_extrapolation':
        logging.debug('Calculating wind speed using linear inter- or extrapolation.')
        wind_speed_hub = tools.linear_interpolation_extrapolation(
            weather_df['wind_speed'], self.power_plant.hub_height)
    elif self.wind_speed_model == 'log_interpolation_extrapolation':
        logging.debug('Calculating wind speed using logarithmic inter- or extrapolation.')
        wind_speed_hub = tools.logarithmic_interpolation_extrapolation(
            weather_df['wind_speed'], self.power_plant.hub_height)
    else:
        raise ValueError("'{0}' is an invalid value. ".format(
            self.wind_speed_model) + "`wind_speed_model` must be "
            "'logarithmic', 'hellman', 'interpolation_extrapolation' "
            "or 'log_interpolation_extrapolation'.")
    return wind_speed_hub
r""" Calculates the wind speed at hub height. The method specified by the parameter `wind_speed_model` is used. Parameters ---------- weather_df : pandas.DataFrame DataFrame with time series for wind speed `wind_speed` in m/s and roughness length `roughness_length` in m. The columns of the DataFrame are a MultiIndex where the first level contains the variable name (e.g. wind_speed) and the second level contains the height at which it applies (e.g. 10, if it was measured at a height of 10 m). See documentation of :func:`ModelChain.run_model` for an example on how to create the weather_df DataFrame. Returns ------- wind_speed_hub : pandas.Series or numpy.array Wind speed in m/s at hub height. Notes ----- If `weather_df` contains wind speeds at different heights the given wind speed(s) closest to the hub height are used.
10,196
def decompose(self, noise=False, verbosity=0, logic='or', **kwargs):
    matrix = self.get_dm(noise)

    est_scale = None
    if self._pruning_option == options.PRUNING_NONE:
        kp = len(matrix) - 1
        mask = np.ones(matrix.shape, dtype=bool)
    elif self._pruning_option == options.PRUNING_MANUAL:
        kp = self._manual_pruning
        mask = kmask(matrix, self._manual_pruning, logic=logic)
    elif self._pruning_option == options.PRUNING_ESTIMATE:
        kp, mask, est_scale = binsearch_mask(matrix, logic=logic)
    else:
        raise ValueError("Unexpected error: pruning option is not set")

    if self._scale_option == options.LOCAL_SCALE_MEDIAN:
        dist = np.median(matrix, axis=1)
        scale = np.outer(dist, dist)
    elif self._scale_option == options.LOCAL_SCALE_MANUAL:
        scale = kscale(matrix, self._manual_scale)
    elif self._scale_option == options.LOCAL_SCALE_ESTIMATE:
        if est_scale is None:
            _, _, scale = binsearch_mask(matrix, logic=logic)
        else:
            scale = est_scale
    else:
        raise ValueError("Unexpected error: scale option is not set")

    # guard against near-zero scale entries before building the affinity matrix
    if not (scale > 1e-5).all():
        if verbosity > 0:
            print('Warning: scale matrix contains near-zero entries; re-estimating scale')
        _, _, scale = binsearch_mask(matrix, logic=logic)
        assert (scale > 1e-5).all()

    aff = affinity(matrix, mask, scale)
    aff.flat[::len(aff)+1] = 1.0
    return aff
Use prune to remove links between distant points: prune is None: no pruning prune={int > 0}: prunes links beyond `prune` nearest neighbours prune='estimate': searches for the smallest value that retains a fully connected graph
10,197
def _find_classes_param(self): for attr in ["classes_"]: try: return getattr(self.estimator, attr) except AttributeError: continue raise YellowbrickTypeError( "could not find classes_ param on {}".format( self.estimator.__class__.__name__ ) )
Searches the wrapped model for the classes_ parameter.
10,198
def stop(self):
    if self.__end.is_set():
        return
    self.__end.set()
    self.__send_retry_requests_timer.cancel()
    self.__threadpool.stop()
    self.__crud_threadpool.stop()
    self.__amqplink.stop()
    self.__network_retry_thread.join()
    with self.__requests:
        shutdown = LinkShutdownException()
        for req in self.__requests.values():
            req.exception = shutdown
            req._set()
            self.__clear_references(req, remove_request=False)
        if self.__requests:
            logger.warning('Discarding %d unfinished request(s) on shutdown',
                           len(self.__requests))
            self.__requests.clear()
    self.__network_retry_thread = None
    self.__network_retry_queue = None
    self.__container_params = None
Stop the Client, disconnect from queue
10,199
def execute_action(self, agent, action):
    if action == 'Right':
        agent.location = loc_B
        agent.performance -= 1
    elif action == 'Left':
        agent.location = loc_A
        agent.performance -= 1
    elif action == 'Suck':
        if self.status[agent.location] == 'Dirty':
            agent.performance += 10
        self.status[agent.location] = 'Clean'
Change agent's location and/or location's status; track performance. Score 10 for each dirt cleaned; -1 for each move.