Dataset schema (from the dataset viewer):
    Unnamed: 0 - int64 row index, 0 to 389k
    code - string, lengths 26 to 79.6k
    docstring - string, lengths 1 to 46.9k
21,000
def tarball_context(url, target_dir=None, runner=None, pushd=pushd):
    if target_dir is None:
        target_dir = os.path.basename(url).replace('.tar.gz', '').replace('.tgz', '')
    if runner is None:
        runner = functools.partial(subprocess.check_call, shell=True)
    runner('mkdir {target_dir}'.format(**vars()))
    try:
        getter = 'wget {url} -O -'
        extract = 'tar x{compression} --strip-components=1 -C {target_dir}'
        cmd = ' | '.join((getter, extract))
        runner(cmd.format(compression=infer_compression(url), **vars()))
        with pushd(target_dir):
            yield target_dir
    finally:
        runner('rm -Rf {target_dir}'.format(**vars()))
Get a tarball, extract it, change to that directory, yield, then clean up. `runner` is the function to invoke commands. `pushd` is a context manager for changing the directory.
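A minimal usage sketch; it assumes the function is exposed as a contextlib context manager (it matches jaraco.context) and the URL is illustrative:

from jaraco.context import tarball_context  # assumed import location

with tarball_context('https://example.com/pkg-1.0.tar.gz') as dirname:
    # inside the block the working directory is the extracted tree
    print('extracted into', dirname)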
21,001
def delete_external_feed_groups(self, group_id, external_feed_id):
    path = {}
    data = {}
    params = {}
    path["group_id"] = group_id
    path["external_feed_id"] = external_feed_id
    self.logger.debug(
        "DELETE /api/v1/groups/{group_id}/external_feeds/{external_feed_id} "
        "with query params: {params} and form data: {data}".format(
            params=params, data=data, **path))
    return self.generic_request(
        "DELETE",
        "/api/v1/groups/{group_id}/external_feeds/{external_feed_id}".format(**path),
        data=data, params=params, single_item=True)
Delete an external feed from the given group.
21,002
def get_device_hybrid_interfaces(auth, url, devid=None, devip=None):
    if devip is not None:
        devid = get_dev_details(devip, auth, url)['id']
    get_hybrid_interface_vlan_url = "/imcrs/vlan/hybrid?devId=" + str(devid) + \
                                    "&start=1&size=500&total=false"
    f_url = url + get_hybrid_interface_vlan_url
    response = requests.get(f_url, auth=auth, headers=HEADERS)
    try:
        if response.status_code == 200:
            dev_hybrid_interfaces = json.loads(response.text)
            if len(dev_hybrid_interfaces) == 2:
                dev_hybrid = dev_hybrid_interfaces['hybridIf']  # key assumed; literal lost in source
                if isinstance(dev_hybrid, dict):
                    dev_hybrid = [dev_hybrid]
                return dev_hybrid
            dev_hybrid_interfaces['hybridIf'] = ["No hybrid interface"]
            return dev_hybrid_interfaces['hybridIf']
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + " get_device_hybrid_interfaces: An Error has occurred"
Function takes devid or devip as input for a RESTful call to the HP IMC platform
and returns the hybrid interfaces of the target device.

:param auth: requests auth object  # usually auth.creds from pyhpeimc.auth.IMCAuth class
:param url: base url of IMC RS interface  # usually auth.url from pyhpeimc.auth.IMCAuth class
:param devid: str devid of the target device
:param devip: str ipv4 address of the target device
:return: list of dictionaries where each element of the list represents an
    interface which has been configured as a hybrid port
:rtype: list

>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.vlanm import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> hybrid_interfaces = get_device_hybrid_interfaces(auth.creds, auth.url, devid='10')
>>> assert type(hybrid_interfaces) is list
>>> assert len(hybrid_interfaces[0]) == 2
>>> assert 'ifIndex' in hybrid_interfaces[0]
>>> assert 'pvid' in hybrid_interfaces[0]
21,003
def body_block_supplementary_material_render(supp_tags, base_url=None):
    source_data = []
    for supp_tag in supp_tags:
        for block_content in body_block_content_render(supp_tag, base_url=base_url):
            if block_content != {}:
                if "content" in block_content:
                    del block_content["content"]
                source_data.append(block_content)
    return source_data
fig and media tag captions may contain supplementary material
21,004
def _encode(data, convert_to_float):
    ctf = convert_to_float
    if isinstance(data, (OctaveVariablePtr)):
        return _encode(data.value, ctf)
    if isinstance(data, OctaveUserClass):
        return _encode(OctaveUserClass.to_value(data), ctf)
    if isinstance(data, (OctaveFunctionPtr, MatlabFunction)):
        raise Oct2PyError('Cannot write Octave functions')
    if isinstance(data, MatlabObject):
        view = data.view(np.ndarray)
        out = MatlabObject(data, data.classname)
        for name in out.dtype.names:
            out[name] = _encode(view[name], ctf)
        return out
    if isinstance(data, (DataFrame, Series)):
        return _encode(data.values, ctf)
    if isinstance(data, dict):
        out = dict()
        for (key, value) in data.items():
            out[key] = _encode(value, ctf)
        return out
    if data is None:
        return np.NaN
    if isinstance(data, set):
        return _encode(list(data), ctf)
    if isinstance(data, list):
        if _is_simple_numeric(data):
            return _encode(np.array(data), ctf)
        return _encode(tuple(data), ctf)
    if isinstance(data, tuple):
        obj = np.empty(len(data), dtype=object)
        for (i, item) in enumerate(data):
            obj[i] = _encode(item, ctf)
        return obj
    if isinstance(data, spmatrix):
        return data.astype(np.float64)
    if not isinstance(data, np.ndarray):
        return data
    if data.dtype.kind in 'OV':  # dtype kinds assumed; literal lost in source
        out = np.empty(data.size, dtype=data.dtype)
        for (i, item) in enumerate(data.ravel()):
            if data.dtype.names:
                for name in data.dtype.names:
                    out[i][name] = _encode(item[name], ctf)
            else:
                out[i] = _encode(item, ctf)
        return out.reshape(data.shape)
    if data.dtype.name == 'complex64':  # assumed; literal lost in source
        return data.astype(np.complex128)
    if ctf and data.dtype.kind in 'ui':  # assumed; literal lost in source
        return data.astype(np.float64)
    return data
Convert the Python values to values suitable to send to Octave.
21,005
def select_mask(cls, dataset, selection):
    select_mask = None
    for dim, k in selection.items():
        if isinstance(k, tuple):
            k = slice(*k)
        masks = []
        alias = dataset.get_dimension(dim).name
        series = dataset.data[alias]
        if isinstance(k, slice):
            if k.start is not None:
                kval = util.numpy_scalar_to_python(k.start)
                masks.append(kval <= series)
            if k.stop is not None:
                kval = util.numpy_scalar_to_python(k.stop)
                masks.append(series < kval)
        elif isinstance(k, (set, list)):
            iter_slc = None
            for ik in k:
                mask = series == ik
                if iter_slc is None:
                    iter_slc = mask
                else:
                    iter_slc |= mask
            masks.append(iter_slc)
        elif callable(k):
            masks.append(k(series))
        else:
            masks.append(series == k)
        for mask in masks:
            if select_mask is not None:
                select_mask &= mask
            else:
                select_mask = mask
    return select_mask
Given a Dataset object and a dictionary with dimension keys and selection keys (i.e. tuple ranges, slices, sets, lists or literals) return a boolean mask over the rows in the Dataset object that have been selected.
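A sketch of the masks this produces, using a plain pandas DataFrame in place of the Dataset (column names are illustrative):

import pandas as pd

df = pd.DataFrame({'x': [0, 1, 2, 3], 'y': list('abcd')})
# {'x': (1, 3)} becomes (1 <= x) & (x < 3); {'y': ['a', 'c']} ORs equality masks
mask = (1 <= df['x']) & (df['x'] < 3) & (df['y'].isin(['a', 'c']))
print(df[mask])  # the row with x == 2, y == 'c'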
21,006
def handle_validation_error(self, error, bundle_errors):
    error_str = six.text_type(error)
    error_msg = self.help.format(error_msg=error_str) if self.help else error_str
    msg = {self.name: error_msg}
    if current_app.config.get("BUNDLE_ERRORS", False) or bundle_errors:
        return error, msg
    flask_restful.abort(400, message=msg)
Called when an error is raised while parsing. Aborts the request with a 400 status and an error message :param error: the error that was raised :param bundle_errors: do not abort when first error occurs, return a dict with the name of the argument and the error message to be bundled
21,007
def predict(self, data, output_margin=False, ntree_limit=0, pred_leaf=False,
            pred_contribs=False, approx_contribs=False, pred_interactions=False,
            validate_features=True):
    option_mask = 0x00
    if output_margin:
        option_mask |= 0x01
    if pred_leaf:
        option_mask |= 0x02
    if pred_contribs:
        option_mask |= 0x04
    if approx_contribs:
        option_mask |= 0x08
    if pred_interactions:
        option_mask |= 0x10
    if validate_features:
        self._validate_features(data)
    length = c_bst_ulong()
    preds = ctypes.POINTER(ctypes.c_float)()
    _check_call(_LIB.XGBoosterPredict(self.handle, data.handle,
                                      ctypes.c_int(option_mask),
                                      ctypes.c_uint(ntree_limit),
                                      ctypes.byref(length),
                                      ctypes.byref(preds)))
    preds = ctypes2numpy(preds, length.value, np.float32)
    if pred_leaf:
        preds = preds.astype(np.int32)
    nrow = data.num_row()
    if preds.size != nrow and preds.size % nrow == 0:
        chunk_size = int(preds.size / nrow)
        if pred_interactions:
            ngroup = int(chunk_size / ((data.num_col() + 1) * (data.num_col() + 1)))
            if ngroup == 1:
                preds = preds.reshape(nrow, data.num_col() + 1, data.num_col() + 1)
            else:
                preds = preds.reshape(nrow, ngroup, data.num_col() + 1, data.num_col() + 1)
        elif pred_contribs:
            ngroup = int(chunk_size / (data.num_col() + 1))
            if ngroup == 1:
                preds = preds.reshape(nrow, data.num_col() + 1)
            else:
                preds = preds.reshape(nrow, ngroup, data.num_col() + 1)
        else:
            preds = preds.reshape(nrow, chunk_size)
    return preds
Predict with data.

.. note:: This function is not thread safe.

   For each booster object, predict can only be called from one thread.
   If you want to run prediction using multiple thread, call ``bst.copy()``
   to make copies of model object and then call ``predict()``.

.. note:: Using ``predict()`` with DART booster

   If the booster object is DART type, ``predict()`` will perform dropouts,
   i.e. only some of the trees will be evaluated. This will produce incorrect
   results if ``data`` is not the training data. To obtain correct results on
   test sets, set ``ntree_limit`` to a nonzero value, e.g.

   .. code-block:: python

       preds = bst.predict(dtest, ntree_limit=num_round)

Parameters
----------
data : DMatrix
    The dmatrix storing the input.
output_margin : bool
    Whether to output the raw untransformed margin value.
ntree_limit : int
    Limit number of trees in the prediction; defaults to 0 (use all trees).
pred_leaf : bool
    When this option is on, the output will be a matrix of (nsample, ntrees)
    with each record indicating the predicted leaf index of each sample in
    each tree. Note that the leaf index of a tree is unique per tree, so you
    may find leaf 1 in both tree 1 and tree 0.
pred_contribs : bool
    When this is True the output will be a matrix of size (nsample, nfeats + 1)
    with each record indicating the feature contributions (SHAP values) for
    that prediction. The sum of all feature contributions is equal to the raw
    untransformed margin value of the prediction. Note the final column is the
    bias term.
approx_contribs : bool
    Approximate the contributions of each feature
pred_interactions : bool
    When this is True the output will be a matrix of size
    (nsample, nfeats + 1, nfeats + 1) indicating the SHAP interaction values
    for each pair of features. The sum of each row (or column) of the
    interaction values equals the corresponding SHAP value (from
    pred_contribs), and the sum of the entire matrix equals the raw
    untransformed margin value of the prediction. Note the last row and column
    correspond to the bias term.
validate_features : bool
    When this is True, validate that the Booster's and data's feature_names
    are identical. Otherwise, it is assumed that the feature_names are the same.

Returns
-------
prediction : numpy array
21,008
def sanitize(self, val):
    if self.type == NUMBER:
        try:
            return clamp(self.min, self.max, float(val))
        except ValueError:
            return 0.0
    elif self.type == TEXT:
        try:
            return unicode(str(val), "utf_8", "replace")
        except Exception:
            return ""
    elif self.type == BOOLEAN:
        if unicode(val).lower() in ("true", "1", "yes"):
            return True
        else:
            return False
Given a Variable and a value, clean the value and coerce it to the Variable's type (NUMBER, TEXT or BOOLEAN), falling back to a safe default.
21,009
def run_cli():
    "Command line interface to hiwenet."
    features_path, groups_path, weight_method, num_bins, edge_range, \
        trim_outliers, trim_percentile, return_networkx_graph, out_weights_path = parse_args()
    features, groups = read_features_and_groups(features_path, groups_path)
    extract(features, groups, weight_method=weight_method, num_bins=num_bins,
            edge_range=edge_range, trim_outliers=trim_outliers,
            trim_percentile=trim_percentile,
            return_networkx_graph=return_networkx_graph,
            out_weights_path=out_weights_path)
Command line interface to hiwenet.
21,010
def escape_velocity(M, R):
    ve = np.sqrt(2. * grav_const * M * msun_g / (R * rsun_cm))
    ve = ve * 1.e-5  # cm/s -> km/s
    return ve
escape velocity.

Parameters
----------
M : float
    Mass in solar masses.
R : float
    Radius in solar radii.

Returns
-------
v_escape
    in km/s.
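As a sanity check, solar values should give roughly the Sun's escape velocity (~618 km/s); the CGS constants below are assumptions standing in for the module's own:

import numpy as np

grav_const = 6.674e-8  # cm^3 g^-1 s^-2 (assumed CGS value)
msun_g = 1.989e33      # g
rsun_cm = 6.957e10     # cm

ve = np.sqrt(2. * grav_const * 1.0 * msun_g / (1.0 * rsun_cm)) * 1.e-5
print(round(ve))  # ~618 km/s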
21,011
def _RemoveForwardedIps(self, forwarded_ips, interface):
    for address in forwarded_ips:
        self.ip_forwarding_utils.RemoveForwardedIp(address, interface)
Remove the forwarded IP addresses from the network interface. Args: forwarded_ips: list, the forwarded IP address strings to delete. interface: string, the output device to use.
21,012
def restart_container(self, ip):
    # URL template assumed; the literal was lost in the source
    url = '{0}/v3/containers/{1}/restart'.format(self.host, ip)
    return self.__post(url)
Restart the container with the given IP.

Args:
    - ip: container IP

Returns:
    a tuple (<result>, <ResponseInfo>)
    - result: an empty dict {} on success, or {"error": "<errMsg string>"} on failure
    - ResponseInfo: response information for the request
21,013
def single(C, namespace=None):
    if namespace is None:
        B = C()._
    else:
        B = C(default=namespace, _=namespace)._
    return B
An element maker with a single namespace that uses that namespace as the default
21,014
def get_thermostat_state_by_name(self, name):
    self._validate_thermostat_state_name(name)
    return next((state for state in self.thermostat_states
                 if state.name.lower() == name.lower()), None)
Retrieves a thermostat state object by its assigned name :param name: The name of the thermostat state :return: The thermostat state object
21,015
def clean_username(self):
    username = self.cleaned_data.get("username")
    if username.lower() != slugify(username).lower():
        raise forms.ValidationError(
            ugettext("Username can only contain letters, numbers, dashes "
                     "or underscores."))
    lookup = {"username__iexact": username}
    try:
        User.objects.exclude(id=self.instance.id).get(**lookup)
    except User.DoesNotExist:
        return username
    raise forms.ValidationError(
        ugettext("This username is already registered"))
Ensure the username doesn't exist or contain invalid chars. We limit it to slugifiable chars since it's used as the slug for the user's profile view.
21,016
def oem_init(self):
    if self._oemknown:
        return
    self._oem, self._oemknown = get_oem_handler(self._get_device_id(), self)
Initialize the command object for OEM capabilities A number of capabilities are either totally OEM defined or else augmented somehow by knowledge of the OEM. This method does an interrogation to identify the OEM.
21,017
def device_measurement(device, ts=None, part=None, result=None, code=None, **kwargs):
    if ts is None:
        ts = local_now()
    payload = MeasurementPayload(device=device, part=part)
    m = Measurement(ts, result, code, list(kwargs))
    payload.measurements.append(m)
    m.add_sample(ts, **kwargs)
    return dumps(payload)
Returns a JSON MeasurementPayload ready to be sent through a transport.

If `ts` is not given, the current time is used. `part` is an optional `Part`
object, and `result` and `code` are the respective fields of the `Measurement`
object. All other arguments are interpreted as dimensions.

Minimal example, using a `Device` object to send two measurements:

>>> d = Device("12345")
>>> def publish(msg):
...     pass
>>> publish(d.measurement(temperature=22.8))
>>> publish(d.measurement(pressure=4.1))
21,018
def IndexedDB_requestData(self, securityOrigin, databaseName, objectStoreName,
                          indexName, skipCount, pageSize, **kwargs):
    assert isinstance(securityOrigin, (str,)), \
        "Argument must be of type str. Received type: %s" % type(securityOrigin)
    assert isinstance(databaseName, (str,)), \
        "Argument must be of type str. Received type: %s" % type(databaseName)
    assert isinstance(objectStoreName, (str,)), \
        "Argument must be of type str. Received type: %s" % type(objectStoreName)
    assert isinstance(indexName, (str,)), \
        "Argument must be of type str. Received type: %s" % type(indexName)
    assert isinstance(skipCount, (int,)), \
        "Argument must be of type int. Received type: %s" % type(skipCount)
    assert isinstance(pageSize, (int,)), \
        "Argument must be of type int. Received type: %s" % type(pageSize)
    expected = ['keyRange']
    passed_keys = list(kwargs.keys())
    assert all([(key in expected) for key in passed_keys]), \
        "Allowed kwargs are ['keyRange']. Passed kwargs: %s" % passed_keys
    subdom_funcs = self.synchronous_command('IndexedDB.requestData',
                                            securityOrigin=securityOrigin,
                                            databaseName=databaseName,
                                            objectStoreName=objectStoreName,
                                            indexName=indexName,
                                            skipCount=skipCount,
                                            pageSize=pageSize, **kwargs)
    return subdom_funcs
Function path: IndexedDB.requestData
Domain: IndexedDB
Method name: requestData

Parameters:
    Required arguments:
        'securityOrigin' (type: string) -> Security origin.
        'databaseName' (type: string) -> Database name.
        'objectStoreName' (type: string) -> Object store name.
        'indexName' (type: string) -> Index name, empty string for object store data requests.
        'skipCount' (type: integer) -> Number of records to skip.
        'pageSize' (type: integer) -> Number of records to fetch.
    Optional arguments:
        'keyRange' (type: KeyRange) -> Key range.
Returns:
    'objectStoreDataEntries' (type: array) -> Array of object store data entries.
    'hasMore' (type: boolean) -> If true, there are more entries to fetch in the given range.

Description: Requests data from object store or index.
21,019
def getInput():
    input = ''
    if sys.platform == 'win32':  # platform literal assumed; lost in source
        import msvcrt
        if msvcrt.kbhit():
            input += msvcrt.getch()
            print_(input)
        else:
            time.sleep(.1)
    else:
        sock = sys.stdin.fileno()
        while len(select.select([sock], [], [], 0.1)[0]) > 0:
            input += decode(os.read(sock, 4096))
    return input
Read the input buffer without blocking the system.
21,020
def register(cls, barset, name=None): return cls_register(cls, barset, BarSet, (, ), name=name)
Register a new BarSet as a member/attribute of this class.

Returns the new BarSet.

Arguments:
    barset : An existing BarSet, or an iterable of strings.
    name   : New name for the BarSet, also used as the class's attribute name.
             If the `barset` object has no `name` attribute, this argument is
             required. It must not be empty when given.
21,021
def get_and_update(cls, id, **kwargs):
    model = cls.get(id)
    for k, v in cls._preprocess_params(kwargs).items():
        setattr(model, k, v)
    cls.session.commit()
    return model
Returns an updated instance of the service's model class.

Args:
    id: the id of the model to update
    **kwargs: update parameters
21,022
def reload(self):
    self.clear()
    for plugin in self.app.config.get('plugin_modules', ()):
        self.load_plugin(plugin)
Clear plugin manager state and reload plugins. This method will make use of :meth:`clear` and :meth:`load_plugin`, so all internal state will be cleared, and all plugins defined in :data:`self.app.config['plugin_modules']` will be loaded.
21,023
def init_runner(self, parser, tracers, projinfo):
    self.parser = parser
    self.tracers = tracers
    self.proj_info = projinfo
Initialize instances in preparation for running test cases.

@note: should not be overridden
@param parser: instance of TestCaseParser
@param tracers: dict of Tracer instances, e.g. {"": tracer_obj} or
    {"192.168.0.1:5555": tracer_obj1, "192.168.0.2:5555": tracer_obj2}
@param projinfo: dict of test project info, used like
    self.proj_info["module"], self.proj_info["name"]
    yaml case like:
        - project:
            name: xxx
            module: xxxx
    dict case like:
        {"project": {"name": xxx, "module": xxxx}}
21,024
def _fully_random_weights(n_features, lam_scale, prng):
    weights = np.zeros((n_features, n_features))
    n_off_diag = int((n_features ** 2 - n_features) / 2)
    weights[np.triu_indices(n_features, k=1)] = (
        0.1 * lam_scale * prng.randn(n_off_diag) + (0.25 * lam_scale))
    weights[weights < 0] = 0
    weights = weights + weights.T
    return weights
Generate a symmetric random matrix with zeros along the diagonal.
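An illustrative call, assuming the function above is in scope and that a numpy RandomState serves as the prng:

import numpy as np

prng = np.random.RandomState(0)
w = _fully_random_weights(n_features=4, lam_scale=1.0, prng=prng)
assert np.allclose(w, w.T)           # symmetric
assert np.allclose(np.diag(w), 0.0)  # zeros along the diagonal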
21,025
def ConnectionUpdate(self, settings):
    connection_path = self.connection_path
    NM = dbusmock.get_object(MANAGER_OBJ)
    settings_obj = dbusmock.get_object(SETTINGS_OBJ)
    main_connections = settings_obj.ListConnections()
    if connection_path not in main_connections:
        # exception text and name assumed; literals lost in source
        raise dbus.exceptions.DBusException(
            'Connection %s does not exist' % connection_path,
            name=MANAGER_IFACE + '.DoesNotExist',)
    for setting_name in settings:
        setting = settings[setting_name]
        for k in setting:
            if setting_name not in self.settings:
                self.settings[setting_name] = {}
            self.settings[setting_name][k] = setting[k]
    self.EmitSignal(CSETTINGS_IFACE, 'Updated', '', [])
    auto_connect = False
    if 'autoconnect' in settings['connection']:
        auto_connect = settings['connection']['autoconnect']
    if auto_connect:
        dev = None
        devices = NM.GetDevices()
        if len(devices) > 0:
            dev = devices[0]
        if dev:
            activate_connection(NM, connection_path, dev, connection_path)
    return connection_path
Update settings on a connection. settings is a String String Variant Map Map. See https://developer.gnome.org/NetworkManager/0.9/spec.html #type-String_String_Variant_Map_Map
21,026
def _check_lods(parts, tumor_thresh, normal_thresh, indexes):
    try:
        gl_index = parts[8].split(":").index("GL")
    except ValueError:
        return True
    try:
        tumor_gls = [float(x) for x in
                     parts[indexes["tumor"]].strip().split(":")[gl_index].split(",")
                     if x != "."]
        if tumor_gls:
            tumor_lod = max(tumor_gls[i] - tumor_gls[0] for i in range(1, len(tumor_gls)))
        else:
            tumor_lod = -1.0
    except IndexError:
        tumor_lod = -1.0
    try:
        normal_gls = [float(x) for x in
                      parts[indexes["normal"]].strip().split(":")[gl_index].split(",")
                      if x != "."]
        if normal_gls:
            normal_lod = min(normal_gls[0] - normal_gls[i] for i in range(1, len(normal_gls)))
        else:
            normal_lod = normal_thresh
    except IndexError:
        normal_lod = normal_thresh
    return normal_lod >= normal_thresh and tumor_lod >= tumor_thresh
Ensure likelihoods for tumor and normal pass thresholds. Skipped if no FreeBayes GL annotations available.
21,027
def p_case_clause(self, p):
    p[0] = self.asttypes.Case(expr=p[2], elements=p[4])
    p[0].setpos(p)
case_clause : CASE expr COLON source_elements
21,028
def from_key(cls, *args):
    key = args if len(args) > 1 else args[0]
    return cls._instances.get(key, None)
Return flyweight object with specified key, if it has already been created. Returns: cls or None: Previously constructed flyweight object with given key or None if key not found
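A self-contained sketch of the flyweight registry this relies on; the class and its _instances bookkeeping here are hypothetical:

class Flyweight(object):
    _instances = {}

    def __init__(self, *key):
        key = key if len(key) > 1 else key[0]
        self._instances[key] = self  # register on construction

    @classmethod
    def from_key(cls, *args):
        key = args if len(args) > 1 else args[0]
        return cls._instances.get(key, None)

a = Flyweight('host', 80)
assert Flyweight.from_key('host', 80) is a  # multi-arg keys become tuples
assert Flyweight.from_key('missing') is None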
21,029
def add_component(self, kind, **kwargs):
    func = _get_add_func(component, kind)
    if kwargs.get('component', False) is None:  # key assumed; literal lost in source
        self._kwargs_checks(kwargs, warning_only=True)
    # NOTE: the source row is truncated here; in the full implementation
    # metawargs is built from the newly created parameters
    return self.get_component(**metawargs)
Add a new component (star or orbit) to the system. If not provided,
'component' (the name of the new star or orbit) will be created for you
and can be accessed by the 'component' attribute of the returned
ParameterSet.

>>> b.add_component(component.star)

or

>>> b.add_component('orbit', period=2.5)

Available kinds include:
    * :func:`phoebe.parameters.component.star`
    * :func:`phoebe.parameters.component.orbit`

:parameter kind: function to call that returns a ParameterSet or list of
    parameters. This must either be a callable function that accepts nothing
    but default values, or the name of a function (as a string) that can be
    found in the :mod:`phoebe.parameters.component` module (ie. 'star', 'orbit')
:type kind: str or callable
:parameter str component: (optional) name of the newly-created component
:parameter **kwargs: default values for any of the newly-created parameters
:return: :class:`phoebe.parameters.parameters.ParameterSet` of all parameters
    that have been added
:raises NotImplementedError: if required constraint is not implemented
21,030
def set_timezone(self, timezone: str):
    data = {"timezoneId": timezone}
    return self._restCall("home/setTimezone", body=json.dumps(data))
sets the timezone for the AP. e.g. "Europe/Berlin" Args: timezone(str): the new timezone
21,031
def get_mysql_cfg():
    # configuration keys assumed; literals lost in source
    environment = get_project_configuration()['environment']
    cfg = get_database_configuration()
    if environment == 'production':
        mysql = cfg['production']
    else:
        mysql = cfg['development']
    return mysql
Get the appropriate MySQL configuration
21,032
def save(self, obj):
    session = self.get_db_session()
    session.add(obj)
    session.commit()
    return obj
Add ``obj`` to the SQLAlchemy session and commit the changes back to the database. :param obj: SQLAlchemy object being saved :returns: The saved object
21,033
def _convert_pooling_param(param):
    param_string = "pooling_convention='full', "
    if param.global_pooling:
        param_string += "global_pool=True, kernel=(1,1)"
    else:
        param_string += "pad=(%d,%d), kernel=(%d,%d), stride=(%d,%d)" % (
            param.pad, param.pad, param.kernel_size, param.kernel_size,
            param.stride, param.stride)
    if param.pool == 0:
        param_string += ", pool_type='max'"
    elif param.pool == 1:
        param_string += ", pool_type='avg'"
    else:
        raise ValueError("Unknown Pooling Method!")
    return param_string
Convert the pooling layer parameter
21,034
def parseConfig(opt):
    places = ctllib.Places(config=opt['config'], messages=opt['messages'])
    restarter = functools.partial(ctllib.restart, places)
    path = filepath.FilePath(opt['config'])  # key assumed; literal lost in source
    return restarter, path
Parse configuration :params opt: dict-like object with config and messages keys :returns: restarter, path
21,035
def init_class(self, class_, step_func=None):
    if self.is_class_initialized(class_):
        l.debug("Class %r already initialized.", class_)
        return
    l.debug("Initialize class %r.", class_)
    self.initialized_classes.add(class_)
    if not class_.is_loaded:
        l.warning("Class %r is not loaded in CLE. Skip initialization.", class_)
        return
    clinit_method = resolve_method(self.state, '<clinit>', class_.name,
                                   include_superclasses=False, init_class=False)
    if clinit_method.is_loaded:
        javavm_simos = self.state.project.simos
        clinit_state = javavm_simos.state_call(
            addr=SootAddressDescriptor(clinit_method, 0, 0),
            base_state=self.state,
            ret_addr=SootAddressTerminator())
        simgr = self.state.project.factory.simgr(clinit_state)
        l.info(">"*15 + " Run class initializer %r ... " + ">"*15, clinit_method)
        simgr.run(step_func=step_func)
        l.debug("<"*15 + " Run class initializer %r ... done " + "<"*15, clinit_method)
        self.state.memory.vm_static_table = simgr.deadended[-1].memory.vm_static_table.copy()
        self.state.memory.heap = simgr.deadended[-1].memory.heap.copy()
    else:
        l.debug("Class initializer <clinit> is not loaded in CLE. Skip initialization.")
This method simulates the loading of a class by the JVM, during which parts of the class (e.g. static fields) are initialized. For this, we run the class initializer method <clinit> (if available) and update the state accordingly. Note: Initialization is skipped, if the class has already been initialized (or if it's not loaded in CLE).
21,036
def printStatistics(completion, concordance, tpedSamples, oldSamples, prefix):
    none_zero_indexes = np.where(completion[1] != 0)
    completionPercentage = np.zeros(len(completion[0]), dtype=float)
    completionPercentage[none_zero_indexes] = np.true_divide(
        completion[0, none_zero_indexes], completion[1, none_zero_indexes],
    )
    outputFile = None
    try:
        outputFile = open(prefix + ".summary", "w")
    except IOError:
        msg = "%(prefix)s.summary: can't write file" % locals()
        raise ProgramError(msg)
    print >>outputFile, "\t".join(["origIndex", "dupIndex", "famID", "indID",
                                   "% completion", "completion",
                                   "mean concordance"])
    for sampleID, indexes in tpedSamples.iteritems():
        for i, index in enumerate(indexes):
            toPrint = [str(oldSamples[sampleID][i]+1), str(index+1)]
            toPrint.extend(list(sampleID))
            toPrint.append("%.8f" % completionPercentage[index])
            toPrint.append("%d/%d" % (completion[0][index], completion[1][index]))
            indexToKeep = list(set(range(len(indexes))) - set([i]))
            values = np.ravel(
                np.asarray(concordance[sampleID][0][i, indexToKeep])
            )
            total_values = np.ravel(
                np.asarray(concordance[sampleID][1][i, indexToKeep])
            )
            currConcordance = np.zeros(len(indexToKeep), dtype=float)
            none_zero_indexes = np.where(total_values != 0)
            currConcordance[none_zero_indexes] = np.true_divide(
                values[none_zero_indexes], total_values[none_zero_indexes],
            )
            currConcordance = np.mean(currConcordance)
            toPrint.append("%.8f" % currConcordance)
            print >>outputFile, "\t".join(toPrint)
    outputFile.close()
    return completionPercentage
Print the statistics in a file.

:param completion: the completion of each duplicated samples.
:param concordance: the concordance of each duplicated samples.
:param tpedSamples: the updated position of the samples in the tped containing
    only duplicated samples.
:param oldSamples: the original duplicated sample positions.
:param prefix: the prefix of all the files.

:type completion: :py:class:`numpy.array`
:type concordance: dict
:type tpedSamples: dict
:type oldSamples: dict
:type prefix: str

:returns: the completion for each duplicated samples, as a :py:class:`numpy.array`.

Prints the statistics (completion of each samples and pairwise concordance
between duplicated samples) in a file (``prefix.summary``).
21,037
def set_color(index, color):
    if OS == "Darwin" and index < 20:
        return "\033]P%1x%s\033\\" % (index, color.strip("#"))
    return "\033]4;%s;%s\033\\" % (index, color)
Convert a hex color to a text color sequence.
21,038
def fetch_token(self, **kwargs):
    if 'client_secret' not in kwargs:
        kwargs.update(client_secret=self.client_secret)
    return self.session.fetch_token(token_url, **kwargs)
Fetch a new token using the supplied code. :param str code: A previously obtained auth code.
21,039
def get_trend(self):
    ts = self.get_ts()
    last = ts['value'][len(ts['value']) - 1]  # 'value' key assumed; literal lost in source
    prev = ts['value'][len(ts['value']) - 2]
    trend = last - prev
    trend_percentage = None
    if last == 0:
        if prev > 0:
            trend_percentage = -100
        else:
            trend_percentage = 0
    else:
        trend_percentage = int((trend / last) * 100)
    return (last, trend_percentage)
Get the trend for the last two metric values using the interval defined in the metric :return: a tuple with the metric value for the last interval and the trend percentage between the last two intervals
21,040
def commit(self, message, parent_commits=None, head=True, author=None,
           committer=None, author_date=None, commit_date=None, skip_hooks=False):
    if not skip_hooks:
        run_commit_hook('pre-commit', self)
        self._write_commit_editmsg(message)
        run_commit_hook('commit-msg', self, self._commit_editmsg_filepath())
        message = self._read_commit_editmsg()
        self._remove_commit_editmsg()
    tree = self.write_tree()
    rval = Commit.create_from_tree(self.repo, tree, message, parent_commits,
                                   head, author=author, committer=committer,
                                   author_date=author_date, commit_date=commit_date)
    if not skip_hooks:
        run_commit_hook('post-commit', self)
    return rval
Commit the current default index file, creating a commit object. For more information on the arguments, see tree.commit. :note: If you have manually altered the .entries member of this instance, don't forget to write() your changes to disk beforehand. Passing skip_hooks=True is the equivalent of using `-n` or `--no-verify` on the command line. :return: Commit object representing the new commit
21,041
def _is_valid_channel(self, channel, conda_url='https://conda.anaconda.org'):
    if channel.startswith('http://') or channel.startswith('https://'):
        url = channel
    else:
        url = "{0}/{1}".format(conda_url, channel)
    if url[-1] == '/':
        url = url[:-1]
    plat = self._conda_api.get_platform()
    repodata_url = "{0}/{1}/{2}".format(url, plat, 'repodata.json')
    try:
        r = requests.head(repodata_url, proxies=self.proxy_servers)
        value = r.status_code in [200]
    except Exception as error:
        logger.error(str(error))
        value = False
    return value
Callback for is_valid_channel.
21,042
def inject(self):
    with open(self.script_url, "r", encoding="utf8") as f:
        self.selenium.execute_script(f.read())
Recursively inject aXe into all iframes and the top level document. :param script_url: location of the axe-core script. :type script_url: string
21,043
def _mute(self):
    if self.muted:
        self._sendCommand("volume {}\n".format(self.actual_volume))
        if logger.isEnabledFor(logging.DEBUG):
            # log messages assumed; literals lost in source
            logger.debug('VLC volume restored to {} ({}%)'.format(
                self.actual_volume,
                int(100 * self.actual_volume / self.max_volume)))
    else:
        if self.actual_volume == -1:
            self._get_volume()
        self._sendCommand("volume 0\n")
        if logger.isEnabledFor(logging.DEBUG):
            logger.debug('VLC muted')
Toggle mute on vlc.
21,044
def _clear(self):
    draw = ImageDraw.Draw(self._background_image)
    draw.rectangle(self._device.bounding_box, fill="black")
    del draw
Helper that clears the composition.
21,045
def remove_image_info_cb(self, gshell, channel, iminfo):
    chname = channel.name
    if chname not in self.name_dict:
        return
    fileDict = self.name_dict[chname]
    name = iminfo.name
    if name not in fileDict:
        return
    del fileDict[name]
    self.logger.debug('{} removed from name_dict'.format(name))  # message assumed
    if not self.gui_up:
        return False
    self.clear_selected_history()
    self.recreate_toc()
Delete entries related to deleted image.
21,046
def replace_namespaced_stateful_set_scale(self, name, namespace, body, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.replace_namespaced_stateful_set_scale_with_http_info(name, namespace, body, **kwargs)
    else:
        (data) = self.replace_namespaced_stateful_set_scale_with_http_info(name, namespace, body, **kwargs)
        return data
replace_namespaced_stateful_set_scale  # noqa: E501

replace scale of the specified StatefulSet  # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True

>>> thread = api.replace_namespaced_stateful_set_scale(name, namespace, body, async_req=True)
>>> result = thread.get()

:param async_req bool
:param str name: name of the Scale (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1Scale body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be
    persisted. An invalid or unrecognized dryRun directive will result in an
    error response and no further processing of the request. Valid values are:
    - All: all dry run stages will be processed
:return: V1Scale
    If the method is called asynchronously, returns the request thread.
21,047
def Trans(self, stateFrom, *condAndNextState):
    top = []
    last = True
    for cAndS in reversed(condAndNextState):
        if last is True:
            last = False
            try:
                condition, newvalue = cAndS
            except TypeError:
                top = self.stateReg(cAndS)
                continue
            top = []
        else:
            condition, newvalue = cAndS
        top = \
            If(condition,
               self.stateReg(newvalue)
            ).Else(
               top
            )
    if stateFrom is None:
        return Switch.Default(self, top)
    else:
        return Switch.Case(self, stateFrom, top)
:param stateFrom: apply when FSM is in this state
:param condAndNextState: tuples (condition, newState); the last one does not
    have to have a condition
:attention: transitions have priority; the first has the highest
:attention: if stateFrom is None it is evaluated as default
21,048
def retrieve(cls, *args, **kwargs): return super(Subscription, cls).retrieve(*args, **kwargs)
Delegate to the parent class's ``retrieve`` method.
21,049
def get_path(self):
    if self.section:
        return self.section.get_path() + (self.name,)
    else:
        return self.name,
Calculate item's path in configuration tree. Use this sparingly -- path is calculated by going up the configuration tree. For a large number of items, it is more efficient to use iterators that return paths as keys. Path value is stable only once the configuration tree is completely initialised.
21,050
def _rotate(img, angle):
    s = img.shape
    if angle == 0:
        return img
    else:
        M = cv2.getRotationMatrix2D((s[1] // 2, s[0] // 2), angle, 1)
        return cv2.warpAffine(img, M, (s[1], s[0]))
Rotate the image by angle [DEG] about its centre, keeping the original size.
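Example call, assuming OpenCV, a numpy image, and the function above in scope (shapes are arbitrary):

import cv2
import numpy as np

img = np.zeros((100, 200), dtype=np.uint8)
img[40:60, 80:120] = 255
out = _rotate(img, 45)          # rotate 45 degrees about the centre
assert out.shape == img.shape   # output keeps the input size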
21,051
def back_slash_to_front_converter(string):
    try:
        if not string or not isinstance(string, str):
            return string
        return string.replace('\\', '/')
    except Exception:
        return string
Replace all \ in the string with /

:param string: single string to modify
:type string: str
21,052
def imap(self, coords):
    for tr in self.transforms:
        coords = tr.imap(coords)
    return coords
Inverse map coordinates

Parameters
----------
coords : array-like
    Coordinates to inverse map.

Returns
-------
coords : ndarray
    Coordinates.
21,053
def exceptions(self):
    ex = {}
    # XPath expression assumed; literal lost in source
    for sd in self.root.xpath('exceptions/sd'):
        bits = str(sd.text).split()
        date = text_to_date(bits.pop(0))
        ex.setdefault(date, []).extend([
            _time_text_to_period(t) for t in bits
        ])
    return ex
A dict of dates -> [Period time tuples] representing exceptions to the base recurrence pattern.
21,054
def is_stable(self, species):
    element_name_of_iso = species.split('-')[0]
    try:
        a_of_iso = int(species.split('-')[1])
    except ValueError:
        a_of_iso = 999
    idp_of_element_in_stable_names = self.stable_names.index(element_name_of_iso)
    if a_of_iso in self.stable_el[idp_of_element_in_stable_names][1:]:
        return True
    else:
        return False
This routine accepts input formatted like 'He-3' and checks whether it occurs
in the stable_el list. If it does, the routine returns True, otherwise False.

Notes
-----
this method is designed to work with an se instance from nugridse.py. In order
to make it work with ppn.py some additional work is required. FH, April 20, 2013.
21,055
def put_task(self, dp, callback=None):
    f = Future()
    if callback is not None:
        f.add_done_callback(callback)
    self.input_queue.put((dp, f))
    return f
Same as in :meth:`AsyncPredictorBase.put_task`.
21,056
def SegmentProd(a, ids):
    func = lambda idxs: reduce(np.multiply, a[idxs])
    return seg_map(func, a, ids),
Segmented prod op.
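The op mirrors segmented products as in tf.math.segment_prod; a numpy-only sketch of the computation it performs (ids are assumed sorted by segment):

import numpy as np
from functools import reduce

a = np.array([1, 2, 3, 4, 5])
ids = np.array([0, 0, 1, 1, 1])
out = np.array([reduce(np.multiply, a[ids == i]) for i in np.unique(ids)])
print(out)  # [ 2 60]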
21,057
def avg(self, property):
    self.__prepare()
    return self.sum(property) / self.count()
Get the average of the given property.

:@param property
:@type property: string
:@return average: int/float
21,058
def add_sources_argument(cls, group, allow_filters=True, prefix=None, add_root_paths=False):
    prefix = prefix or cls.argument_prefix
    group.add_argument("--%s-sources" % prefix,
                       action="store", nargs="+",
                       dest="%s_sources" % prefix.replace('-', '_'),
                       help="%s source files to parse" % prefix)
    if allow_filters:
        group.add_argument("--%s-source-filters" % prefix,
                           action="store", nargs="+",
                           dest="%s_source_filters" % prefix.replace('-', '_'),
                           help="%s source files to ignore" % prefix)
    if add_root_paths:
        group.add_argument("--%s-source-roots" % prefix,
                           action="store", nargs="+",
                           dest="%s_source_roots" % prefix.replace('-', '_'),
                           help="%s source root directories allowing files "
                                "to be referenced relatively to those" % prefix)
Subclasses may call this to add sources and source_filters arguments.

Args:
    group: argparse.ArgumentGroup, the extension argument group
    allow_filters: bool, Whether the extension wishes to expose a
        source_filters argument.
    prefix: str, arguments have to be namespaced.
21,059
def register_name(self, register_index):
    result = self._dll.JLINKARM_GetRegisterName(register_index)
    return ctypes.cast(result, ctypes.c_char_p).value.decode()
Retrieves and returns the name of an ARM CPU register.

Args:
    self (JLink): the ``JLink`` instance
    register_index (int): index of the register whose name to retrieve

Returns:
    Name of the register.
21,060
def output(self, _in, out, **kwargs):
    # module wrapper strings assumed; literals lost in source
    out.write('angular.module("{}", []).run(function () {{\n'.format(
        self.catalog_name))
    out.write(_in.read())
    out.write('\n});\n')
Wrap translation in Angular module.
21,061
def generateDrawSpecs(self, p):
    profiler = debug.Profiler()
    bounds = self.mapRectFromParent(self.geometry())
    linkedView = self.linkedView()
    if linkedView is None or self.grid is False:
        tickBounds = bounds
    else:
        tickBounds = linkedView.mapRectToItem(self, linkedView.boundingRect())
    # orientation strings and style keys below assumed (pyqtgraph AxisItem);
    # literals lost in source
    if self.orientation == 'left':
        span = (bounds.topRight(), bounds.bottomRight())
        tickStart = tickBounds.right()
        tickStop = bounds.right()
        tickDir = -1
        axis = 0
    elif self.orientation == 'right':
        span = (bounds.topLeft(), bounds.bottomLeft())
        tickStart = tickBounds.left()
        tickStop = bounds.left()
        tickDir = 1
        axis = 0
    elif self.orientation == 'top':
        span = (bounds.bottomLeft(), bounds.bottomRight())
        tickStart = tickBounds.bottom()
        tickStop = bounds.bottom()
        tickDir = -1
        axis = 1
    elif self.orientation == 'bottom':
        span = (bounds.topLeft(), bounds.topRight())
        tickStart = tickBounds.top()
        tickStop = bounds.top()
        tickDir = 1
        axis = 1
    points = list(map(self.mapToDevice, span))
    if None in points:
        return
    lengthInPixels = Point(points[1] - points[0]).length()
    if lengthInPixels == 0:
        return
    if self._tickLevels is None:
        tickLevels = self.tickValues(self.range[0], self.range[1], lengthInPixels)
        tickStrings = None
    else:
        tickLevels = []
        tickStrings = []
        for level in self._tickLevels:
            values = []
            strings = []
            tickLevels.append((None, values))
            tickStrings.append(strings)
            for val, strn in level:
                values.append(val)
                strings.append(strn)
    dif = self.range[1] - self.range[0]
    if dif == 0:
        xScale = 1
        offset = 0
    else:
        if axis == 0:
            xScale = -bounds.height() / dif
            offset = self.range[0] * xScale - bounds.height()
        else:
            xScale = bounds.width() / dif
            offset = self.range[0] * xScale
    xRange = [x * xScale - offset for x in self.range]
    xMin = min(xRange)
    xMax = max(xRange)
    profiler()
    tickPositions = []
    tickSpecs = []
    for i in range(len(tickLevels)):
        tickPositions.append([])
        ticks = tickLevels[i][1]
        tickLength = self.style['tickLength'] / ((i * 0.5) + 1.0)
        lineAlpha = 255 / (i + 1)
        if self.grid is not False:
            lineAlpha *= self.grid / 255. * np.clip(
                (0.05 * lengthInPixels / (len(ticks) + 1)), 0., 1.)
        for v in ticks:
            x = (v * xScale) - offset
            if x < xMin or x > xMax:
                tickPositions[i].append(None)
                continue
            tickPositions[i].append(x)
            p1 = [x, x]
            p2 = [x, x]
            p1[axis] = tickStart
            p2[axis] = tickStop
            if self.grid is False:
                p2[axis] += tickLength * tickDir
            tickPen = self.pen()
            color = tickPen.color()
            color.setAlpha(lineAlpha)
            tickPen.setColor(color)
            tickSpecs.append((tickPen, Point(p1), Point(p2)))
    profiler()
    if self.style['stopAxisAtTick'][0] is True:
        stop = max(span[0].y(), min(map(min, tickPositions)))
        if axis == 0:
            span[0].setY(stop)
        else:
            span[0].setX(stop)
    if self.style['stopAxisAtTick'][1] is True:
        stop = min(span[1].y(), max(map(max, tickPositions)))
        if axis == 0:
            span[1].setY(stop)
        else:
            span[1].setX(stop)
    axisSpec = (self.pen(), span[0], span[1])
    textOffset = self.style['tickTextOffset'][axis]
    textSize2 = 0
    textRects = []
    textSpecs = []
    if not self.style['showValues']:
        return (axisSpec, tickSpecs, textSpecs)
    for i in range(min(len(tickLevels), self.style['maxTextLevel'] + 1)):
        if tickStrings is None:
            spacing, values = tickLevels[i]
            strings = self.tickStrings(values, self.autoSIPrefixScale * self.scale, spacing)
        else:
            strings = tickStrings[i]
        if len(strings) == 0:
            continue
        for j in range(len(strings)):
            if tickPositions[i][j] is None:
                strings[j] = None
        rects = []
        for s in strings:
            if s is None:
                rects.append(None)
            else:
                br = p.boundingRect(QtCore.QRectF(0, 0, 100, 100),
                                    QtCore.Qt.AlignCenter, asUnicode(s))
                br.setHeight(br.height() * 1.4)
                rects.append(br)
                textRects.append(rects[-1])
        # NOTE: the source row is truncated here; the full implementation
        # computes text placement and appends entries to textSpecs
    if len(textRects) > 0:
        self._updateMaxTextSize(textSize2)
    return (axisSpec, tickSpecs, textSpecs)
Calls tickValues() and tickStrings() to determine where and how ticks should be drawn, then generates from this a set of drawing commands to be interpreted by drawPicture().
21,062
def attach_tcp_service(cls, tcp_service: TCPService):
    # registry key and warning text assumed; literals lost in source
    if cls._services['tcp_service'] is None:
        cls._services['tcp_service'] = tcp_service
        cls._set_bus(tcp_service)
    else:
        warnings.warn('TCP service is already attached')
Attaches a service for hosting :param tcp_service: A TCPService instance
21,063
def destroy(self):
    logger.info("Destroying page: %s" % self)
    if self.doc.nb_pages <= 1:
        self.doc.destroy()
        return
    doc_pages = self.doc.pages[:]
    current_doc_nb_pages = self.doc.nb_pages
    paths = [
        self.__get_box_path(),
        self.__get_img_path(),
        self._get_thumb_path(),
    ]
    for path in paths:
        if self.fs.exists(path):
            self.fs.unlink(path)
    for page_nb in range(self.page_nb + 1, current_doc_nb_pages):
        page = doc_pages[page_nb]
        page.change_index(offset=-1)
Delete the page. May delete the whole document if it's actually the last page.
21,064
def paste(region, img, left, above, right, down):
    region = region.transpose(Image.ROTATE_180)
    box = (left, above, right, down)
    img.paste(region, box)
    return img
Paste the cut-out region onto the given image.

When pasting, the rectangular selection must match the box size, and the box
must lie within the target image. The selection's color mode does not have to
match the target image's, since the selection is converted automatically;
note, however, that only rectangular regions can be cut out.

:param region: the cut-out image
:param img: the target image
:param left: left edge
:param above: top edge
:param right: right edge
:param down: bottom edge
:return: the modified image object, still in memory and not yet saved.
21,065
def matches(self, properties):
    try:
        return self.comparator(self.value, properties[self.name])
    except KeyError:
        return False
Tests if the given criterion matches this LDAP criterion :param properties: A dictionary of properties :return: True if the properties matches this criterion, else False
21,066
def price_dataframe(symbols='$SPX',  # default symbols assumed; literal lost in source
                    start=datetime.datetime(2008, 1, 1),
                    end=datetime.datetime(2009, 12, 31),
                    price_type='actual_close',  # assumed
                    cleaner=clean_dataframe,
                    ):
    if isinstance(price_type, basestring):
        price_type = [price_type]
    start = util.normalize_date(start or datetime.date(2008, 1, 1))
    end = util.normalize_date(end or datetime.date(2009, 12, 31))
    symbols = normalize_symbols(symbols)
    t = du.getNYSEdays(start, end, datetime.timedelta(hours=16))
    df = clean_dataframes(dataobj.get_data(t, symbols, price_type))
    if not df or len(df) > 1:
        return cleaner(df)
    else:
        return cleaner(df[0])
Retrieve the prices of a list of equities as a DataFrame (columns = symbols)

Arguments:
    symbols (list of str): Ticker symbols like "GOOG", "AAPL", etc
        e.g. ["AAPL", " slv ", GLD", "GOOG", "$SPX", "XOM", "msft"]
    start (datetime): The date at the start of the period being analyzed.
    end (datetime): The date at the end of the period being analyzed.
        Yahoo data stops at 2013/1/1
21,067
def validate(self):
    try:
        response = self.client.get_access_key_last_used(
            AccessKeyId=self.access_key_id
        )
        username = response['UserName']
        access_keys = self.client.list_access_keys(
            UserName=username
        )
        for key in access_keys['AccessKeyMetadata']:
            if (key['AccessKeyId'] == self.access_key_id) \
                    and (key['Status'] == 'Inactive'):
                return True
        return False
    except Exception as e:
        logger.info(
            "Failed to validate key disable for "
            "key {id} due to: {e}.".format(
                e=e, id=self.access_key_id
            )
        )
        return False
Returns whether this plugin does what it claims to have done
21,068
def from_file(cls, filename, sr=22050):
    y, sr = librosa.load(filename, sr=sr)
    return cls(y, sr)
Loads an audiofile, uses sr=22050 by default.
21,069
def urlencode(self):
    output = ('%s=%s' % (k, quote(v)) for k, v in self.items())
    return '&'.join(output)
Convert dictionary into a query string; keys are assumed to always be str
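Equivalent standalone behaviour, assuming str keys and values and urllib's quote:

from urllib.parse import quote

params = {'q': 'a b', 'lang': 'en'}
print('&'.join('%s=%s' % (k, quote(v)) for k, v in params.items()))
# q=a%20b&lang=en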
21,070
def _validate_frequency(cls, index, freq, **kwargs):
    if is_period_dtype(cls):
        return None
    inferred = index.inferred_freq
    if index.size == 0 or inferred == freq.freqstr:
        return None
    try:
        on_freq = cls._generate_range(start=index[0], end=None,
                                      periods=len(index), freq=freq, **kwargs)
        if not np.array_equal(index.asi8, on_freq.asi8):
            raise ValueError
    except ValueError as e:
        if "non-fixed" in str(e):
            raise e
        raise ValueError('Inferred frequency {infer} from passed values '
                         'does not conform to passed frequency {passed}'
                         .format(infer=inferred, passed=freq.freqstr))
Validate that a frequency is compatible with the values of a given
Datetime Array/Index or Timedelta Array/Index

Parameters
----------
index : DatetimeIndex or TimedeltaIndex
    The index on which to determine if the given frequency is valid
freq : DateOffset
    The frequency to validate
21,071
def add(self, scheduler, max_iteration, bigdl_type="float"):
    return callBigDlFunc(bigdl_type, "addScheduler", self.value, scheduler,
                         max_iteration)
Add a learning rate scheduler to the contained `schedules` :param scheduler: learning rate scheduler to be add :param max_iteration: iteration numbers this scheduler will run
21,072
def create_route(self, uri, sub_service):
    if uri not in self.routes.keys():
        logger.debug('Service ({0}): Creating route'.format(self.name))  # message assumed
        self.routes[uri] = {
            'regex': StackInABoxService.get_service_regex(self.base_url, uri, sub_service),
            'uri': uri,
            'handlers': StackInABoxServiceRouter(self.name, uri, None, self)
        }
Create the route for the URI. :param uri: string - URI to be routed :param sub_service: boolean - is the URI for a sub-service :returns: n/a
21,073
def add_nodes(self, nodes, nesting=1):
    hopNodes = set()
    hopEdges = []
    for i, n in zip(range(len(nodes)), nodes):
        r, g, b = rainbowcolour(i, len(nodes))
        colour = '#%02X%02X%02X' % (r, g, b)
        for p in n.calls:
            if p not in hopNodes:
                hopNodes.add(p)
                hopEdges.append((n, p, 'solid', colour))  # edge style assumed
        for p in getattr(n, 'calls_external', []):  # attribute name assumed; literal lost in source
            if p not in hopNodes:
                hopNodes.add(p)
                hopEdges.append((n, p, 'dashed', colour))
    if self.add_to_graph(hopNodes, hopEdges, nesting):
        # graphviz attribute values assumed; literals lost in source
        self.dot.attr('graph', size='11.875,1000.0')
        self.dot.attr('graph', concentrate='false')
Adds edges indicating the call-tree for the procedures listed in the nodes.
21,074
def get_deposit_address(self, currency):
    data = {
        'currency': currency
    }
    return self._get('deposit-addresses', True, data=data)
Get deposit address for a currency

https://docs.kucoin.com/#get-deposit-address

:param currency: Name of currency
:type currency: string

.. code:: python

    address = client.get_deposit_address('NEO')

:returns: ApiResponse

.. code:: python

    {
        "address": "0x78d3ad1c0aa1bf068e19c94a2d7b16c9c0fcd8b1",
        "memo": "5c247c8a03aa677cea2a251d"
    }

:raises: KucoinResponseException, KucoinAPIException
21,075
def _recalculate_extents_and_offsets(self, index, logical_block_size):
    if index == 0:
        dirrecord_offset = 0
        num_extents = 1
    else:
        dirrecord_offset = self.children[index - 1].offset_to_here
        num_extents = self.children[index - 1].extents_to_here
    for i in range(index, len(self.children)):
        c = self.children[i]
        dirrecord_len = c.dr_len
        if (dirrecord_offset + dirrecord_len) > logical_block_size:
            num_extents += 1
            dirrecord_offset = 0
        dirrecord_offset += dirrecord_len
        c.extents_to_here = num_extents
        c.offset_to_here = dirrecord_offset
        c.index_in_parent = i
    return num_extents, dirrecord_offset
Internal method to recalculate the extents and offsets associated with
children of this directory record.

Parameters:
    index - The index at which to start the recalculation.
    logical_block_size - The block size to use for comparisons.
Returns:
    A tuple where the first element is the total number of extents required
    by the children and where the second element is the offset into the last
    extent currently being used.
21,076
def _build_basemap(self):
    # configuration keys and error messages assumed; literals lost in source
    if self.config['min_lat'] >= self.config['max_lat']:
        raise ValueError('Upper latitude must be greater than lower latitude')
    if self.config['min_lon'] >= self.config['max_lon']:
        raise ValueError('Upper longitude must be greater than lower longitude')
    lowcrnrlat = self.config['min_lat']
    lowcrnrlon = self.config['min_lon']
    uppcrnrlat = self.config['max_lat']
    uppcrnrlon = self.config['max_lon']
    if 'resolution' not in self.config.keys():
        self.config['resolution'] = 'l'
    lat0 = lowcrnrlat + ((uppcrnrlat - lowcrnrlat) / 2)
    lon0 = lowcrnrlon + ((uppcrnrlon - lowcrnrlon) / 2)
    if (uppcrnrlat - lowcrnrlat) >= (uppcrnrlon - lowcrnrlon):
        fig_aspect = PORTRAIT_ASPECT
    else:
        fig_aspect = LANDSCAPE_ASPECT
    if self.ax is None:
        self.fig, self.ax = plt.subplots(figsize=fig_aspect,
                                         facecolor='w', edgecolor='k')
    else:
        self.fig = self.ax.get_figure()
    if self.title:
        self.ax.set_title(self.title, fontsize=16)
    parallels = np.arange(-90., 90., self.lat_lon_spacing)
    meridians = np.arange(0., 360., self.lat_lon_spacing)
Creates the map according to the input configuration
21,077
def augpath(path, augsuf='', augext='', augpref='', augdir=None, newext=None,
            newfname=None, ensure=False, prefix=None, suffix=None):
    if prefix is not None:
        augpref = prefix
    if suffix is not None:
        augsuf = suffix
    dpath, fname = split(path)
    fname_noext, ext = splitext(fname)
    if newfname is not None:
        fname_noext = newfname
    if newext is None:
        newext = ext
    new_fname = ''.join((augpref, fname_noext, augsuf, newext, augext))
    if augdir is not None:
        new_dpath = join(dpath, augdir)
        if ensure:
            ensuredir(new_dpath)
    else:
        new_dpath = dpath
    newpath = join(new_dpath, new_fname)
    return newpath
augments end of path before the extension.

augpath

Args:
    path (str):
    augsuf (str): augment filename before extension

Returns:
    str: newpath

Example:
    >>> # DISABLE_DOCTEST
    >>> from utool.util_path import *  # NOQA
    >>> path = 'somefile.txt'
    >>> augsuf = '_aug'
    >>> newpath = augpath(path, augsuf)
    >>> result = str(newpath)
    >>> print(result)
    somefile_aug.txt

Example:
    >>> # DISABLE_DOCTEST
    >>> from utool.util_path import *  # NOQA
    >>> path = 'somefile.txt'
    >>> augsuf = '_aug2'
    >>> newext = '.bak'
    >>> augdir = 'backup'
    >>> newpath = augpath(path, augsuf, newext=newext, augdir=augdir)
    >>> result = str(newpath)
    >>> print(result)
    backup/somefile_aug2.bak
21,078
def get_publisher():
    log.info("initializing publisher")
    pub = None
    auth_url = ""
    if FORWARD_ENDPOINT_TYPE == "redis":
        auth_url = FORWARD_BROKER_URL
    else:
        auth_url = FORWARD_BROKER_URL
    pub = Publisher(name="{}_{}".format(SOURCE, "-redis"),
                    auth_url=auth_url,
                    ssl_options=FORWARD_SSL_OPTIONS)
    log.info("publisher={}".format(pub))
    return pub
Build and return the Publisher used to forward messages.
21,079
def versions(self):
    if self._versions is None:
        with self.database.cursor_autocommit() as cursor:
            query = 'SELECT * FROM {}'.format(self.table_name)  # query text assumed
            cursor.execute(query)
            rows = cursor.fetchall()
        versions = []
        for row in rows:
            row = list(row)
            row[4] = json.loads(row[4]) if row[4] else []
            versions.append(
                self.VersionRecord(*row)
            )
        self._versions = versions
    return self._versions
Read versions from the table The versions are kept in cache for the next reads.
21,080
def adjacency2graph(adjacency, edge_type=None, adjust=1, **kwargs):
    if isinstance(adjacency, np.ndarray):
        adjacency = _matrix2dict(adjacency)
    elif isinstance(adjacency, dict):
        adjacency = _dict2dict(adjacency)
    else:
        msg = ("If the adjacency parameter is supplied it must be a "
               "dict, or a numpy.ndarray.")
        raise TypeError(msg)
    if edge_type is None:
        edge_type = {}
    else:
        if isinstance(edge_type, np.ndarray):
            edge_type = _matrix2dict(edge_type, etype=True)
        elif isinstance(edge_type, dict):
            edge_type = _dict2dict(edge_type)
    for u, ty in edge_type.items():
        for v, et in ty.items():
            adjacency[u][v]['edge_type'] = et
    g = nx.from_dict_of_dicts(adjacency, create_using=nx.DiGraph())
    adjacency = nx.to_dict_of_dicts(g)
    adjacency = _adjacency_adjust(adjacency, adjust, True)
    return nx.from_dict_of_dicts(adjacency, create_using=nx.DiGraph())
Takes an adjacency list, dict, or matrix and returns a graph.

The purpose of this function is take an adjacency list (or matrix) and return
a :class:`.QueueNetworkDiGraph` that can be used with a :class:`.QueueNetwork`
instance. The Graph returned has the ``edge_type`` edge property set for each
edge. Note that the graph may be altered.

Parameters
----------
adjacency : dict or :class:`~numpy.ndarray`
    An adjacency list as either a dict, or an adjacency matrix.
adjust : int ``{1, 2}`` (optional, default: 1)
    Specifies what to do when the graph has terminal vertices (nodes with no
    out-edges). Note that if ``adjust`` is not 2 then it is assumed to be 1.
    There are two choices:

    * ``adjust = 1``: A loop is added to each terminal node in the graph, and
      their ``edge_type`` of that loop is set to 0.
    * ``adjust = 2``: All edges leading to terminal nodes have their
      ``edge_type`` set to 0.

**kwargs :
    Unused.

Returns
-------
out : :any:`networkx.DiGraph`
    A directed graph with the ``edge_type`` edge property.

Raises
------
TypeError
    Is raised if ``adjacency`` is not a dict or :class:`~numpy.ndarray`.

Examples
--------
If terminal nodes are such that all in-edges have edge type ``0`` then nothing
is changed. However, if a node is a terminal node then a loop is added with
edge type 0.

>>> import queueing_tool as qt
>>> adj = {
...     0: {1: {}},
...     1: {2: {},
...         3: {}},
...     3: {0: {}}}
>>> eTy = {0: {1: 1}, 1: {2: 2, 3: 4}, 3: {0: 1}}
>>> # A loop will be added to vertex 2
>>> g = qt.adjacency2graph(adj, edge_type=eTy)
>>> ans = qt.graph2dict(g)
>>> sorted(ans.items())  # doctest: +NORMALIZE_WHITESPACE
[(0, {1: {'edge_type': 1}}),
 (1, {2: {'edge_type': 2}, 3: {'edge_type': 4}}),
 (2, {2: {'edge_type': 0}}),
 (3, {0: {'edge_type': 1}})]

You can use a dict of lists to represent the adjacency list.

>>> adj = {0 : [1], 1: [2, 3], 3: [0]}
>>> g = qt.adjacency2graph(adj, edge_type=eTy)
>>> ans = qt.graph2dict(g)
>>> sorted(ans.items())  # doctest: +NORMALIZE_WHITESPACE
[(0, {1: {'edge_type': 1}}),
 (1, {2: {'edge_type': 2}, 3: {'edge_type': 4}}),
 (2, {2: {'edge_type': 0}}),
 (3, {0: {'edge_type': 1}})]

Alternatively, you could have this function adjust the edges that lead to
terminal vertices by changing their edge type to 0:

>>> # The graph is unaltered
>>> g = qt.adjacency2graph(adj, edge_type=eTy, adjust=2)
>>> ans = qt.graph2dict(g)
>>> sorted(ans.items())  # doctest: +NORMALIZE_WHITESPACE
[(0, {1: {'edge_type': 1}}),
 (1, {2: {'edge_type': 0}, 3: {'edge_type': 4}}),
 (2, {}),
 (3, {0: {'edge_type': 1}})]
21,081
def index_template(self, tpl):
    objcls = self.inner_class.my_type
    name = getattr(tpl, 'name', '')
    if not name:
        mesg = "a %s template has been defined without name, from: %s" % \
               (objcls, tpl.imported_from)
        tpl.add_error(mesg)
    elif name in self.name_to_template:
        tpl = self.manage_conflict(tpl, name)
    self.name_to_template[name] = tpl
    logger.debug("Indexed a %s template: %s, uses: %s",
                 tpl.my_type, name, getattr(tpl, 'use', 'Nothing'))  # default assumed
    return tpl
Indexes a template by `name` into the `name_to_template` dictionary. :param tpl: The template to index :type tpl: alignak.objects.item.Item :return: None
21,082
def add_suffix(file_path, suffix='', sep='_', ext=None):
    return _add_suffix(file_path, suffix, sep, ext)
Adds a suffix to a file name, separated by an underscore by default, and returns the file path.
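Since _add_suffix is not shown in the source, here is an assumed implementation consistent with the docstring, plus the expected behaviour:

import os

def _add_suffix(file_path, suffix, sep, ext):
    # hypothetical helper: insert sep + suffix before the extension,
    # optionally swapping in a new extension
    root, orig_ext = os.path.splitext(file_path)
    return root + sep + suffix + (ext if ext is not None else orig_ext)

print(_add_suffix('/tmp/report.csv', 'clean', '_', None))  # /tmp/report_clean.csv
print(_add_suffix('/tmp/report.csv', 'v2', '_', '.tsv'))   # /tmp/report_v2.tsv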
21,083
def tab(self, n=1, interval=0, pre_dl=None, post_dl=None):
    self.delay(pre_dl)
    self.k.tap_key(self.k.tab_key, n, interval)
    self.delay(post_dl)
Tap the ``tab`` key ``n`` times, with ``interval`` seconds between taps.
21,084
def paint_cube(self, x, y):
    color = self.next_color()
    cube_pos = [x, y, x + self.cube_size, y + self.cube_size]
    draw = ImageDraw.Draw(im=self.image)
    draw.rectangle(xy=cube_pos, fill=color)
Paints a cube at a certain position a color.

Parameters
----------
x: int
    Horizontal position of the upper left corner of the cube.
y: int
    Vertical position of the upper left corner of the cube.
21,085
def get_schema_validator(self, schema_name):
    if schema_name not in self.schemas:
        schema_file = self.get_schema_file(schema_name)
        with open(schema_file) as f:
            try:
                jsn_schema = json.load(f)
            except ValueError as ex:
                log.error("Could not load %s", schema_file)
                raise ex
        schemas_folder = self.get_schemas_folder()
        root_schema_path = self.get_schema_path(schemas_folder)
        resolver = jsonschema.RefResolver(root_schema_path, None)
        self.schemas[schema_name] = (jsn_schema, resolver)
    else:
        jsn_schema, resolver = self.schemas[schema_name]
    validator = jsonschema.Draft4Validator(schema=jsn_schema, resolver=resolver)
    return validator
Had to remove the id property from map.json or it uses URLs for validation See various issues at https://github.com/Julian/jsonschema/pull/306
21,086
def turn_left():
    motors.left_motor(150)
    motors.right_motor(150)
    board.sleep(0.5)
    motors.brake()
    board.sleep(0.1)
Turns the RedBot to the left.
21,087
def _get_leader_for_partition(self, topic, partition):
    key = TopicPartition(topic, partition)
    if self.topics_to_brokers.get(key) is not None:
        return self.topics_to_brokers[key]
    self.load_metadata_for_topics(topic)
    leader = self.topic_partitions[topic][partition]
    if leader == -1:
        raise LeaderNotAvailableError((topic, partition))
    return self.brokers[leader]
Returns the leader broker for a partition, reloading cluster metadata if the mapping is not cached. Raises: UnknownTopicOrPartitionError: If the topic or partition is not part of the metadata. LeaderNotAvailableError: If the server has metadata, but there is no current leader.
21,088
def ones_matrix_band_part(rows, cols, num_lower, num_upper, out_shape=None): if all([isinstance(el, int) for el in [rows, cols, num_lower, num_upper]]): if num_lower < 0: num_lower = rows - 1 if num_upper < 0: num_upper = cols - 1 lower_mask = np.tri(cols, rows, num_lower).T upper_mask = np.tri(rows, cols, num_upper) band = np.ones((rows, cols)) * lower_mask * upper_mask if out_shape: band = band.reshape(out_shape) band = tf.constant(band, tf.float32) else: band = tf.matrix_band_part( tf.ones([rows, cols]), tf.cast(num_lower, tf.int64), tf.cast(num_upper, tf.int64)) if out_shape: band = tf.reshape(band, out_shape) return band
Matrix band part of ones. Args: rows: int determining number of rows in output cols: int num_lower: int, maximum distance backward. Negative values indicate unlimited. num_upper: int, maximum distance forward. Negative values indicate unlimited. out_shape: shape to reshape output by. Returns: Tensor of size rows * cols reshaped into shape out_shape.
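A small worked example of the resulting band mask:

    # num_lower=1, num_upper=0 keeps the main diagonal plus one sub-diagonal:
    # [[1, 0, 0],
    #  [1, 1, 0],
    #  [0, 1, 1]]
    band = ones_matrix_band_part(3, 3, num_lower=1, num_upper=0)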
21,089
def handle_lock_expired(
        payment_state: InitiatorPaymentState,
        state_change: ReceiveLockExpired,
        channelidentifiers_to_channels: ChannelMap,
        block_number: BlockNumber,
) -> TransitionResult[InitiatorPaymentState]:
    initiator_state = payment_state.initiator_transfers.get(state_change.secrethash)
    if not initiator_state:
        return TransitionResult(payment_state, list())
    channel_identifier = initiator_state.channel_identifier
    channel_state = channelidentifiers_to_channels.get(channel_identifier)
    if not channel_state:
        return TransitionResult(payment_state, list())
    secrethash = initiator_state.transfer.lock.secrethash
    result = channel.handle_receive_lock_expired(
        channel_state=channel_state,
        state_change=state_change,
        block_number=block_number,
    )
    # Assertion message assumed; the original literal was elided in the dump.
    assert result.new_state, 'handle_receive_lock_expired should not delete the channel state'
    if not channel.get_lock(result.new_state.partner_state, secrethash):
        transfer = initiator_state.transfer
        unlock_failed = EventUnlockClaimFailed(
            identifier=transfer.payment_identifier,
            secrethash=transfer.lock.secrethash,
            reason='Lock expired',  # reason string assumed from context
        )
        result.events.append(unlock_failed)
    return TransitionResult(payment_state, result.events)
Initiator also needs to handle LockExpired messages when refund transfers are involved. A -> B -> C - A sends locked transfer to B - B attempted to forward to C but has not enough capacity - B sends a refund transfer with the same secrethash back to A - When the lock expires B will also send a LockExpired message to A - A needs to be able to properly process it Related issue: https://github.com/raiden-network/raiden/issues/3183
21,090
def dp004(self, value=None):
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            # Error message reconstructed; the original literal was elided.
            raise ValueError(
                'value {} needs to be of type float '
                'for field `dp004`'.format(value))
    self._dp004 = value
Corresponds to IDD Field `dp004` Dew-point temperature corresponding to 0.4% annual cumulative frequency of occurrence Args: value (float): value for IDD Field `dp004` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
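A hedged usage sketch (``design_conditions`` is hypothetical; in the full source this is likely wrapped as a property setter, but as written it is a plain method):

    design_conditions.dp004(16.8)     # stored as 16.8
    design_conditions.dp004("16.8")   # coerced to 16.8 via float()
    design_conditions.dp004("warm")   # raises ValueError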
21,091
def update_cloud_integration(self, id, **kwargs):
    # Keyword names assumed from the standard Swagger-codegen pattern;
    # the original literals were elided in the dump.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.update_cloud_integration_with_http_info(id, **kwargs)
    else:
        (data) = self.update_cloud_integration_with_http_info(id, **kwargs)
        return data
Update a specific cloud integration # noqa: E501 # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.update_cloud_integration(id, async_req=True) >>> result = thread.get() :param async_req bool :param str id: (required) :param CloudIntegration body: Example Body: <pre>{ \"name\":\"CloudWatch integration\", \"service\":\"CLOUDWATCH\", \"cloudWatch\":{ \"baseCredentials\":{ \"roleArn\":\"arn:aws:iam::&lt;accountid&gt;:role/&lt;rolename&gt;\", \"externalId\":\"wave123\" }, \"metricFilterRegex\":\"^aws.(sqs|ec2|ebs|elb).*$\", \"pointTagFilterRegex\":\"(region|name)\" }, \"serviceRefreshRateInMins\":5 }</pre> :return: ResponseContainerCloudIntegration If the method is called asynchronously, returns the request thread.
21,092
def set_sticker_position_in_set(self, sticker, position): assert_type_or_raise(sticker, unicode_type, parameter_name="sticker") assert_type_or_raise(position, int, parameter_name="position") result = self.do("setStickerPositionInSet", sticker=sticker, position=position) if self.return_python_objects: logger.debug("Trying to parse {data}".format(data=repr(result))) try: return from_array_list(bool, result, list_level=0, is_builtin=True) except TgApiParseException: logger.debug("Failed parsing as primitive bool", exc_info=True) raise TgApiParseException("Could not parse result.") return result
Use this method to move a sticker in a set created by the bot to a specific position. Returns True on success. https://core.telegram.org/bots/api#setstickerpositioninset Parameters: :param sticker: File identifier of the sticker :type sticker: str|unicode :param position: New sticker position in the set, zero-based :type position: int Returns: :return: Returns True on success :rtype: bool
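A hedged usage sketch (``bot`` and ``file_id`` are hypothetical):

    # Move the sticker to the front of its set; the API returns True on success.
    moved = bot.set_sticker_position_in_set(sticker=file_id, position=0)
    assert moved is True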
21,093
def tidy(args): p = OptionParser(tidy.__doc__) p.add_option("--nogaps", default=False, action="store_true", help="Remove all gap lines [default: %default]") opts, args = p.parse_args(args) if len(args) != 2: sys.exit(p.print_help()) agpfile, componentfasta = args originalagpfile = agpfile tmpfasta = "tmp.fasta" trimmed_agpfile = build([agpfile, componentfasta, tmpfasta, "--newagp", "--novalidate"]) os.remove(tmpfasta) agpfile = trimmed_agpfile agpfile = reindex([agpfile, "--inplace"]) merged_agpfile = gaps([agpfile, "--merge"]) os.remove(agpfile) agpfile = merged_agpfile agp = AGP(agpfile) newagpfile = agpfile.replace(".agp", ".fixed.agp") fw = open(newagpfile, "w") for object, a in groupby(agp, key=lambda x: x.object): a = list(a) if a[0].is_gap: g, a = a[0], a[1:] logging.debug("Trim beginning Ns({0}) of {1}".\ format(g.gap_length, object)) if a and a[-1].is_gap: a, g = a[:-1], a[-1] logging.debug("Trim trailing Ns({0}) of {1}".\ format(g.gap_length, object)) print("\n".join(str(x) for x in a), file=fw) fw.close() os.remove(agpfile) agpfile = newagpfile reindex_opts = [agpfile, "--inplace"] if opts.nogaps: reindex_opts += ["--nogaps"] agpfile = reindex(reindex_opts) tidyagpfile = originalagpfile.replace(".agp", ".tidy.agp") shutil.move(agpfile, tidyagpfile) logging.debug("File written to `{0}`.".format(tidyagpfile)) return tidyagpfile
%prog tidy agpfile componentfasta Given an agp file, run through the following steps: 1. Trim components with dangling N's 2. Merge adjacent gaps 3. Trim gaps at the end of an object 4. Reindex the agp Final output is in `.tidy.agp`.
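A hedged in-process invocation sketch (file names are hypothetical); as the signature shows, the function takes its CLI arguments as a list:

    # Writes `scaffolds.tidy.agp` and returns its path.
    tidyagpfile = tidy(["scaffolds.agp", "contigs.fasta"])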
21,094
def parse_python_version(output):
    version_line = output.split("\n", 1)[0]
    # Pattern reconstructed from the documented behavior; the original
    # literal was elided in the dump.
    version_pattern = re.compile(
        r"""
        ^Python\s+
        (?P<major>\d+)\.(?P<minor>\d+)
        (?:\.(?P<micro>\d+))?   # micro may be missing; it defaults to '0'
        """,
        re.VERBOSE,
    )
    match = version_pattern.match(version_line)
    if not match:
        return None
    return match.groupdict(default="0")
Parse a Python version output returned by `python --version`. Return a dict with three keys: major, minor, and micro. Each value is a string containing a version part. Note: The micro part would be `'0'` if it's missing from the input string.
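Expected behavior under the reconstructed pattern above:

    parse_python_version("Python 3.10.4")  # -> {'major': '3', 'minor': '10', 'micro': '4'}
    parse_python_version("Python 3.7")     # -> {'major': '3', 'minor': '7', 'micro': '0'}
    parse_python_version("not python")     # -> None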
21,095
def get_lines(data_nts, prtfmt=None, nt_fields=None, **kws):
    lines = []
    if prtfmt is None:
        # Keyword names for the separator/line-ending args are assumed;
        # the original literals were elided in the dump.
        prtfmt = mk_fmtfld(data_nts[0], kws.get('joinchr', ' '), kws.get('eol', '\n'))
    if nt_fields is not None:
        _chk_flds_fmt(nt_fields, prtfmt)
    if 'sort_by' in kws:
        data_nts = sorted(data_nts, key=kws['sort_by'])
    prt_if = kws.get('prt_if', None)
    for data_nt in data_nts:
        if prt_if is None or prt_if(data_nt):
            lines.append(prtfmt.format(**data_nt._asdict()))
    return lines
Print list of namedtuples into a table using prtfmt.
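A hedged usage sketch showing the ``sort_by`` and ``prt_if`` keywords read from ``kws``:

    from collections import namedtuple

    Nt = namedtuple('Nt', 'GO name')
    nts = [Nt('GO:0008150', 'biological_process'),
           Nt('GO:0003674', 'molecular_function')]
    lines = get_lines(nts, prtfmt='{GO} {name}\n', sort_by=lambda nt: nt.GO)
    # -> ['GO:0003674 molecular_function\n', 'GO:0008150 biological_process\n']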
21,096
def encode_fetch_request(cls, client_id, correlation_id, payloads=None,
                         max_wait_time=100, min_bytes=4096):
    payloads = [] if payloads is None else payloads
    grouped_payloads = group_by_topic_and_partition(payloads)
    message = cls._encode_message_header(client_id, correlation_id,
                                         KafkaCodec.FETCH_KEY)
    assert isinstance(max_wait_time, int)
    # ReplicaId (-1 for consumers), MaxWaitTime, MinBytes, and the topic
    # count: four big-endian int32s, per the Kafka FetchRequest wire format.
    message += struct.pack('>iiii', -1, max_wait_time, min_bytes,
                           len(grouped_payloads))
    for topic, topic_payloads in grouped_payloads.items():
        message += write_short_ascii(topic)
        message += struct.pack('>i', len(topic_payloads))  # partition count
        for partition, payload in topic_payloads.items():
            # Partition (int32), FetchOffset (int64), MaxBytes (int32).
            message += struct.pack('>iqi', partition, payload.offset,
                                   payload.max_bytes)
    return message
Encodes some FetchRequest structs :param bytes client_id: :param int correlation_id: :param list payloads: list of :class:`FetchRequest` :param int max_wait_time: how long to block waiting on min_bytes of data :param int min_bytes: the minimum number of bytes to accumulate before returning the response
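For reference, the header layout packed above, shown standalone:

    import struct
    # ReplicaId=-1 (consumer), MaxWaitTime=100 ms, MinBytes=4096, one topic.
    header = struct.pack('>iiii', -1, 100, 4096, 1)
    assert len(header) == 16  # four big-endian int32s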
21,097
def _convert_to_style(cls, style_dict):
    from openpyxl.style import Style
    xls_style = Style()
    for key, value in style_dict.items():
        for nk, nv in value.items():
            if key == "borders":
                # Attribute name assumed from the legacy openpyxl Border API;
                # the original literal was elided in the dump.
                (xls_style.borders.__getattribute__(nk)
                 .__setattr__('border_style', nv))
            else:
                xls_style.__getattribute__(key).__setattr__(nk, nv)
    return xls_style
converts a style_dict to an openpyxl style object Parameters ---------- style_dict : style dictionary to convert
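A hedged sketch of the nested ``style_dict`` shape this converter expects (``SomeExcelWriter`` and the key/value names are assumptions):

    style_dict = {
        'font': {'bold': True},
        'borders': {'top': 'thin'},  # mapped onto Border.top.border_style
    }
    xls_style = SomeExcelWriter._convert_to_style(style_dict)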
21,098
def setupMovie(self): if self.state == self.INIT: self.sendRtspRequest(self.SETUP)
Setup button handler.
21,099
def get_issuer(self):
    issuer = None
    # XPath reconstructed from the docstring: the Issuer of the LogoutResponse.
    issuer_nodes = self.__query('/samlp:LogoutResponse/saml:Issuer')
    if len(issuer_nodes) == 1:
        issuer = OneLogin_Saml2_Utils.element_text(issuer_nodes[0])
    return issuer
Gets the Issuer of the Logout Response Message :return: The Issuer :rtype: string
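A hedged usage sketch (``logout_response`` and ``idp_entity_id`` are hypothetical):

    issuer = logout_response.get_issuer()
    if issuer is not None and issuer != idp_entity_id:
        raise Exception('Invalid issuer in the Logout Response')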