Dataset columns: "Unnamed: 0" (int64, values 0–389k), "code" (string, lengths 26–79.6k), "docstring" (string, lengths 1–46.9k)
25,700
def init_db_conn( connection_name, HOST=None, PORT=None, DB=None, PASSWORD=None): rpool = redis.ConnectionPool( host=HOST, port=PORT, db=DB, password=PASSWORD) r = redis.Redis(connection_pool=rpool) redis_pool.connections[connection_name] = RedisClient(r)
Initialize a redis connection for each connection string defined in the configuration file
25,701
def build_seasonal_transition_matrix( num_seasons, is_last_day_of_season, dtype, basis_change_matrix=None, basis_change_matrix_inv=None): with tf.compat.v1.name_scope('build_seasonal_transition_matrix'): seasonal_permutation = np.concatenate( [np.arange(1, num_seasons), [0]], axis=0) seasonal_permutation_matrix = tf.constant( np.eye(num_seasons)[seasonal_permutation], dtype=dtype) if basis_change_matrix is not None: seasonal_permutation_matrix = tf.matmul( basis_change_matrix, tf.matmul(seasonal_permutation_matrix, basis_change_matrix_inv)) identity_matrix = tf.eye( tf.shape(input=seasonal_permutation_matrix)[-1], dtype=dtype) def seasonal_transition_matrix(t): return tf.linalg.LinearOperatorFullMatrix( matrix=dist_util.pick_scalar_condition( is_last_day_of_season(t), seasonal_permutation_matrix, identity_matrix)) return seasonal_transition_matrix  # the name-scope string was stripped in extraction; restored as a best guess
Build a function computing transitions for a seasonal effect model.
25,702
def tee_output_python(): buffer = StringIO() out = CapturedStdout(buffer) orig_stdout, orig_stderr = sys.stdout, sys.stderr flush() sys.stdout = TeeingStreamProxy(sys.stdout, buffer) sys.stderr = TeeingStreamProxy(sys.stderr, buffer) try: yield out finally: flush() out.finalize() sys.stdout, sys.stderr = orig_stdout, orig_stderr
Duplicate sys.stdout and sys.stderr to new StringIO.
25,703
def to_slice(arr): if isinstance(arr, slice): return arr if len(arr) == 1: return slice(arr[0], arr[0] + 1) step = np.unique(arr[1:] - arr[:-1]) if len(step) == 1: return slice(arr[0], arr[-1] + step[0], step[0])
Test whether `arr` is an integer array that can be replaced by a slice Parameters ---------- arr: numpy.array Numpy integer array Returns ------- slice or None If `arr` could be converted to a slice, the slice is returned, otherwise `None` is returned See Also -------- get_index_from_coord
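A minimal usage sketch of to_slice (assumes the function above is in scope): evenly spaced indices collapse to a slice, irregular ones return None.

import numpy as np

arr = np.array([2, 4, 6, 8])
print(to_slice(arr))                  # slice(2, 10, 2) -- stop is last element + step
print(to_slice(np.array([5])))        # slice(5, 6, None)
print(to_slice(np.array([1, 2, 4])))  # None: the step is not unique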
25,704
def b58ToC32(b58check, version=-1): addr_version_byte, addr_bin, addr_checksum = keylib.b58check.b58check_unpack(b58check) addr_version = ord(addr_version_byte) addr_hash160 = addr_bin.encode('hex') stacks_version = None if version < 0: stacks_version = addr_version if ADDR_BITCOIN_TO_STACKS.get(addr_version) is not None: stacks_version = ADDR_BITCOIN_TO_STACKS[addr_version] else: stacks_version = version return c32address(stacks_version, addr_hash160)  # the stripped .encode() argument is assumed to be 'hex' (Python 2 hex-encoding of the hash160)
>>> b58ToC32('1FzTxL9Mxnm2fdmnQEArfhzJHevwbvcH6d') 'SP2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKNRV9EJ7' >>> b58ToC32('3GgUssdoWh5QkoUDXKqT6LMESBDf8aqp2y') 'SM2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQVX8X0G' >>> b58ToC32('mvWRFPELmpCHSkFQ7o9EVdCd9eXeUTa9T8') 'ST2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQYAC0RQ' >>> b58ToC32('2N8EgwcZq89akxb6mCTTKiHLVeXRpxjuy98') 'SN2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKP6D2ZK9' >>> b58ToC32('1FzTxL9Mxnm2fdmnQEArfhzJHevwbvcH6d', 22) 'SP2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKNRV9EJ7' >>> b58ToC32('1FzTxL9Mxnm2fdmnQEArfhzJHevwbvcH6d', 0) 'S02J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKPVKG2CE' >>> b58ToC32('1FzTxL9Mxnm2fdmnQEArfhzJHevwbvcH6d', 31) 'SZ2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQ9H6DPR' >>> b58ToC32('1FzTxL9Mxnm2fdmnQEArfhzJHevwbvcH6d', 20) 'SM2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQVX8X0G' >>> b58ToC32('1FzTxL9Mxnm2fdmnQEArfhzJHevwbvcH6d', 26) 'ST2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKQYAC0RQ' >>> b58ToC32('1FzTxL9Mxnm2fdmnQEArfhzJHevwbvcH6d', 21) 'SN2J6ZY48GV1EZ5V2V5RB9MP66SW86PYKKP6D2ZK9'
25,705
def reset_kernel(self): client = self.get_current_client() if client is not None: self.switch_to_plugin() client.reset_namespace()
Reset kernel of current client.
25,706
def cg(f_Ax, b, cg_iters=10, callback=None, verbose=False, residual_tol=1e-10): p = b.copy() r = b.copy() x = np.zeros_like(b) rdotr = r.dot(r) fmtstr = "%10i %10.3g %10.3g" titlestr = "%10s %10s %10s" if verbose: print(titlestr % ("iter", "residual norm", "soln norm")) for i in range(cg_iters): if callback is not None: callback(x) if verbose: print(fmtstr % (i, rdotr, np.linalg.norm(x))) z = f_Ax(p) v = rdotr / p.dot(z) x += v*p r -= v*z newrdotr = r.dot(r) mu = newrdotr/rdotr p = r + mu*p rdotr = newrdotr if rdotr < residual_tol: break if callback is not None: callback(x) if verbose: print(fmtstr % (i+1, rdotr, np.linalg.norm(x))) return x
Demmel p 312
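A hedged usage sketch for the conjugate-gradient helper above: pass a matrix-vector product closure for a symmetric positive-definite matrix and recover the solution of A x = b.

import numpy as np

A = np.array([[4.0, 1.0], [1.0, 3.0]])  # symmetric positive-definite
b = np.array([1.0, 2.0])
x = cg(lambda v: A.dot(v), b, cg_iters=10)
print(x)                         # ~[0.0909, 0.6364], i.e. [1/11, 7/11]
print(np.allclose(A.dot(x), b))  # True: residual fell below residual_tol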
25,707
def spladder(job, inputs, bam_id, bai_id): job.fileStore.logToMaster('Running SplAdder on sample: {}'.format(inputs.uuid)) work_dir = job.fileStore.getLocalTempDir() job.fileStore.readGlobalFile(bam_id, os.path.join(work_dir, 'alignment.bam')) job.fileStore.readGlobalFile(bai_id, os.path.join(work_dir, 'alignment.bam.bai')) download_url(job=job, url=inputs.gtf, work_dir=work_dir, name='annotation.gtf') download_url(job=job, url=inputs.gtf_pickle, work_dir=work_dir, name='annotation.gtf.pickle') command = ['<spladder-cli-arguments>'] docker_call(job=job, work_dir=work_dir, parameters=command, sudo=inputs.sudo, tool='spladder') output_pickle = os.path.join(work_dir, 'spladder', 'genes_graph.pickle') if not os.path.exists(output_pickle): matches = [] for root, dirnames, filenames in os.walk(work_dir): for filename in fnmatch.filter(filenames, '*genes_graph*'): matches.append(os.path.join(root, filename)) if matches: output_pickle = matches[0] else: raise RuntimeError("Couldn't find output pickle") tarball_files('spladder.tar.gz', file_paths=[output_pickle, os.path.join(work_dir, 'alignment.filt.hdf5'), os.path.join(work_dir, 'alignment.hdf5')], output_dir=work_dir) return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'spladder.tar.gz'))  # NOTE: the docker image tag, SplAdder CLI flags and exact file names were lost in extraction; the values shown (and the tarball step reconstructed from the fused tail) are best-guess placeholders
Run SplAdder to detect and quantify alternative splicing events :param JobFunctionWrappingJob job: passed by Toil automatically :param Namespace inputs: Stores input arguments (see main) :param str bam_id: FileStore ID of bam :param str bai_id: FileStore ID of bam index file :return: FileStore ID of SplAdder tarball :rtype: str
25,708
def get_stp_mst_detail_output_msti_port_interface_type(self, **kwargs): config = ET.Element("config") get_stp_mst_detail = ET.Element("get_stp_mst_detail") config = get_stp_mst_detail output = ET.SubElement(get_stp_mst_detail, "output") msti = ET.SubElement(output, "msti") instance_id_key = ET.SubElement(msti, "instance-id") instance_id_key.text = kwargs.pop('instance_id') port = ET.SubElement(msti, "port") interface_type = ET.SubElement(port, "interface-type") interface_type.text = kwargs.pop('interface_type') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
25,709
def getAttributeName(self, index): offset = self._get_attribute_offset(index) name = self.m_attributes[offset + const.ATTRIBUTE_IX_NAME] res = self.sb[name] if not res: attr = self.m_resourceIDs[name] if attr in public.SYSTEM_RESOURCES['attributes']['inverse']: res = 'android:' + public.SYSTEM_RESOURCES['attributes']['inverse'][attr] else: res = 'android:0x{:08x}'.format(attr) return res  # the stripped dict keys and prefix strings were restored from the androguard pattern as a best guess
Returns the String which represents the attribute name
25,710
def load_script(browser, url): if browser.current_url.startswith('file:'): url = 'file:' + url browser.execute_script('var s = document.createElement("script"); s.src = arguments[0]; document.body.appendChild(s);', url) sleep(1)  # NOTE: the stripped prefix check and script-injection snippet are best-guess reconstructions
Ensure that JavaScript at a given URL is available to the browser.
25,711
def logout(current): current.user.is_online(False) current.session.delete() current.output['cmd'] = 'logout' if current.task_data.get('show_logout_message', False): current.output['title'] = current.task_data.get('logout_title', None) current.output['msg'] = current.task_data.get('logout_message', None)  # task_data keys restored from the docstring; the output keys are best guesses
Log out view. Simply deletes the session object. For showing logout message: 'show_logout_message' field should be True in current.task_data, Message should be sent in current.task_data with 'logout_message' field. Message title should be sent in current.task_data with 'logout_title' field. current.task_data['show_logout_message'] = True current.task_data['logout_title'] = 'Message Title' current.task_data['logout_message'] = 'Message' Args: current: :attr:`~zengine.engine.WFCurrent` object.
25,712
def partition_asymmetry(bif_point): assert len(bif_point.children) == 2, 'A bifurcation point must have exactly 2 children' n = float(sum(1 for _ in bif_point.children[0].ipreorder())) m = float(sum(1 for _ in bif_point.children[1].ipreorder())) if n == m: return 0.0 return abs(n - m) / abs(n + m)
Calculate the partition asymmetry at a bifurcation point as defined in https://www.ncbi.nlm.nih.gov/pubmed/18568015 The number of nodes in each child tree is counted. The partition is defined as the ratio of the absolute difference and the sum of the number of bifurcations in the two daughter subtrees at each branch point.
25,713
def unsubscribe(self): _LOGGER.info("PubNub unsubscribing") self._pubnub.unsubscribe_all() self._pubnub.stop() self._pubnub = None
Completely stop all pubnub operations.
25,714
def dump(context=os.environ): output = {} for key, value in context.iteritems(): if not key.startswith("BE_"): continue output[key[3:].lower()] = value return output
Dump current environment as a dictionary Arguments: context (dict, optional): Current context, defaults to the current environment.
25,715
def directories_in_directory(db, user_id, db_dirname): fields = _directory_default_fields() rows = db.execute( select( fields, ).where( _is_in_directory(directories, user_id, db_dirname), ) ) return [to_dict_no_content(fields, row) for row in rows]
Return subdirectories of a directory.
25,716
def env(**kwargs: Union[Dict[str, str], None]) -> ContextManager: old = os.environ.copy() try: os.environ.clear() for key, value in old.items(): os.environ[key] = value os.putenv(key, value) for key, value in kwargs.items(): if value is None: del os.environ[key] else: os.environ[key] = value os.putenv(key, value) yield finally: os.environ.clear() for key, value in old.items(): os.environ[key] = value os.putenv(key, value)
Context handler to temporarily alter environment. If you supply a value of ``None``, then the associated key will be deleted from the environment. Args: kwargs: Environment variables to override Yields: Execution context with modified environment
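A usage sketch, assuming the original carries a @contextlib.contextmanager decorator that the flattened code above omits (the body is a generator, and the docstring calls it a context handler):

import os

os.environ['DEMO'] = 'original'
os.environ['UNWANTED'] = 'x'
with env(DEMO='patched', UNWANTED=None):   # a value of None deletes the key
    assert os.environ['DEMO'] == 'patched'
    assert 'UNWANTED' not in os.environ
assert os.environ['DEMO'] == 'original'    # old environment restored on exit
assert os.environ['UNWANTED'] == 'x'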
25,717
def printTPRegionParams(tpregion): tm = tpregion.getSelf()._tfdr print "------------PY TemporalMemory Parameters ------------------" print "numberOfCols =", tm.columnDimensions print "cellsPerColumn =", tm.cellsPerColumn print "minThreshold =", tm.minThreshold print "activationThreshold =", tm.activationThreshold print "newSynapseCount =", tm.maxNewSynapseCount print "initialPerm =", tm.initialPermanence print "connectedPerm =", tm.connectedPermanence print "permanenceInc =", tm.permanenceIncrement print "permanenceDec =", tm.permanenceDecrement print "predictedSegmentDecrement=", tm.predictedSegmentDecrement print
Note: assumes we are using TemporalMemory/TPShim in the TPRegion
25,718
def forward(self, X, training=False, device='cpu'): y_infer = list(self.forward_iter(X, training=training, device=device)) is_multioutput = len(y_infer) > 0 and isinstance(y_infer[0], tuple) if is_multioutput: return tuple(map(torch.cat, zip(*y_infer))) return torch.cat(y_infer)
Gather and concatenate the output from forward call with input data. The outputs from ``self.module_.forward`` are gathered on the compute device specified by ``device`` and then concatenated using PyTorch :func:`~torch.cat`. If multiple outputs are returned by ``self.module_.forward``, each one of them must be able to be concatenated this way. Parameters ---------- X : input data, compatible with skorch.dataset.Dataset By default, you should be able to pass: * numpy arrays * torch tensors * pandas DataFrame or Series * scipy sparse CSR matrices * a dictionary of the former three * a list/tuple of the former three * a Dataset If this doesn't work with your data, you have to pass a ``Dataset`` that can deal with the data. training : bool (default=False) Whether to set the module to train mode or not. device : string (default='cpu') The device to store each inference result on. This defaults to CPU memory since there is generally more memory available there. For performance reasons this might be changed to a specific CUDA device, e.g. 'cuda:0'. Returns ------- y_infer : torch tensor The result from the forward step.
25,719
def allocate(self): self.logger.debug("Allocating environment.") self._allocate() self.logger.debug("Environment successfully allocated.")
Builds the context and the Hooks.
25,720
def read_lsm_floatpairs(fh): size = struct.unpack('<i', fh.read(4))[0] return fh.read_array('<2f8', count=size)  # format strings were stripped in extraction; restored from the tifffile pattern (little-endian count, then float64 pairs)
Read LSM sequence of float pairs from file and return as list.
25,721
def attr_membership(attr_val, value_set, attr_type=basestring, modifier_fn=lambda x: x): if attr_val is None: return False if not isinstance(attr_val, attr_type): warnings.warn("Attribute is of type {}, {} expected. " "Attempting to cast to expected type.".format(type(attr_val), attr_type)) try: attr_val = attr_type(attr_val) except (ValueError, TypeError): warnings.warn("Could not cast attribute to type {}".format(attr_type)) return False try: is_in_set = modifier_fn(attr_val) in value_set except Exception: return False return is_in_set  # NOTE: the casting and membership-test lines were garbled in extraction; this is a best-guess reconstruction consistent with the docstring
Helper function passed to netCDF4.Dataset.get_attributes_by_value Checks that `attr_val` exists, has the same type as `attr_type`, and is contained in `value_set` attr_val: The value of the attribute being checked attr_type: A type object that the `attr_val` is expected to have the same type as. If the type is not the same, a warning is issued and the code attempts to cast `attr_val` to the expected type. value_set: The set against which membership for `attr_val` is tested modifier_fn: A function to apply to attr_val prior to applying the set membership test
25,722
def next_child(self, child_pid): relation = self._get_child_relation(child_pid) if relation.index is not None: return self.children.filter( PIDRelation.index > relation.index ).ordered(ord='asc').first() else: return None
Get the next child PID in the PID relation.
25,723
def battery_voltage(self): msb = self.bus.read_byte_data(AXP209_ADDRESS, BATTERY_VOLTAGE_MSB_REG) lsb = self.bus.read_byte_data(AXP209_ADDRESS, BATTERY_VOLTAGE_LSB_REG) voltage_bin = msb << 4 | lsb & 0x0f return voltage_bin * 1.1
Returns voltage in mV
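A worked sketch of the 12-bit ADC assembly used above: the MSB register holds the high 8 bits, the low nibble of the LSB register the remaining 4, and each count is 1.1 mV (the register values below are made up).

msb, lsb = 0xBE, 0x07
raw = msb << 4 | lsb & 0x0f   # 0xBE7 == 3047 counts
print(raw * 1.1)              # 3351.7 mV, i.e. ~3.35 V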
25,724
def getfield(f): if isinstance(f, list): return [getfield(x) for x in f] else: return f.value
convert values from cgi.Field objects to plain values.
25,725
def get_matches(self, code, start=0, end=None, skip=None): if end is None: end = len(self.source) for match in self._get_matched_asts(code): match_start, match_end = match.get_region() if start <= match_start and match_end <= end: if skip is not None and (skip[0] < match_end and skip[1] > match_start): continue yield match
Search for `code` in source and return a list of `Match`\es `code` can contain wildcards. ``${name}`` matches normal names and ``${?name}`` can match any expression. You can use `Match.get_ast()` for getting the node that has matched a given pattern.
25,726
def build_request_relationship(type, ids): if ids is None: return { 'data': None } elif isinstance(ids, str): return { 'data': {'id': ids, 'type': type} } else: return { "data": [{"id": id, "type": type} for id in ids] }
Build a relationship list. A relationship list is used to update relationships between two resources. Setting sensors on a label, for example, uses this function to construct the list of sensor ids to pass to the Helium API. Args: type(string): The resource type for the ids in the relationship ids([uuid] or uuid): Just one or a list of resource uuids to use in the relationship Returns: A ready to use relationship JSON object.
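A sketch of the three shapes the helper above returns, assuming the stripped dictionary keys were 'data', 'id' and 'type' as in the surviving final branch:

print(build_request_relationship('sensor', None))
# {'data': None}
print(build_request_relationship('sensor', 'abc-123'))
# {'data': {'id': 'abc-123', 'type': 'sensor'}}
print(build_request_relationship('sensor', ['abc-123', 'def-456']))
# {'data': [{'id': 'abc-123', 'type': 'sensor'}, {'id': 'def-456', 'type': 'sensor'}]}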
25,727
def build_body(self): _increase_indent() body_array = [x.build() for x in self.iterable] nl = '\n' if self.append_extra_newline else '' if len(self.iterable) >= 1: body = self.join_body_on.join(body_array) + nl else: body = '' _decrease_indent() return body  # stripped newline/empty-string literals restored as best guesses
Builds the body of a syslog-ng configuration object.
25,728
def format_docstring(*args, **kwargs): def decorator(func): func.__doc__ = getdoc(func).format(*args, **kwargs) return func return decorator
Decorator for clean docstring formatting
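A minimal usage sketch for the decorator above (the example function and values are made up):

from inspect import getdoc

@format_docstring('TIMEOUT', default=30)
def fetch(url):
    """Fetch a URL; {0} errors use a default of {default} seconds."""

print(getdoc(fetch))
# Fetch a URL; TIMEOUT errors use a default of 30 seconds.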
25,729
def do_find(self, params): for path in self._zk.find(params.path, params.match, 0): self.show_output(path)
\x1b[1mNAME\x1b[0m find - Find znodes whose path matches a given text \x1b[1mSYNOPSIS\x1b[0m find [path] [match] \x1b[1mOPTIONS\x1b[0m * path: the path (default: cwd) * match: the string to match in the paths (default: '') \x1b[1mEXAMPLES\x1b[0m > find / foo /foo2 /fooish/wayland /fooish/xorg /copy/foo
25,730
def _get_or_add(self, prop_name): get_or_add_method_name = 'get_or_add_%s' % prop_name get_or_add_method = getattr(self, get_or_add_method_name) element = get_or_add_method() return element
Return element returned by 'get_or_add_' method for *prop_name*.
25,731
def prettyname(cls, attrib_name): if attrib_name.startswith('tag_'): tagname = attrib_name[len('tag_'):] return 'Tag "{}"'.format(tagname) elif attrib_name in cls.COLUMN_NAMES: return cls.COLUMN_NAMES[attrib_name] else: return attrib_name  # the stripped prefix and format strings are best-guess reconstructions
Returns the "pretty name" (capitalized, etc) of an attribute, by looking it up in ``cls.COLUMN_NAMES`` if it exists there. :param attrib_name: An attribute name. :type attrib_name: ``str`` :rtype: ``str``
25,732
def in_memory(self, value): self_class = self.__class__ memory = Annotation.__ANNOTATIONS_IN_MEMORY__ if value: annotations_memory = memory.setdefault(self_class, set()) annotations_memory.add(self) else: if self_class in memory: annotations_memory = memory[self_class] while self in annotations_memory: annotations_memory.remove(self) if not annotations_memory: del memory[self_class]
Add or remove self from global memory. :param bool value: if True(False) ensure self is(is not) in memory.
25,733
def set_ntp_servers(primary_server=None, secondary_server=None, deploy=False): ret = {} if primary_server: query = {'type': 'config', 'action': 'set', 'xpath': "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/ntp-servers/primary-ntp-server", 'element': '<ntp-server-address>{0}</ntp-server-address>'.format(primary_server)} ret.update({'primary_server': __proxy__['panos.call'](query)}) if secondary_server: query = {'type': 'config', 'action': 'set', 'xpath': "/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/ntp-servers/secondary-ntp-server", 'element': '<ntp-server-address>{0}</ntp-server-address>'.format(secondary_server)} ret.update({'secondary_server': __proxy__['panos.call'](query)}) if deploy is True: ret.update(commit()) return ret  # NOTE: the query keys, xpaths and element templates were stripped in extraction; reconstructed from the Salt panos proxy-module pattern as a best guess
Set the NTP servers of the Palo Alto proxy minion. A commit will be required before this is processed. CLI Example: Args: primary_server(str): The primary NTP server IP address or FQDN. secondary_server(str): The secondary NTP server IP address or FQDN. deploy (bool): If true then commit the full candidate configuration, if false only set pending change. .. code-block:: bash salt '*' ntp.set_servers 0.pool.ntp.org 1.pool.ntp.org salt '*' ntp.set_servers primary_server=0.pool.ntp.org secondary_server=1.pool.ntp.org salt '*' ntp.set_servers 0.pool.ntp.org 1.pool.ntp.org deploy=True
25,734
def browserfamilies(self, tag=None, fromdate=None, todate=None): return self.call("GET", "/stats/outbound/clicks/browserfamilies", tag=tag, fromdate=fromdate, todate=todate)
Gets an overview of the browsers used to open links in your emails. This is only recorded when Link Tracking is enabled for that email.
25,735
async def article( self, title, description=None, *, url=None, thumb=None, content=None, id=None, text=None, parse_mode=(), link_preview=True, geo=None, period=60, contact=None, game=False, buttons=None ): result = types.InputBotInlineResult( id=id or '', type='article', send_message=await self._message( text=text, parse_mode=parse_mode, link_preview=link_preview, geo=geo, period=period, contact=contact, game=game, buttons=buttons ), title=title, description=description, url=url, thumb=thumb, content=content ) if id is None: result.id = hashlib.sha256(bytes(result)).hexdigest() return result  # stripped id/type literals restored from the Telethon pattern
Creates new inline result of article type. Args: title (`str`): The title to be shown for this result. description (`str`, optional): Further explanation of what this result means. url (`str`, optional): The URL to be shown for this result. thumb (:tl:`InputWebDocument`, optional): The thumbnail to be shown for this result. For now it has to be a :tl:`InputWebDocument` if present. content (:tl:`InputWebDocument`, optional): The content to be shown for this result. For now it has to be a :tl:`InputWebDocument` if present.
25,736
def _getCellForNewSegment(self, colIdx): if self.maxSegmentsPerCell < 0: if self.cellsPerColumn > 1: i = self._random.getUInt32(self.cellsPerColumn - 1) + 1 else: i = 0 return i if self.cellsPerColumn > 1: minIdx = 1 maxIdx = self.cellsPerColumn - 1 else: minIdx = 0 maxIdx = 0 candidateCellIdxs = [] for i in xrange(minIdx, maxIdx+1): numSegs = len(self.cells[colIdx][i]) if numSegs < self.maxSegmentsPerCell: candidateCellIdxs.append(i) if len(candidateCellIdxs) > 0: candidateCellIdx = ( candidateCellIdxs[self._random.getUInt32(len(candidateCellIdxs))]) if self.verbosity >= 5: print "Cell [%d,%d] chosen for new segment, # of segs is %d" % (colIdx, candidateCellIdx, len(self.cells[colIdx][candidateCellIdx])) return candidateCellIdx candidateSegment = None candidateSegmentDC = 1.0 for i in xrange(minIdx, maxIdx+1): for s in self.cells[colIdx][i]: dc = s.dutyCycle() if dc < candidateSegmentDC: candidateCellIdx = i candidateSegmentDC = dc candidateSegment = s if self.verbosity >= 5: print "Deleting segment %d from cell [%d,%d] to make room for new segment" % (candidateSegment.segID, colIdx, candidateCellIdx) candidateSegment.debugPrint() self._cleanUpdatesList(colIdx, candidateCellIdx, candidateSegment) self.cells[colIdx][candidateCellIdx].remove(candidateSegment) return candidateCellIdx  # NOTE: the unlimited-segments fast path, minIdx/maxIdx setup, candidateCellIdxs init and print strings were lost in extraction; restored from the NuPIC source pattern as a best guess
Return the index of a cell in this column which is a good candidate for adding a new segment. When we have fixed size resources in effect, we ensure that we pick a cell which does not already have the max number of allowed segments. If none exists, we choose the least used segment in the column to re-allocate. :param colIdx: which column to look at :returns: cell index
25,737
def get_op_traceback(self, op_name): if not self._graph_traceback: raise ValueError('No graph traceback has been received yet') for op_log_entry in self._graph_traceback.log_entries: if op_log_entry.name == op_name: return self._code_def_to_traceback_list(op_log_entry.code_def) raise ValueError('Op "%s" does not exist in the graph of the latest version (%d)' % (op_name, self._graph_version))  # error-message strings were stripped; restored as best guesses
Get the traceback of an op in the latest version of the TF graph. Args: op_name: Name of the op. Returns: Creation traceback of the op, in the form of a list of 2-tuples: (file_path, lineno) Raises: ValueError: If the op with the given name cannot be found in the latest version of the graph that this SourceManager instance has received, or if this SourceManager instance has not received any graph traceback yet.
25,738
def get_pending_reboot(): checks = (get_pending_update, get_pending_file_rename, get_pending_servermanager, get_pending_component_servicing, get_reboot_required_witnessed, get_pending_computer_name, get_pending_domain_join) for check in checks: if check(): return True return False
Determine whether there is a reboot pending. .. versionadded:: 2016.11.0 Returns: bool: ``True`` if the system is pending reboot, otherwise ``False`` CLI Example: .. code-block:: bash salt '*' system.get_pending_reboot
25,739
def get_all_security_groups(groupnames=None, group_ids=None, filters=None, region=None, key=None, keyid=None, profile=None): conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) if isinstance(groupnames, six.string_types): groupnames = [groupnames] if isinstance(group_ids, six.string_types): group_ids = [group_ids] interesting = ['description', 'id', 'instances', 'name', 'owner_id', 'region', 'rules', 'rules_egress', 'tags', 'vpc_id'] ret = [] try: r = conn.get_all_security_groups(groupnames=groupnames, group_ids=group_ids, filters=filters) for g in r: n = {} for a in interesting: v = getattr(g, a, None) if a == 'region': v = v.name elif a in ('rules', 'rules_egress'): v = _parse_rules(g, v) elif a == 'instances': v = [i.id for i in v()] n[a] = v ret += [n] return ret except boto.exception.BotoServerError as e: log.debug(e) return []  # NOTE: the body was fused with its docstring in extraction; reconstructed from the surviving attribute tokens and the Salt boto_secgroup pattern
Return a list of all Security Groups matching the given criteria and filters. Note that the 'groupnames' argument only functions correctly for EC2 Classic and default VPC Security Groups. To find groups by name in other VPCs you'll want to use the 'group-name' filter instead. Valid keys for the filters argument are: description - The description of the security group. egress.ip-permission.prefix-list-id - The ID (prefix) of the AWS service to which the security group allows access. group-id - The ID of the security group. group-name - The name of the security group. ip-permission.cidr - A CIDR range that has been granted permission. ip-permission.from-port - The start of port range for the TCP and UDP protocols, or an ICMP type number. ip-permission.group-id - The ID of a security group that has been granted permission. ip-permission.group-name - The name of a security group that has been granted permission. ip-permission.protocol - The IP protocol for the permission (tcp | udp | icmp or a protocol number). ip-permission.to-port - The end of port range for the TCP and UDP protocols, or an ICMP code. ip-permission.user-id - The ID of an AWS account that has been granted permission. owner-id - The AWS account ID of the owner of the security group. tag-key - The key of a tag assigned to the security group. tag-value - The value of a tag assigned to the security group. vpc-id - The ID of the VPC specified when the security group was created. CLI example:: salt myminion boto_secgroup.get_all_security_groups filters='{group-name: mygroup}'
25,740
def _unpack_token_json(token): if not isinstance(token, dict): raise DecodeError("Not a dict") if not token.has_key('payload'): raise DecodeError("Missing payload field") for k in ['header', 'signature']: if not token.has_key(k): raise DecodeError("Missing {} field".format(k)) if not isinstance(token[k], list): raise DecodeError("Field {} is not a list".format(k)) headers = [] signatures = [] signing_inputs = [] payload = None try: headers = [base64url_decode(str(h)) for h in token['header']] except (TypeError, binascii.Error): raise DecodeError("Invalid header padding") try: payload_data = base64url_decode(str(token['payload'])) except (TypeError, binascii.Error): raise DecodeError("Invalid payload padding") try: payload = json.loads(payload_data.decode('utf-8')) except ValueError as e: raise DecodeError("Invalid payload JSON: {}".format(e)) try: signatures = [base64url_decode(str(s)) for s in token['signature']] except (TypeError, binascii.Error): raise DecodeError("Invalid crypto padding") for header_b64 in token['header']: signing_inputs.append( b"{}.{}".format(header_b64, token['payload']) ) return (headers, payload, signatures, signing_inputs)  # NOTE: the dict keys and message format specifiers were stripped in extraction; restored as best-guess values
Unpack a JSON-serialized JWT Returns (headers, payload, signatures) on success Raises DecodeError on bad input
25,741
def FunctionTimer(on_done=None): def decfn(fn): def timed(*args, **kwargs): ts = time.time() result = fn(*args, **kwargs) te = time.time() if on_done: on_done((fn.__name__, int(te - ts)), args, kwargs) else: print(('%s: %2.2f sec' % (fn.__name__, (te - ts)))) return result return timed return decfn  # leaked doctest output removed; the print format string is a best-guess restoration
To check execution time of a function borrowed from https://medium.com/pythonhive/python-decorator-to-measure-the-execution-time-of-methods-fa04cb6bb36d >>> def logger(details, args, kwargs): #some function that uses the time output ... print(details) ... >>> @FunctionTimer(on_done= logger) ... def foo(t=10): ... print('foo executing...') ... time.sleep(t) ... >>> @FunctionTimer(on_done= logger) ... def bar(t, n): ... for i in range(n): ... print('bar executing...') ... time.sleep(1) ... foo(t) ... >>> bar(3,2) bar executing... bar executing... foo executing... ('foo', 3) ('bar', 5)
25,742
def checkPermissions(permissions=[], obj=None): if not obj: return False sm = getSecurityManager() for perm in permissions: if not sm.checkPermission(perm, obj): return return True
Checks if a user has permissions for a given object. Args: permissions: The permissions the current user must be compliant with obj: The object for which the permissions apply Returns: 1 if the user complies with all the permissions for the given object. Otherwise, it returns empty.
25,743
def getcolor(spec): if isinstance(spec, str): from matplotlib import colors return asarray(colors.hex2color(colors.cnames[spec])) else: return spec
Turn optional color string spec into an array.
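A quick sketch (requires matplotlib for the named-color lookup):

print(getcolor('red'))            # array([ 1., 0., 0.])
print(getcolor([0.2, 0.4, 0.6]))  # non-string specs pass through unchanged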
25,744
def ensure_session_key(request): key = request.session.session_key if key is None: request.session.save() request.session.modified = True key = request.session.session_key return key
Given a request return a session key that will be used. There may already be a session key associated, but if there is not, we force the session to create itself and persist between requests for the client behind the given request.
25,745
def get(self, obj, key): if key not in self._exposed: raise MethodNotExposed() rightFuncs = self._exposed[key] T = obj.__class__ seen = {} for subT in inspect.getmro(T): for name, value in subT.__dict__.items(): for rightFunc in rightFuncs: if value is rightFunc: if name in seen: raise MethodNotExposed() return value.__get__(obj, T) seen[name] = True raise MethodNotExposed()
Retrieve 'key' from an instance of a class which previously exposed it. @param key: a hashable object, previously passed to L{Exposer.expose}. @return: the object which was exposed with the given name on obj's key. @raise MethodNotExposed: when the key in question was not exposed with this exposer.
25,746
def get_storage_credentials(key, read_only=False): if read_only: scopes = ['https://www.googleapis.com/auth/devstorage.read_only'] else: scopes = ['https://www.googleapis.com/auth/devstorage.read_write'] credentials = service_account.Credentials.from_service_account_info(key) scoped_credentials = credentials.with_scopes(scopes) return scoped_credentials  # scope URLs were stripped in extraction; standard GCS devstorage scopes restored
Authenticates a service account for reading and/or writing on a bucket. This uses the `google.oauth2.service_account` module to obtain "scoped credentials". These can be used with the `google.storage` module. TODO: docstring
25,747
def set_from_json(self, obj, json, models=None, setter=None): json = self._extract_units(obj, json) super(UnitsSpecPropertyDescriptor, self).set_from_json(obj, json, models, setter)
Sets the value of this property from a JSON value. This method first separately extracts and removes any ``units`` field in the JSON, and sets the associated units property directly. The remaining JSON is then passed to the superclass ``set_from_json`` to be handled. Args: obj: (HasProps) : instance to set the property value on json: (JSON-value) : value to set to the attribute to models (dict or None, optional) : Mapping of model ids to models (default: None) This is needed in cases where the attributes to update also have values that have references. setter (ClientSession or ServerSession or None, optional) : This is used to prevent "boomerang" updates to Bokeh apps. (default: None) In the context of a Bokeh server application, incoming updates to properties will be annotated with the session that is doing the updating. This value is propagated through any subsequent change notifications that the update triggers. The session can compare the event setter to itself, and suppress any updates that originate from itself. Returns: None
25,748
def stop(self, bIgnoreExceptions = True): try: event = self.lastEvent has_event = bool(event) except Exception: if not bIgnoreExceptions: raise e = sys.exc_info()[1] warnings.warn(str(e), RuntimeWarning) has_event = False if has_event: try: pid = event.get_pid() self.disable_process_breakpoints(pid) except Exception: if not bIgnoreExceptions: raise e = sys.exc_info()[1] warnings.warn(str(e), RuntimeWarning) try: tid = event.get_tid() self.disable_thread_breakpoints(tid) except Exception: if not bIgnoreExceptions: raise e = sys.exc_info()[1] warnings.warn(str(e), RuntimeWarning) try: event.continueDebugEvent = win32.DBG_CONTINUE self.cont(event) except Exception: if not bIgnoreExceptions: raise e = sys.exc_info()[1] warnings.warn(str(e), RuntimeWarning) try: if self.__bKillOnExit: self.kill_all(bIgnoreExceptions) else: self.detach_from_all(bIgnoreExceptions) except Exception: if not bIgnoreExceptions: raise e = sys.exc_info()[1] warnings.warn(str(e), RuntimeWarning) try: self.system.clear() except Exception: if not bIgnoreExceptions: raise e = sys.exc_info()[1] warnings.warn(str(e), RuntimeWarning) self.force_garbage_collection(bIgnoreExceptions)
Stops debugging all processes. If the kill on exit mode is on, debugged processes are killed when the debugger is stopped. Otherwise when the debugger stops it detaches from all debugged processes and leaves them running (default). For more details see: L{__init__} @note: This method is better than L{detach_from_all} because it can gracefully handle the last debugging event before detaching. @type bIgnoreExceptions: bool @param bIgnoreExceptions: C{True} to ignore any exceptions that may be raised when detaching.
25,749
def get_history_item_for_tree_iter(self, child_tree_iter): history_item = self.history_tree_store[child_tree_iter][self.HISTORY_ITEM_STORAGE_ID] if history_item is None: if self.history_tree_store.iter_n_children(child_tree_iter) > 0: child_iter = self.history_tree_store.iter_nth_child(child_tree_iter, 0) history_item = self.history_tree_store[child_iter][self.HISTORY_ITEM_STORAGE_ID] else: logger.debug("In a dummy history should be respective real call element.") return history_item
Hands history item for tree iter and compensate if tree item is a dummy item :param Gtk.TreeIter child_tree_iter: Tree iter of row :rtype rafcon.core.execution.execution_history.HistoryItem: :return history tree item:
25,750
def get_form(self, request, obj=None, **kwargs): if obj is not None and obj.parent is not None: self.previous_parent = obj.parent previous_parent_id = self.previous_parent.id else: previous_parent_id = None my_choice_field = TreeItemChoiceField(self.tree, initial=previous_parent_id) form = super(TreeItemAdmin, self).get_form(request, obj, **kwargs) my_choice_field.label = form.base_fields['parent'].label my_choice_field.help_text = form.base_fields['parent'].help_text my_choice_field.widget = form.base_fields['parent'].widget form.base_fields['parent'] = my_choice_field if not getattr(self, 'known_url_names', False): self.known_url_names = [] self.known_url_rules = [] resolver = get_resolver(get_urlconf()) for ns, (url_prefix, ns_resolver) in resolver.namespace_dict.items(): if ns != 'admin': self._stack_known_urls(ns_resolver.reverse_dict, ns) self._stack_known_urls(resolver.reverse_dict) self.known_url_rules = sorted(self.known_url_rules) form.known_url_names_hint = _( 'Available URL names for the current project') form.known_url_names = self.known_url_names form.known_url_rules = self.known_url_rules return form  # the stripped field name, namespace check and hint text are best-guess restorations
Returns modified form for TreeItem model. 'Parent' field choices are built by sitetree itself.
25,751
def load_file(self, filename): self.counter.clear() try: etree = ET.parse(filename) except ET.ParseError: parser = ET.XMLParser(encoding='UTF-8') etree = ET.parse(filename, parser) eroot = etree.getroot() self.remove_all() self.previewer.remove_all() self.widget_editor.hide_all() self.previewer.resource_paths.append(os.path.dirname(filename)) for element in eroot: self.populate_tree('', eroot, element, from_file=True) children = self.treeview.get_children() for child in children: self.draw_widget(child) self.previewer.show_selected(None, None)  # the stripped encoding and root-item strings are best-guess restorations
Load file into treeview
25,752
def Henry_H_at_T(T, H, Tderiv, T0=None, units=None, backend=None): be = get_backend(backend) if units is None: K = 1 else: K = units.Kelvin if T0 is None: T0 = 298.15*K return H * be.exp(Tderiv*(1/T - 1/T0))
Evaluate Henry's constant H at temperature T Parameters ---------- T: float Temperature (with units), assumed to be in Kelvin if ``units == None`` H: float Henry's constant Tderiv: float (optional) dln(H)/d(1/T), assumed to be in Kelvin if ``units == None``. T0: float Reference temperature, assumed to be in Kelvin if ``units == None`` default: 298.15 K units: object (optional) object with attributes: kelvin (e.g. chempy.units.default_units) backend : module (optional) module with "exp", default: numpy, math
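A worked sketch of the van 't Hoff correction the function applies, without a units backend (illustrative numbers, roughly O2 in water; assumes the default backend resolves to numpy/math exp):

from math import exp

H0, Tderiv = 1.3e-3, 1500.0       # mol/(kg*bar) and K, made-up magnitudes
T, T0 = 310.15, 298.15
print(Henry_H_at_T(T, H0, Tderiv))      # ~1.07e-3
print(H0 * exp(Tderiv * (1/T - 1/T0)))  # the same number, computed by hand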
25,753
def simxGetStringParameter(clientID, paramIdentifier, operationMode): paramValue = ct.POINTER(ct.c_char)() ret = c_GetStringParameter(clientID, paramIdentifier, ct.byref(paramValue), operationMode) a = bytearray() if ret == 0: i = 0 while paramValue[i] != b'\0': if sys.version_info[0] == 3: a.append(int.from_bytes(paramValue[i], 'big')) else: a.append(paramValue[i]) i=i+1 if sys.version_info[0] == 3: a=str(a, 'utf-8') else: a=str(a) return ret, a  # stripped byte/encoding literals restored from the V-REP remote API bindings pattern
Please have a look at the function description/documentation in the V-REP user manual
25,754
def get_recent_files(self): try: recent_files = self.CONF[WORKSPACE].get('main', 'recent_files', default=[]) except EnvironmentError: return [] for recent_file in recent_files[:]: if not os.path.isfile(recent_file): recent_files.remove(recent_file) return list(OrderedDict.fromkeys(recent_files))  # the stripped config section/option names were restored from the Spyder pattern as a best guess
Return a list of files opened by the project.
25,755
def load_file(self): daychunks = self.__split_file() if (daychunks): maxcount = len(self.__splitpointers) for i in range(maxcount): start = self.__splitpointers[i] end = None if (i < (maxcount - 1)): end = self.__splitpointers[i + 1] chunk = self.__get_chunk(start, end) parser = sarparse.Parser() cpu_usage, mem_usage, swp_usage, io_usage = \ parser._parse_file(parser._split_file(chunk)) self.__sarinfos[self.__get_part_date(chunk)] = { "cpu": cpu_usage, "mem": mem_usage, "swap": swp_usage, "io": io_usage } del(cpu_usage) del(mem_usage) del(swp_usage) del(io_usage) del(parser) return(True)
Loads combined SAR format logfile in ASCII format. :return: ``True`` if loading and parsing of file went fine, \ ``False`` if it failed (at any point)
25,756
def get_loggers(self): return self.log.debug, self.log.info, self.log.warn, self.log.error
Return a list of the logger methods: (debug, info, warn, error)
25,757
def update(self, action: torch.Tensor) -> 'ChecklistStatelet': checklist_addition = (self.terminal_actions == action).float() new_checklist = self.checklist + checklist_addition new_checklist_state = ChecklistStatelet(terminal_actions=self.terminal_actions, checklist_target=self.checklist_target, checklist_mask=self.checklist_mask, checklist=new_checklist, terminal_indices_dict=self.terminal_indices_dict) return new_checklist_state
Takes an action index, updates checklist and returns an updated state.
25,758
def delete_by_hash(self, file_hash): full_path = self.file_path_from_hash(file_hash) return self.delete_by_path(full_path)
Remove file/archive by its `file_hash`. Args: file_hash (str): Hash, which is used to find the file in storage. Raises: IOError: If the file for given `file_hash` was not found in \ storage.
25,759
def _cram_to_fastq_regions(regions, cram_file, dirs, data): base_name = utils.splitext_plus(os.path.basename(cram_file))[0] work_dir = utils.safe_makedir(os.path.join(dirs["work"], "align_prep", "%s-parts" % base_name)) fnames = run_multicore(_cram_to_fastq_region, [(cram_file, work_dir, base_name, region, data) for region in regions], data["config"]) if any(not _is_gzip_empty(p1) for p1, p2, s in fnames): out = [[p1, p2] for p1, p2, s in fnames] else: out = [[s] for p1, p2, s in fnames] return out, work_dir
Convert CRAM files to fastq, potentially within sub regions. Returns multiple fastq files that can be merged back together.
25,760
def dtypes(self): from pandas import Series return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_)
Return the dtypes in the DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the ``object`` dtype. See :ref:`the User Guide <basics.dtypes>` for more. Returns ------- pandas.Series The data type of each column. See Also -------- DataFrame.ftypes : Dtype and sparsity information. Examples -------- >>> df = pd.DataFrame({'float': [1.0], ... 'int': [1], ... 'datetime': [pd.Timestamp('20180310')], ... 'string': ['foo']}) >>> df.dtypes float float64 int int64 datetime datetime64[ns] string object dtype: object
25,761
def _read_as_dict(self): data = list() for row in self._rows: row_data = OrderedDict() for i, header in enumerate(self.headers): row_data[header.cget()] = row[i].get() data.append(row_data) return data
Read the data contained in all entries as a list of dictionaries with the headers as the dictionary keys :return: list of dicts containing all tabular data
25,762
def epoch_to_human_time(epoch_time): if isinstance(epoch_time, int): try: d = datetime.datetime.fromtimestamp(epoch_time / 1000) return d.strftime("%m-%d-%Y %H:%M:%S ") except ValueError: return None
Converts an epoch timestamp to human readable time. This essentially converts an output of get_current_epoch_time to an output of get_current_human_time Args: epoch_time: An integer representing an epoch timestamp in milliseconds. Returns: A time string representing the input time. None if input param is invalid.
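A small usage sketch (the rendered string depends on the local timezone):

ms = 1546300800000                 # 2019-01-01 00:00:00 UTC, in milliseconds
print(epoch_to_human_time(ms))     # e.g. '01-01-2019 00:00:00 ' in UTC
print(epoch_to_human_time('bad'))  # None: non-int input falls through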
25,763
def execute_sync(self, message): info("synchronizing message: {message}") with self.world._unlock_temporarily(): message._sync(self.world) self.world._react_to_sync_response(message) for actor in self.actors: actor._react_to_sync_response(message)
Respond when the server indicates that the client is out of sync. The server can request a sync when this client sends a message that fails the check() on the server. If the reason for the failure isn't very serious, then the server can decide to send it as usual in the interest of a smooth gameplay experience. When this happens, the server sends out an extra response providing the clients with the information they need to resync themselves.
25,764
def create_assessment(self, assessment_form): collection = JSONClientValidated('assessment', collection='Assessment', runtime=self._runtime) if not isinstance(assessment_form, ABCAssessmentForm): raise errors.InvalidArgument() if assessment_form.is_for_update(): raise errors.InvalidArgument() try: if self._forms[assessment_form.get_id().get_identifier()] == CREATED: raise errors.IllegalState() except KeyError: raise errors.Unsupported() if not assessment_form.is_valid(): raise errors.InvalidArgument() insert_result = collection.insert_one(assessment_form._my_map) self._forms[assessment_form.get_id().get_identifier()] = CREATED result = objects.Assessment( osid_object_map=collection.find_one({'_id': insert_result.inserted_id}), runtime=self._runtime, proxy=self._proxy) return result  # the stripped collection name and query key were restored from the dlkit pattern
Creates a new ``Assessment``. arg: assessment_form (osid.assessment.AssessmentForm): the form for this ``Assessment`` return: (osid.assessment.Assessment) - the new ``Assessment`` raise: IllegalState - ``assessment_form`` already used in a create transaction raise: InvalidArgument - one or more of the form elements is invalid raise: NullArgument - ``assessment_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred raise: Unsupported - ``assessment_form`` did not originate from ``get_assessment_form_for_create()`` *compliance: mandatory -- This method must be implemented.*
25,765
def set_token(self, token): if token: auth = HTTPBasicAuth(token, '') self._token = token self.authenticated = True self.session.auth = auth logger.debug("Using session token: %s", token) else: self._token = None self.authenticated = False self.session.auth = None logger.debug("Session token/auth reinitialised")
Set token in authentication for next requests :param token: str. token to set in auth. If None, reinit auth
25,766
def event_info(self, event): def default_func(event): return {} registry = { AxesImage : [pick_info.image_props], PathCollection : [pick_info.scatter_props, self._contour_info, pick_info.collection_props], Line2D : [pick_info.line_props, pick_info.errorbar_props], LineCollection : [pick_info.collection_props, self._contour_info, pick_info.errorbar_props], PatchCollection : [pick_info.collection_props, self._contour_info], PolyCollection : [pick_info.collection_props, pick_info.scatter_props], QuadMesh : [pick_info.collection_props], Rectangle : [pick_info.rectangle_props], } x, y = event.mouseevent.xdata, event.mouseevent.ydata props = dict(x=x, y=y, label=event.artist.get_label(), event=event) props['ind'] = getattr(event, 'ind', None) props['point_label'] = self._point_label(event) funcs = registry.get(type(event.artist), [default_func]) funcs += [pick_info.three_dim_props] for func in funcs: props.update(func(event)) return props  # the two stripped props keys were restored from the mpldatacursor pattern
Get a dict of info for the artist selected by "event".
25,767
def redirect( to, headers=None, status=302, content_type="text/html; charset=utf-8" ): headers = headers or {} safe_to = quote_plus(to, safe=":/%#?&=@[]!$&'()*+,;") headers["Location"] = safe_to return HTTPResponse( status=status, headers=headers, content_type=content_type )  # the truncated safe-character set was restored from the Sanic pattern
Abort execution and cause a 302 redirect (by default). :param to: path or fully qualified URL to redirect to :param headers: optional dict of headers to include in the new request :param status: status code (int) of the new request, defaults to 302 :param content_type: the content type (string) of the response :returns: the redirecting Response
25,768
def csv( self, filepath=None ): self.log.debug('starting the ``csv`` method') renderedData = self._list_of_dictionaries_to_csv("machine") if filepath and renderedData != "NO MATCH": if not os.path.exists(os.path.dirname(filepath)): os.makedirs(os.path.dirname(filepath)) writeFile = codecs.open(filepath, encoding='utf-8', mode='w') writeFile.write(renderedData) writeFile.close() self.log.debug('completed the ``csv`` method') return renderedData  # stripped encoding/mode and log strings restored as best guesses
*Render the data in CSV format* **Key Arguments:** - ``filepath`` -- path to the file to write the csv content to. Default *None* **Return:** - ``renderedData`` -- the data rendered in csv format **Usage:** To render the data set as csv: .. code-block:: python print dataSet.csv() .. code-block:: text owner,pet,address daisy,dog,"belfast, uk" john,snake,the moon susan,crocodile,larne and to save the csv rendering to file: .. code-block:: python dataSet.csv("/path/to/myfile.csv")
25,769
def wait(timeout=None, flush=True): if timeout is not None: timeout = timeout + _time.clock() while True: if _eventQueue: return _eventQueue.pop(0) if flush: _tdl.flush() if timeout and _time.clock() >= timeout: return None _time.sleep(0.001) _processEvents()
Wait for an event. Args: timeout (Optional[int]): The time in seconds that this function will wait before giving up and returning None. With the default value of None, this will block forever. flush (bool): If True a call to :any:`tdl.flush` will be made before listening for events. Returns: Type[Event]: An event, or None if the function has timed out. Anything added via :any:`push` will also be returned.
25,770
def cancel(self, workflow_id): self.logger.debug('Canceling workflow: ' + workflow_id) url = '%(wf_url)s/%(wf_id)s/cancel' % { 'wf_url': self.workflows_url, 'wf_id': workflow_id } r = self.gbdx_connection.post(url, data='') r.raise_for_status()  # the stripped URL template keys and log string were restored as best guesses
Cancels a running workflow. Args: workflow_id (str): Workflow id. Returns: Nothing
25,771
def get_running_race(self, race_id): raw = self.protocol.get('/running_races/{id}', id=race_id) return model.RunningRace.deserialize(raw, bind_client=self)  # the stripped endpoint string was restored from the stravalib pattern
Gets a running race for a given identifier. http://strava.github.io/api/v3/running_races/#list :param race_id: id for the race :rtype: :class:`stravalib.model.RunningRace`
25,772
def as_hyperbola(self, rotated=False): idx = N.diag_indices(3) _ = 1/self.covariance_matrix[idx] d = list(_) d[-1] *= -1 arr = N.identity(4)*-1 arr[idx] = d hyp = conic(arr) if rotated: R = augment(self.axes) hyp = hyp.transform(R) return hyp
Hyperbolic error area
25,773
def chmod(path, mode=None, user=None, group=None, other=None, recursive=False): successful = True mode = _ops_mode(mode) if user is not None: mode.user = user if group is not None: mode.group = group if other is not None: mode.other = other if recursive: for p in find(path, no_peek=True): successful = _chmod(p, mode) and successful else: successful = _chmod(path, mode) return successful
Changes file mode permissions. >>> if chmod('/tmp/one', 0755): ... print('OK') OK NOTE: The preceding ``0`` is required when using a numerical mode.
25,774
def rgamma(alpha, beta, size=None): return np.random.gamma(shape=alpha, scale=1. / beta, size=size)
Random gamma variates.
25,775
def initialize_ui(self): LOGGER.debug("> Initializing '{0}' Component ui.".format(self.__class__.__name__)) self.__model = ProjectsProxyModel(self) self.__model.setSourceModel(self.__script_editor.model) self.__delegate = RichText_QStyledItemDelegate(self, self.__style) self.Projects_Explorer_treeView.setParent(None) self.Projects_Explorer_treeView = Projects_QTreeView(self, self.__model) self.Projects_Explorer_treeView.setItemDelegate(self.__delegate) self.Projects_Explorer_treeView.setObjectName("Projects_Explorer_treeView") self.Projects_Explorer_treeView.setContextMenuPolicy(Qt.ActionsContextMenu) self.Projects_Explorer_dockWidgetContents_gridLayout.addWidget(self.Projects_Explorer_treeView, 0, 0) self.__view = self.Projects_Explorer_treeView self.__view_add_actions() self.__add_actions() self.__view.expanded.connect(self.__view__expanded) self.__view.doubleClicked.connect(self.__view__doubleClicked) self.__view.selectionModel().selectionChanged.connect(self.__view_selectionModel__selectionChanged) self.__script_editor.Script_Editor_tabWidget.currentChanged.connect( self.__script_editor_Script_Editor_tabWidget__currentChanged) self.__script_editor.model.project_registered.connect(self.__script_editor_model__project_registered) self.initialized_ui = True return True  # the stripped '{0}' placeholder in the debug message was restored to match its .format() call
Initializes the Component ui. :return: Method success. :rtype: bool
25,776
def PrependENVPath(self, name, newpath, envname = 'ENV', sep = os.pathsep, delete_existing=1): orig = '' if envname in self._dict and name in self._dict[envname]: orig = self._dict[envname][name] nv = SCons.Util.PrependPath(orig, newpath, sep, delete_existing, canonicalize=self._canonicalize) if envname not in self._dict: self._dict[envname] = {} self._dict[envname][name] = nv
Prepend path elements to the path 'name' in the 'ENV' dictionary for this environment. Will only add any particular path once, and will normpath and normcase all paths to help assure this. This can also handle the case where the env variable is a list instead of a string. If delete_existing is 0, a newpath which is already in the path will not be moved to the front (it will be left where it is).
25,777
def initialize(self, config): assert config.workflowID is None config.workflowID = str(uuid4()) logger.debug("The workflow ID is: %s" % config.workflowID) self.__config = config self.writeConfig()
Create the physical storage for this job store, allocate a workflow ID and persist the given Toil configuration to the store. :param toil.common.Config config: the Toil configuration to initialize this job store with. The given configuration will be updated with the newly allocated workflow ID. :raises JobStoreExistsException: if the physical storage for this job store already exists
25,778
def climb_stairs(n): arr = [1, 1] for _ in range(1, n): arr.append(arr[-1] + arr[-2]) return arr[-1]
:type n: int :rtype: int
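The loop above is just the Fibonacci recurrence ways(n) = ways(n-1) + ways(n-2), since the last move onto step n is either a single or a double step; a quick check:

for n in range(1, 6):
    print(n, climb_stairs(n))   # 1->1, 2->2, 3->3, 4->5, 5->8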
25,779
def surface(self, param): squeeze_out = (np.broadcast(*param).shape == ()) param_in = param param = tuple(np.array(p, dtype=float, copy=False, ndmin=1) for p in param) if self.check_bounds and not is_inside_bounds(param, self.params): raise ValueError('`param` {} not in the valid range {}'.format(param_in, self.params)) surf = sum(np.multiply.outer(p, ax) for p, ax in zip(param, self.axes)) if squeeze_out: surf = surf.squeeze() return surf  # the stripped error-message string was restored as a best guess
Return the detector surface point corresponding to ``param``. For parameter value ``p``, the surface point is given by :: surf = p[0] * axes[0] + p[1] * axes[1] Parameters ---------- param : `array-like` or sequence Parameter value(s) at which to evaluate. A sequence of parameters must have length 2. Returns ------- point : `numpy.ndarray` Vector(s) pointing from the origin to the detector surface point at ``param``. If ``param`` is a single parameter, the returned array has shape ``(3,)``, otherwise ``broadcast(*param).shape + (3,)``. Examples -------- The method works with a single parameter, resulting in a single vector: >>> part = odl.uniform_partition([0, 0], [1, 1], (10, 10)) >>> det = Flat2dDetector(part, axes=[(1, 0, 0), (0, 0, 1)]) >>> det.surface([0, 0]) array([ 0., 0., 0.]) >>> det.surface([0, 1]) array([ 0., 0., 1.]) >>> det.surface([1, 1]) array([ 1., 0., 1.]) It is also vectorized, i.e., it can be called with multiple parameters at once (or n-dimensional arrays of parameters): >>> # 3 pairs of parameters, resulting in 3 vectors >>> det.surface([[0, 0, 1], ... [0, 1, 1]]) array([[ 0., 0., 0.], [ 0., 0., 1.], [ 1., 0., 1.]]) >>> # Pairs of parameters in a (4, 5) array each >>> param = (np.zeros((4, 5)), np.zeros((4, 5))) >>> det.surface(param).shape (4, 5, 3) >>> # Using broadcasting for "outer product" type result >>> param = (np.zeros((4, 1)), np.zeros((1, 5))) >>> det.surface(param).shape (4, 5, 3)
25,780
def roll_mean(input, window): nobs, i, j, sum_x = 0,0,0,0. N = len(input) if window > N: raise ValueError('window is longer than input') output = np.ndarray(N-window+1,dtype=input.dtype) for val in input[:window]: if val == val: nobs += 1 sum_x += val output[j] = NaN if not nobs else sum_x / nobs for val in input[window:]: prev = input[j] if prev == prev: sum_x -= prev nobs -= 1 if val == val: nobs += 1 sum_x += val j += 1 output[j] = NaN if not nobs else sum_x / nobs return output  # the stripped ValueError message is a best-guess restoration
Apply a rolling mean function to an array. This is a simple rolling aggregation.
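A hedged usage sketch: NaN values are excluded from each window's count and sum (NaN is bound explicitly here because the helper above references it unqualified):

import numpy as np
from numpy import nan as NaN

data = np.array([1.0, 2.0, np.nan, 4.0])
print(roll_mean(data, 2))   # [1.5, 2.0, 4.0] -- the NaN is skipped, not propagated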
25,781
def _generate_limit_items(lower, upper): if lower is not None and upper is not None and lower == upper: yield 'fixed', upper + 0 else: if lower is not None: yield 'lower', lower + 0 if upper is not None: yield 'upper', upper + 0
Yield key, value pairs for limits dictionary. Yield pairs of key, value where key is ``lower``, ``upper`` or ``fixed``. A key, value pair is emitted if the bounds are not None.
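A minimal sketch, assuming the stripped keys were 'fixed', 'lower' and 'upper' as the docstring states:

print(dict(_generate_limit_items(0, 10)))    # {'lower': 0, 'upper': 10}
print(dict(_generate_limit_items(5, 5)))     # {'fixed': 5}
print(dict(_generate_limit_items(None, 3)))  # {'upper': 3}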
25,782
def generate_matrices(dim = 40): positive = numpy.random.uniform(-1, 1, (dim, dim)) negative = positive + numpy.random.normal(0, 1, (dim, dim)) return positive, negative
Generates the matrices that positive and negative samples are multiplied with. The matrix for positive samples is randomly drawn from a uniform distribution, with elements in [-1, 1]. The matrix for negative examples is the sum of the positive matrix with a matrix drawn from a normal distribution with mean 0 variance 1.
25,783
def list(self, id=None): args = {'id': id} self._job_chk.check(args) return self._client.json('job.list', args)  # the stripped key and command strings were restored from the Zero-OS client pattern
List all running jobs :param id: optional ID for the job to list
25,784
def wait_command(self, start_func, turns=1, end_func=None): self.disable_input() start_func() self.app.wait_turns(turns, cb=partial(self.enable_input, end_func))
Call ``start_func``, wait ``turns``, and then call ``end_func`` if provided Disables input for the duration. :param start_func: function to call just after disabling input :param turns: number of turns to wait :param end_func: function to call just before re-enabling input :return: ``None``
25,785
def add_to_dumper(dumper: Type, classes: List[Type]) -> None: if not isinstance(classes, list): classes = [classes] for class_ in classes: if issubclass(class_, enum.Enum): dumper.add_representer(class_, EnumRepresenter(class_)) elif issubclass(class_, str) or issubclass(class_, UserString): dumper.add_representer(class_, UserStringRepresenter(class_)) else: dumper.add_representer(class_, Representer(class_))
Register user-defined classes with the Dumper. This enables the Dumper to write objects of your classes to a \ YAML file. Note that all the arguments are types, not instances! Args: dumper: Your dumper class(!), derived from yatiml.Dumper classes: One or more classes to add.
25,786
def get_wrapped_stream(stream, encoding=None, errors="replace"): if stream is None: raise TypeError("must provide a stream to wrap") stream = _get_binary_buffer(stream) if stream is not None and encoding is None: encoding = "utf-8" if not encoding: encoding = get_output_encoding(stream) else: encoding = get_canonical_encoding_name(encoding) return StreamWrapper(stream, encoding, errors, line_buffering=True)
Given a stream, wrap it in a `StreamWrapper` instance and return the wrapped stream. :param stream: A stream instance to wrap :param str encoding: The encoding to use for the stream :param str errors: The error handler to use, default "replace" :returns: A new, wrapped stream :rtype: :class:`StreamWrapper`
25,787
def get_aggregates(self, request): pipeline = crud_pipeline_factory.get_aggregates_pipeline( configuration=self.configuration) return pipeline(request=request)
Implements the Get aggregates (total number of objects filtered) maps to PATCH /api/object_name/get_aggregates/ in rest semantics :param request: rip.Request :return: rip.Response
25,788
def raise_from_response(resp): if resp.status_code < 400: return raise BackendError(status_code=resp.status_code, reason=resp.reason, content=resp.text)
Turn a failed request response into a BackendError. Handy for reflecting HTTP errors from farther back in the call chain. Parameters ---------- resp: :class:`requests.Response` Raises ------ :class:`apikit.BackendError` If `resp.status_code` is equal to or greater than 400.
25,789
def add_alarm_action(self, action_arn=None): if not action_arn: return self.actions_enabled = 'true' self.alarm_actions.append(action_arn)  # the stripped value was restored from the boto CloudWatch pattern
Adds an alarm action, represented as an SNS topic, to this alarm. What to do when alarm is triggered. :type action_arn: str :param action_arn: SNS topics to which notification should be sent if the alarm goes to state ALARM.
25,790
def get_time_slide_id(xmldoc, time_slide, create_new = None, superset_ok = False, nonunique_ok = False): try: tisitable = lsctables.TimeSlideTable.get_table(xmldoc) except ValueError: if create_new is None: raise tisitable = lsctables.New(lsctables.TimeSlideTable) xmldoc.childNodes[0].appendChild(tisitable) tisitable.sync_next_id() return tisitable.get_time_slide_id(time_slide, create_new = create_new, superset_ok = superset_ok, nonunique_ok = nonunique_ok)
Return the time_slide_id corresponding to the offset vector described by time_slide, a dictionary of instrument/offset pairs. Example: >>> get_time_slide_id(xmldoc, {"H1": 0, "L1": 0}) 'time_slide:time_slide_id:10' This function is a wrapper around the .get_time_slide_id() method of the pycbc_glue.ligolw.lsctables.TimeSlideTable class. See the documentation for that class for the meaning of the create_new, superset_ok and nonunique_ok keyword arguments. This function requires the document to contain exactly one time_slide table. If the document does not contain exactly one time_slide table then ValueError is raised, unless the optional create_new argument is not None. In that case a new table is created. This effect of the create_new argument is in addition to the affects described by the TimeSlideTable class.
25,791
def get_evpn_table(self): evpn_table = self._global_tables.get(RF_L2_EVPN) if not evpn_table: evpn_table = EvpnTable(self._core_service, self._signal_bus) self._global_tables[RF_L2_EVPN] = evpn_table self._tables[(None, RF_L2_EVPN)] = evpn_table return evpn_table
Returns global EVPN table. Creates the table if it does not exist.
25,792
def delete(self): with self._qpart: for cursor in self.cursors(): if cursor.hasSelection(): cursor.deleteChar()
Del or Backspace pressed. Delete selection
25,793
def similarity_matrix(self, x_subjects=None, y_subjects=None, symmetric=False): if x_subjects is None: x_subjects = [] if y_subjects is None: y_subjects = [] xset = set(x_subjects) yset = set(y_subjects) zset = xset.union(yset) gmap={} for z in zset: gmap[z] = self.inferred_types(z) ilist = [] for x in x_subjects: for y in y_subjects: if not symmetric or x<y: shared = gmap[x].intersection(gmap[y]) union = gmap[x].union(gmap[y]) j = 0 if len(union)>0: j = len(shared) / len(union) ilist.append({'x':x, 'y':y, 'shared':shared, 'c':len(shared), 'j':j}) return self.intersectionlist_to_matrix(ilist, x_subjects, y_subjects)  # the stripped dict keys were restored from the key names listed in the docstring
Query for similarity matrix between groups of subjects Return a list of intersection result objects with keys: - x : term from x - y : term from y - c : count of intersection - j : jaccard score
25,794
def build_groetzch_graph(): adj = [[0 for _ in range(11)] for _ in range(11)] row_connections = [] row_connections.append( (1,2,7,10) ) row_connections.append( (0,3,6,9) ) row_connections.append( (0,4,6,8) ) row_connections.append( (1,4,8,10) ) row_connections.append( (2,3,7,9) ) row_connections.append( (6,7,8,9,10) ) row_connections.append( (1,2,5) ) row_connections.append( (0,4,5) ) row_connections.append( (2,3,5) ) row_connections.append( (1,4,5) ) row_connections.append( (0,3,5) ) for j, tpl in enumerate(row_connections): for i in tpl: adj[j][i] = 1 adj[i][j] = 1 graph, _ = create_graph_from_adjacency_matrix(adj) return graph
Makes a new Groetzsch graph. Ref: http://mathworld.wolfram.com/GroetzschGraph.html
25,795
def cprint(self, cstr): cstr = str(cstr) cstr_len = len(cstr) prev_cstr_len = len(self._prev_cstr) num_spaces = 0 if cstr_len < prev_cstr_len: num_spaces = abs(prev_cstr_len - cstr_len) try: print(cstr + " " * num_spaces, end='\r') self._prev_cstr = cstr except UnicodeEncodeError: print('...', end='\r') self._prev_cstr = ''  # the stripped end='\r' (same-line reprint) and fallback strings are best-guess restorations
Clear line, then reprint on same line :param cstr: string to print on current line
25,796
def generate_idx(maxlen, nedit): ALPHABET = ["A", "C", "G", "T", "N"] indexlists = [] ALPHABETS = [ALPHABET for x in range(nedit)] return list(itertools.product(itertools.combinations(range(maxlen), nedit), *ALPHABETS))
generate all possible nedit edits of a string. each item has the form ((index1, index2), 'A', 'G') for nedit=2 index1 will be replaced by 'A', index2 by 'G' this covers all edits < nedit as well since some of the specified substitutions will not change the base
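A small sketch of what the generator enumerates for a 2-character string with one edit: every position tuple paired with every replacement base.

edits = generate_idx(maxlen=2, nedit=1)
print(len(edits))   # 10 == C(2,1) positions * 5 alphabet letters
print(edits[:3])    # [((0,), 'A'), ((0,), 'C'), ((0,), 'G')]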
25,797
def mt_modelform_register_clean_method(form_self, field_name, func, nomaster=False): args_list = [('{}_{}'.format(field_name, lang), lang) for lang in AVAILABLE_LANGUAGES] if not nomaster: args_list.append((field_name, None)) def _get_mt_clean_method(args): def _mt_clean_method(): return func(*args) return _mt_clean_method for item_args in args_list: method_name = 'clean_{}'.format(item_args[0]) setattr(form_self, method_name, _get_mt_clean_method(item_args))  # the stripped format strings were restored from the 'clean_<field_name>' convention in the docstring
You can add clean_<field_name> for each translated field. For example: class MyModelForm(models.ModelForm): class Meta: model = MyModel _mt_fields = mt_fields(('title', 'description'), nomaster=True) fields = _mt_fields def __init__(self, *args, **kwargs): super(ItemCategoryEditForm, self).__init__(*args, **kwargs) mt_modelform_register_clean_method(self, 'title', self.mt_clean_title, nomaster=True) def mt_clean_title(self, field_name, lang): value = self.cleaned_data[field_name] # validation here return value
25,798
def on_batch_end(self, iteration:int, smooth_loss:TensorOrNumber, **kwargs:Any)->None: "Determine if loss has runaway and we should stop." if iteration==0 or smooth_loss < self.best_loss: self.best_loss = smooth_loss self.opt.lr = self.sched.step() if self.sched.is_done or (self.stop_div and (smooth_loss > 4*self.best_loss or torch.isnan(smooth_loss))): return {'stop_epoch': True, 'stop_training': True}  # the truncated early-stop return was restored from the fastai LRFinder pattern
Determine if loss has runaway and we should stop.
25,799
def write(self, image, options, thumbnail): format_ = options['format'] quality = options['quality'] image_info = options.get('image_info', {}) progressive = options.get('progressive', settings.THUMBNAIL_PROGRESSIVE) raw_data = self._get_raw_data( image, format_, quality, image_info=image_info, progressive=progressive ) thumbnail.write(raw_data)
Wrapper for ``_write``