Dataset columns: Unnamed: 0 (int64, values 0–389k), code (string, lengths 26–79.6k), docstring (string, lengths 1–46.9k).
11,500
def get_field_def(
    schema: GraphQLSchema, parent_type: GraphQLType, field_node: FieldNode
) -> Optional[GraphQLField]:
    name = field_node.name.value
    if name == "__schema" and schema.query_type is parent_type:
        return SchemaMetaFieldDef
    if name == "__type" and schema.query_type is parent_type:
        return TypeMetaFieldDef
    if name == "__typename" and is_composite_type(parent_type):
        return TypeNameMetaFieldDef
    if is_object_type(parent_type) or is_interface_type(parent_type):
        parent_type = cast(Union[GraphQLObjectType, GraphQLInterfaceType], parent_type)
        return parent_type.fields.get(name)
    return None
Get field definition. This is not exactly the same as the executor's `get_field_def()`: in this statically evaluated environment we do not always have an Object type and need to handle Interface and Union types.
11,501
def update(self, feedforwardInputI, feedforwardInputE, v, recurrent=True, envelope=False, iSpeedTuning=False, enforceDale=True): self.instantaneousI.fill(0) self.instantaneousEL.fill(0) self.instantaneousER.fill(0) self.instantaneousI += feedforwardInputI self.instantaneousEL += feedforwardInputE self.instantaneousER += feedforwardInputE if enforceDale: weightsII = np.minimum(self.weightsII, 0) weightsIER = np.minimum(self.weightsIER, 0) weightsIEL = np.minimum(self.weightsIEL, 0) weightsELI = np.maximum(self.weightsELI, 0) weightsERI = np.maximum(self.weightsERI, 0) else: weightsII = self.weightsII weightsIER = self.weightsIER weightsIEL = self.weightsIEL weightsELI = self.weightsELI weightsERI = self.weightsERI if recurrent: self.instantaneousI += (np.matmul(self.activationsEL, weightsELI) +\ np.matmul(self.activationsER, weightsERI) +\ np.matmul(self.activationsI, weightsII)) self.instantaneousEL += np.matmul(self.activationsI, weightsIEL) self.instantaneousER += np.matmul(self.activationsI, weightsIER) self.instantaneousEL *= max((1 - self.velocityGain*v), 0) self.instantaneousER *= max((1 + self.velocityGain*v), 0) if iSpeedTuning: self.instantaneousI *= min(self.velocityGain*np.abs(v), 1) self.instantaneousI += self.constantTonicMagnitude self.instantaneousEL += self.constantTonicMagnitude self.instantaneousER += self.constantTonicMagnitude if envelope: self.instantaneousI *= self.envelopeI self.instantaneousER *= self.envelopeE self.instantaneousEL *= self.envelopeE np.maximum(self.instantaneousI, 0., self.instantaneousI) np.maximum(self.instantaneousEL, 0., self.instantaneousEL) np.maximum(self.instantaneousER, 0., self.instantaneousER) self.activationsI += (self.instantaneousI - self.activationsI/self.decayConstant)*self.dt self.activationsEL += (self.instantaneousEL - self.activationsEL/self.decayConstant)*self.dt self.activationsER += (self.instantaneousER - self.activationsER/self.decayConstant)*self.dt np.minimum(self.activationsI, self.clip, self.activationsI) np.minimum(self.activationsEL, self.clip, self.activationsEL) np.minimum(self.activationsER, self.clip, self.activationsER)
Do one update of the CAN network, of length self.dt. :param feedforwardInputI: The feedforward input to inhibitory cells. :param feedforwardInputE: The feedforward input to excitatory cells. :param v: The current velocity. :param recurrent: Whether or not recurrent connections should be used. :param envelope: Whether or not an envelope should be applied. :param iSpeedTuning: Whether or not inhibitory cells should also have their activations partially depend on current movement speed. This is necessary for periodic training, serving a role similar to that of the envelope. :param enforceDale: Whether or not Dale's law should be enforced locally. Helps with training with recurrent weights active, but can slow down training.
11,502
def cat_top_keywords(self, session, cat, up=True, offset=0, offsets=[]): print %(str(cat), str(cat.level)) print %offset response = [] if not offsets or offset==0: url = %(cat.parent.cid, if cat.level==2 else str(cat.cid), if up else , offset) print url rs = self.fetch(url) if not rs: return response soup = BeautifulSoup(rs.content, convertEntities=BeautifulSoup.HTML_ENTITIES, markupMassage=hexentityMassage) response = self.parse_cat_top_keywords(soup, offset) if offset==0: offsets = self.get_cat_top_keywords_pages(soup, offset) print %offsets if offsets: rs = [] threadPool = ThreadPool(len(offsets) if len(offsets)<=5 else 5) for idx, page_offset in enumerate(offsets): page_url = %(cat.parent.cid, if cat.level==2 else str(cat.cid), if up else , page_offset) next_page = if idx == (len(offsets)-1) else threadPool.run(self.fetch, callback=None, url=page_url, config=dict(get_next=next_page, offset=page_offset)) pages = threadPool.killAllWorkers(None) for p in pages: if not p: continue soup2 = BeautifulSoup(p.content, convertEntities=BeautifulSoup.HTML_ENTITIES, markupMassage=hexentityMassage) offset2 = int(p.config[]) response += self.parse_cat_top_keywords(soup2, offset2) print %offset2 if p.config[] != : continue offsets = self.get_cat_top_keywords_pages(soup2, offset2) print offsets if not offsets: continue response += self.cat_top_keywords(session, cat, up, offset2, offsets) for k in response: new_keyword = models.Keyword(k[].decode()) new_keyword.categories.append(cat) session.add(new_keyword) try: session.commit() except IntegrityError: session.rollback() new_keyword = session.query(models.Keyword).filter(models.Keyword.name == k[]).first() new_keyword.categories.append(cat) session.commit() print %new_keyword return response
Get top keywords in a specific category
11,503
def add_namespaces(spec_dict):
    for ns in spec_dict["namespaces"]:
        spec_dict["namespaces"][ns]["list"] = []
        spec_dict["namespaces"][ns]["list_long"] = []
        spec_dict["namespaces"][ns]["list_short"] = []
        spec_dict["namespaces"][ns]["to_short"] = {}
        spec_dict["namespaces"][ns]["to_long"] = {}
        for obj in spec_dict["namespaces"][ns]["info"]:
            spec_dict["namespaces"][ns]["list"].extend([obj["name"], obj["abbreviation"]])
            spec_dict["namespaces"][ns]["list_short"].append(obj["abbreviation"])
            spec_dict["namespaces"][ns]["list_long"].append(obj["name"])
            spec_dict["namespaces"][ns]["to_short"][obj["abbreviation"]] = obj["abbreviation"]
            spec_dict["namespaces"][ns]["to_short"][obj["name"]] = obj["abbreviation"]
            spec_dict["namespaces"][ns]["to_long"][obj["abbreviation"]] = obj["name"]
            spec_dict["namespaces"][ns]["to_long"][obj["name"]] = obj["name"]
            if "abbrev1" in obj:
                spec_dict["namespaces"][ns]["to_short"][obj["abbrev1"]] = obj["abbreviation"]
                spec_dict["namespaces"][ns]["to_long"][obj["abbrev1"]] = obj["name"]
Add namespace convenience keys, list, list_{short|long}, to_{short|long}
11,504
def parse_info_frags(info_frags):
    new_scaffolds = {}
    with open(info_frags, "r") as info_frags_handle:
        current_new_contig = None
        for line in info_frags_handle:
            if line.startswith(">"):
                current_new_contig = str(line[1:-1])
                new_scaffolds[current_new_contig] = []
            elif line.startswith("init_contig"):
                pass
            else:
                (init_contig, id_frag, orientation, pos_start, pos_end) = str(
                    line[:-1]
                ).split("\t")
                start = int(pos_start)
                end = int(pos_end)
                ori = int(orientation)
                fragid = int(id_frag)
                assert start < end
                assert ori in {-1, 1}
                new_scaffolds[current_new_contig].append(
                    [init_contig, fragid, start, end, ori]
                )
    return new_scaffolds
Import an info_frags.txt file and return a dictionary where each key is a newly formed scaffold and each value is the list of bins and their origin on the initial scaffolding.
11,505
def __connect(self):
    self.__methods = _get_methods_by_uri(self.sqluri)
    uri_connect_method = self.__methods[METHOD_CONNECT]
    self.__dbapi2_conn = uri_connect_method(self.sqluri)
Connect to the database.
11,506
def assign(self, object_type, object_uuid, overwrite=False):
    if self.is_deleted():
        raise PIDInvalidAction(
            "You cannot assign objects to a deleted/redirected persistent"
            " identifier."
        )
    if not isinstance(object_uuid, uuid.UUID):
        object_uuid = uuid.UUID(object_uuid)
    if self.object_type or self.object_uuid:
        if object_type == self.object_type and \
                object_uuid == self.object_uuid:
            return True
        if not overwrite:
            raise PIDObjectAlreadyAssigned(object_type, object_uuid)
        self.unassign()
    try:
        with db.session.begin_nested():
            self.object_type = object_type
            self.object_uuid = object_uuid
            db.session.add(self)
    except SQLAlchemyError:
        logger.exception("Failed to assign {0}:{1}".format(
            object_type, object_uuid), extra=dict(pid=self))
        raise
    logger.info("Assigned object {0}:{1}".format(
        object_type, object_uuid), extra=dict(pid=self))
    return True
Assign this persistent identifier to a given object. Note, the persistent identifier must first have been reserved. Also, if an existing object is already assigned to the pid, it will raise an exception unless overwrite=True. :param object_type: The object type is a string that identifies its type. :param object_uuid: The object UUID. :param overwrite: Force PID overwrite in case it was previously assigned. :raises invenio_pidstore.errors.PIDInvalidAction: If the PID was previously deleted. :raises invenio_pidstore.errors.PIDObjectAlreadyAssigned: If the PID was previously assigned with a different type/uuid. :returns: `True` if the PID is successfully assigned.
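A minimal usage sketch of the assignment flow described above; the `pid` instance, the "rec" object type, and the surrounding Invenio application context are assumptions for illustration.

import uuid

# Hypothetical illustration: assumes `pid` is an existing, reserved
# PersistentIdentifier inside an Invenio application context.
record_uuid = uuid.uuid4()
pid.assign(object_type="rec", object_uuid=record_uuid)                    # first assignment
pid.assign(object_type="rec", object_uuid=record_uuid)                    # same object: no-op, returns True
pid.assign(object_type="rec", object_uuid=uuid.uuid4(), overwrite=True)   # replace the assignment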
11,507
def _placement_points_generator(self, skyline, width):
    skyline_r = skyline[-1].right
    skyline_l = skyline[0].left
    ppointsl = (s.left for s in skyline if s.left + width <= skyline_r)
    ppointsr = (s.right - width for s in skyline if s.right - width >= skyline_l)
    return heapq.merge(ppointsl, ppointsr)
Returns a generator for the x coordinates of all the placement points on the skyline for a given rectangle. WARNING: In some cases duplicate points may be yielded, but it is faster to compute them twice than to remove them. Arguments: skyline (list): Skyline HSegment list width (int, float): Rectangle width Returns: generator
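A small runnable sketch of the candidate points this generator yields, using a namedtuple as a stand-in for the skyline HSegment objects (the stand-in class and the example skyline are assumptions).

from collections import namedtuple
import heapq

HSegment = namedtuple("HSegment", ["left", "right"])  # stand-in for the real segment class

skyline = [HSegment(0, 4), HSegment(4, 9)]
width = 3
skyline_r, skyline_l = skyline[-1].right, skyline[0].left
ppointsl = (s.left for s in skyline if s.left + width <= skyline_r)
ppointsr = (s.right - width for s in skyline if s.right - width >= skyline_l)
print(list(heapq.merge(ppointsl, ppointsr)))  # [0, 1, 4, 6]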
11,508
def main(): parser = __build_option_parser() args = parser.parse_args() analyze_ws = AnalyzeWS(args) try: analyze_ws.set_file(args.file_[0]) except IOError: print sys.exit(3) if args.to_file or args.to_browser: analyze_ws.to_file_mode() if args.to_browser: analyze_ws.to_browser_mode() else: analyze_ws.interactive_mode()
Main method of the script
11,509
def add_segy_view_widget(self, ind, widget, name=None):
    if self._context is None:
        self._segywidgets.append(widget)
        self.initialize()
        return 0
    self._tab_widget.updatesEnabled = False
    widget.show_toolbar(toolbar=True, layout_combo=False, colormap=True, save=True, settings=True)
    self._modify_qtree(widget.settings_window.qtree, [0, 1, 2, 4])
    if name is None:
        name = os.path.basename(widget.slice_data_source.source_filename)
    id = self._tab_widget.insertTab(ind, widget, name)
    widget.context.data_changed.connect(self._local_data_changed)
    self._tab_widget.updatesEnabled = True
    return id
:param widget: The SegyViewWidget that will be added to the SegyTabWidget :type widget: SegyViewWidget
11,510
def update(self, dict):
    for field in self.sorted_fields():
        if field in dict:
            if dict[field] is None:
                delattr(self, field)
            else:
                setattr(self, field, dict[field])
Set all field values from a dictionary. For every key in `dict` that is also a tag-storing field, the method retrieves the corresponding value from `dict` and updates the `MediaFile`. If a key has the value `None`, the corresponding property is deleted from the `MediaFile`.
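A short usage sketch of the update semantics described above; `mf` stands for an already-opened MediaFile and the field names are illustrative assumptions.

# Hypothetical illustration: `mf` is an already-opened MediaFile instance.
mf.update({"title": "New Title", "genre": None, "not_a_field": "ignored"})
# 'title' is set, 'genre' is deleted from the tags, and keys that are not
# fields are silently skipped.
mf.save()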
11,511
def expand_to_vector(x, tensor_name=None, op_name=None, validate_args=False):
    with tf.name_scope(op_name or "expand_to_vector"):
        x = tf.convert_to_tensor(value=x, name="x")
        ndims = tensorshape_util.rank(x.shape)
        if ndims is None:
            if validate_args:
                x = with_dependencies([
                    assert_util.assert_rank_at_most(
                        x, 1, message="Input is neither scalar nor vector.")
                ], x)
            ndims = tf.rank(x)
            expanded_shape = pick_vector(
                tf.equal(ndims, 0),
                np.array([1], dtype=np.int32),
                tf.shape(input=x))
            return tf.reshape(x, expanded_shape)
        elif ndims == 0:
            x_const = tf.get_static_value(x)
            if x_const is not None:
                return tf.convert_to_tensor(
                    value=dtype_util.as_numpy_dtype(x.dtype)([x_const]),
                    name=tensor_name)
            else:
                return tf.reshape(x, [1])
        elif ndims != 1:
            raise ValueError("Input is neither scalar nor vector.")
        return x
Transform a 0-D or 1-D `Tensor` to be 1-D. For user convenience, many parts of the TensorFlow Probability API accept inputs of rank 0 or 1 -- i.e., allowing an `event_shape` of `[5]` to be passed to the API as either `5` or `[5]`. This function can be used to transform such an argument to always be 1-D. NOTE: Python or NumPy values will be converted to `Tensor`s with standard type inference/conversion. In particular, an empty list or tuple will become an empty `Tensor` with dtype `float32`. Callers should convert values to `Tensor`s before calling this function if different behavior is desired (e.g. converting empty lists / other values to `Tensor`s with dtype `int32`). Args: x: A 0-D or 1-D `Tensor`. tensor_name: Python `str` name for `Tensor`s created by this function. op_name: Python `str` name for `Op`s created by this function. validate_args: Python `bool, default `False`. When `True`, arguments may be checked for validity at execution time, possibly degrading runtime performance. When `False`, invalid inputs may silently render incorrect outputs. Returns: vector: a 1-D `Tensor`.
11,512
def default_user_agent(name="python-requests"): _implementation = platform.python_implementation() if _implementation == : _implementation_version = platform.python_version() elif _implementation == : _implementation_version = % (sys.pypy_version_info.major, sys.pypy_version_info.minor, sys.pypy_version_info.micro) if sys.pypy_version_info.releaselevel != : _implementation_version = .join([_implementation_version, sys.pypy_version_info.releaselevel]) elif _implementation == : _implementation_version = platform.python_version() elif _implementation == : _implementation_version = platform.python_version() else: _implementation_version = try: p_system = platform.system() p_release = platform.release() except IOError: p_system = p_release = return " ".join([ % (name, __version__), % (_implementation, _implementation_version), % (p_system, p_release)])
Return a string representing the default user agent.
11,513
def _remove_none_values(dictionary):
    return list(map(dictionary.pop, [i for i in dictionary if dictionary[i] is None]))
Remove dictionary keys whose value is None
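A quick worked example of the helper above: the keys with None values are popped in place and their (None) values are returned.

d = {"a": 1, "b": None, "c": 3, "d": None}
removed = _remove_none_values(d)
print(d)        # {'a': 1, 'c': 3}
print(removed)  # [None, None] -- the popped values, not the keys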
11,514
def clone(self, fp):
    return self.__class__(fp, self._mangle_from_, None, policy=self.policy)
Clone this generator with the exact same options.
11,515
def check_type(self, value, attr, data):
    root_value = super(InstructionParameter, self).check_type(value, attr, data)
    if is_collection(value):
        _ = [super(InstructionParameter, self).check_type(item, attr, data)
             for item in value]
    return root_value
Customize check_type for handling containers.
11,516
def extract_archive(archive, verbosity=0, outdir=None, program=None, interactive=True):
    util.check_existing_filename(archive)
    if verbosity >= 0:
        util.log_info("Extracting %s ..." % archive)
    return _extract_archive(archive, verbosity=verbosity, interactive=interactive,
                            outdir=outdir, program=program)
Extract given archive.
11,517
def add_sibling(self, pos=None, **kwargs): pos = self._prepare_pos_var_for_add_sibling(pos) if len(kwargs) == 1 and in kwargs: newobj = kwargs[] if newobj.pk: raise NodeAlreadySaved("Attempted to add a tree node that is "\ "already in the database") else: newobj = get_result_class(self.__class__)(**kwargs) newobj.depth = self.depth sql = None target = self if target.is_root(): newobj.lft = 1 newobj.rgt = 2 if pos == : siblings = list(target.get_sorted_pos_queryset( target.get_siblings(), newobj)) if siblings: pos = target = siblings[0] else: pos = last_root = target.__class__.get_last_root_node() if ( (pos == ) or (pos == and target == last_root) ): newobj.tree_id = last_root.tree_id + 1 else: newpos = {: 1, : target.tree_id, : target.tree_id + 1}[pos] sql, params = target.__class__._move_tree_right(newpos) newobj.tree_id = newpos else: newobj.tree_id = target.tree_id if pos == : siblings = list(target.get_sorted_pos_queryset( target.get_siblings(), newobj)) if siblings: pos = target = siblings[0] else: pos = if pos in (, , ): siblings = list(target.get_siblings()) if pos == : if target == siblings[-1]: pos = else: pos = found = False for node in siblings: if found: target = node break elif node == target: found = True if pos == : if target == siblings[0]: pos = if pos == : target = siblings[0] move_right = self.__class__._move_right if pos == : newpos = target.get_parent().rgt sql, params = move_right(target.tree_id, newpos, False, 2) elif pos == : newpos = target.lft sql, params = move_right(target.tree_id, newpos - 1, False, 2) elif pos == : newpos = target.lft sql, params = move_right(target.tree_id, newpos, True, 2) newobj.lft = newpos newobj.rgt = newpos + 1 if sql: cursor = self._get_database_cursor() cursor.execute(sql, params) newobj.save() return newobj
Adds a new node as a sibling to the current node object.
11,518
def _get_mean(self, sites, C, ln_y_ref, exp1, exp2, v1): z1pt0 = sites.z1pt0 eta = epsilon = 0 ln_y = ( ln_y_ref + C[] * np.log(np.clip(sites.vs30, -np.inf, v1) / 1130) + C[] * (exp1 - exp2) * np.log((np.exp(ln_y_ref) + C[]) / C[]) + C[] * (1.0 - 1.0 / np.cosh( C[] * (z1pt0 - C[]).clip(0, np.inf))) + C[] / np.cosh(0.15 * (z1pt0 - 15).clip(0, np.inf)) + eta + epsilon ) return ln_y
Add site effects to an intensity. Implements eq. 5
11,519
def create_payload(self): payload = super(OverrideValue, self).create_payload() if hasattr(self, ): del payload[] if hasattr(self, ): del payload[] return payload
Remove ``smart_class_parameter_id`` or ``smart_variable_id``
11,520
def get_config_values(config_path, section, default=): values = {} if not os.path.isfile(config_path): raise IpaUtilsException( % config_path ) config = configparser.ConfigParser() try: config.read(config_path) except Exception: raise IpaUtilsException( ) try: values.update(config.items(default)) except Exception: pass try: values.update(config.items(section)) except Exception: pass return values
Parse ini config file and return a dict of values. The provided section overrides any values in default section.
11,521
def from_center(self, x=None, y=None, z=None, r=None, theta=None, h=None, reference=None):
    coords_to_endpoint = None
    if all([isinstance(i, numbers.Number) for i in (x, y, z)]):
        coords_to_endpoint = self.from_cartesian(x, y, z)
    if all([isinstance(i, numbers.Number) for i in (r, theta, h)]):
        coords_to_endpoint = self.from_polar(r, theta, h)
    coords_to_reference = Vector(0, 0, 0)
    if reference:
        coords_to_reference = self.coordinates(reference)
    return coords_to_reference + coords_to_endpoint
Accepts a set of (:x:, :y:, :z:) ratios for Cartesian coordinates or (:r:, :theta:, :h:) ratios/angles for Polar coordinates and returns a :Vector: using :reference: as the origin
11,522
def motif4struct_wei(W): from scipy import io import os fname = os.path.join(os.path.dirname(__file__), motiflib) mot = io.loadmat(fname) m4 = mot[] m4n = mot[] id4 = mot[].squeeze() n4 = mot[].squeeze() n = len(W) I = np.zeros((199, n)) Q = np.zeros((199, n)) F = np.zeros((199, n)) A = binarize(W, copy=True) As = np.logical_or(A, A.T) for u in range(n - 3): V1 = np.append(np.zeros((u,), dtype=int), As[u, u + 1:n + 1]) for v1 in np.where(V1)[0]: V2 = np.append(np.zeros((u,), dtype=int), As[v1, u + 1:n + 1]) V2[V1] = 0 V2 = np.logical_or( np.append(np.zeros((v1,)), As[u, v1 + 1:n + 1]), V2) for v2 in np.where(V2)[0]: vz = np.max((v1, v2)) V3 = np.append(np.zeros((u,), dtype=int), As[v2, u + 1:n + 1]) V3[V2] = 0 V3 = np.logical_or( np.append(np.zeros((v2,)), As[v1, v2 + 1:n + 1]), V3) V3[V1] = 0 V3 = np.logical_or( np.append(np.zeros((vz,)), As[u, vz + 1:n + 1]), V3) for v3 in np.where(V3)[0]: a = np.array((A[v1, u], A[v2, u], A[v3, u], A[u, v1], A[v2, v1], A[v3, v1], A[u, v2], A[v1, v2], A[ v3, v2], A[u, v3], A[v1, v3], A[v2, v3])) s = np.uint64( np.sum(np.power(10, np.arange(11, -1, -1)) * a)) ix = np.squeeze(s == m4n) w = np.array((W[v1, u], W[v2, u], W[v3, u], W[u, v1], W[v2, v1], W[v3, v1], W[u, v2], W[v1, v2], W[ v3, v2], W[u, v3], W[v1, v3], W[v2, v3])) M = w * m4[ix, :] id = id4[ix] - 1 l = n4[ix] x = np.sum(M, axis=1) / l M[M == 0] = 1 i = np.prod(M, axis=1)**(1 / l) q = i / x I[id, u] += i I[id, v1] += i I[id, v2] += i I[id, v3] += i Q[id, u] += q Q[id, v1] += q Q[id, v2] += q Q[id, v3] += q F[id, u] += 1 F[id, v1] += 1 F[id, v2] += 1 F[id, v3] += 1 return I, Q, F
Structural motifs are patterns of local connectivity. Motif frequency is the frequency of occurrence of motifs around a node. Motif intensity and coherence are weighted generalizations of motif frequency. Parameters ---------- W : NxN np.ndarray weighted directed connection matrix (all weights between 0 and 1) Returns ------- I : 199xN np.ndarray motif intensity matrix Q : 199xN np.ndarray motif coherence matrix F : 199xN np.ndarray motif frequency matrix Notes ----- Average intensity and coherence are given by I./F and Q./F.
11,523
def _select_options(self, options, keys, invert=False):
    options = self._merge_options(options)
    result = {}
    for key in options:
        if (invert and key not in keys) or (not invert and key in keys):
            result[key] = options[key]
    return result
Select the provided keys out of an options object. Selects the provided keys (or everything except the provided keys) out of an options object.
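The select/invert behaviour boils down to two dict comprehensions; a minimal sketch with invented option names:

options = {"color": "red", "size": 10, "debug": True}
keys = {"color", "size"}
# invert=False keeps only the listed keys:
print({k: v for k, v in options.items() if k in keys})      # {'color': 'red', 'size': 10}
# invert=True keeps everything except the listed keys:
print({k: v for k, v in options.items() if k not in keys})  # {'debug': True}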
11,524
def assert_array(A, shape=None, uniform=None, ndim=None, size=None, dtype=None, kind=None): r try: if shape is not None: if not np.array_equal(np.shape(A), shape): raise AssertionError(+str(shape)++str(np.shape(A))) if uniform is not None: shapearr = np.array(np.shape(A)) is_uniform = np.count_nonzero(shapearr-shapearr[0]) == 0 if uniform and not is_uniform: raise AssertionError(+str(shapearr)) elif not uniform and is_uniform: raise AssertionError(+str(shapearr)) if size is not None: if not np.size(A) == size: raise AssertionError(+str(size)++str(np.size(A))) if ndim is not None: if not ndim == np.ndim(A): raise AssertionError(+str(ndim)++str(np.ndim(A))) if dtype is not None: if not isinstance(A, (np.ndarray)) and not scisp.issparse(A): A = np.array(A) if kind == : if not (A.dtype.kind == or A.dtype.kind == ): raise AssertionError(+str(A.dtype.kind)) elif not A.dtype.kind == kind: raise AssertionError(+str(kind) ++str(A.dtype.kind)) except Exception as ex: if isinstance(ex, AssertionError): raise ex else: print(,ex) raise AssertionError(+ +str(A)++str(type(A)))
r""" Asserts whether the given array or sparse matrix has the given properties Parameters ---------- A : ndarray, scipy.sparse matrix or array-like the array under investigation shape : shape, optional, default=None asserts if the array has the requested shape. Be careful with vectors because this will distinguish between row vectors (1,n), column vectors (n,1) and arrays (n,). If you want to be less specific, consider using size square : None | True | False if not None, asserts whether the array dimensions are uniform (e.g. square for a ndim=2 array) (True), or not uniform (False). size : int, optional, default=None asserts if the arrays has the requested number of elements ndim : int, optional, default=None asserts if the array has the requested dimension dtype : type, optional, default=None asserts if the array data has the requested data type. This check is strong, e.g. int and int64 are not equal. If you want a weaker check, consider the kind option kind : string, optional, default=None Checks if the array data is of the specified kind. Options include 'i' for integer types, 'f' for float types Check numpy.dtype.kind for possible options. An additional option is 'numeric' for either integer or float. Raises ------ AssertionError If assertions has failed
11,525
def cluster(list_of_texts, num_clusters=3):
    pipeline = Pipeline([
        ("vect", CountVectorizer()),
        ("tfidf", TfidfTransformer()),
        ("clust", KMeans(n_clusters=num_clusters))
    ])
    try:
        clusters = pipeline.fit_predict(list_of_texts)
    except ValueError:
        clusters = list(range(len(list_of_texts)))
    return clusters
Cluster a list of texts into a predefined number of clusters. :param list_of_texts: a list of untokenized texts :param num_clusters: the predefined number of clusters :return: a list with the cluster id for each text, e.g. [0,1,0,0,2,2,1]
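A hedged usage sketch for the clustering helper above (assumes scikit-learn is installed and the function is importable; the texts are invented).

texts = [
    "the cat sat on the mat",
    "cats purr when they are happy",
    "dogs are loyal animals",
    "the dog chased the ball",
]
labels = cluster(texts, num_clusters=2)
print(labels)  # one cluster id per text, e.g. [0 0 1 1]; exact ids may vary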
11,526
def matches_query(self, key, query): dumped = query.dump() dumped[] = query._query_class._class_name self._add_condition(key, , dumped) return self
Add a query condition that restricts the value of the given field on matched objects to equal the results returned by another query. :param key: the field name used in the condition :param query: the query object :type query: Query :rtype: Query
11,527
def __extract_modules(self, loader, name, is_pkg): mod = loader.find_module(name).load_module(name) if hasattr(mod, ): module_router = ModuleRouter(mod, ignore_names=self.__serialize_module_paths() ).register_route(app=self.application, name=name) self.__routers.extend(module_router.routers) self.__modules.append(mod) else: pass
If the module is found, load it and save all attributes found in the module.
11,528
def get_cached_manylinux_wheel(self, package_name, package_version, disable_progress=False): cached_wheels_dir = os.path.join(tempfile.gettempdir(), ) if not os.path.isdir(cached_wheels_dir): os.makedirs(cached_wheels_dir) wheel_file = .format(package_name, package_version, self.manylinux_wheel_file_suffix) wheel_path = os.path.join(cached_wheels_dir, wheel_file) if not os.path.exists(wheel_path) or not zipfile.is_zipfile(wheel_path): wheel_url = self.get_manylinux_wheel_url(package_name, package_version) if not wheel_url: return None print(" - {}=={}: Downloading".format(package_name, package_version)) with open(wheel_path, ) as f: self.download_url_with_progress(wheel_url, f, disable_progress) if not zipfile.is_zipfile(wheel_path): return None else: print(" - {}=={}: Using locally cached manylinux wheel".format(package_name, package_version)) return wheel_path
Gets the locally stored version of a manylinux wheel. If one does not exist, the function downloads it.
11,529
def get_pose_error(target_pose, current_pose):
    error = np.zeros(6)
    target_pos = target_pose[:3, 3]
    current_pos = current_pose[:3, 3]
    pos_err = target_pos - current_pos
    r1 = current_pose[:3, 0]
    r2 = current_pose[:3, 1]
    r3 = current_pose[:3, 2]
    r1d = target_pose[:3, 0]
    r2d = target_pose[:3, 1]
    r3d = target_pose[:3, 2]
    rot_err = 0.5 * (np.cross(r1, r1d) + np.cross(r2, r2d) + np.cross(r3, r3d))
    error[:3] = pos_err
    error[3:] = rot_err
    return error
Computes the error corresponding to target pose - current pose as a 6-dim vector. The first 3 components correspond to translational error while the last 3 components correspond to the rotational error. Args: target_pose: a 4x4 homogenous matrix for the target pose current_pose: a 4x4 homogenous matrix for the current pose Returns: A 6-dim numpy array for the pose error.
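A small worked example: identical rotations give zero rotational error, so a pure translation of the target shows up only in the first three components.

import numpy as np

current = np.eye(4)
target = np.eye(4)
target[:3, 3] = [0.1, 0.0, -0.2]   # translate the target frame by (0.1, 0, -0.2)

err = get_pose_error(target, current)
print(err)  # [ 0.1  0.  -0.2  0.   0.   0. ]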
11,530
def scan(self, restrict): while True: best_pat = None best_pat_len = 0 for p, regexp in self.patterns: if best_pat is None: msg = "Bad Token" if restrict: msg = "Trying to find one of " + ", ".join(restrict) raise SyntaxError(self.pos, msg) if not self.tokens or token != self.tokens[-1]: self.tokens.append(token) self.restrictions.append(restrict) return 1 break return 0
Should scan another token and add it to the list, self.tokens, and add the restriction to self.restrictions
11,531
def delete(cls, bucket_id):
    bucket = cls.get(bucket_id)
    if not bucket or bucket.deleted:
        return False
    bucket.deleted = True
    return True
Delete a bucket. Does not actually delete the Bucket, just marks it as deleted.
11,532
def default_package(self): packages = [pk for pk in self.packages() if pk.get() == ] if packages: return packages[0] else: return None
:: GET /:login/packages :Returns: the default package for this datacenter :rtype: :py:class:`dict` or ``None`` Requests all the packages in this datacenter, filters for the default, and returns the corresponding dict, if a default has been defined.
11,533
def count_above_mean(x):
    m = np.mean(x)
    return np.where(x > m)[0].size
Returns the number of values in x that are higher than the mean of x :param x: the time series to calculate the feature of :type x: numpy.ndarray :return: the value of this feature :return type: float
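A one-line worked example for the feature above.

import numpy as np

x = np.array([1, 1, 2, 10])
# mean(x) == 3.5 and only the value 10 exceeds it
print(count_above_mean(x))  # 1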
11,534
def _set_config_path(self):
    self._path = os.getenv("CLOEEPY_CONFIG_PATH")
    if self._path is None:
        msg = "CLOEEPY_CONFIG_PATH is not set. Exiting..."
        sys.exit(msg)
Reads config path from environment variable CLOEEPY_CONFIG_PATH and sets as instance attr
11,535
def bbin(obj: Union[str, Element]) -> str: return obj.name if isinstance(obj, Element ) else f if obj in builtin_names else obj
Boldify built-in types @param obj: object name or id @return:
11,536
def add_menu(self, menu):
    self._menus.append(menu)
    self._menus = sorted(list(set(self._menus)), key=lambda x: x.title())
    for action in menu.actions():
        action.setShortcutContext(QtCore.Qt.WidgetShortcut)
    self.addActions(menu.actions())
Adds a sub-menu to the editor context menu. Menus are put at the bottom of the context menu. .. note:: to add a menu in the middle of the context menu, you can always add its menuAction(). :param menu: menu to add
11,537
def prepare(args): p = OptionParser(prepare.__doc__ + FastqNamings) p.add_option("-K", default=51, type="int", help="K-mer size") p.set_cpus(cpus=32) opts, args = p.parse_args(args) if len(args) < 2: sys.exit(not p.print_help()) genomesize = float(args[0]) / 1000 fnames = args[1:] for x in fnames: assert op.exists(x), "File `{0}` not found.".format(x) s = comment_banner("Meraculous params file") + "\n" s += comment_banner("Basic parameters") + "\n" s += " s += " libs = get_libs(fnames) lib_seqs = [] rank = 0 for lib, fs in libs: size = lib.size if size == 0: continue rank += 1 library_name = lib.library_name name = library_name.replace("-", "") wildcard = "{0}*.1.*,{0}*.2.*".format(library_name) rl = max(readlen([x]) for x in fs) lib_seq = lib.get_lib_seq(wildcard, name, rl, rank) lib_seqs.append(lib_seq) s += "\n" + "\n".join(load_csv(None, lib_seqs, sep=" ")) + "\n" params = [("genome_size", genomesize), ("is_diploid", 0), ("mer_size", opts.K), ("num_prefix_blocks", 1), ("no_read_validation", 0), ("local_num_procs", opts.cpus)] s += "\n" + "\n".join(load_csv(None, params, sep=" ")) + "\n" cfgfile = "meraculous.config" write_file(cfgfile, s, tee=True) s = "~/export/meraculous/bin/run_meraculous.sh -c {0}"\ .format(cfgfile) runsh = "run.sh" write_file(runsh, s)
%prog prepare genomesize *.fastq Prepare MERACULOUS configuration file. Genome size should be entered in Mb.
11,538
def remove(self, module=True, force=False, configuration=True, dry_run=False): if not (module or configuration): raise ValueError("Need to specify to delete at least the module, or the configuration") nc = 0 for csm in self.children(): nc += 1 csm.remove(module, force, configuration, dry_run) del(csm) if configuration and not dry_run and nc > 0: self.module().index.commit("Removed at least one of child-modules of " % self.name) if module and self.module_exists(): mod = self.module() git_dir = mod.git_dir if force: mp = self.abspath method = None if osp.islink(mp): method = os.remove elif osp.isdir(mp): method = rmtree elif osp.exists(mp): raise AssertionError("Cannot forcibly delete repository as it was neither a link, nor a directory") if not dry_run: assert method method(mp) else: if mod.is_dirty(index=True, working_tree=True, untracked_files=True): raise InvalidGitRepositoryError( "Cannot delete module at %s with any modifications, unless force is specified" % mod.working_tree_dir) if len(rrefs): del(rref) del(rrefs) del(remote) if not dry_run: self._clear_cache() wtd = mod.working_tree_dir del(mod) import gc gc.collect() try: rmtree(wtd) except Exception as ex: if HIDE_WINDOWS_KNOWN_ERRORS: raise SkipTest("FIXME: fails with: PermissionError\n %s", ex) else: raise if not dry_run and osp.isdir(git_dir): self._clear_cache() try: rmtree(git_dir) except Exception as ex: if HIDE_WINDOWS_KNOWN_ERRORS: raise SkipTest("FIXME: fails with: PermissionError\n %s", ex) else: raise if not dry_run: self._clear_cache() if configuration and not dry_run: parent_index = self.repo.index try: del(parent_index.entries[parent_index.entry_key(self.path, 0)]) except KeyError: pass parent_index.write() with self.repo.config_writer() as writer: writer.remove_section(sm_section(self.name)) with self.config_writer() as writer: writer.remove_section() return self
Remove this submodule from the repository. This will remove our entry from the .gitmodules file and the entry in the .git/config file. :param module: If True, the module checkout we point to will be deleted as well. If the module is currently on a commit which is not part of any branch in the remote, if the currently checked out branch is ahead of its tracking branch, or if the working tree has modifications or untracked files, the removal will fail unless force is specified. In case the removal of the repository fails for these reasons, the submodule status will not have been altered. If this submodule has child-modules of its own, these will be deleted prior to touching the own module. :param force: Enforces the deletion of the module even though it contains modifications. This basically enforces a brute-force file system based deletion. :param configuration: if True, the submodule is deleted from the configuration, otherwise it isn't. Although this should be enabled most of the time, this flag enables you to safely delete the repository of your submodule. :param dry_run: if True, we will not actually do anything, but throw the errors we would usually throw :return: self :note: doesn't work in bare repositories :note: doesn't work atomically, as failure to remove any part of the submodule will leave an inconsistent state :raise InvalidGitRepositoryError: thrown if the repository cannot be deleted :raise OSError: if directories or files could not be removed
11,539
def _isdst(dt):
    if type(dt) == datetime.date:
        dt = datetime.datetime.combine(dt, datetime.datetime.min.time())
    dtc = dt.replace(year=datetime.datetime.now().year)
    if time.localtime(dtc.timestamp()).tm_isdst == 1:
        return True
    return False
Check if date is in dst.
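A usage sketch; the result depends on the local timezone of the machine running it, since the check goes through time.localtime().

import datetime

# True in most northern-hemisphere zones that observe DST, False otherwise.
print(_isdst(datetime.date(2020, 7, 1)))
# A mid-January timestamp is typically outside DST in those same zones.
print(_isdst(datetime.datetime(2020, 1, 15, 12, 0)))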
11,540
def _connect(self): def tryConnect(): self.connector = d = maybeDeferred(connect) d.addCallback(cbConnect) d.addErrback(ebConnect) def connect(): endpoint = self._endpointFactory(self._reactor, self.host, self.port) log.debug(, self, endpoint) return endpoint.connect(self) def cbConnect(proto): log.debug(, self, proto.transport.getPeer()) self._failures = 0 self.connector = None self.proto = proto if self._dDown: proto.transport.loseConnection() else: self._sendQueued() def ebConnect(fail): if self._dDown: log.debug(, self, fail) return fail self._failures += 1 delay = self._retryPolicy(self._failures) log.debug(, self, self._failures, fail.value, delay) self.connector = d = deferLater(self._reactor, delay, lambda: None) d.addCallback(cbDelayed) def cbDelayed(result): tryConnect() self._failures = 0 tryConnect()
Connect to the Kafka Broker This routine will repeatedly try to connect to the broker (with backoff according to the retry policy) until it succeeds.
11,541
def send(self, pkt):
    iff = pkt.route()[0]
    if iff is None:
        iff = conf.iface
    if self.assigned_interface != iff:
        try:
            fcntl.ioctl(self.outs, BIOCSETIF, struct.pack("16s16x", iff.encode()))
        except IOError:
            raise Scapy_Exception("BIOCSETIF failed on %s" % iff)
        self.assigned_interface = iff
    frame = raw(self.guessed_cls() / pkt)
    pkt.sent_time = time.time()
    L2bpfSocket.send(self, frame)
Send a packet
11,542
def _claim(cls, cdata: Any) -> "Tileset":
    self = object.__new__(cls)
    if cdata == ffi.NULL:
        raise RuntimeError("Tileset initialized with nullptr.")
    self._tileset_p = ffi.gc(cdata, lib.TCOD_tileset_delete)
    return self
Return a new Tileset that owns the provided TCOD_Tileset* object.
11,543
def _parse_triggered_hits(self, file_obj): for _ in range(self.n_triggered_hits): dom_id, pmt_id = unpack(, file_obj.read(5)) tdc_time = unpack(, file_obj.read(4))[0] tot = unpack(, file_obj.read(1))[0] trigger_mask = unpack(, file_obj.read(8)) self.triggered_hits.append( (dom_id, pmt_id, tdc_time, tot, trigger_mask) )
Parse and store triggered hits.
11,544
def __fetch(self, url, payload):
    r = requests.get(url, params=payload, auth=self.auth, verify=self.verify)
    try:
        r.raise_for_status()
    except requests.exceptions.HTTPError as e:
        raise e
    return r
Fetch requests from groupsio API
11,545
def getMaxStmIdForStm(stm):
    maxId = 0
    if isinstance(stm, Assignment):
        return stm._instId
    elif isinstance(stm, WaitStm):
        return maxId
    else:
        for _stm in stm._iter_stms():
            maxId = max(maxId, getMaxStmIdForStm(_stm))
        return maxId
Get the maximum _instId from all assignments in the statement
11,546
def connect_paragraph(self, paragraph, paragraphs):
    if paragraph.depth > 0:
        n = range(len(paragraphs))
        n.reverse()
        for i in n:
            if paragraphs[i].depth == paragraph.depth - 1:
                paragraph.parent = paragraphs[i]
                paragraphs[i].children.append(paragraph)
                break
    return paragraph
Create parent/child links to other paragraphs. The paragraphs parameter is a list of all the paragraphs parsed up till now. The parent is the previous paragraph whose depth is less. The parent's children include this paragraph. Called from the parse_paragraphs() method.
11,547
def _fetch_cached_output(self, items, result): if not appsettings.FLUENT_CONTENTS_CACHE_OUTPUT or not self.use_cached_output: result.add_remaining_list(items) return for contentitem in items: result.add_ordering(contentitem) output = None try: plugin = contentitem.plugin except PluginNotFound as ex: result.store_exception(contentitem, ex) logger.debug("- item continue if self.can_use_cached_output(contentitem): result.add_plugin_timeout(plugin) output = plugin.get_cached_output(result.placeholder_name, contentitem) if output is not None and not isinstance(output, ContentItemOutput): output = None logger.debug("Flushed cached output of {0} plugin.type_name, contentitem.pk, get_placeholder_name(contentitem.placeholder) )) if output and settings.DEBUG: cachekey = get_rendering_cache_key(result.placeholder_name, contentitem) if is_template_updated(self.request, contentitem, cachekey): output = None if output: result.store_output(contentitem, output) else: result.add_remaining(contentitem)
First try to fetch all items from the cache. The items are 'non-polymorphic', so only point to their base class. If these are found, there is no need to query the derived data from the database.
11,548
async def get_config(self):
    config_facade = client.ModelConfigFacade.from_connection(
        self.connection()
    )
    result = await config_facade.ModelGet()
    config = result.config
    for key, value in config.items():
        config[key] = ConfigValue.from_json(value)
    return config
Return the configuration settings for this model. :returns: A ``dict`` mapping keys to `ConfigValue` instances, which have `source` and `value` attributes.
11,549
def _get_reciprocal(self): orign = self.portal[] destn = self.portal[] if ( destn in self.board.arrow and orign in self.board.arrow[destn] ): return self.board.arrow[destn][orign] else: return None
Return the :class:`Arrow` that connects my origin and destination in the opposite direction, if it exists.
11,550
def smt_dataset(directory=, train=False, dev=False, test=False, train_filename=, dev_filename=, test_filename=, extracted_name=, check_files=[], url=, fine_grained=False, subtrees=False): download_file_maybe_extract(url=url, directory=directory, check_files=check_files) ret = [] splits = [(train, train_filename), (dev, dev_filename), (test, test_filename)] splits = [f for (requested, f) in splits if requested] for filename in splits: full_path = os.path.join(directory, extracted_name, filename) examples = [] with io.open(full_path, encoding=) as f: for line in f: line = line.strip() if subtrees: examples.extend(parse_tree(line, subtrees=subtrees, fine_grained=fine_grained)) else: examples.append(parse_tree(line, subtrees=subtrees, fine_grained=fine_grained)) ret.append(Dataset(examples)) if len(ret) == 1: return ret[0] else: return tuple(ret)
Load the Stanford Sentiment Treebank dataset. Semantic word spaces have been very useful but cannot express the meaning of longer phrases in a principled way. Further progress towards understanding compositionality in tasks such as sentiment detection requires richer supervised training and evaluation resources and more powerful models of composition. To remedy this, we introduce a Sentiment Treebank. It includes fine grained sentiment labels for 215,154 phrases in the parse trees of 11,855 sentences and presents new challenges for sentiment compositionality. **Reference**: https://nlp.stanford.edu/sentiment/index.html **Citation:** Richard Socher, Alex Perelygin, Jean Y. Wu, Jason Chuang, Christopher D. Manning, Andrew Y. Ng and Christopher Potts. Recursive Deep Models for Semantic Compositionality Over a Sentiment Treebank Args: directory (str, optional): Directory to cache the dataset. train (bool, optional): If to load the training split of the dataset. dev (bool, optional): If to load the development split of the dataset. test (bool, optional): If to load the test split of the dataset. train_filename (str, optional): The filename of the training split. dev_filename (str, optional): The filename of the development split. test_filename (str, optional): The filename of the test split. extracted_name (str, optional): Name of the extracted dataset directory. check_files (str, optional): Check if these files exist, then this download was successful. url (str, optional): URL of the dataset `tar.gz` file. subtrees (bool, optional): Whether to include sentiment-tagged subphrases in addition to complete examples. fine_grained (bool, optional): Whether to use 5-class instead of 3-class labeling. Returns: :class:`tuple` of :class:`torchnlp.datasets.Dataset`: Tuple with the training tokens, dev tokens and test tokens in order if their respective boolean argument is true. Example: >>> from torchnlp.datasets import smt_dataset # doctest: +SKIP >>> train = smt_dataset(train=True) # doctest: +SKIP >>> train[5] # doctest: +SKIP { 'text': "Whether or not you 're enlightened by any of Derrida 's lectures on ...", 'label': 'positive' }
11,551
def fake_keypress(self, key, repeat=1):
    for _ in range(repeat):
        self.mediator.fake_keypress(key)
Fake a keypress Usage: C{keyboard.fake_keypress(key, repeat=1)} Uses XTest to 'fake' a keypress. This is useful to send keypresses to some applications which won't respond to keyboard.send_key() @param key: they key to be sent (e.g. "s" or "<enter>") @param repeat: number of times to repeat the key event
11,552
def format_python2_stmts(python_stmts, show_tokens=False, showast=False, showgrammar=False, compile_mode=): parser_debug = {: False, : False, : showgrammar, : True, : True, : True } parsed = parse_python2(python_stmts, show_tokens=show_tokens, parser_debug=parser_debug) assert parsed == , formatter = Python2Formatter() if showast: print(parsed) python2_formatted_str = formatter.traverse(parsed) return python2_formatted_str
Formats Python 2 statements.
11,553
def from_resolver(cls, spec_resolver):
    spec_validators = cls._get_spec_validators(spec_resolver)
    return validators.extend(Draft4Validator, spec_validators)
Creates a customized Draft4ExtendedValidator. :param spec_resolver: resolver for the spec :type resolver: :class:`jsonschema.RefResolver`
11,554
def matrix2map(data_matrix, map_shape): r map_shape = np.array(map_shape) image_shape = np.sqrt(data_matrix.shape[0]).astype(int) layout = np.array(map_shape // np.repeat(image_shape, 2), dtype=) data_map = np.zeros(map_shape) temp = data_matrix.reshape(image_shape, image_shape, data_matrix.shape[1]) for i in range(data_matrix.shape[1]): lower = (image_shape * (i // layout[1]), image_shape * (i % layout[1])) upper = (image_shape * (i // layout[1] + 1), image_shape * (i % layout[1] + 1)) data_map[lower[0]:upper[0], lower[1]:upper[1]] = temp[:, :, i] return data_map.astype(int)
r"""Matrix to Map This method transforms a 2D matrix to a 2D map Parameters ---------- data_matrix : np.ndarray Input data matrix, 2D array map_shape : tuple 2D shape of the output map Returns ------- np.ndarray 2D map Raises ------ ValueError For invalid layout Examples -------- >>> from modopt.base.transform import matrix2map >>> a = np.array([[0, 4, 8, 12], [1, 5, 9, 13], [2, 6, 10, 14], [3, 7, 11, 15]]) >>> matrix2map(a, (2, 2)) array([[ 0, 1, 4, 5], [ 2, 3, 6, 7], [ 8, 9, 12, 13], [10, 11, 14, 15]])
11,555
def derationalize_denom(expr):
    r_pos = -1
    p_pos = -1
    numerator = S.Zero
    denom_sq = S.One
    post_factors = []
    if isinstance(expr, Mul):
        for pos, factor in enumerate(expr.args):
            if isinstance(factor, Rational) and r_pos < 0:
                r_pos = pos
                numerator, denom_sq = factor.p, factor.q
            elif isinstance(factor, Pow) and r_pos >= 0:
                if factor == sqrt(denom_sq):
                    p_pos = pos
                else:
                    post_factors.append(factor)
            else:
                post_factors.append(factor)
        if r_pos >= 0 and p_pos >= 0:
            return numerator, denom_sq, Mul(*post_factors)
        else:
            raise ValueError("Cannot derationalize")
    else:
        raise ValueError("expr is not a Mul instance")
Try to de-rationalize the denominator of the given expression. The purpose is to allow to reconstruct e.g. ``1/sqrt(2)`` from ``sqrt(2)/2``. Specifically, this matches `expr` against the following pattern:: Mul(..., Rational(n, d), Pow(d, Rational(1, 2)), ...) and returns a tuple ``(numerator, denom_sq, post_factor)``, where ``numerator`` and ``denom_sq`` are ``n`` and ``d`` in the above pattern (of type `int`), respectively, and ``post_factor`` is the product of the remaining factors (``...`` in `expr`). The result will fulfill the following identity:: (numerator / sqrt(denom_sq)) * post_factor == expr If `expr` does not follow the appropriate pattern, a :exc:`ValueError` is raised.
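A worked example matching the docstring, assuming the function above is importable: `sqrt(2)/2` is recognized as `1/sqrt(2)`, and extra factors come back as the post-factor.

from sympy import sqrt, symbols

x = symbols("x")
print(derationalize_denom(sqrt(2) / 2))      # (1, 2, 1)
print(derationalize_denom(x * sqrt(2) / 2))  # (1, 2, x)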
11,556
def kill(self, signal=None):
    return self.client.api.kill(self.id, signal=signal)
Kill or send a signal to the container. Args: signal (str or int): The signal to send. Defaults to ``SIGKILL`` Raises: :py:class:`docker.errors.APIError` If the server returns an error.
11,557
def updateRules(self):
    terms = sorted(self._rules.keys())
    for child in self.lineWidgets():
        child.setTerms(terms)
Updates the query line items to match the latest rule options.
11,558
def main(argv=None): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument(, type=argparse.FileType(), help= ) parser.add_argument(, , action=, help=) parser.add_argument(, , type=argparse.FileType(), help=) parser.add_argument(, , type=argparse.FileType(), help=) args = parser.parse_args(argv) if args.input and args.output == sys.stdout: raise ValueError() from cnxepub.collation import reconstitute binder = reconstitute(args.collated_html) if args.dump_tree: print(pformat(cnxepub.model_to_tree(binder)), file=sys.stdout) if args.output: cnxepub.adapters.make_epub(binder, args.output) if args.input: args.output.seek(0) zout = ZipFile(args.output, , ZIP_DEFLATED) zin = ZipFile(args.input, ) for res in zin.namelist(): if res.startswith(): zres = zin.open(res) zi = zin.getinfo(res) zout.writestr(zi, zres.read(), ZIP_DEFLATED) zout.close() return 0
Parse passed in cooked single HTML.
11,559
def temperature_effectiveness_TEMA_H(R1, NTU1, Ntp, optimal=True): r if Ntp == 1: A = 1./(1 + R1/2.)*(1. - exp(-NTU1*(1. + R1/2.)/2.)) D = exp(-NTU1*(1. - R1/2.)/2.) if R1 != 2: B = (1. - D)/(1. - R1*D/2.) else: B = NTU1/(2. + NTU1) E = (A + B - A*B*R1/2.)/2. P1 = E*(1. + (1. - B*R1/2.)*(1. - A*R1/2. + A*B*R1)) - A*B*(1. - B*R1/2.) elif Ntp == 2 and optimal: alpha = NTU1*(4. + R1)/8. beta = NTU1*(4. - R1)/8. D = (1. - exp(-alpha))/(4./R1 + 1) if R1 != 4: E = (1. - exp(-beta))/(4./R1 - 1.) H = (1. - exp(-2.*beta))/(4./R1 - 1.) else: E = NTU1/2. H = NTU1 G = (1-D)**2*(D**2 + E**2) + D**2*(1+E)**2 B = (1. + H)*(1. + E)**2 P1 = 1./R1*(1. - (1. - D)**4/(B - 4.*G/R1)) elif Ntp == 2 and not optimal: R1_orig = R1 NTU1 = NTU1*R1_orig R1 = 1./R1_orig beta = NTU1*(4.*R1 + 1)/8. alpha = NTU1/8.*(4.*R1 - 1.) H = (exp(-2.*beta) - 1.)/(4.*R1 + 1.) E = (exp(-beta) - 1.)/(4.*R1 + 1.) B = (1. + H)*(1. + E)**2 if R1 != 0.25: D = (1. - exp(-alpha))/(1. - 4.*R1) G = (1. - D)**2*(D**2 + E**2) + D**2*(1. + E)**2 P1 = (1. - (B + 4.*G*R1)/(1. - D)**4) else: D = -NTU1/8. G = (1. - D)**2*(D**2 + E**2) + D**2*(1. + E)**2 P1 = (1. - (B + 4.*G*R1)/(1. - D)**4) P1 = P1/R1_orig else: raise Exception() return P1
r'''Returns temperature effectiveness `P1` of a TEMA H type heat exchanger with a specified heat capacity ratio, number of transfer units `NTU1`, and of number of tube passes `Ntp`. For the two tube pass case, there are two possible orientations, one inefficient and one efficient controlled by the `optimal` option. The supported cases are as follows: * One tube pass (tube fluid split into two streams individually mixed, shell fluid mixed) * Two tube passes (shell fluid mixed, tube pass mixed between passes) * Two tube passes (shell fluid mixed, tube pass mixed between passes, inlet tube side next to inlet shell-side) 1-1 TEMA H, tube fluid split into two streams individually mixed, shell fluid mixed: .. math:: P_1 = E[1 + (1 - BR_1/2)(1 - A R_1/2 + ABR_1)] - AB(1 - BR_1/2) A = \frac{1}{1 + R_1/2}\{1 - \exp[-NTU_1(1 + R_1/2)/2]\} B = \frac{1-D}{1-R_1 D/2} D = \exp[-NTU_1(1-R_1/2)/2] E = (A + B - ABR_1/2)/2 1-2 TEMA H, shell and tube fluids mixed in each pass at the cross section: .. math:: P_1 = \frac{1}{R_1}\left[1 - \frac{(1-D)^4}{B - 4G/R_1}\right] B = (1+H)(1+E)^2 G = (1-D)^2(D^2 + E^2) + D^2(1 + E)^2 H = [1 - \exp(-2\beta)]/(4/R_1 -1) E = [1 - \exp(-\beta)]/(4/R_1 - 1) D = [1 - \exp(-\alpha)]/(4/R_1 + 1) \alpha = NTU_1(4 + R_1)/8 \beta = NTU_1(4-R_1)/8 1-2 TEMA H, shell and tube fluids mixed in each pass at the cross section but with the inlet tube stream coming in next to the shell fluid inlet in an inefficient way (this is only shown in [2]_, and the stream 1/2 convention in it is different but converted here; P1 is still returned): .. math:: P_2 = \left[1 - \frac{B + 4GR_2}{(1-D)^4}\right] B = (1 + H)(1 + E)^2 G = (1-D)^2(D^2 + E^2) + D^2(1 + E)^2 D = \frac{1 - \exp(-\alpha)}{1 - 4R_2} E = \frac{\exp(-\beta) - 1}{4R_2 +1} H = \frac{\exp(-2\beta) - 1}{4R_2 +1} \alpha = \frac{NTU_2}{8}(4R_2 -1) \beta = \frac{NTU_2}{8}(4R_2 +1) Parameters ---------- R1 : float Heat capacity ratio of the heat exchanger in the P-NTU method, calculated with respect to stream 1 (shell side = 1, tube side = 2) [-] NTU1 : float Thermal number of transfer units of the heat exchanger in the P-NTU method, calculated with respect to stream 1 (shell side = 1, tube side = 2) [-] Ntp : int Number of tube passes, 1, or 2, [-] optimal : bool, optional Whether or not the arrangement is configured to give more of a countercurrent and efficient (True) case or an inefficient parallel case, [-] Returns ------- P1 : float Thermal effectiveness of the heat exchanger in the P-NTU method, calculated with respect to stream 1 [-] Notes ----- For numbers of tube passes greater than 1 or 2, an exception is raised. The convention for the formulas in [1]_ and [3]_ are with the shell side as side 1, and the tube side as side 2. [2]_ has formulas with the opposite convention. Examples -------- >>> temperature_effectiveness_TEMA_H(R1=1/3., NTU1=1., Ntp=1) 0.5730728284905833 References ---------- .. [1] Shah, Ramesh K., and Dusan P. Sekulic. Fundamentals of Heat Exchanger Design. 1st edition. Hoboken, NJ: Wiley, 2002. .. [2] Thulukkanam, Kuppan. Heat Exchanger Design Handbook, Second Edition. CRC Press, 2013. .. [3] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat Transfer, 3E. New York: McGraw-Hill, 1998.
11,560
def YiqToRgb(y, i, q):
    r = y + (i * 0.9562) + (q * 0.6210)
    g = y - (i * 0.2717) - (q * 0.6485)
    b = y - (i * 1.1053) + (q * 1.7020)
    return (r, g, b)
Convert the color from YIQ coordinates to RGB. Parameters: :y: The Y component value [0...1] :i: The I component value [0...1] :q: The Q component value [0...1] Returns: The color as an (r, g, b) tuple in the range: r[0...1], g[0...1], b[0...1] >>> '(%g, %g, %g)' % Color.YiqToRgb(0.592263, 0.458874, -0.0499818) '(1, 0.5, 5.442e-07)'
11,561
def _iterdump(self, file_name, headers=None): if headers is None: headers = ["Discharge_Capacity", "Charge_Capacity"] step_txt = self.headers_normal[] point_txt = self.headers_normal[] cycle_txt = self.headers_normal[] self.logger.debug("iterating through file: %s" % file_name) if not os.path.isfile(file_name): print("Missing file_\n %s" % file_name) filesize = os.path.getsize(file_name) hfilesize = humanize_bytes(filesize) txt = "Filesize: %i (%s)" % (filesize, hfilesize) self.logger.info(txt) table_name_global = TABLE_NAMES["global"] table_name_stats = TABLE_NAMES["statistic"] table_name_normal = TABLE_NAMES["normal"] temp_dir = tempfile.gettempdir() temp_filename = os.path.join(temp_dir, os.path.basename(file_name)) shutil.copy2(file_name, temp_dir) constr = self.__get_res_connector(temp_filename) if use_ado: conn = dbloader.connect(constr) else: conn = dbloader.connect(constr, autocommit=True) self.logger.debug("tmp file: %s" % temp_filename) self.logger.debug("constr str: %s" % constr) self.logger.debug("reading global data table") sql = "select * from %s" % table_name_global global_data_df = pd.read_sql_query(sql, conn) self.logger.debug("sql statement: %s" % sql) tests = global_data_df[self.headers_normal[]] number_of_sets = len(tests) self.logger.debug("number of datasets: %i" % number_of_sets) self.logger.debug("only selecting first test") test_no = 0 self.logger.debug("setting data for test number %i" % test_no) loaded_from = file_name start_datetime = global_data_df[self.headers_global[]][test_no] test_ID = int(global_data_df[self.headers_normal[]][test_no]) test_name = global_data_df[self.headers_global[]][test_no] self.logger.debug("reading raw-data") columns = ["Data_Point", "Step_Index", "Cycle_Index"] columns.extend(headers) columns_txt = ", ".join(["%s"] * len(columns)) % tuple(columns) sql_1 = "select %s " % columns_txt sql_2 = "from %s " % table_name_normal sql_3 = "where %s=%s " % (self.headers_normal[], test_ID) sql_5 = "order by %s" % self.headers_normal[] import time info_list = [] info_header = ["cycle", "row_count", "start_point", "end_point"] info_header.extend(headers) self.logger.info(" ".join(info_header)) self.logger.info("-------------------------------------------------") for cycle_number in range(1, 2000): t1 = time.time() self.logger.debug("picking cycle %i" % cycle_number) sql_4 = "AND %s=%i " % (cycle_txt, cycle_number) sql = sql_1 + sql_2 + sql_3 + sql_4 + sql_5 self.logger.debug("sql statement: %s" % sql) normal_df = pd.read_sql_query(sql, conn) t2 = time.time() dt = t2 - t1 self.logger.debug("time: %f" % dt) if normal_df.empty: self.logger.debug("reached the end") break row_count, _ = normal_df.shape start_point = normal_df[point_txt].min() end_point = normal_df[point_txt].max() last = normal_df.iloc[-1, :] step_list = [cycle_number, row_count, start_point, end_point] step_list.extend([last[x] for x in headers]) info_list.append(step_list) self._clean_up_loadres(None, conn, temp_filename) info_dict = pd.DataFrame(info_list, columns=info_header) return info_dict
Function for dumping values from a file. Should only be used by developers. Args: file_name: name of the file headers: list of headers to pick default: ["Discharge_Capacity", "Charge_Capacity"] Returns: pandas.DataFrame
11,562
def parse(self, scope): assert (len(self.tokens) == 3) expr = self.process(self.tokens, scope) A, O, B = [ e[0] if isinstance(e, tuple) else e for e in expr if str(e).strip() ] try: a, ua = utility.analyze_number(A, ) b, ub = utility.analyze_number(B, ) except SyntaxError: return .join([str(A), str(O), str(B)]) if (a is False or b is False): return .join([str(A), str(O), str(B)]) if ua == or ub == : return color.Color().process((A, O, B)) if a == 0 and O == : return .join([str(A), str(O), str(B), ]) out = self.operate(a, b, O) if isinstance(out, bool): return out return self.with_units(out, ua, ub)
Parse Node args: scope (Scope): Scope object raises: SyntaxError returns: str
11,563
def cli(obj, purge): client = obj[] if obj[] == : r = client.http.get() click.echo(json.dumps(r[], sort_keys=True, indent=4, ensure_ascii=False)) else: timezone = obj[] headers = { : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : , : } blackouts = client.get_blackouts() click.echo(tabulate([b.tabular(timezone) for b in blackouts], headers=headers, tablefmt=obj[])) expired = [b for b in blackouts if b.status == ] if purge: with click.progressbar(expired, label=.format(len(expired))) as bar: for b in bar: client.delete_blackout(b.id)
List alert suppressions.
11,564
def get_swagger_view(title=None, url=None, patterns=None, urlconf=None): class SwaggerSchemaView(APIView): _ignore_model_permissions = True exclude_from_schema = True permission_classes = [AllowAny] renderer_classes = [ CoreJSONRenderer, renderers.OpenAPIRenderer, renderers.SwaggerUIRenderer ] def get(self, request): generator = SchemaGenerator( title=title, url=url, patterns=patterns, urlconf=urlconf ) schema = generator.get_schema(request=request) if not schema: raise exceptions.ValidationError( ) return Response(schema) return SwaggerSchemaView.as_view()
Returns schema view which renders Swagger/OpenAPI.
11,565
def get_base_route(cls): base_route = cls.__name__.lower() if cls.base_route is not None: base_route = cls.base_route base_rule = parse_rule(base_route) cls.base_args = [r[2] for r in base_rule] return base_route.strip("/")
Returns the route base to use for the current class.
11,566
def _trace_dispatch(frame, event, arg):
    code = frame.f_code
    key = id(code)
    n = sampling_counters.get(key, 0)
    if n is None:
        # Sampling for this code object has already been switched off.
        return
    # NOTE: the literal event names were stripped from this dump; 'call' and
    # 'return' are the standard events delivered to sys.setprofile() hooks
    # and are restored here as an assumption.
    if event == 'call':
        sampling_counters[key] = n + 1
    if n not in sampling_sequence:
        if n > LAST_SAMPLE:
            # Past the last sample point: stop sampling this function.
            sampling_counters[key] = None
        return
    # NOTE: 'filename' and 'func_name' were left undefined in this dump;
    # deriving them from the code object is an assumption (the original may
    # have filtered the filename before deciding whether to track the call).
    filename = code.co_filename
    func_name = code.co_name
    if filename:
        function_key = FunctionKey(filename, code.co_firstlineno, func_name)
        if event == 'call':
            arg_info = inspect.getargvalues(frame)
            resolved_types = prep_args(arg_info)
            _task_queue.put(KeyAndTypes(function_key, resolved_types))
        elif event == 'return':
            last_opcode = code.co_code[frame.f_lasti]
            if last_opcode == RETURN_VALUE_OPCODE:
                if code.co_flags & CO_GENERATOR:
                    # Return from a generator: record a generic iterator type.
                    t = resolve_type(FakeIterator([]))
                else:
                    t = resolve_type(arg)
            elif last_opcode == YIELD_VALUE_OPCODE:
                t = resolve_type(FakeIterator([arg]))
            else:
                t = NoReturnType
            _task_queue.put(KeyAndReturn(function_key, t))
    else:
        # Untracked file: stop sampling this code object.
        sampling_counters[key] = None
This is the main hook passed to setprofile().
It implements the Python profiler interface.

Arguments are described in
https://docs.python.org/2/library/sys.html#sys.settrace
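A minimal, self-contained sketch of the profiler-hook protocol this function plugs into: sys.setprofile() calls the registered hook as hook(frame, event, arg), with event strings such as 'call' and 'return'. The counter below is purely illustrative and not part of the original module.

import sys
from collections import Counter

call_counts = Counter()

def tiny_profiler(frame, event, arg):
    # 'call' fires on function entry; 'return' fires on exit, with the
    # return value passed in `arg`.
    if event == 'call':
        code = frame.f_code
        call_counts[(code.co_filename, code.co_name)] += 1

def work(n):
    return sum(range(n))

sys.setprofile(tiny_profiler)
work(10)
work(20)
sys.setprofile(None)
print(call_counts.most_common(3))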
11,567
def wait(self, time): self._wait = Event() return not self._wait.wait(time)
Pauses the thread for a specified time. Returns False if interrupted by another thread and True if the time runs out normally.
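A standalone sketch of the interruptible-sleep pattern behind this method, using threading.Event directly: Event.wait(timeout) returns True when another thread sets the event and False on timeout, which the method above negates so that True means the time ran out normally. The names below are illustrative.

import threading
import time

interrupt = threading.Event()

def sleeper():
    was_interrupted = interrupt.wait(5)   # True if set() happens within 5 s
    print('interrupted' if was_interrupted else 'slept the full time')

t = threading.Thread(target=sleeper)
t.start()
time.sleep(1)
interrupt.set()                           # wake the sleeper early
t.join()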
11,568
def get_collection(self, url):
    url = self.BASE_API2 + url
    while url is not None:
        response = self.get_data(url)
        # Paged responses expose the items under 'values' (as the docstring
        # notes) and the link to the following page under 'next'.
        for value in response['values']:
            yield value
        url = response.get('next', None)
Pages through an object collection from the bitbucket API. Returns an iterator that lazily goes through all the 'values' of all the pages in the collection.
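A self-contained sketch of the same lazy pagination pattern, with a stubbed get_data standing in for the real Bitbucket HTTP call; the 'values' and 'next' keys mirror the paged response shape the docstring describes.

PAGES = {
    '/page/1': {'values': [1, 2, 3], 'next': '/page/2'},
    '/page/2': {'values': [4, 5], 'next': None},
}

def get_data(url):
    # Stand-in for the HTTP request; returns one page of the collection.
    return PAGES[url]

def get_collection(url):
    while url is not None:
        response = get_data(url)
        for value in response['values']:
            yield value
        url = response.get('next')

print(list(get_collection('/page/1')))   # [1, 2, 3, 4, 5]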
11,569
def postprocess_segments(self): for iseg, seg in enumerate(self.segs): mask = np.zeros(self._adata.shape[0], dtype=bool) mask[seg] = True self.segs[iseg] = mask self.segs = np.array(self.segs) self.segs_tips = np.array(self.segs_tips)
Convert the format of the segment class members.
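A small illustration of the conversion performed above, assuming each segment starts out as an array of observation indices and ends up as a boolean mask over all observations.

import numpy as np

n_obs = 6
segs = [np.array([0, 2]), np.array([1, 3, 4])]   # index-based segments

masks = []
for seg in segs:
    mask = np.zeros(n_obs, dtype=bool)
    mask[seg] = True                              # mark the members of this segment
    masks.append(mask)

print(np.array(masks).astype(int))
# [[1 0 1 0 0 0]
#  [0 1 0 1 1 0]]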
11,570
def solve(self, scenario, solver): clusters = set(self.clustering.busmap.values) n = len(clusters) self.stats = {: pd.DataFrame( index=sorted(clusters), columns=["decompose", "spread", "transfer"])} profile = cProfile.Profile() for i, cluster in enumerate(sorted(clusters)): print() print( % (cluster, i+1, n)) profile.enable() t = time.time() partial_network, externals = self.construct_partial_network( cluster, scenario) profile.disable() self.stats[].loc[cluster, ] = time.time() - t print(, self.stats[].loc[cluster, ]) t = time.time() profile.enable() self.solve_partial_network(cluster, partial_network, scenario, solver) profile.disable() self.stats[].loc[cluster, ] = time.time() - t print(, self.stats[].loc[cluster, ]) profile.enable() t = time.time() self.transfer_results(partial_network, externals) profile.disable() self.stats[].loc[cluster, ] = time.time() - t print(, self.stats[].loc[cluster, ]) profile.enable() t = time.time() print() fs = (mc("sum"), mc("sum")) for bt, ts in ( (, {: fs, : fs}), (, {: fs, : fs, : fs})): print("Attribute sums, {}, clustered - disaggregated:" .format(bt)) cnb = getattr(self.clustered_network, bt) onb = getattr(self.original_network, bt) print("{:>{}}: {}".format(, 4 + len(), reduce(lambda x, f: f(x), fs[:-1], cnb[]) - reduce(lambda x, f: f(x), fs[:-1], onb[]))) print("Series sums, {}, clustered - disaggregated:" .format(bt)) cnb = getattr(self.clustered_network, bt + ) onb = getattr(self.original_network, bt + ) for s in ts: print("{:>{}}: {}".format(s, 4 + len(), reduce(lambda x, f: f(x), ts[s], cnb[s]) - reduce(lambda x, f: f(x), ts[s], onb[s]))) profile.disable() self.stats[] = time.time() - t print(, self.stats[])
Decompose each cluster into separate units and try to optimize them separately :param scenario: :param solver: Solver that may be used to optimize partial networks
11,571
def stop(self): if self.is_run: self._service.shutdown() self._service.server_close()
Stop the server. Do nothing if server is already not running.
11,572
def regressOut(Y, X, return_b=False): Xd = la.pinv(X) b = Xd.dot(Y) Y_out = Y-X.dot(b) if return_b: return Y_out, b else: return Y_out
regresses out X from Y
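A quick numerical check of what the regression step does, assuming Y is samples x traits and X is samples x covariates (la is taken to be scipy.linalg here): the residual Y_out is numerically orthogonal to the columns of X.

import numpy as np
import scipy.linalg as la

rng = np.random.RandomState(0)
X = rng.randn(100, 2)                      # covariates to regress out
Y = X.dot(rng.randn(2, 3)) + 0.1 * rng.randn(100, 3)

b = la.pinv(X).dot(Y)                      # least-squares coefficients
Y_out = Y - X.dot(b)                       # residuals after removing X

print(np.abs(X.T.dot(Y_out)).max())        # close to 0: orthogonal to X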
11,573
def Transformer(source_vocab_size, target_vocab_size, mode=, num_layers=6, feature_depth=512, feedforward_depth=2048, num_heads=8, dropout=0.1, shared_embedding=True, max_len=200, return_evals=False): inject_position = layers.Serial( layers.Dropout(dropout, mode=mode), layers.PositionalEncoding(feature_depth, max_len=max_len) ) if shared_embedding: assert source_vocab_size == target_vocab_size embedding = layers.Share(layers.Embedding(feature_depth, source_vocab_size)) source_embedding_layer = layers.Serial(embedding, inject_position) target_embedding_layer = source_embedding_layer else: source_embedding = layers.Embedding(feature_depth, source_vocab_size) target_embedding = layers.Embedding(feature_depth, target_vocab_size) source_embedding_layer = layers.Serial(source_embedding, inject_position) target_embedding_layer = layers.Serial(target_embedding, inject_position) multi_attention = layers.MultiHeadedAttention( feature_depth, num_heads=num_heads, dropout=dropout, mode=mode) @layers.Lambda def Encoder(source, source_mask): encoder_layer = layers.Serial( layers.Residual(layers.LayerNorm(), layers.Branch(size=4), layers.Parallel(layers.Identity(), layers.Identity(), layers.Identity(), source_mask), multi_attention, layers.Dropout(dropout, mode=mode)), ResidualFeedForward( feature_depth, feedforward_depth, dropout, mode=mode), ) return layers.Serial( source, source_embedding_layer, layers.repeat(encoder_layer, num_layers), layers.LayerNorm(), ) @layers.Lambda def Decoder(memory, target, target_mask, memory_mask): decoder_layer = layers.Serial( layers.Residual(layers.LayerNorm(), layers.Branch(size=4), layers.Parallel(layers.Identity(), layers.Identity(), layers.Identity(), target_mask), multi_attention, layers.Dropout(dropout, mode=mode)), layers.Residual(layers.LayerNorm(), layers.Branch(size=4), layers.Parallel(layers.Identity(), memory, memory, memory_mask), multi_attention, layers.Dropout(dropout, mode=mode)), ResidualFeedForward( feature_depth, feedforward_depth, dropout, mode=mode) ) return layers.Serial( target, target_embedding_layer, layers.repeat(decoder_layer, num_layers), layers.LayerNorm(), ) @layers.Lambda def transformer(source, target, source_mask, target_mask, memory_mask): encoded_source = Encoder(source, source_mask) return Decoder(encoded_source, target, target_mask, memory_mask) @layers.Lambda def Generator(encoded_target): return layers.Serial( encoded_target, layers.Dense(target_vocab_size), layers.LogSoftmax )
Transformer model. Args: source_vocab_size: int: source vocab size target_vocab_size: int: target vocab size mode: str: 'train' or 'eval' num_layers: int: number of encoder/decoder layers feature_depth: int: depth of embedding feedforward_depth: int: depth of feed-forward layer num_heads: int: number of attention heads dropout: float: dropout rate (how much to drop out) shared_embedding: bool: specify whether source/target embeddings are tied. max_len: int: maximum symbol length for positional encoding return_evals: bool: whether to generate decode-time evaluation functions Returns: A namedtuple containing model 'init' and 'apply' functions for training and the 'evals' functions that itself returns a namedtuple containing evaluation functions for the trained encoder, decoder, and generator substax.
11,574
def _default_hparams(): return hparam.HParams( loss_multiplier=1.0, batch_size_multiplier=1, stop_at_eos=False, modality={}, vocab_size={}, input_space_id=SpaceID.GENERIC, target_space_id=SpaceID.GENERIC)
A set of basic model hyperparameters.
11,575
def generate_dict_schema(size, valid): schema = {} generator_items = [] for i in range(0, size): while True: key_schema, key_generator = generate_random_schema(valid) if key_schema not in schema: break value_schema, value_generator = generate_random_schema(valid) schema[key_schema] = value_schema generator_items.append((key_generator, value_generator)) generator = ({next(k_gen): next(v_gen) for k_gen, v_gen in generator_items} for i in itertools.count()) return schema, generator
Generate a schema dict of size `size`. In addition, a generator of matching
samples is returned.

:param size: Schema size
:type size: int
:param valid: Generate valid samples?
:type valid: bool
:returns: Tuple of (schema dict, samples generator)
11,576
def before(point): if not point: return True if isinstance(point, six.string_types): point = str_to_time(point) elif isinstance(point, int): point = time.gmtime(point) return time.gmtime() <= point
True if point datetime specification is before now. NOTE: If point is specified it is supposed to be in local time. Not UTC/GMT !! This is because that is what gmtime() expects.
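A minimal sketch of the integer branch (the string branch, which goes through str_to_time, is omitted here): as written, the function returns True while gmtime() has not yet passed the given point, and True when no point is given.

import time

def before(point):
    # Trimmed copy covering only the empty and epoch-seconds cases.
    if not point:
        return True
    if isinstance(point, int):
        point = time.gmtime(point)
    return time.gmtime() <= point

now = int(time.time())
print(before(now + 3600))   # True: one hour from now has not passed yet
print(before(now - 3600))   # False: one hour ago is already in the past
print(before(None))         # True: no restriction given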
11,577
def reminder_validator(input_str):
    match = re.match(REMINDER_REGEX, input_str)
    # NOTE: the literal accepted alongside a regex match and the error text
    # were stripped from this dump; '.' (a "no more reminders" sentinel) and
    # a generic message are assumed here.
    if match or input_str == '.':
        return input_str
    else:
        raise ValidationError(
            'Expected a reminder matching REMINDER_REGEX')
Allows a string that matches utils.REMINDER_REGEX. Raises ValidationError otherwise.
11,578
def crypto_config_from_table_info(materials_provider, attribute_actions, table_info): ec_kwargs = table_info.encryption_context_values if table_info.primary_index is not None: ec_kwargs.update( {"partition_key_name": table_info.primary_index.partition, "sort_key_name": table_info.primary_index.sort} ) return CryptoConfig( materials_provider=materials_provider, encryption_context=EncryptionContext(**ec_kwargs), attribute_actions=attribute_actions, )
Build a crypto config from the provided values and table info. :returns: crypto config and updated kwargs :rtype: tuple(CryptoConfig, dict)
11,579
def trim_snapshots(self, hourly_backups = 8, daily_backups = 7, weekly_backups = 4): now = datetime.utcnow() last_hour = datetime(now.year, now.month, now.day, now.hour) last_midnight = datetime(now.year, now.month, now.day) last_sunday = datetime(now.year, now.month, now.day) - timedelta(days = (now.weekday() + 1) % 7) start_of_month = datetime(now.year, now.month, 1) target_backup_times = [] oldest_snapshot_date = datetime(2007, 1, 1) for hour in range(0, hourly_backups): target_backup_times.append(last_hour - timedelta(hours = hour)) for day in range(0, daily_backups): target_backup_times.append(last_midnight - timedelta(days = day)) for week in range(0, weekly_backups): target_backup_times.append(last_sunday - timedelta(weeks = week)) one_day = timedelta(days = 1) while start_of_month > oldest_snapshot_date: target_backup_times.append(start_of_month) target_backup_times.sort() all_snapshots = self.get_all_snapshots(owner = ) all_snapshots.sort(cmp = lambda x, y: cmp(x.start_time, y.start_time)) snaps_for_each_volume = {} for snap in all_snapshots: volume_name = snap.tags.get() if volume_name: snaps_for_volume = snaps_for_each_volume.get(volume_name) if not snaps_for_volume: snaps_for_volume = [] snaps_for_each_volume[volume_name] = snaps_for_volume snaps_for_volume.append(snap) for volume_name in snaps_for_each_volume: snaps = snaps_for_each_volume[volume_name] snaps = snaps[:-1] time_period_number = 0 snap_found_for_this_time_period = False for snap in snaps: check_this_snap = True while check_this_snap and time_period_number < target_backup_times.__len__(): snap_date = datetime.strptime(snap.start_time, ) if snap_date < target_backup_times[time_period_number]: if snap_found_for_this_time_period == True: if not snap.tags.get(): else: snap_found_for_this_time_period = True check_this_snap = False else: time_period_number += 1 snap_found_for_this_time_period = False
Trim excess snapshots, based on when they were taken. More current snapshots are retained, with the number retained decreasing as you move back in time. If ebs volumes have a 'Name' tag with a value, their snapshots will be assigned the same tag when they are created. The values of the 'Name' tags for snapshots are used by this function to group snapshots taken from the same volume (or from a series of like-named volumes over time) for trimming. For every group of like-named snapshots, this function retains the newest and oldest snapshots, as well as, by default, the first snapshots taken in each of the last eight hours, the first snapshots taken in each of the last seven days, the first snapshots taken in the last 4 weeks (counting Midnight Sunday morning as the start of the week), and the first snapshot from the first Sunday of each month forever. :type hourly_backups: int :param hourly_backups: How many recent hourly backups should be saved. :type daily_backups: int :param daily_backups: How many recent daily backups should be saved. :type weekly_backups: int :param weekly_backups: How many recent weekly backups should be saved.
11,580
def extract_pool_attr(cls, req): attr = {} if in req: attr[] = int(req[]) if in req: attr[] = req[] if in req: attr[] = req[] if in req: attr[] = req[] if in req: attr[] = int(req[]) if in req: attr[] = int(req[]) return attr
Extract pool attributes from arbitrary dict.
11,581
def delete_report(report): for path in glob.glob(os.path.join(_get_reports_path(), report)): shutil.rmtree(path)
Delete report(s), supports globbing.
11,582
def cnvlGauss2D(idxPrc, aryBoxCar, aryMdlParamsChnk, tplPngSize, varNumVol, queOut): varChnkSze = np.size(aryMdlParamsChnk, axis=0) varNumMtnDrtn = aryBoxCar.shape[2] aryOut = np.zeros([varChnkSze, varNumMtnDrtn, varNumVol]) for idxMtn in range(0, varNumMtnDrtn): for idxMdl in range(0, varChnkSze): varTmpX = aryMdlParamsChnk[idxMdl, 1] varTmpY = aryMdlParamsChnk[idxMdl, 2] varTmpSd = aryMdlParamsChnk[idxMdl, 3] aryGauss = crtGauss2D(tplPngSize[0], tplPngSize[1], varTmpX, varTmpY, varTmpSd) aryPrfTcTmp = np.multiply(aryBoxCar[:, :, idxMtn, :], aryGauss[:, :, None]) aryPrfTcTmp = np.sum(aryPrfTcTmp, axis=(0, 1)) aryOut[idxMdl, idxMtn, :] = aryPrfTcTmp lstOut = [idxPrc, aryOut] queOut.put(lstOut)
Spatially convolve boxcar functions with a 2D Gaussian.

Parameters
----------
idxPrc : int
    Process index, used to identify this chunk's output in the queue.
aryBoxCar : 4d numpy array, shape [pixels_x, pixels_y, n_motion_directions, n_volumes]
    Boxcar stimulus functions to be convolved.
aryMdlParamsChnk : 2d numpy array, shape [n_models, 4]
    Chunk of model parameters; columns 1 to 3 hold the Gaussian x position,
    y position, and standard deviation.
tplPngSize : tuple
    Size of the stimulus images in pixels (x, y).
varNumVol : int
    Number of volumes (time points).
queOut : multiprocessing.Queue
    Queue on which the result is put.

Returns
-------
None
    The result list [idxPrc, aryOut] is put on ``queOut``, where aryOut has
    shape [n_models, n_motion_directions, n_volumes].
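A self-contained sketch of the per-model inner step, with a hypothetical gauss_2d standing in for crtGauss2D: build a 2D Gaussian weight map, multiply it into an (x, y, volumes) boxcar array, and sum over space to obtain one model time course.

import numpy as np

def gauss_2d(size_x, size_y, pos_x, pos_y, sd):
    # Stand-in for crtGauss2D: isotropic 2D Gaussian on a pixel grid.
    x, y = np.meshgrid(np.arange(size_x), np.arange(size_y), indexing='ij')
    return np.exp(-((x - pos_x) ** 2 + (y - pos_y) ** 2) / (2.0 * sd ** 2))

n_vol = 10
boxcar = np.random.rand(64, 64, n_vol)        # (x, y, volumes) stimulus array

weights = gauss_2d(64, 64, pos_x=32.0, pos_y=20.0, sd=5.0)
time_course = np.sum(boxcar * weights[:, :, None], axis=(0, 1))

print(time_course.shape)                      # (10,)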
11,583
def list_metrics(): for name, operator in ALL_OPERATORS.items(): print(f"{name} operator:") if len(operator.cls.metrics) > 0: print( tabulate.tabulate( headers=("Name", "Description", "Type"), tabular_data=operator.cls.metrics, tablefmt=DEFAULT_GRID_STYLE, ) )
List metrics available.
11,584
def search_variants( self, variant_set_id, start=None, end=None, reference_name=None, call_set_ids=None): request = protocol.SearchVariantsRequest() request.reference_name = pb.string(reference_name) request.start = pb.int(start) request.end = pb.int(end) request.variant_set_id = variant_set_id request.call_set_ids.extend(pb.string(call_set_ids)) request.page_size = pb.int(self._page_size) return self._run_search_request( request, "variants", protocol.SearchVariantsResponse)
Returns an iterator over the Variants fulfilling the specified conditions from the specified VariantSet. :param str variant_set_id: The ID of the :class:`ga4gh.protocol.VariantSet` of interest. :param int start: Required. The beginning of the window (0-based, inclusive) for which overlapping variants should be returned. Genomic positions are non-negative integers less than reference length. Requests spanning the join of circular genomes are represented as two requests one on each side of the join (position 0). :param int end: Required. The end of the window (0-based, exclusive) for which overlapping variants should be returned. :param str reference_name: The name of the :class:`ga4gh.protocol.Reference` we wish to return variants from. :param list call_set_ids: Only return variant calls which belong to call sets with these IDs. If an empty array, returns variants without any call objects. If null, returns all variant calls. :return: An iterator over the :class:`ga4gh.protocol.Variant` objects defined by the query parameters. :rtype: iter
11,585
def get_slide_vars(self, slide_src, source=None): presenter_notes = None find = re.search(r, slide_src, re.DOTALL | re.UNICODE | re.IGNORECASE) if find: if self.presenter_notes: presenter_notes = slide_src[find.end():].strip() slide_src = slide_src[:find.start()] find = re.search(r, slide_src, re.DOTALL | re.UNICODE) if not find: header = level = title = None content = slide_src.strip() else: header = find.group(1) level = int(find.group(2)) title = find.group(3) content = find.group(4).strip() if find.group(4) else find.group(4) slide_classes = [] if header: header, _ = self.process_macros(header, source) if content: content, slide_classes = self.process_macros(content, source) source_dict = {} if source: source_dict = {: source, : os.path.abspath(source)} if header or content: return {: header, : title, : level, : content, : slide_classes, : source_dict, : presenter_notes, : self.math_output}
Computes a single slide template vars from its html source code. Also extracts slide informations for the table of contents.
11,586
def _correctArtefacts(self, image, threshold): image = np.nan_to_num(image) medianThreshold(image, threshold, copy=False) return image
Apply a thresholded median replacing high gradients and values beyond the boundaries
11,587
def autodiscover(): global LOADED if LOADED: return LOADED = True for app in get_app_name_list(): try: module = import_module(app) except ImportError: pass else: try: import_module("%s.page_processors" % app) except: if module_has_submodule(module, "page_processors"): raise
Taken from ``django.contrib.admin.autodiscover`` and used to run any calls to the ``processor_for`` decorator.
11,588
def unique(func, num_args=0, max_attempts=100, cache=None): if cache is None: cache = _cache_unique @wraps(func) def wrapper(*args): key = "%s_%s" % (str(func.__name__), str(args[:num_args])) attempt = 0 while attempt < max_attempts: attempt += 1 drawn = cache.get(key, []) result = func(*args) if result not in drawn: drawn.append(result) cache[key] = drawn return result raise MaxAttemptException() return wrapper
Wraps a function so that it produces unique results.

:param func: function whose results should be unique
:param num_args: number of leading arguments used as the cache key

>>> import random
>>> choices = [1,2]
>>> a = unique(random.choice, 1)
>>> a,b = a(choices), a(choices)
>>> a == b
False
11,589
def drain_events(self, timeout=None):
    chanmap = self.channels
    chanid, method_sig, args, content = self._wait_multiple(
        chanmap, None, timeout=timeout,
    )
    channel = chanmap[chanid]
    # 'content_encoding' is restored from the decode call on the next line.
    if (content and channel.auto_encode_decode
            and hasattr(content, 'content_encoding')):
        try:
            content.body = content.body.decode(content.content_encoding)
        except Exception:
            pass
    amqp_method = (self._method_override.get(method_sig) or
                   channel._METHOD_MAP.get(method_sig, None))
    if amqp_method is None:
        # NOTE: the original error text was stripped from this dump; a
        # generic message is used here.
        raise AMQPNotImplementedError(
            'Unknown AMQP method {0!r}'.format(method_sig))
    if content is None:
        return amqp_method(channel, args)
    else:
        return amqp_method(channel, args, content)
Wait for an event on a channel.
11,590
def _read_fasta_files(f, args):
    seq_l = {}
    sample_l = []
    idx = 1
    for line1 in f:
        line1 = line1.strip()
        cols = line1.split("\t")
        # Open the per-sample FASTA file for reading (mode literal restored).
        with open(cols[0], 'r') as fasta:
            sample_l.append(cols[1])
            for line in fasta:
                if line.startswith(">"):
                    idx += 1
                    counts = int(re.search("x([0-9]+)", line.strip()).group(1))
                else:
                    seq = line.strip()
                    seq = seq[0:int(args.maxl)] if len(seq) > int(args.maxl) else seq
                    if counts > int(args.minc) and len(seq) > int(args.minl):
                        if seq not in seq_l:
                            seq_l[seq] = sequence_unique(idx, seq)
                        seq_l[seq].add_exp(cols[1], counts)
    return seq_l, sample_l
read fasta files of each sample and generate a seq_obj with the information of each unique sequence in each sample :param f: file containing the path for each fasta file and the name of the sample. Two column format with `tab` as field separator :returns: * :code:`seq_l`: is a list of seq_obj objects, containing the information of each sequence * :code:`sample_l`: is a list with the name of the samples (column two of the config file)
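A small sketch of the two inputs this reader expects: a tab-separated config line (file path, sample name) and collapsed FASTA headers whose read count is pulled out with the same x([0-9]+) regex. The exact header naming is an assumption; only the x<count> suffix matters to the code.

import re

config_line = "sample1.fa\tsampleA"             # path<TAB>sample name
fasta_path, sample_name = config_line.strip().split("\t")

header = ">seq_1_x25"                           # collapsed read seen 25 times
counts = int(re.search("x([0-9]+)", header).group(1))

print(fasta_path, sample_name, counts)          # sample1.fa sampleA 25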
11,591
def parse_boolargs(self, args):
    if not isinstance(args, list):
        args = [args]
    return_vals = []
    bool_args = []
    for arg in args:
        if not isinstance(arg, tuple):
            return_val = arg
            bool_arg = None
        elif len(arg) == 1:
            return_val = arg[0]
            bool_arg = None
        elif len(arg) == 2:
            return_val, bool_arg = arg
        else:
            raise ValueError("argument not formatted correctly")
        return_vals.append(return_val)
        bool_args.append(bool_arg)
    outdtype = numpy.array(return_vals).dtype
    out = numpy.zeros(self.size, dtype=outdtype)
    mask = numpy.zeros(self.size, dtype=bool)
    leftovers = numpy.ones(self.size, dtype=bool)
    for ii, (boolarg, val) in enumerate(zip(bool_args, return_vals)):
        # The accepted "catch-all" spellings (None, empty string, "else")
        # are restored from the docstring.
        if boolarg is None or boolarg == '' or boolarg.lower() == 'else':
            if ii+1 != len(bool_args):
                raise ValueError("only the last item may not provide "
                                 "any boolean arguments")
            mask = leftovers
        else:
            mask = leftovers & self[boolarg]
        out[mask] = val
        leftovers &= ~mask
    return out, numpy.where(leftovers)[0]
Returns an array populated by given values, with the indices of those values dependent on given boolen tests on self. The given `args` should be a list of tuples, with the first element the return value and the second argument a string that evaluates to either True or False for each element in self. Each boolean argument is evaluated on elements for which every prior boolean argument was False. For example, if array `foo` has a field `bar`, and `args = [(1, 'bar < 10'), (2, 'bar < 20'), (3, 'bar < 30')]`, then the returned array will have `1`s at the indices for which `foo.bar < 10`, `2`s where `foo.bar < 20 and not foo.bar < 10`, and `3`s where `foo.bar < 30 and not (foo.bar < 10 or foo.bar < 20)`. The last argument in the list may have "else", an empty string, None, or simply list a return value. In any of these cases, any element not yet populated will be assigned the last return value. Parameters ---------- args : {(list of) tuples, value} One or more return values and boolean argument determining where they should go. Returns ------- return_values : array An array with length equal to self, with values populated with the return values. leftover_indices : array An array of indices that evaluated to False for all arguments. These indices will not have been popluated with any value, defaulting to whatever numpy uses for a zero for the return values' dtype. If there are no leftovers, an empty array is returned. Examples -------- Given the following array: >>> arr = FieldArray(5, dtype=[('mtotal', float)]) >>> arr['mtotal'] = numpy.array([3., 5., 2., 1., 4.]) Return `"TaylorF2"` for all elements with `mtotal < 4` (note that the elements 1 and 4 are leftover): >>> arr.parse_boolargs(('TaylorF2', 'mtotal<4')) (array(['TaylorF2', '', 'TaylorF2', 'TaylorF2', ''], dtype='|S8'), array([1, 4])) Return `"TaylorF2"` for all elements with `mtotal < 4`, `"SEOBNR_ROM_DoubleSpin"` otherwise: >>> arr.parse_boolargs([('TaylorF2', 'mtotal<4'), ('SEOBNRv2_ROM_DoubleSpin', 'else')]) (array(['TaylorF2', 'SEOBNRv2_ROM_DoubleSpin', 'TaylorF2', 'TaylorF2', 'SEOBNRv2_ROM_DoubleSpin'], dtype='|S23'), array([], dtype=int64)) The following will also return the same: >>> arr.parse_boolargs([('TaylorF2', 'mtotal<4'), ('SEOBNRv2_ROM_DoubleSpin',)]) >>> arr.parse_boolargs([('TaylorF2', 'mtotal<4'), ('SEOBNRv2_ROM_DoubleSpin', '')]) >>> arr.parse_boolargs([('TaylorF2', 'mtotal<4'), 'SEOBNRv2_ROM_DoubleSpin']) Return `"TaylorF2"` for all elements with `mtotal < 3`, `"IMRPhenomD"` for all elements with `3 <= mtotal < 4`, `"SEOBNRv2_ROM_DoubleSpin"` otherwise: >>> arr.parse_boolargs([('TaylorF2', 'mtotal<3'), ('IMRPhenomD', 'mtotal<4'), 'SEOBNRv2_ROM_DoubleSpin']) (array(['IMRPhenomD', 'SEOBNRv2_ROM_DoubleSpin', 'TaylorF2', 'TaylorF2', 'SEOBNRv2_ROM_DoubleSpin'], dtype='|S23'), array([], dtype=int64)) Just return `"TaylorF2"` for all elements: >>> arr.parse_boolargs('TaylorF2') (array(['TaylorF2', 'TaylorF2', 'TaylorF2', 'TaylorF2', 'TaylorF2'], dtype='|S8'), array([], dtype=int64))
11,592
def set_nsxcontroller_ip(self, **kwargs): name = kwargs.pop() ip_addr = str((kwargs.pop(, None))) nsxipaddress = ip_interface(unicode(ip_addr)) if nsxipaddress.version != 4: raise ValueError() ip_args = dict(name=name, address=ip_addr) method_name = method_class = self._brocade_tunnels nsxcontroller_attr = getattr(method_class, method_name) config = nsxcontroller_attr(**ip_args) output = self._callback(config) return output
Set nsx-controller IP Args: IP (str): IPV4 address. callback (function): A function executed upon completion of the method. Returns: Return value of `callback`. Raises: None
11,593
def getAddPerson(self): fragment = AddPersonFragment(self.organizer) fragment.setFragmentParent(self) return fragment
Return an L{AddPersonFragment} which is a child of this fragment and which will add a person to C{self.organizer}.
11,594
def assets(lon=None, lat=None, begin=None, end=None): base_url = "https://api.nasa.gov/planetary/earth/assets?" if not lon or not lat: raise ValueError( "assets endpoint expects lat and lon, type has to be float. Call the method with keyword args. Ex : lon=100.75, lat=1.5") else: try: validate_float(lon, lat) lon = decimal.Decimal(lon) lat = decimal.Decimal(lat) base_url += "lon=" + str(lon) + "&" + "lat=" + str(lat) + "&" except: raise ValueError( "assets endpoint expects lat and lon, type has to be float. Call the method with keyword args. Ex : lon=100.75, lat=1.5") if not begin: raise ValueError( "Begin date is missing, which is mandatory. Format : YYYY-MM-DD") else: try: vali_date(begin) base_url += "begin=" + begin + "&" except: raise ValueError("Incorrect date format, should be YYYY-MM-DD") if end: try: vali_date(end) base_url += "end=" + end + "&" except: raise ValueError("Incorrect date format, should be YYYY-MM-DD") req_url = base_url + "api_key=" + nasa_api_key() return dispatch_http_get(req_url)
HTTP REQUEST GET https://api.nasa.gov/planetary/earth/assets QUERY PARAMETERS Parameter Type Default Description lat float n/a Latitude lon float n/a Longitude begin YYYY-MM-DD n/a beginning of date range end YYYY-MM-DD today end of date range api_key string DEMO_KEY api.nasa.gov key for expanded usage EXAMPLE QUERY https://api.nasa.gov/planetary/earth/assets?lon=100.75&lat=1.5&begin=2014-02-01&api_key=DEMO_KEY
11,595
def visit_BinaryOperation(self, node): self.visit(node.left) self.visit(node.right)
Visitor for `BinaryOperation` AST node.
11,596
def make_shift_function(alphabet): def shift_case_sensitive(shift, symbol): case = [case for case in alphabet if symbol in case] if not case: return symbol case = case[0] index = case.index(symbol) return case[(index - shift) % len(case)] return shift_case_sensitive
Construct a shift function from an alphabet. Examples: Shift cases independently >>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase]) <function make_shift_function.<locals>.shift_case_sensitive> Additionally shift punctuation characters >>> make_shift_function([string.ascii_uppercase, string.ascii_lowercase, string.punctuation]) <function make_shift_function.<locals>.shift_case_sensitive> Shift entire ASCII range, overflowing cases >>> make_shift_function([''.join(chr(x) for x in range(32, 127))]) <function make_shift_function.<locals>.shift_case_sensitive> Args: alphabet (iterable): Ordered iterable of strings representing separate cases of an alphabet Returns: Function (shift, symbol)
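A worked example of using the returned shift function, with the constructor copied inline so the snippet runs on its own; because the function indexes with (index - shift), a shift of 3 maps 'd' back to 'a', i.e. it undoes a forward Caesar shift of 3.

import string

def make_shift_function(alphabet):
    def shift_case_sensitive(shift, symbol):
        case = [case for case in alphabet if symbol in case]
        if not case:
            return symbol                       # characters outside the alphabet pass through
        case = case[0]
        index = case.index(symbol)
        return case[(index - shift) % len(case)]
    return shift_case_sensitive

shift = make_shift_function([string.ascii_uppercase, string.ascii_lowercase])
print(''.join(shift(3, c) for c in 'Khoor, Zruog!'))   # Hello, World!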
11,597
def new_job(frontier, job_conf):
    validate_conf(job_conf)
    job = Job(frontier.rr, {
        "conf": job_conf, "status": "ACTIVE",
        "started": doublethink.utcnow()})
    if "id" in job_conf:
        job.id = job_conf["id"]
    if "max_claimed_sites" in job_conf:
        job.max_claimed_sites = job_conf["max_claimed_sites"]
    job.save()

    sites = []
    pages = []
    for seed_conf in job_conf["seeds"]:
        merged_conf = merge(seed_conf, job_conf)
        merged_conf.pop("seeds")
        merged_conf["job_id"] = job.id
        merged_conf["seed"] = merged_conf.pop("url")
        site = brozzler.Site(frontier.rr, merged_conf)
        site.id = str(uuid.uuid4())
        sites.append(site)
        pages.append(new_seed_page(frontier, site))

    # NOTE: the table names and log messages below were stripped from this
    # dump; 'pages' and 'sites' are assumed from the batches being inserted,
    # and the log wording is generic.
    for batch in (pages[i:i+500] for i in range(0, len(pages), 500)):
        logging.info('inserting batch of %s pages', len(batch))
        result = frontier.rr.table('pages').insert(batch).run()
    for batch in (sites[i:i+100] for i in range(0, len(sites), 100)):
        logging.info('inserting batch of %s sites', len(batch))
        result = frontier.rr.table('sites').insert(batch).run()
    logging.info('new job %s', job.id)
    return job
Returns new Job.
11,598
def run_unlock(device_type, args):
    util.setup_logging(verbosity=args.verbose)
    with device_type() as d:
        # NOTE: the original log message was stripped from this dump; generic
        # wording is used here.
        log.info('unlocked device %s', d)
Unlock hardware device (for future interaction).
11,599
def get_metric_parsers(metric_packages=tuple(), include_defaults=True): metric_parsers = set() if include_defaults: import git_code_debt.metrics metric_parsers.update(discover(git_code_debt.metrics, is_metric_cls)) for metric_package in metric_packages: metric_parsers.update(discover(metric_package, is_metric_cls)) return metric_parsers
Gets all of the metric parsers. Args: metric_packages - Defaults to no extra packages. An iterable of metric containing packages. A metric inherits DiffParserBase and does not have __metric__ = False A metric package must be imported using import a.b.c include_defaults - Whether to include the generic metric parsers