Columns:
Unnamed: 0: int64 (values 0 to 389k)
code: string (lengths 26 to 79.6k)
docstring: string (lengths 1 to 46.9k)
13,100
def pre_save(self, instance, add: bool):
    if not isinstance(instance, AtomicSlugRetryMixin):
        raise ImproperlyConfigured(
            "'%s' does not inherit from AtomicSlugRetryMixin"  # message reconstructed; original string lost
            % type(instance).__name__
        )

    slugs = LocalizedValue()

    for lang_code, value in self._get_populate_values(instance):
        if not value:
            continue
        slug = slugify(value, allow_unicode=True)
        slugs.set(lang_code, slug)

    setattr(instance, self.name, slugs)
    return slugs
Runs just before the model is saved, allowing us to build the slug. Arguments: instance: The model that is being saved. add: Indicates whether this is a new entry to the database or an update. Returns: The localized slug that was generated.
13,101
def getFullFMAtIndex(self, index):
    if index == self.totalSize:
        return np.cumsum(self.totalCounts)

    # locate the nearest sampled FM-index entry at or before 'index'
    binID = index >> self.bitPower
    bwtIndex = self.refFM[binID]
    ret = np.copy(self.partialFM[binID])
    trueIndex = np.sum(ret) - self.offsetSum
    dist = index - trueIndex
    if dist == 0:
        return ret

    # extend the scan range to the end of the current run of identical symbols
    if binID == self.refFM.shape[0] - 1:
        endRange = self.bwt.shape[0]
    else:
        endRange = self.refFM[binID + 1] + 1
    while endRange < self.bwt.shape[0] and (self.bwt[endRange] & self.mask) == (self.bwt[endRange - 1] & self.mask):
        endRange += 1

    letters = np.bitwise_and(self.bwt[bwtIndex:endRange], self.mask)
    counts = np.right_shift(self.bwt[bwtIndex:endRange], self.letterBits, dtype='<u8')  # dtype assumed; original lost

    # accumulate multi-byte run lengths for consecutive identical symbols
    i = 1
    same = (letters[0:-1] == letters[1:])
    while np.count_nonzero(same) > 0:
        (counts[i:])[same] *= self.numPower
        i += 1
        same = np.bitwise_and(same[0:-1], same[1:])

    cs = np.subtract(np.cumsum(counts), counts)
    x = np.searchsorted(cs, dist, 'right')  # search side assumed; original argument lost
    if x > 1:
        ret += np.bincount(letters[0:x - 1], counts[0:x - 1], minlength=self.vcLen)
    ret[letters[x - 1]] += dist - cs[x - 1]
    return ret
This function creates a complete FM-index for a specific position in the BWT. Example using the above example:

    BWT    Full FM-index
           $  A  C  G  T
    C      0  1  2  4  4
    $      0  1  3  4  4
    C      1  1  3  4  4
    A      1  1  4  4  4
           1  2  4  4  4

@return - the above information in the form of an array that already incorporates the offset value into the counts
13,102
def list(self, page=1, per_page=50):
    data = {"page": page, "per_page": per_page}
    return self.get(self.base_url, data=data)
Lists Jobs. https://app.zencoder.com/docs/api/jobs/list
13,103
def _get_create_table_sql(self, table_name, columns, options=None):
    options = options or {}

    column_list_sql = self.get_column_declaration_list_sql(columns)

    if options.get("unique_constraints"):
        for name, definition in options["unique_constraints"].items():
            column_list_sql += ", %s" % self.get_unique_constraint_declaration_sql(
                name, definition
            )

    if options.get("primary"):
        column_list_sql += ", PRIMARY KEY(%s)" % ", ".join(options["primary"])

    if options.get("indexes"):
        for index, definition in options["indexes"]:
            column_list_sql += ", %s" % self.get_index_declaration_sql(
                index, definition
            )

    query = "CREATE TABLE %s (%s" % (table_name, column_list_sql)

    check = self.get_check_declaration_sql(columns)
    if check:
        query += ", %s" % check

    query += ")"

    sql = [query]

    if options.get("foreign_keys"):
        for definition in options["foreign_keys"]:
            sql.append(self.get_create_foreign_key_sql(definition, table_name))

    return sql
Returns the SQL used to create a table. :param table_name: The name of the table to create :type table_name: str :param columns: The table columns :type columns: dict :param options: The options :type options: dict :rtype: list
13,104
def new_pic(cls, id_, name, desc, rId, left, top, width, height):
    xml = cls._pic_tmpl() % (
        id_, name, desc, rId, left, top, width, height
    )
    pic = parse_xml(xml)
    return pic
Return a new ``<p:pic>`` element tree configured with the supplied parameters.
13,105
def _try_larger_image(self, roi, cur_text, cur_mrz, filter_order=3):
    if roi.shape[1] <= 700:
        scale_by = int(1050.0 / roi.shape[1] + 0.5)
        roi_lg = transform.rescale(roi, scale_by, order=filter_order, mode='constant',  # mode assumed
                                   multichannel=False, anti_aliasing=True)
        new_text = ocr(roi_lg, extra_cmdline_params=self.extra_cmdline_params)
        new_mrz = MRZ.from_ocr(new_text)
        new_mrz.aux['method'] = 'rescaled(%d)' % filter_order  # aux key and value format assumed
        if new_mrz.valid_score > cur_mrz.valid_score:
            cur_mrz = new_mrz
            cur_text = new_text
    return cur_text, cur_mrz
Attempts to improve the OCR result by scaling the image. If the new mrz is better, returns it, otherwise returns the old mrz.
13,106
def _get_symbol_index(stroke_id_needle, segmentation):
    for symbol_index, symbol in enumerate(segmentation):
        if stroke_id_needle in symbol:
            return symbol_index
    return None
Parameters ---------- stroke_id_needle : int Identifier for the stroke of which the symbol should get found. segmentation : list of lists of integers An ordered segmentation of strokes to symbols. Returns ------- The symbol index in which stroke_id_needle occurs Examples -------- >>> _get_symbol_index(3, [[0, 1, 2], [3, 4, 5], [6, 7]]) 1 >>> _get_symbol_index(6, [[0, 1, 2], [3, 4, 5], [6, 7]]) 2 >>> _get_symbol_index(7, [[0, 1, 2], [3, 4, 5], [6, 7]]) 2
13,107
def connect_attenuator(self, connect=True):
    if connect:
        try:
            pa5 = win32com.client.Dispatch("PA5.x")
            success = pa5.ConnectPA5('GB', 1)  # interface name assumed; original argument lost
            if success == 1:
                print 'Connected to PA5 attenuator'
            else:
                print 'Connection to PA5 attenuator failed'
                errmsg = pa5.GetError()
                print u"Error: ", errmsg
                raise Exception(u"Attenuator connection failed")
        except:
            print "Error connecting to attenuator"
            pa5 = None
        self.attenuator = pa5
    else:
        # disconnect: zero the attenuation and release the handle
        if self.attenuator:
            self.attenuator.setAtten(0)
        self.attenuator = None
    return self.attenuator
Establish a connection to the TDT PA5 attenuator
13,108
def select_python_parser(parser=None):
    # string values taken from the docstring: 'redbaron' and the GETGAUGE_USE_0_3_3_PARSER property
    if parser == 'redbaron' or os.environ.get('GETGAUGE_USE_0_3_3_PARSER'):
        PythonFile.Class = RedbaronPythonFile
    else:
        PythonFile.Class = ParsoPythonFile
Select default parser for loading and refactoring steps. Passing `redbaron` as argument will select the old parsing engine from v0.3.3. Replacing the redbaron parser was necessary to support Python 3 syntax. We have tried our best to make sure there is no impact on users. However, there may be regressions with the new parser backend. To revert to the old parser implementation, add the `GETGAUGE_USE_0_3_3_PARSER=true` property to the `python.properties` file in the `<PROJECT_DIR>/env/default` directory. This property along with the redbaron parser will be removed in future releases.
13,109
def get_authorizations_by_ids(self, authorization_ids):
    # collection and field names assumed; originals were lost in extraction
    collection = JSONClientValidated('authorization',
                                     collection='Authorization',
                                     runtime=self._runtime)
    object_id_list = []
    for i in authorization_ids:
        object_id_list.append(ObjectId(self._get_id(i, 'authorization').get_identifier()))
    result = collection.find(
        dict({'_id': {'$in': object_id_list}},
             **self._view_filter()))
    result = list(result)
    sorted_result = []
    for object_id in object_id_list:
        for object_map in result:
            if object_map['_id'] == object_id:
                sorted_result.append(object_map)
                break
    return objects.AuthorizationList(sorted_result, runtime=self._runtime, proxy=self._proxy)
Gets an ``AuthorizationList`` corresponding to the given ``IdList``. In plenary mode, the returned list contains all of the authorizations specified in the ``Id`` list, in the order of the list, including duplicates, or an error results if an ``Id`` in the supplied list is not found or inaccessible. Otherwise, inaccessible ``Authorizations`` may be omitted from the list and may present the elements in any order including returning a unique set. arg: authorization_ids (osid.id.IdList): the list of ``Ids`` to retrieve return: (osid.authorization.AuthorizationList) - the returned ``Authorization list`` raise: NotFound - an ``Id`` was not found raise: NullArgument - ``authorization_ids`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
13,110
def json_encoder_default(obj):
    # attribute names inferred from how they are used below; originals were lost
    if np is not None and hasattr(obj, 'dtype') and hasattr(obj, 'size'):
        # numpy scalars: convert to native int/float
        if obj.size == 1:
            if np.issubdtype(obj.dtype, np.integer):
                return int(obj)
            elif np.issubdtype(obj.dtype, np.floating):
                return float(obj)
    if isinstance(obj, set):
        return list(obj)
    elif hasattr(obj, 'to_native'):
        return obj.to_native()
    elif hasattr(obj, 'dtype') and hasattr(obj, 'tolist'):
        # numpy arrays: convert to plain lists
        return obj.tolist()
    return obj
Handle more data types than the default JSON encoder. Specifically, it treats a `set` and a `numpy.array` like a `list`. Example usage: ``json.dumps(obj, default=json_encoder_default)``
13,111
def _merge_results(self, results): self.results[] += results[] for key, value in results[].items(): self.results[][key] += value
Combine results of test run with existing dict.
13,112
def hsl_to_rgb(h, s=None, l=None):
    if type(h) in [list, tuple]:
        h, s, l = h

    if s == 0:
        return (l, l, l)

    if l < 0.5:
        n2 = l * (1.0 + s)
    else:
        n2 = l + s - (l * s)

    n1 = (2.0 * l) - n2
    h /= 60.0

    hueToRgb = _hue_to_rgb
    r = hueToRgb(n1, n2, h + 2)
    g = hueToRgb(n1, n2, h)
    b = hueToRgb(n1, n2, h - 2)

    return (r, g, b)
Convert the color from HSL coordinates to RGB. Parameters: :h: The Hue component value [0...360] :s: The Saturation component value [0...1] :l: The Lightness component value [0...1] Returns: The color as an (r, g, b) tuple in the range: r[0...1], g[0...1], b[0...1] >>> hsl_to_rgb(30.0, 1.0, 0.5) (1.0, 0.5, 0.0)
13,113
def weld_iloc_int(array, index):
    obj_id, weld_obj = create_weld_object(array)
    # Weld template assumed: a lookup of the element at the given index; original string lost
    weld_template = 'lookup({array}, {index}L)'
    weld_obj.weld_code = weld_template.format(array=obj_id, index=index)
    return weld_obj
Retrieves the value at index. Parameters ---------- array : numpy.ndarray or WeldObject Input data. Assumed to be bool data. index : int The array index from which to retrieve value. Returns ------- WeldObject Representation of this computation.
13,114
def port(self, value):
    if value is not None:
        assert type(value) is int, \
            "'{0}' attribute: '{1}' type is not 'int'!".format("port", value)
        assert value >= 0 and value <= 65535, \
            "'{0}' attribute: '{1}' value must be in 0-65535 range!".format("port", value)
    self.__port = value
Setter for **self.__port** attribute. :param value: Attribute value. :type value: int
13,115
def approve(self):
    url = self.reddit_session.config['approve']
    data = {'id': self.fullname}
    response = self.reddit_session.request_json(url, data=data)
    # evict cached listings that may now be stale; config key names assumed
    urls = [self.reddit_session.config[x] for x in ['modqueue', 'spam']]
    if isinstance(self, Submission):
        urls += self.subreddit._listing_urls
    self.reddit_session.evict(urls)
    return response
Approve object. This reverts a removal, resets the report counter, marks it with a green check mark (only visible to other moderators) on the website view and sets the approved_by attribute to the logged in user. :returns: The json response from the server.
13,116
def _add_request_data(data, request):
    try:
        request_data = _build_request_data(request)
    except Exception as e:
        log.exception("Exception while building request_data for Rollbar payload: %r", e)
    else:
        if request_data:
            _filter_ip(request_data, SETTINGS['capture_ip'])  # settings key assumed
            data['request'] = request_data
Attempts to build request data; if successful, sets the 'request' key on `data`.
13,117
def createTileUrl(self, x, y, z):
    # placeholder tokens assumed to be {x}, {y}, {z}; original strings lost
    return self.tileTemplate.replace('{x}', str(x)).replace('{y}', str(y)).replace('{z}', str(z))
returns new tile url based on template
13,118
def run_apidoc(_): import os dirname = os.path.dirname(__file__) ignore_paths = [os.path.join(dirname, ),] argv = [ , , , , , os.path.join(dirname, ), os.path.join(dirname, ), ] + ignore_paths from sphinx.ext import apidoc apidoc.main(argv)
This method is required by the setup method below.
13,119
def validate_args(self):
    def validate_name():
        # regex and messages assumed; param key 'name' inferred from the validator's purpose
        allowed_re = r'^[A-Za-z0-9_.\-]+$'
        assert isinstance(self.params['name'], basestring), (
            'name must be a string, got %s' % repr(self.params['name']))
        assert re.match(allowed_re, self.params['name']), (
            'invalid name %s (must match %s)' % (
                repr(self.params['name']), repr(allowed_re)))
    validate_name()

    def validate_deps():
        if 'deps' in self.params:
            assert type(self.params['deps']) in (type(None), list), (
                'deps must be None or a list, got %s' % repr(self.params['deps']))
    validate_deps()
Input validation!
13,120
def get_change_values(change):
    action, rrset = change

    if action == 'CREATE':
        # For creations, pull the values from the rrset's attributes
        # (keys iterated from _initial_vals; reconstruction per the docstring).
        values = {}
        for key in rrset._initial_vals:
            values[key] = getattr(rrset, key)
        return values
    else:
        # For deletions, use the original values captured at load time.
        return rrset._initial_vals
In the case of deletions, we pull the change values for the XML request from the ResourceRecordSet._initial_vals dict, since we want the original values. For creations, we pull from the attributes on ResourceRecordSet. Since we're dealing with attributes vs. dict key/vals, we'll abstract this part away here and just always pass a dict to write_change. :rtype: dict :returns: A dict of change data, used by :py:func:`write_change` to write the change request XML.
13,121
def count(args):
    from jcvi.graphics.histogram import stem_leaf_plot
    from jcvi.utils.cbook import SummaryStats

    p = OptionParser(count.__doc__)
    p.add_option("--csv", help="Write depth per contig to file")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    fastafile, = args
    csv = open(opts.csv, "w") if opts.csv else None

    f = Fasta(fastafile, lazy=True)
    sizes = []
    for desc, rec in f.iterdescriptions_ordered():
        if desc.startswith("singleton"):
            sizes.append(1)
            continue

        if "with" in desc:
            name, w, size, seqs = desc.split()
            if csv:
                print("\t".join(str(x) for x in (name, size, len(rec))), file=csv)
            assert w == "with"
            sizes.append(int(size))
        else:
            name, size, tail = desc.split(";")
            sizes.append(int(size.replace("size=", "")))

    if csv:
        csv.close()
        logging.debug("File written to `{0}`".format(opts.csv))

    s = SummaryStats(sizes)
    print(s, file=sys.stderr)
    stem_leaf_plot(s.data, 0, 100, 20, title="Cluster size")
%prog count cdhit.consensus.fasta Scan the headers for the consensus clusters and count the number of reads.
13,122
def volume(self) -> float:
    m = self._matrix
    return float(abs(dot(np.cross(m[0], m[1]), m[2])))
Volume of the unit cell.
13,123
def expand_file_names(path, files_root):
    # wildcard characters assumed to be the usual glob set; original string lost
    if not any(wildcard in path for wildcard in '*?['):
        return [path]
    else:
        dir_path, filename = os.path.split(path)
        return [os.path.join(dir_path, f)
                for f in fnmatch.filter(os.listdir(os.path.join(files_root, dir_path)), filename)]
Expands paths (e.g. css/*.css in files_root /actual/path/to/css/files/)
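A minimal usage sketch of the function above (hypothetical paths; the result depends on what is on disk):
expand_file_names("style.css", "/srv/app")     # no wildcard: returns ["style.css"] unchanged
expand_file_names("css/*.css", "/srv/app")     # lists /srv/app/css and returns the matching relative paths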
13,124
def get_annotations(self):
    # dict key assumed to be 'annotations', matching the field this accessor exposes
    try:
        obj_list = self.__dict__['annotations']
        return [Annotation(i) for i in obj_list]
    except KeyError:
        self._lazy_load()
        obj_list = self.__dict__['annotations']
        return [Annotation(i) for i in obj_list]
Fetch the annotations field, lazy-loading the object if the field does not exist yet.
13,125
def posted_data_dict(self):
    if not self.query:
        return None
    from django.http import QueryDict
    # query-string separators '=' and '&' inferred; 'charset' and 'ascii' assumed
    roughdecode = dict(item.split('=', 1) for item in self.query.split('&'))
    encoding = roughdecode.get('charset', None)
    if encoding is None:
        encoding = DEFAULT_ENCODING
    query = self.query.encode('ascii')
    data = QueryDict(query, encoding=encoding)
    return data.dict()
All the data that PayPal posted to us, as a correctly parsed dictionary of values.
13,126
def delete_pool_member(hostname, username, password, name, member): ret = {: name, : {}, : False, : } if __opts__[]: return _test_output(ret, , params={ : hostname, : username, : password, : name, : member } ) existing = __salt__[](hostname, username, password, name) if existing[] == 200: current_members = existing[][][] exists = False for current_member in current_members: if current_member[] == member: exists = True existing_member = current_member break if exists: deleted = __salt__[](hostname, username, password, name, member) if deleted[] == 200: ret[] = True ret[] = .format(member=member) ret[][] = existing_member ret[][] = {} else: ret[] = True ret[] = ret[][] = {} ret[][] = {} else: ret = _load_result(existing, ret) return ret
Delete an existing pool member. hostname The host/address of the bigip device username The iControl REST username password The iControl REST password name The name of the pool to be modified member The name of the member to delete from the pool
13,127
def render(template, namespace, app=None):
    app = app or state.app
    return app.render(template, namespace)
Render the specified template using the Pecan rendering framework with the specified template namespace as a dictionary. Useful in a controller where you have no template specified in the ``@expose``. :param template: The path to your template, as you would specify in ``@expose``. :param namespace: The namespace to use for rendering the template, as a dictionary. :param app: The instance of :class:`pecan.Pecan` to use
13,128
def follow(user, obj, send_action=True, actor_only=True, flag='', **kwargs):
    check(obj)
    instance, created = apps.get_model('actstream', 'follow').objects.get_or_create(
        user=user, object_id=obj.pk, flag=flag,
        content_type=ContentType.objects.get_for_model(obj),
        actor_only=actor_only
    )
    if send_action and created:
        # verb strings reconstructed from the docstring's description
        if not flag:
            action.send(user, verb=_('started following'), target=obj, **kwargs)
        else:
            action.send(user, verb=_('started %s' % flag), target=obj, **kwargs)
    return instance
Creates a relationship allowing the object's activities to appear in the user's stream. Returns the created ``Follow`` instance. If ``send_action`` is ``True`` (the default) then a ``<user> started following <object>`` action signal is sent. Extra keyword arguments are passed to the action.send call. If ``actor_only`` is ``True`` (the default) then only actions where the object is the actor will appear in the user's activity stream. Set to ``False`` to also include actions where this object is the action_object or the target. If ``flag`` is not an empty string then the relationship will be marked with this flag. Example:: follow(request.user, group, actor_only=False) follow(request.user, group, actor_only=False, flag='liking')
13,129
def make_mixture_prior(latent_size, mixture_components):
    if mixture_components == 1:
        return tfd.MultivariateNormalDiag(
            loc=tf.zeros([latent_size]),
            scale_identity_multiplier=1.0)

    loc = tf.compat.v1.get_variable(
        name="loc", shape=[mixture_components, latent_size])
    raw_scale_diag = tf.compat.v1.get_variable(
        name="raw_scale_diag", shape=[mixture_components, latent_size])
    mixture_logits = tf.compat.v1.get_variable(
        name="mixture_logits", shape=[mixture_components])

    return tfd.MixtureSameFamily(
        components_distribution=tfd.MultivariateNormalDiag(
            loc=loc,
            scale_diag=tf.nn.softplus(raw_scale_diag)),
        mixture_distribution=tfd.Categorical(logits=mixture_logits),
        name="prior")
Creates the mixture of Gaussians prior distribution. Args: latent_size: The dimensionality of the latent representation. mixture_components: Number of elements of the mixture. Returns: random_prior: A `tfd.Distribution` instance representing the distribution over encodings in the absence of any evidence.
13,130
def unpack_thin(thin_path):
    tfile = tarfile.TarFile.gzopen(thin_path)
    old_umask = os.umask(0o077)
    tfile.extractall(path=OPTIONS.saltdir)
    tfile.close()
    os.umask(old_umask)
    try:
        os.unlink(thin_path)
    except OSError:
        pass
    reset_time(OPTIONS.saltdir)
Unpack the Salt thin archive.
13,131
def _iter_module_files():
    for module in list(sys.modules.values()):
        if module is None:
            continue
        filename = getattr(module, "__file__", None)
        if filename:
            if os.path.isdir(filename) and os.path.exists(
                os.path.join(filename, "__init__.py")
            ):
                filename = os.path.join(filename, "__init__.py")

            old = None
            while not os.path.isfile(filename):
                old = filename
                filename = os.path.dirname(filename)
                if filename == old:
                    break
            else:
                if filename[-4:] in (".pyc", ".pyo"):
                    filename = filename[:-1]
                yield filename
This iterates over all relevant Python files. It goes through all loaded files from modules, all files in folders of already loaded modules as well as all files reachable through a package.
13,132
def smooth(x, window_len=7, window='hanning'):
    # default window and dict keys reconstructed from the window names listed in the docstring
    if len(x) < window_len:
        raise ValueError("Input vector length must be >= window length.")

    if window_len < 3:
        raise ValueError("Window length must be at least 3.")

    if not window_len % 2:  # window_len is even
        window_len += 1
        print("Window length reset to {}".format(window_len))

    windows = {'hanning': np.hanning,
               'hamming': np.hamming,
               'bartlett': np.bartlett,
               'blackman': np.blackman,
               'flat': np.ones  # moving average
               }

    # reflect x around its endpoints before convolving, as described in the Notes
    k = int(window_len / 2)
    xb = x[:k]
    xt = x[-k:]
    s = np.concatenate((xb[::-1], x, xt[::-1]))

    if window in windows.keys():
        w = windows[window](window_len)
    else:
        msg = "Unrecognized window type '{}'".format(window)
        print(msg + " Defaulting to hanning")
        w = windows['hanning'](window_len)

    return np.convolve(w / w.sum(), s, mode='valid')
Smooth the data in x using convolution with a window of requested size and type. Parameters ---------- x : array_like(float) A flat NumPy array containing the data to smooth window_len : scalar(int), optional An odd integer giving the length of the window. Defaults to 7. window : string A string giving the window type. Possible values are 'flat', 'hanning', 'hamming', 'bartlett' or 'blackman' Returns ------- array_like(float) The smoothed values Notes ----- Application of the smoothing window at the top and bottom of x is done by reflecting x around these points to extend it sufficiently in each direction.
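A minimal usage sketch, assuming NumPy is imported as np and the function above is in scope:
x = np.random.randn(100)
x_smooth = smooth(x, window_len=9, window='hamming')   # returns an array the same length as x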
13,133
def search(name, jail=None, chroot=None, root=None, exact=False, glob=False, regex=False, pcre=False, comment=False, desc=False, full=False, depends=False, size=False, quiet=False, origin=False, prefix=False): *************** opts = if exact: opts += if glob: opts += if regex: opts += if pcre: opts += if comment: opts += if desc: opts += if full: opts += if depends: opts += if size: opts += if quiet: opts += if origin: opts += if prefix: opts += cmd = _pkg(jail, chroot, root) cmd.append() if opts: cmd.append( + opts) cmd.append(name) return __salt__[]( cmd, output_loglevel=, python_shell=False )
Searches in remote package repositories CLI Example: .. code-block:: bash salt '*' pkg.search pattern jail Perform the search using the ``pkg.conf(5)`` from the specified jail CLI Example: .. code-block:: bash salt '*' pkg.search pattern jail=<jail name or id> chroot Perform the search using the ``pkg.conf(5)`` from the specified chroot (ignored if ``jail`` is specified) root Perform the search using the ``pkg.conf(5)`` from the specified root (ignored if ``jail`` is specified) CLI Example: .. code-block:: bash salt '*' pkg.search pattern chroot=/path/to/chroot exact Treat pattern as exact pattern. CLI Example: .. code-block:: bash salt '*' pkg.search pattern exact=True glob Treat pattern as a shell glob pattern. CLI Example: .. code-block:: bash salt '*' pkg.search pattern glob=True regex Treat pattern as a regular expression. CLI Example: .. code-block:: bash salt '*' pkg.search pattern regex=True pcre Treat pattern as an extended regular expression. CLI Example: .. code-block:: bash salt '*' pkg.search pattern pcre=True comment Search for pattern in the package comment one-line description. CLI Example: .. code-block:: bash salt '*' pkg.search pattern comment=True desc Search for pattern in the package description. CLI Example: .. code-block:: bash salt '*' pkg.search pattern desc=True full Displays full information about the matching packages. CLI Example: .. code-block:: bash salt '*' pkg.search pattern full=True depends Displays the dependencies of pattern. CLI Example: .. code-block:: bash salt '*' pkg.search pattern depends=True size Displays the size of the package CLI Example: .. code-block:: bash salt '*' pkg.search pattern size=True quiet Be quiet. Prints only the requested information without displaying many hints. CLI Example: .. code-block:: bash salt '*' pkg.search pattern quiet=True origin Displays pattern origin. CLI Example: .. code-block:: bash salt '*' pkg.search pattern origin=True prefix Displays the installation prefix for each package matching pattern. CLI Example: .. code-block:: bash salt '*' pkg.search pattern prefix=True
13,134
def _cloglog_transform_deriv_v(systematic_utilities,
                               alt_IDs,
                               rows_to_alts,
                               shape_params,
                               output_array=None,
                               *args, **kwargs):
    exp_neg_v = np.exp(-1 * systematic_utilities)
    exp_v = np.exp(systematic_utilities)
    denom_part_1 = 1 - np.exp(-1 * exp_v)

    # guard against overflow / underflow
    exp_neg_v[np.isposinf(exp_neg_v)] = max_comp_value
    exp_neg_v[np.where(exp_neg_v == 0)] = min_comp_value

    derivs = 1.0 / (denom_part_1 * exp_neg_v)
    derivs[np.where(denom_part_1 == 0)] = 1
    derivs[np.isposinf(derivs)] = max_comp_value

    output_array.data = derivs
    return output_array
Parameters ---------- systematic_utilities : 1D ndarray. All elements should be ints, floats, or longs. Should contain the systematic utilities of each observation per available alternative. Note that this vector is formed by the dot product of the design matrix with the vector of utility coefficients. alt_IDs : 1D ndarray. All elements should be ints. There should be one row per obervation per available alternative for the given observation. Elements denote the alternative corresponding to the given row of the design matrix. rows_to_alts : 2D scipy sparse matrix. There should be one row per observation per available alternative and one column per possible alternative. This matrix maps the rows of the design matrix to the possible alternatives for this dataset. All elements should be zeros or ones. shape_params : None or 1D ndarray. If an array, each element should be an int, float, or long. There should be one value per shape parameter of the model being used. output_array : 2D scipy sparse array. The array should be square and it should have `systematic_utilities.shape[0]` rows. It's data is to be replaced with the correct derivatives of the transformation vector with respect to the vector of systematic utilities. This argument is NOT optional. Returns ------- output_array : 2D scipy sparse array. The shape of the returned array is `(systematic_utilities.shape[0], systematic_utilities.shape[0])`. The returned array specifies the derivative of the transformed utilities with respect to the systematic utilities. All elements are ints, floats, or longs.
13,135
def dashboard(request):
    "Counts, aggregations and more!"
    end_time = now()
    start_time = end_time - timedelta(days=7)
    defaults = {'start': start_time, 'end': end_time}  # form field names assumed

    form = DashboardForm(data=request.GET or defaults)
    if form.is_valid():
        start_time = form.cleaned_data['start']
        end_time = form.cleaned_data['end']

    # determine when tracking began; ordering field assumed
    try:
        obj = Visitor.objects.order_by('start_time')[0]
        track_start_time = obj.start_time
    except (IndexError, Visitor.DoesNotExist):
        track_start_time = now()
    warn_incomplete = (start_time < track_start_time)

    user_stats = Visitor.objects.user_stats(start_time, end_time)
    visitor_stats = Visitor.objects.stats(start_time, end_time)
    if TRACK_PAGEVIEWS:
        pageview_stats = Pageview.objects.stats(start_time, end_time)
    else:
        pageview_stats = None

    context = {
        'form': form,
        'track_start_time': track_start_time,
        'warn_incomplete': warn_incomplete,
        'user_stats': user_stats,
        'visitor_stats': visitor_stats,
        'pageview_stats': pageview_stats,
    }
    return render(request, 'tracking/dashboard.html', context)  # template path assumed
Counts, aggregations and more!
13,136
def get(cls, resource_type):
    if isinstance(resource_type, str):
        obj = getattr(db, cls.__name__).find_one(cls.resource_type == resource_type)
    elif isinstance(resource_type, int):
        obj = getattr(db, cls.__name__).find_one(cls.resource_type_id == resource_type)
    elif isinstance(resource_type, cls):
        return resource_type
    else:
        obj = None

    if not obj:
        obj = cls()
        obj.resource_type = resource_type

        db.session.add(obj)
        db.session.commit()
        db.session.refresh(obj)

    return obj
Returns the ResourceType object for `resource_type`. If no existing object was found, a new type will be created in the database and returned Args: resource_type (str): Resource type name Returns: :obj:`ResourceType`
13,137
def CreatePattern(patternId: int, pattern: ctypes.POINTER(comtypes.IUnknown)):
    subPattern = pattern.QueryInterface(GetPatternIdInterface(patternId))
    if subPattern:
        return PatternConstructors[patternId](pattern=subPattern)
Create a concrete pattern by pattern id and pattern(POINTER(IUnknown)).
13,138
def transform_folder(args):
    command, (transform, src, dest) = args
    try:
        print(progress.value, "remaining")
        data = []
        data_dir = os.path.join(src, command)
        for filename in os.listdir(data_dir):
            path = os.path.join(data_dir, filename)
            data.append(transform({'path': path}))  # dict key assumed; original lost
        pickleFile = os.path.join(dest, "{}.pkl".format(command))
        # disable the garbage collector while pickling for speed
        gc.disable()
        with open(pickleFile, "wb") as f:
            pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
        gc.enable()
        with progress.get_lock():
            progress.value -= 1
    except Exception as e:
        print(command, e, file=sys.stderr)
        traceback.print_exc()
Transform all the files in the source dataset for the given command and save the results as a single pickle file in the destination dataset :param args: tuple with the following arguments: - the command name: 'zero', 'one', 'two', ... - transforms to apply to wav file - full path of the source dataset - full path of the destination dataset
13,139
def zadd(self, key, score, member, *pairs, exist=None):
    if not isinstance(score, (int, float)):
        raise TypeError("score argument must be int or float")
    if len(pairs) % 2 != 0:
        raise TypeError("length of pairs must be even number")
    scores = (item for i, item in enumerate(pairs) if i % 2 == 0)
    if any(not isinstance(s, (int, float)) for s in scores):
        raise TypeError("all scores must be int or float")

    args = []
    # Redis ZADD flags: XX = only update existing, NX = only add new
    if exist is self.ZSET_IF_EXIST:
        args.append(b'XX')
    elif exist is self.ZSET_IF_NOT_EXIST:
        args.append(b'NX')

    args.extend([score, member])
    if pairs:
        args.extend(pairs)
    return self.execute(b'ZADD', key, *args)
Add one or more members to a sorted set or update its score. :raises TypeError: score not int or float :raises TypeError: length of pairs is not even number
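A minimal usage sketch, assuming an aioredis-style client named redis that exposes the method above (called from inside a coroutine):
await redis.zadd('scores', 10.0, 'alice', 5.0, 'bob')                     # add or update two members
await redis.zadd('scores', 11.0, 'alice', exist=redis.ZSET_IF_EXIST)     # update only if the member already exists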
13,140
def _set_channels(self):
    logger.debug("=====================")
    logger.debug("Setting main channels")
    logger.debug("=====================")

    for i, p in enumerate(self.processes):
        logger.debug("[{}] Setting main channels with pid: {}".format(
            p.template, i))
        p.set_channels(pid=i)
        logger.debug("{} {} {}".format(p.parent_lane, p.input_type, p.template))

        if not p.parent_lane and p.input_type:
            self._update_raw_input(p)

        self._update_extra_inputs(p)
        self._update_secondary_channels(p)

        logger.info(colored_print(
            "\tChannels set for {} \u2713".format(p.template)))
Sets the main channels for the pipeline. This method will parse the :attr:`~Process.processes` attribute and perform the following tasks for each process: - Sets the input/output channels and main input forks and adds them to the process's :attr:`flowcraft.process.Process._context` attribute (See :func:`~NextflowGenerator.set_channels`). - Automatically updates the main input channel of the first process of each lane so that they fork from the user-provided parameters (See :func:`~NextflowGenerator._update_raw_input`). - Check for the presence of secondary channels and adds them to the :attr:`~NextflowGenerator.secondary_channels` attribute. Notes ----- **On the secondary channel setup**: With this approach, there can only be one secondary link start for each type of secondary link. For instance, if there are two processes that start a secondary channel for the ``SIDE_max_len`` channel, only the last one will be recorded, and all receiving processes will get the channel from the latest process. Secondary channels can only link if the source process is downstream of the sink process in its "forking" path.
13,141
def last_first_initial(self):
    return ("{}{} ".format(self.last_name,
                           ", " + self.first_name[:1] + "." if self.first_name else "") +
            ("({}) ".format(self.nickname) if self.nickname else ""))
Return a name in the format of: Lastname, F [(Nickname)]
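For illustration, with last_name='Smith', first_name='John' and nickname='Johnny', the method above returns 'Smith, J. (Johnny) ' (the format strings leave trailing spaces); without a nickname it returns 'Smith, J. '.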
13,142
def get_state_actions(self, state, **kwargs):
    if state.base_state == State.ABSENT:
        if state.config_id.config_type == ItemType.IMAGE:
            return [ItemAction(state, ImageAction.PULL)]
        actions = [ItemAction(state, Action.CREATE, extra_data=kwargs)]
        if state.config_id.config_type == ItemType.CONTAINER:
            actions.append(ItemAction(state, ContainerUtilAction.CONNECT_ALL))
        return actions
Creates all missing containers, networks, and volumes. :param state: Configuration state. :type state: dockermap.map.state.ConfigState :param kwargs: Additional keyword arguments. :return: Actions on the client, map, and configurations. :rtype: list[dockermap.map.action.ItemAction]
13,143
def getfield(self, pkt, s):
    ext = pkt.get_field(self.length_of)
    tmp_len = ext.length_from(pkt)
    if tmp_len is None or tmp_len <= 0:
        v = pkt.tls_session.tls_version
        if v is None or v < 0x0304:
            return s, None
    return super(_ExtensionsLenField, self).getfield(pkt, s)
We try to compute a length, usually from a msglen parsed earlier. If this length is 0, we consider 'selection_present' (from RFC 5246) to be False. This means that there should not be any length field. However, with TLS 1.3, zero lengths are always explicit.
13,144
def edit(i): o=i.get(,) ruoa=i.get(,) muoa=i.get(,) duoa=i.get(,) iu=i.get(,) if iu==: iu= ed=i.get(,) sk=i.get(,) if sk==: sk= ii={:, :ruoa, :muoa, :duoa, :} r=access(ii) if r[]>0: return r desc=r.get(,{}) meta=r[] import tempfile fd, fn=tempfile.mkstemp(suffix=, prefix=) os.close(fd) os.remove(fn) if ed==: dd=desc else: dd=meta r=save_json_to_file({:fn, :dd, :sk}) if r[]>0: return r r=get_os_ck({}) if r[]>0: return r plat=r[] x=cfg[][plat].replace(, fn) os.system(x) r=load_json_file({:fn}) if r[]>0: return r if ed==: desc=r[] else: meta=r[] ii={:, :ruoa, :muoa, :duoa, :, :iu, :meta, :desc, :, :sk, :o} r=access(ii) if os.path.isfile(fn): os.remove(fn) return r
Input: { (repo_uoa) - repo UOA module_uoa - module UOA data_uoa - data UOA (ignore_update) - (default==yes) if 'yes', do not add info about update (sort_keys) - (default==yes) if 'yes', sort keys (edit_desc) - if 'yes', edit description rather than meta (useful for compiler descriptions) } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 }
13,145
def get_subscription_by_channel_id_and_endpoint_id(self, channel_id, endpoint_id):
    subscriptions = self.search_subscriptions(
        channel_id=channel_id, endpoint_id=endpoint_id)
    try:
        return subscriptions[0]
    except IndexError:
        raise DataFailureException(url, 404, "No subscription found")
Search for subscription by a given channel and endpoint
13,146
def search_reports(self, search_term=None, enclave_ids=None, from_time=None,
                   to_time=None, tags=None, excluded_tags=None):
    return Page.get_generator(page_generator=self._search_reports_page_generator(
        search_term, enclave_ids, from_time, to_time, tags, excluded_tags))
Uses the |search_reports_page| method to create a generator that returns each successive report. :param str search_term: The term to search for. If empty, no search term will be applied. Otherwise, must be at least 3 characters. :param list(str) enclave_ids: list of enclave ids used to restrict reports to specific enclaves (optional - by default reports from all of user's enclaves are returned) :param int from_time: start of time window in milliseconds since epoch (optional) :param int to_time: end of time window in milliseconds since epoch (optional) :param list(str) tags: Name (or list of names) of tag(s) to filter reports by. Only reports containing ALL of these tags will be returned. (optional) :param list(str) excluded_tags: Reports containing ANY of these tags will be excluded from the results. :return: The generator of Report objects. Note that the body attributes of these reports will be ``None``.
13,147
def main(device_type): args = create_agent_parser(device_type=device_type).parse_args() util.setup_logging(verbosity=args.verbose, filename=args.log_file) public_keys = None filename = None if args.identity.startswith(): filename = args.identity contents = open(filename, ).read().decode() if filename.endswith(): public_keys = list(import_public_keys(contents)) identities = list(parse_config(contents)) else: identities = [device.interface.Identity( identity_str=args.identity, curve_name=args.ecdsa_curve_name)] for index, identity in enumerate(identities): identity.identity_dict[] = u log.info(, index, identity.to_string()) device_type.ui = device.ui.UI(device_type=device_type, config=vars(args)) device_type.ui.cached_passphrase_ack = util.ExpiringCache( args.cache_expiry_seconds) conn = JustInTimeConnection( conn_factory=lambda: client.Client(device_type()), identities=identities, public_keys=public_keys) sock_path = _get_sock_path(args) command = args.command context = _dummy_context() if args.connect: command = [] + ssh_args(conn) + args.command elif args.mosh: command = [] + mosh_args(conn) + args.command elif args.daemonize: out = .format(sock_path) sys.stdout.write(out) sys.stdout.flush() context = daemon.DaemonContext() log.info(, sock_path) elif args.foreground: log.info(, sock_path) use_shell = bool(args.shell) if use_shell: command = os.environ[] sys.stdin.close() if command or args.daemonize or args.foreground: with context: return run_server(conn=conn, command=command, sock_path=sock_path, debug=args.debug, timeout=args.timeout) else: for pk in conn.public_keys(): sys.stdout.write(pk) return 0
Run ssh-agent using given hardware client factory.
13,148
def send(self, stream, retry=16, timeout=60, quiet=0, callback=None): /etc/issuerb try: packet_size = dict( xmodem = 128, xmodem1k = 1024, )[self.mode] except AttributeError: raise ValueError("An invalid mode was supplied") error_count = 0 crc_mode = 0 cancel = 0 while True: char = self.getc(1) if char: if char == NAK: crc_mode = 0 break elif char == CRC: crc_mode = 1 break elif char == CAN: if not quiet: print >> sys.stderr, if cancel: return False else: cancel = 1 else: log.error( % \ (ord(char),)) error_count += 1 if error_count >= retry: self.abort(timeout=timeout) return False error_count = 0 success_count = 0 total_packets = 0 sequence = 1 while True: data = stream.read(packet_size) if not data: log.info() break total_packets += 1 data = data.ljust(packet_size, self.pad) if crc_mode: crc = self.calc_crc(data) else: crc = self.calc_checksum(data) while True: if packet_size == 128: self.putc(SOH) else: self.putc(STX) self.putc(chr(sequence)) self.putc(chr(0xff - sequence)) self.putc(data) if crc_mode: self.putc(chr(crc >> 8)) self.putc(chr(crc & 0xff)) else: self.putc(chr(crc)) char = self.getc(1, timeout) if char == ACK: success_count += 1 if callable(callback): callback(total_packets, success_count, error_count) break if char == NAK: error_count += 1 if callable(callback): callback(total_packets, success_count, error_count) if error_count >= retry: self.abort(timeout=timeout) log.warning() return False continue self.abort(timeout=timeout) log.error() return False sequence = (sequence + 1) % 0x100 while True: self.putc(EOT) char = self.getc(1, timeout) if char == ACK: break else: error_count += 1 if error_count >= retry: self.abort(timeout=timeout) log.warning() return False return True
Send a stream via the XMODEM protocol. >>> stream = file('/etc/issue', 'rb') >>> print modem.send(stream) True Returns ``True`` upon succesful transmission or ``False`` in case of failure. :param stream: The stream object to send data from. :type stream: stream (file, etc.) :param retry: The maximum number of times to try to resend a failed packet before failing. :type retry: int :param timeout: The number of seconds to wait for a response before timing out. :type timeout: int :param quiet: If 0, it prints info to stderr. If 1, it does not print any info. :type quiet: int :param callback: Reference to a callback function that has the following signature. This is useful for getting status updates while a xmodem transfer is underway. Expected callback signature: def callback(total_packets, success_count, error_count) :type callback: callable
13,149
def update_widget(self, idx=None):
    if idx is None:
        for w in self._widgets:
            idx = self._get_idx_from_widget(w)
            self._write_widget(self._read_property(idx), idx)
    else:
        self._write_widget(self._read_property(idx), idx)
    return
Forces the widget at given index to be updated from the property value. If index is not given, all controlled widgets will be updated. This method should be called directly by the user when the property is not observable, or in very unusual conditions.
13,150
def delete(config, username, type):
    client = Client()
    client.prepare_connection()
    user_api = API(client)
    user_api.delete(username, type)
Delete an LDAP user.
13,151
def parse_user_params(user_params):
    if user_params:
        params = {}
        try:
            # separators inferred from the expected "<key=value>[,<key=value>..]" format
            for param in user_params.split(','):
                param_key, param_value = param.split('=', 1)
                params[param_key] = param_value
        except ValueError:
            sys.stdout.write("Invalid params specified. Should be in format: <key=value>[,<key=value>..]\n")
            sys.exit(1)
        return params
    else:
        return {}
Parse the user params (-p/--params) and return them as a dict.
13,152
def detach(self):
    from . import _ndarray_cls
    hdl = NDArrayHandle()
    check_call(_LIB.MXNDArrayDetach(self.handle, ctypes.byref(hdl)))
    return _ndarray_cls(hdl)
Returns a new NDArray, detached from the current graph.
13,153
def add_relation(app_f, app_t, weight=1):
    recs = TabRel.select().where(
        (TabRel.post_f_id == app_f) &
        (TabRel.post_t_id == app_t)
    )
    if recs.count() > 1:
        for record in recs:
            MRelation.delete(record.uid)

    if recs.count() == 0:
        uid = tools.get_uuid()
        entry = TabRel.create(
            uid=uid,
            post_f_id=app_f,
            post_t_id=app_t,
            count=1,
        )
        return entry.uid
    elif recs.count() == 1:
        MRelation.update_relation(app_f, app_t, weight)
    else:
        return False
Adding relation between two posts.
13,154
def add_sources_from_roi(self, names, roi, free=False, **kwargs):
    for name in names:
        self.add_source(name, roi[name].data, free=free, **kwargs)
Add multiple sources to the current ROI model copied from another ROI model. Parameters ---------- names : list List of str source names to add. roi : `~fermipy.roi_model.ROIModel` object The roi model from which to add sources. free : bool Initialize the source with a free normalization parameter.
13,155
def yaml2tree(cls, yamltree):
    if not cls.YAML_setup:
        cls.setup_yaml()
        cls.YAML_setup = True

    if os.path.isfile(yamltree):
        with open(yamltree) as fh:
            yaml_data = fh.read()
    else:
        yaml_data = yamltree

    list_of_nodes = yaml.safe_load(yaml_data)
    yamltree_root = list_of_nodes[0]
    return yamltree_root
Class method that creates a tree from YAML. | # Example yamltree data: | - !Node &root | name: "root node" | parent: null | data: | testpara: 111 | - !Node &child1 | name: "child node" | parent: *root | - !Node &gc1 | name: "grand-child node" | parent: *child1 :param yamltree: a string of YAML describing the nodes in the tree, or the path to a file containing the data. :type yamltree: str :returns: the root node of the tree. :rtype: Node
13,156
def get_ser_val_alt(lat: float, lon: float,
                    da_alt_x: xr.DataArray,
                    da_alt: xr.DataArray,
                    da_val: xr.DataArray) -> pd.Series:
    # selection method assumed to be 'nearest' (pick the grid point closest to the site)
    alt_t_1d = da_alt.sel(
        latitude=lat, longitude=lon, method='nearest')
    val_t_1d = da_val.sel(
        latitude=lat, longitude=lon, method='nearest')
    alt_x = da_alt_x.sel(
        latitude=lat, longitude=lon, method='nearest')[0]
    val_alt = np.array(
        [interp1d(alt_1d, val_1d)(alt_x)
         for alt_1d, val_1d in zip(alt_t_1d, val_t_1d)])
    ser_alt = pd.Series(
        val_alt,
        index=da_val.time.values,
        name=da_val.name,
    )
    return ser_alt
interpolate atmospheric variable to a specified altitude Parameters ---------- lat : float latitude of specified site lon : float longitude of specified site da_alt_x : xr.DataArray desired altitude to interpolate variable at da_alt : xr.DataArray altitude associated with `da_val`: variable array to interpolate da_val : xr.DataArray atmospheric variable to interpolate Returns ------- pd.Series interpolated values at the specified altitude of site positioned by [`lat`, `lon`]
13,157
def transform(self, X):
    self._check_fitted()
    M = self.smoothness
    dim = self.dim_
    inds = self.inds_
    do_check = self.do_bounds_check

    X = as_features(X)
    if X.dim != dim:
        msg = "model fit for dimension {} but got dim {}"
        raise ValueError(msg.format(dim, X.dim))

    Xt = np.empty((len(X), self.inds_.shape[0]))
    Xt.fill(np.nan)

    if self.basis == 'cosine':  # basis name assumed from the cosine features computed below
        coefs = (np.pi * np.arange(M + 1))[..., :]
        for i, bag in enumerate(X):
            if do_check:
                if np.min(bag) < 0 or np.max(bag) > 1:
                    raise ValueError("Bag {} not in [0, 1]".format(i))

            # evaluate each cosine basis function on each dimension of each point
            phi = coefs * bag[..., np.newaxis]
            np.cos(phi, out=phi)
            phi[:, :, 1:] *= np.sqrt(2)

            # tensor-product basis evaluations, averaged over the bag
            B = reduce(op.mul, (phi[:, i, inds[:, i]] for i in xrange(dim)))
            Xt[i, :] = np.mean(B, axis=0)
    else:
        raise ValueError("unknown basis '{}'".format(self.basis))

    return Xt
Transform a list of bag features into its projection series representation. Parameters ---------- X : :class:`skl_groups.features.Features` or list of bag feature arrays New data to transform. The data should all lie in [0, 1]; use :class:`skl_groups.preprocessing.BagMinMaxScaler` if not. Returns ------- X_new : integer array, shape ``[len(X), dim_]`` X transformed into the new space.
13,158
def open(self):
    self.device = serial.Serial(port=self.devfile, baudrate=self.baudrate,
                                bytesize=self.bytesize, parity=serial.PARITY_NONE,
                                stopbits=serial.STOPBITS_ONE, timeout=self.timeout,
                                dsrdtr=True)

    if self.device is not None:
        print "Serial printer enabled"
    else:
        print "Unable to open serial printer on: %s" % self.devfile
Set up the serial port and set it as the escpos device.
13,159
def get_free_sphere_params(structure, rad_dict=None, probe_rad=0.1):
    with ScratchDir('.'):
        name = "temp_zeo1"
        zeo_inp_filename = name + ".cssr"
        ZeoCssr(structure).write_file(zeo_inp_filename)
        rad_file = None
        rad_flag = False

        if rad_dict:
            rad_file = name + ".rad"
            rad_flag = True
            with open(rad_file, 'w+') as fp:
                for el in rad_dict.keys():
                    fp.write("{} {}\n".format(el, rad_dict[el].real))

        atmnet = AtomNetwork.read_from_CSSR(
            zeo_inp_filename, rad_flag=rad_flag, rad_file=rad_file)
        out_file = "temp.res"
        atmnet.calculate_free_sphere_parameters(out_file)
        if os.path.isfile(out_file) and os.path.getsize(out_file) > 0:
            with open(out_file, "rt") as fp:
                output = fp.readline()
        else:
            output = ""
    fields = [val.strip() for val in output.split()][1:4]
    if len(fields) == 3:
        fields = [float(field) for field in fields]
        # key names assumed; the three fields are the Zeo++ free-sphere diameters
        free_sphere_params = {'inc_sph_max_dia': fields[0],
                              'free_sph_max_dia': fields[1],
                              'inc_sph_along_free_sph_path_max_dia': fields[2]}
        return free_sphere_params
Analyze the void space in the input structure using voronoi decomposition Calls Zeo++ for Voronoi decomposition. Args: structure: pymatgen.core.structure.Structure rad_dict (optional): Dictionary of radii of elements in structure. If not given, Zeo++ default values are used. Note: Zeo++ uses atomic radii of elements. For ionic structures, pass rad_dict with ionic radii probe_rad (optional): Sampling probe radius in Angstroms. Default is 0.1 A Returns: voronoi nodes as pymatgen.core.structure.Strucutre within the unit cell defined by the lattice of input structure voronoi face centers as pymatgen.core.structure.Strucutre within the unit cell defined by the lattice of input structure
13,160
def generate_files(engine, crypto_factory, min_dt=None, max_dt=None, logger=None):
    return _generate_notebooks(files, files.c.created_at, engine,
                               crypto_factory, min_dt, max_dt, logger)
Create a generator of decrypted files. Files are yielded in ascending order of their timestamp. This function selects all current notebooks (optionally, falling within a datetime range), decrypts them, and returns a generator yielding dicts, each containing a decoded notebook and metadata including the user, filepath, and timestamp. Parameters ---------- engine : SQLAlchemy.engine Engine encapsulating database connections. crypto_factory : function[str -> Any] A function from user_id to an object providing the interface required by PostgresContentsManager.crypto. Results of this will be used for decryption of the selected notebooks. min_dt : datetime.datetime, optional Minimum last modified datetime at which a file will be included. max_dt : datetime.datetime, optional Last modified datetime at and after which a file will be excluded. logger : Logger, optional
13,161
def nv_tuple_list_replace(l, v):
    _found = False
    for i, x in enumerate(l):
        if x[0] == v[0]:
            l[i] = v
            _found = True

    if not _found:
        l.append(v)
replace a tuple in a tuple list
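A minimal usage sketch of the function above:
pairs = [('a', 1), ('b', 2)]
nv_tuple_list_replace(pairs, ('b', 3))   # first element matches: list becomes [('a', 1), ('b', 3)]
nv_tuple_list_replace(pairs, ('c', 4))   # no match: tuple is appended -> [('a', 1), ('b', 3), ('c', 4)]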
13,162
def _hybrid_select_metrics(self, dup_bam, bait_file, target_file):
    metrics = self._check_metrics_file(dup_bam, "hs_metrics")
    if not file_exists(metrics):
        with bed_to_interval(bait_file, dup_bam) as ready_bait:
            with bed_to_interval(target_file, dup_bam) as ready_target:
                with file_transaction(metrics) as tx_metrics:
                    opts = [("BAIT_INTERVALS", ready_bait),
                            ("TARGET_INTERVALS", ready_target),
                            ("INPUT", dup_bam),
                            ("OUTPUT", tx_metrics)]
                    try:
                        self._picard.run("CollectHsMetrics", opts)
                    except subprocess.CalledProcessError:
                        return None
    return metrics
Generate metrics for hybrid selection efficiency.
13,163
def deleted(message):
    def deleted(value, _context, **_params):
        return Deleted(value, message)
    return deleted
Create a Deleted response builder with specified message.
13,164
def get_software_package_compilation_timestamp(cls,calc,**kwargs): from dateutil.parser import parse try: date = calc.out.job_info.get_dict()[] return parse(date.replace(, )).isoformat() except Exception: return None
Returns the timestamp of package/program compilation in ISO 8601 format.
13,165
def update_features(self, poly):
    for feature in self.features:
        feature.wavelength = poly(feature.xpos)
Evaluate wavelength at xpos using the provided polynomial.
13,166
def error_and_result(f):
    @wraps(f)
    def error_and_result_decorator(*args, **kwargs):
        return error_and_result_decorator_inner_fn(f, False, *args, **kwargs)
    return error_and_result_decorator
Format task result into json dictionary `{'data': task return value}` if no exception was raised during the task execution. If there was raised an exception during task execution, formats task result into dictionary `{'error': exception message with traceback}`.
13,167
def dict_from_prefix(cls, prefix, dictionary):
    o_dictionary = OrderedDict()
    for key, val in dictionary.items():
        if key.startswith(prefix):
            o_dictionary[key[len(prefix):].strip()] = val
    dictionary = o_dictionary

    if len(dictionary) == 0:
        return None
    elif len(dictionary) == 1 and "" in dictionary:
        return dictionary[""]
    else:
        return_dict = OrderedDict()
        for key, val in dictionary.items():
            ret = re.search(r"^\[([^\]]+)\](.*)$", key)
            if ret is None:
                continue
            return_dict[ret.group(1)] = cls.dict_from_prefix("[{}]".format(ret.group(1)), dictionary)
        return return_dict
>>> from collections import OrderedDict >>> od = OrderedDict() >>> od["problem[q0][a]"]=1 >>> od["problem[q0][b][c]"]=2 >>> od["problem[q1][first]"]=1 >>> od["problem[q1][second]"]=2 >>> AdminCourseEditTask.dict_from_prefix("problem",od) OrderedDict([('q0', OrderedDict([('a', 1), ('b', OrderedDict([('c', 2)]))])), ('q1', OrderedDict([('first', 1), ('second', 2)]))])
13,168
def OnPasteFormat(self, event):
    with undo.group(_("Paste format")):
        self.grid.actions.paste_format()
    self.grid.ForceRefresh()
    self.grid.update_attribute_toolbar()
    self.grid.actions.zoom()
Paste format event handler
13,169
def iteritems(self, indices=None):
    if indices is None:
        indices = force_list(self.indices.keys())
    for x in self.itervalues(indices):
        yield x
Iterate through items in the ``indices`` (defaults to all indices)
13,170
def gen_cartesian_product(*args):
    if not args:
        return []
    elif len(args) == 1:
        return args[0]

    product_list = []
    for product_item_tuple in itertools.product(*args):
        product_item_dict = {}
        for item in product_item_tuple:
            product_item_dict.update(item)
        product_list.append(product_item_dict)

    return product_list
generate cartesian product for lists Args: args (list of list): lists to be generated with cartesian product Returns: list: cartesian product in list Examples: >>> arg1 = [{"a": 1}, {"a": 2}] >>> arg2 = [{"x": 111, "y": 112}, {"x": 121, "y": 122}] >>> args = [arg1, arg2] >>> gen_cartesian_product(*args) >>> # same as below >>> gen_cartesian_product(arg1, arg2) [ {'a': 1, 'x': 111, 'y': 112}, {'a': 1, 'x': 121, 'y': 122}, {'a': 2, 'x': 111, 'y': 112}, {'a': 2, 'x': 121, 'y': 122} ]
13,171
def AddContract(self, contract):
    super(UserWallet, self).AddContract(contract)

    try:
        db_contract = Contract.get(ScriptHash=contract.ScriptHash.ToBytes())
        db_contract.delete_instance()
    except Exception as e:
        logger.debug("contract does not exist yet")

    sh = bytes(contract.ScriptHash.ToArray())
    address, created = Address.get_or_create(ScriptHash=sh)
    address.IsWatchOnly = False
    address.save()

    db_contract = Contract.create(RawData=contract.ToArray(),
                                  ScriptHash=contract.ScriptHash.ToBytes(),
                                  PublicKeyHash=contract.PublicKeyHash.ToBytes(),
                                  Address=address,
                                  Account=self.__dbaccount)

    logger.debug("Creating db contract %s " % db_contract)
    db_contract.save()
Add a contract to the database. Args: contract(neo.SmartContract.Contract): a Contract instance.
13,172
def load_conf(cfg_path):
    global config

    try:
        cfg = open(cfg_path, 'r')
    except Exception as ex:
        if verbose:
            print("Unable to open {0}".format(cfg_path))
            print(str(ex))
        return False

    cfg_json = cfg.read()
    cfg.close()

    try:
        config = json.loads(cfg_json)
    except Exception as ex:
        print("Unable to parse configuration file as JSON")
        print(str(ex))
        return False

    return True
Try to load the given conf file.
13,173
def add_keywords_from_list(self, keyword_list):
    if not isinstance(keyword_list, list):
        raise AttributeError("keyword_list should be a list")

    for keyword in keyword_list:
        self.add_keyword(keyword)
To add keywords from a list Args: keyword_list (list(str)): List of keywords to add Examples: >>> keyword_processor.add_keywords_from_list(["java", "python"]) Raises: AttributeError: If `keyword_list` is not a list.
13,174
def start_adc_comparator(self, channel, high_threshold, low_threshold,
                         gain=1, data_rate=None, active_low=True,
                         traditional=True, latching=False, num_readings=1):
    assert 0 <= channel <= 3, 'Channel must be a value within 0-3!'  # message assumed
    # Start continuous reads with the comparator enabled; the mux value is the
    # channel number plus 0x04 to select single-ended input for that channel.
    return self._read_comparator(channel + 0x04, gain, data_rate,
                                 ADS1x15_CONFIG_MODE_CONTINUOUS,
                                 high_threshold, low_threshold,
                                 active_low, traditional, latching,
                                 num_readings)
Start continuous ADC conversions on the specified channel (0-3) with the comparator enabled. When enabled the comparator to will check if the ADC value is within the high_threshold & low_threshold value (both should be signed 16-bit integers) and trigger the ALERT pin. The behavior can be controlled by the following parameters: - active_low: Boolean that indicates if ALERT is pulled low or high when active/triggered. Default is true, active low. - traditional: Boolean that indicates if the comparator is in traditional mode where it fires when the value is within the threshold, or in window mode where it fires when the value is _outside_ the threshold range. Default is true, traditional mode. - latching: Boolean that indicates if the alert should be held until get_last_result() is called to read the value and clear the alert. Default is false, non-latching. - num_readings: The number of readings that match the comparator before triggering the alert. Can be 1, 2, or 4. Default is 1. Will return an initial conversion result, then call the get_last_result() function continuously to read the most recent conversion result. Call stop_adc() to stop conversions.
13,175
def po_to_ods(languages, locale_root, po_files_path, temp_file_path): title_row = [, , ] title_row += map(lambda s: s + , languages) ods = ODS() _prepare_ods_columns(ods, title_row) po_files = _get_all_po_filenames(locale_root, languages[0], po_files_path) i = 1 for po_filename in po_files: po_file_path = os.path.join(locale_root, languages[0], po_files_path, po_filename) start_row = i po = polib.pofile(po_file_path) for entry in po: meta = dict(entry.__dict__) meta.pop(, None) meta.pop(, None) meta.pop(, None) ods.content.getSheet(1) ods.content.getCell(0, i).stringValue( str(meta)).setCellColor(settings.EVEN_COLUMN_BG_COLOR) ods.content.getSheet(0) ods.content.getCell(0, i) \ .stringValue(po_filename) \ .setCellColor(settings.ODD_COLUMN_BG_COLOR) ods.content.getCell(1, i) \ .stringValue(_escape_apostrophe(entry.tcomment)) \ .setCellColor(settings.ODD_COLUMN_BG_COLOR) ods.content.getCell(2, i) \ .stringValue(_escape_apostrophe(entry.msgid)) \ .setCellColor(settings.EVEN_COLUMN_BG_COLOR) ods.content.getCell(3, i) \ .stringValue(_escape_apostrophe(entry.msgstr))\ .setCellColor(settings.ODD_COLUMN_BG_COLOR) i += 1 _write_trans_into_ods(ods, languages, locale_root, po_files_path, po_filename, start_row) ods.save(temp_file_path)
Converts po file to csv GDocs spreadsheet readable format. :param languages: list of language codes :param locale_root: path to locale root folder containing directories with languages :param po_files_path: path from lang directory to po file :param temp_file_path: path where temporary files will be saved
13,176
def ParseOptions(cls, options, analysis_plugin):
    # option names and error messages assumed; original strings were lost
    if not isinstance(analysis_plugin, viper.ViperAnalysisPlugin):
        raise errors.BadConfigObject(
            'Analysis plugin is not an instance of ViperAnalysisPlugin')

    lookup_hash = cls._ParseStringOption(
        options, 'viper_hash', default_value=cls._DEFAULT_HASH)
    analysis_plugin.SetLookupHash(lookup_hash)

    host = cls._ParseStringOption(
        options, 'viper_host', default_value=cls._DEFAULT_HOST)
    analysis_plugin.SetHost(host)

    port = cls._ParseNumericOption(
        options, 'viper_port', default_value=cls._DEFAULT_PORT)
    analysis_plugin.SetPort(port)

    protocol = cls._ParseStringOption(
        options, 'viper_protocol', default_value=cls._DEFAULT_PROTOCOL)
    protocol = protocol.lower().strip()
    analysis_plugin.SetProtocol(protocol)

    if not analysis_plugin.TestConnection():
        raise errors.BadConfigOption(
            'Unable to connect to Viper {0:s}:{1:d}'.format(host, port))
Parses and validates options. Args: options (argparse.Namespace): parser options. analysis_plugin (ViperAnalysisPlugin): analysis plugin to configure. Raises: BadConfigObject: when the output module object is of the wrong type. BadConfigOption: when unable to connect to Viper instance.
13,177
def to_string(value, ctx):
    if isinstance(value, bool):
        return "TRUE" if value else "FALSE"
    elif isinstance(value, int):
        return str(value)
    elif isinstance(value, Decimal):
        return format_decimal(value)
    elif isinstance(value, str):
        return value
    elif type(value) == datetime.date:
        return value.strftime(ctx.get_date_format(False))
    elif isinstance(value, datetime.time):
        return value.strftime('%H:%M')  # time format assumed; original format string lost
    elif isinstance(value, datetime.datetime):
        return value.astimezone(ctx.timezone).isoformat()

    raise EvaluationError("Can't convert '%s' to a string" % str(value))
Tries to convert any value to a string.
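A quick illustration of the simple branches; for bool/int/Decimal/str the context argument is not consulted, so None is passed purely for the example (date and datetime values need a real evaluation context):

from decimal import Decimal

assert to_string(True, None) == "TRUE"          # booleans map to TRUE/FALSE
assert to_string(42, None) == "42"
assert to_string("hello", None) == "hello"
print(to_string(Decimal("3.5"), None))          # delegated to format_decimal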
13,178
def start(self, historics_id): return self.request.post(, data=dict(id=historics_id))
Start the historics job with the given ID. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/historicsstart :param historics_id: hash of the job to start :type historics_id: str :return: dict of REST API output with headers attached :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
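A usage sketch assuming the standard datasift client wiring (credentials and the job hash are placeholders):

from datasift import Client

client = Client("your_username", "your_api_key")
response = client.historics.start("2490313575797f3d2dc9b0afc1b0c24f")  # illustrative job hash
print(response)  # dict of the REST API output with headers attached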
13,179
def authorize(): auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET) url = auth.get_authorization_url() print(.format(url)) webbrowser.open(url) pin = input().strip() token_key, token_secret = auth.get_access_token(verifier=pin) return OAuthToken(token_key, token_secret)
Authorize with Twitter using PIN authentication. :returns: Token to authenticate with Twitter. :rtype: :class:`autotweet.twitter.OAuthToken`
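Usage is a single call; the fields of the returned OAuthToken are not shown above, so they are left uninspected here:

token = authorize()   # opens a browser and prompts for the Twitter PIN on stdin
print(token)          # autotweet.twitter.OAuthToken holding the key/secret pair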
13,180
def install_extensions(extensions, **connection_parameters): from postpy.connections import connect conn = connect(**connection_parameters) conn.autocommit = True for extension in extensions: install_extension(conn, extension)
Install Postgres extensions if available. Notes ----- - A superuser is generally required for installing extensions. - Currently does not support installing into a specific schema.
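A usage sketch; the connection parameters are illustrative and are simply forwarded to postpy's connect():

# A superuser role is typically required for CREATE EXTENSION.
install_extensions(
    ['hstore', 'pg_trgm'],
    host='localhost', dbname='mydb', user='postgres', password='secret')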
13,181
def _compile(cls, lines): m = cls.RE_FOR.match(lines.current) if m is None: raise DefineBlockError( .format(lines.pos, lines.current)) return m.group(1), m.group(2).replace(, )
Return both variable names used in the #for loop in the current line.
13,182
def get_loader(vm_, **kwargs): * conn = __get_conn(**kwargs) loader = _get_loader(_get_domain(conn, vm_)) conn.close() return loader
Returns the information on the loader for a given vm :param vm_: name of the domain :param connection: libvirt connection URI, overriding defaults :param username: username to connect with, overriding defaults :param password: password to connect with, overriding defaults CLI Example: .. code-block:: bash salt '*' virt.get_loader <domain> .. versionadded:: 2019.2.0
13,183
def get_default_config(self): config = super(MonitCollector, self).get_default_config() config.update({ : , : 2812, : , : , : , : [], : False, }) return config
Returns the default collector settings
13,184
def _epd_residual(coeffs, mags, fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd): f = _epd_function(coeffs, fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd) residual = mags - f return residual
This is the residual function to minimize using scipy.optimize.leastsq.
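A sketch of feeding this residual to scipy.optimize.leastsq; the array contents and the number of coefficients are placeholders (the real length must match what _epd_function expects):

import numpy as np
from scipy.optimize import leastsq

npts = 500
rng = np.random.default_rng(0)
mags = rng.normal(12.0, 0.01, npts)                        # stand-in magnitudes
fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd = rng.random((9, npts))

initial_coeffs = np.zeros(10)                              # length is an assumption
best_coeffs, status = leastsq(
    _epd_residual, initial_coeffs,
    args=(mags, fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd))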
13,185
def add(self, post_id): post_data = self.get_post_data() post_data[] = self.userinfo.user_name post_data[] = self.userinfo.uid post_data[] = post_id replyid = MReply.create_reply(post_data) if replyid: out_dic = {: post_data[], : replyid} logger.info(.format(out_dic)) return json.dump(out_dic, self)
Add a reply to a post.
13,186
def add_bias(self, name, b, input_name, output_name, shape_bias = [1]): spec = self.spec nn_spec = self.nn_spec spec_layer = nn_spec.layers.add() spec_layer.name = name spec_layer.input.append(input_name) spec_layer.output.append(output_name) spec_layer_params = spec_layer.bias bias = spec_layer_params.bias spec_layer_params.shape.extend(shape_bias) if isinstance(b, int): bias.floatValue.append(float(b)) else: bias.floatValue.extend(map(float, b.flatten())) if len(bias.floatValue) != np.prod(shape_bias): raise ValueError("Dimensions of do not match the size of the provided parameter")
Add bias layer to the model. Parameters ---------- name: str The name of this layer. b: int | numpy.array Bias to add to the input. input_name: str The input blob name of this layer. output_name: str The output blob name of this layer. shape_bias: [int] List of ints that specifies the shape of the bias parameter (if present). Can be [1] or [C] or [1,H,W] or [C,H,W]. See Also -------- add_scale
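A usage sketch; the builder construction is an assumption about the surrounding coremltools API, and the feature names and bias values are placeholders:

import numpy as np
from coremltools.models import datatypes
from coremltools.models.neural_network import NeuralNetworkBuilder

builder = NeuralNetworkBuilder(
    input_features=[('features', datatypes.Array(3))],
    output_features=[('features_biased', datatypes.Array(3))])
builder.add_bias(
    name='add_offset',
    b=np.array([1.0, -2.0, 0.5]),      # one bias value per channel
    input_name='features',
    output_name='features_biased',
    shape_bias=[3])                    # must equal b.size, else ValueError is raised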
13,187
def receive_request(self, transaction): if transaction.request.observe == 0: host, port = transaction.request.source key_token = hash(str(host) + str(port) + str(transaction.request.token)) non_counter = 0 if key_token in self._relations: allowed = True else: allowed = False self._relations[key_token] = ObserveItem(time.time(), non_counter, allowed, transaction) elif transaction.request.observe == 1: host, port = transaction.request.source key_token = hash(str(host) + str(port) + str(transaction.request.token)) logger.info("Remove Subscriber") try: del self._relations[key_token] except KeyError: pass return transaction
Manage the observe option in the request and, if necessary, initialize the client by adding it to the list of observers or removing it from the list. :type transaction: Transaction :param transaction: the transaction that owns the request :rtype: Transaction :return: the modified transaction
13,188
def get_route_shape_segments(cur, route_id): cur.execute(, (route_id,)) shape_points = [dict(seq=row[0], lat=row[1], lon=row[2]) for row in cur] return shape_points
Given a route_id, return the points of its shape. Parameters ---------- cur: sqlite3.Cursor cursor to a GTFS database route_id: str id of the route Returns ------- shape_points: list elements are dictionaries containing the 'seq', 'lat', and 'lon' of the shape
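A usage sketch; the database path and route_id are placeholders:

import sqlite3

conn = sqlite3.connect('gtfs.sqlite')                 # a GTFS SQLite database
cur = conn.cursor()
points = get_route_shape_segments(cur, 'route_1')
for p in points[:3]:
    print(p['seq'], p['lat'], p['lon'])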
13,189
def info(path): output, err = cli_syncthing_adapter.info(folder=path) if err: click.echo(output, err=err) else: stat = output[] click.echo("State: %s" % stat[]) click.echo("\nTotal Files: %s" % stat[]) click.echo("Files Needed: %s" % stat[]) click.echo("\nTotal Bytes: %s" % stat[]) click.echo("Bytes Needed: %s" % stat[]) progress = output[][] queued = output[][] rest = output[][] if len(progress) or len(queued) or len(rest): click.echo("\nFiles Needed:") for f in progress: click.echo(" " + f[]) for f in queued: click.echo(" " + f[]) for f in rest: click.echo(" " + f[]) click.echo("\nDevices Authorized:\n%s" % output[])
Display synchronization information.
13,190
def parse_response(self, connection, command_name, **options): response = connection.read_response() if command_name in self.response_callbacks and len(response): status = nativestr(response[0]) if status == RES_STATUS.OK: return self.response_callbacks[command_name](response[1:], **options) elif status == RES_STATUS.NOT_FOUND: return None else: raise DataError(RES_STATUS_MSG[status]+.join(response)) return response
Parses a response from the ssdb server
13,191
def send_media_group( self, media: str, disable_notification: bool = False, reply_to_message_id: int = None, **options ): return self.bot.api_call( "sendMediaGroup", chat_id=str(self.id), media=media, disable_notification=disable_notification, reply_to_message_id=reply_to_message_id, **options )
Send a group of photos or videos as an album :param media: A JSON-serialized array describing photos and videos to be sent, must include 2–10 items :param disable_notification: Sends the messages silently. Users will receive a notification with no sound. :param reply_to_message_id: If the messages are a reply, ID of the original message :param options: Additional sendMediaGroup options (see https://core.telegram.org/bots/api#sendmediagroup) :Example: >>> from json import dumps >>> photos_urls = [ >>> "https://telegram.org/img/t_logo.png", >>> "https://telegram.org/img/SiteAndroid.jpg?1", >>> "https://telegram.org/img/SiteiOs.jpg?1", >>> "https://telegram.org/img/SiteWP.jpg?2" >>> ] >>> tg_album = [] >>> count = len(photos_urls) >>> for i, p in enumerate(photos_urls): >>> tg_album.append({ >>> 'type': 'photo', >>> 'media': p, >>> 'caption': f'{i} of {count}' >>> }) >>> await chat.send_media_group(dumps(tg_album))
13,192
def displayEmptyInputWarningBox(display=True, parent=None): if sys.version_info[0] >= 3: from tkinter.messagebox import showwarning else: from tkMessageBox import showwarning if display: msg = +\ showwarning(parent=parent,message=msg, title="No valid inputs!") return "yes"
Displays a warning box for the 'input' parameter.
13,193
def _main(args): if not args.apikey: print("\nPlease provide TinyPNG API key") print("To obtain key visit https://api.tinypng.com/developers\n") sys.exit(1) input_dir = realpath(args.input) if not args.output: output_dir = input_dir + "-output" else: output_dir = realpath(args.output) if input_dir == output_dir: print("\nPlease specify different output directory\n") sys.exit(1) handler = ScreenHandler() try: process_directory(input_dir, output_dir, args.apikey, handler) except KeyboardInterrupt: handler.on_finish(output_dir=output_dir)
Batch compression. args contains: * input - path to input directory * output - path to output directory or None * apikey - TinyPNG API key * overwrite - boolean flag
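A sketch of building the expected args object with argparse; the flag spellings are assumptions based on the attributes read in the body:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('input')
parser.add_argument('-o', '--output', default=None)
parser.add_argument('--apikey')
parser.add_argument('--overwrite', action='store_true')
args = parser.parse_args(['photos', '--apikey', 'YOUR_TINYPNG_KEY'])
_main(args)   # compresses ./photos into ./photos-output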
13,194
def setup_ui(self, ): grid = QtGui.QGridLayout(self) grid.setContentsMargins(0, 0, 0, 0) self.setLayout(grid)
Create the layouts and set some attributes of the ui :returns: None :rtype: None :raises: None
13,195
def construct_mail(self): canonical_format = self.body.encode() textpart = MIMEText(canonical_format, , ) if self.attachments: inner_msg = MIMEMultipart() inner_msg.attach(textpart) for a in self.attachments: inner_msg.attach(a.get_mime_representation()) else: inner_msg = textpart if self.sign: plaintext = inner_msg.as_bytes(policy=email.policy.SMTP) logging.debug(, plaintext) try: signatures, signature_str = crypto.detached_signature_for( plaintext, [self.sign_key]) if len(signatures) != 1: raise GPGProblem("Could not sign message (GPGME " "did not return a signature)", code=GPGCode.KEY_CANNOT_SIGN) except gpg.errors.GPGMEError as e: if e.getcode() == gpg.errors.BAD_PASSPHRASE: if os.environ.get(, ).strip() == : msg = "Got invalid passphrase and GPG_AGENT_INFO\ not set. Please set up gpg-agent." raise GPGProblem(msg, code=GPGCode.BAD_PASSPHRASE) else: raise GPGProblem("Bad passphrase. Is gpg-agent " "running?", code=GPGCode.BAD_PASSPHRASE) raise GPGProblem(str(e), code=GPGCode.KEY_CANNOT_SIGN) micalg = crypto.RFC3156_micalg_from_algo(signatures[0].hash_algo) unencrypted_msg = MIMEMultipart( , micalg=micalg, protocol=) stype = signature_mime = MIMEApplication( _data=signature_str.decode(), _subtype=stype, _encoder=encode_7or8bit) signature_mime[] = signature_mime.set_charset() unencrypted_msg.attach(inner_msg) unencrypted_msg.attach(signature_mime) unencrypted_msg[] = else: unencrypted_msg = inner_msg if self.encrypt: plaintext = unencrypted_msg.as_bytes(policy=email.policy.SMTP) logging.debug(, plaintext) try: encrypted_str = crypto.encrypt( plaintext, list(self.encrypt_keys.values())) except gpg.errors.GPGMEError as e: raise GPGProblem(str(e), code=GPGCode.KEY_CANNOT_ENCRYPT) outer_msg = MIMEMultipart(, protocol=) version_str = encryption_mime = MIMEApplication(_data=version_str, _subtype=, _encoder=encode_7or8bit) encryption_mime.set_charset() encrypted_mime = MIMEApplication( _data=encrypted_str.decode(), _subtype=, _encoder=encode_7or8bit) encrypted_mime.set_charset() outer_msg.attach(encryption_mime) outer_msg.attach(encrypted_mime) else: outer_msg = unencrypted_msg headers = self.headers.copy() if not in headers: headers[] = [email.utils.make_msgid()] if in headers: uastring_format = headers[][0] else: uastring_format = settings.get().strip() uastring = uastring_format.format(version=__version__) if uastring: headers[] = [uastring] for k, vlist in headers.items(): for v in vlist: outer_msg.add_header(k, v) return outer_msg
compiles the information contained in this envelope into a :class:`email.Message`.
13,196
def dict_merge(a, b, dict_boundary): if not isinstance(b, dict): return b result = deepcopy(a) for k, v in b.iteritems(): exploded_k = k.split(dict_boundary) if len(exploded_k) > 1: new_dict = None for key in reversed(exploded_k): if not key: continue if not new_dict: new_dict = OrderedDict([(key, v)]) else: new_dict = OrderedDict([(key, deepcopy(new_dict))]) result = dict_merge(result, new_dict, dict_boundary) elif k in result and isinstance(result[k], dict): result[k] = dict_merge(result[k], v, dict_boundary) else: result[k] = deepcopy(v) return result
Recursively merges dicts: not just a simple a['key'] = b['key']; if both a and b have a key whose value is a dict, then dict_merge is called on both values and the result is stored in the returned dictionary. Also, if keys contain `dict_boundary`, they will be split into sub-dictionaries. :param a: base dictionary :param b: dictionary merged into a copy of a :param dict_boundary: separator used to split keys into nested keys :return: the merged dictionary
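A worked example of the boundary-splitting behaviour described above (note the body uses Python 2's iteritems):

a = {'a': {'b': 1}}
b = {'a__c': 2, 'x': 5}
# With '__' as the boundary, 'a__c' expands to {'a': {'c': 2}} before merging.
result = dict_merge(a, b, '__')
# result == {'a': {'b': 1, 'c': 2}, 'x': 5}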
13,197
def init_common(app): if app.config[]: security_ext = app.extensions[] security_ext.confirm_register_form = confirm_register_form_factory( security_ext.confirm_register_form) security_ext.register_form = register_form_factory( security_ext.register_form)
Post initialization.
13,198
def _attribute_iterator(self, mapped_class, key): for attr in \ itervalues_(self.__get_attribute_map(mapped_class, key, 0)): if self.is_pruning: do_ignore = attr.should_ignore(key) else: do_ignore = False if not do_ignore: yield attr
Returns an iterator over the attributes in this mapping for the given mapped class and attribute key. If this is a pruning mapping, attributes that are ignored because of a custom configuration or because of the default ignore rules are skipped.
13,199
def __prepare_dataset_parameter(self, dataset): if not isinstance(dataset, _SFrame): def raise_dataset_type_exception(): raise TypeError("The dataset parameter must be either an SFrame, " "or a dictionary of (str : list) or (str : value).") if type(dataset) is dict: if not all(type(k) is str for k in _six.iterkeys(dataset)): raise_dataset_type_exception() if all(type(v) in (list, tuple, _array.array) for v in _six.itervalues(dataset)): dataset = _SFrame(dataset) else: dataset = _SFrame({k : [v] for k, v in _six.iteritems(dataset)}) else: raise_dataset_type_exception() return dataset
Processes the dataset parameter for type correctness. Returns it as an SFrame.