14,800
def get_corpus_path(name: str) -> Optional[str]:
    db = TinyDB(corpus_db_path())
    temp = Query()
    if len(db.search(temp.name == name)) > 0:
        path = get_full_data_path(db.search(temp.name == name)[0]["file"])
        db.close()
        if not os.path.exists(path):
            download(name)
        return path
    return None
Get corpus path.

:param str name: corpus name
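A minimal usage sketch (the corpus name is hypothetical; assumes the surrounding corpus-management helpers such as download() and corpus_db_path() are importable from the same module):

path = get_corpus_path("some-corpus")  # hypothetical corpus name
if path is None:
    print("corpus not registered in the local corpus database")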
14,801
def completion_acd(edm, X0, W=None, tol=1e-6, sweeps=3):
    from .algorithms import reconstruct_acd
    Xhat, costs = reconstruct_acd(edm, X0, W, tol=tol, sweeps=sweeps)
    return get_edm(Xhat)
Complete and denoise an EDM using alternating descent. The idea here is to simply run reconstruct_acd for a few iterations, yielding a position estimate, which can in turn be used to get a completed and denoised EDM.

:param edm: noisy matrix (NxN)
:param X0: starting points (Nxd)
:param W: optional weight matrix.
:param tol: Stopping criterion of iterative algorithm.
:param sweeps: Maximum number of sweeps.
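A quick synthetic usage sketch (assumes get_edm is the module's Euclidean-distance-matrix helper, as used in the function body):

import numpy as np

X_true = np.random.rand(10, 2)               # 10 points in 2-D
edm = get_edm(X_true)                        # clean EDM, for illustration
X0 = X_true + 0.1 * np.random.randn(10, 2)   # perturbed starting positions
edm_hat = completion_acd(edm, X0, tol=1e-6, sweeps=3)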
14,802
def post(self, url, headers=None, params=None, **kwargs):
    if len(kwargs) > 1:
        raise InvalidArgumentsError(
            "Too many extra args ({} > 1)".format(len(kwargs)))
    if kwargs:
        kwarg = next(iter(kwargs))
        if kwarg not in ("json", "data"):
            raise InvalidArgumentsError("Invalid kwarg: " + kwarg)
    resp = self.session.post(url, headers=headers, params=params, **kwargs)
    resp.raise_for_status()
    return _to_json(resp)
Send a JSON POST request with the given request headers, additional URL query parameters, and the given JSON in the request body. The extra query parameters are merged with any which already exist in the URL. The 'json' and 'data' parameters may not both be given. Args: url (str): URL to retrieve headers (dict): Any other headers to be added to the request. params: dictionary or bytes to be sent in the query string for the request. (optional) json: json to send in the body of the Request. This must be a JSON-serializable object. (optional) data: raw request body data. May be a dictionary, list of tuples, bytes, or file-like object to send in the body of the Request. (optional)
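A hypothetical call (assumes `client` is an instance of the class defining post() and that the endpoint returns JSON):

result = client.post(
    "https://api.example.com/items",          # hypothetical endpoint
    headers={"Accept": "application/json"},
    params={"verbose": "1"},
    json={"name": "widget"},                  # exactly one of json/data allowed
)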
14,803
def is_dtype(cls, dtype):
    # the attribute name was elided in the source; 'dtype' follows from
    # note 3 of the docstring below
    dtype = getattr(dtype, 'dtype', dtype)
    if isinstance(dtype, (ABCSeries, ABCIndexClass, ABCDataFrame, np.dtype)):
        return False
    elif dtype is None:
        return False
    elif isinstance(dtype, cls):
        return True
    try:
        return cls.construct_from_string(dtype) is not None
    except TypeError:
        return False
Check if we match 'dtype'.

Parameters
----------
dtype : object
    The object to check.

Returns
-------
is_dtype : bool

Notes
-----
The default implementation is True if

1. ``cls.construct_from_string(dtype)`` is an instance of ``cls``.
2. ``dtype`` is an object and is an instance of ``cls``.
3. ``dtype`` has a ``dtype`` attribute, and any of the above conditions
   is true for ``dtype.dtype``.
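For orientation, a hedged example with a concrete ExtensionDtype subclass (assuming this classmethod behaves as in pandas):

from pandas import CategoricalDtype

CategoricalDtype.is_dtype("category")   # True: construct_from_string succeeds
CategoricalDtype.is_dtype("int64")      # False: construct_from_string raises TypeError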
14,804
def start(self):
    self.manager = multiprocessing.Manager()
    self.policies = self.manager.dict()
    policies = copy.deepcopy(operation_policy.policies)
    for policy_name, policy_set in six.iteritems(policies):
        self.policies[policy_name] = policy_set
    # NOTE: every config-setting key string below was elided in the source;
    # the keys are restored from their apparent roles and should be treated
    # as reconstructions.
    self.policy_monitor = monitor.PolicyDirectoryMonitor(
        self.config.settings.get('policy_path'),
        self.policies,
        self.live_policies
    )

    def interrupt_handler(trigger, frame):
        self.policy_monitor.stop()
    signal.signal(signal.SIGINT, interrupt_handler)
    signal.signal(signal.SIGTERM, interrupt_handler)
    self.policy_monitor.start()

    self._engine = engine.KmipEngine(
        policies=self.policies,
        database_path=self.config.settings.get('database_path')
    )
    self._logger.info("Starting server socket handler.")
    socket.setdefaulttimeout(10)
    self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self._logger.debug(
        "Configured cipher suites: {0}".format(
            len(self.config.settings.get('tls_cipher_suites'))
        )
    )
    for cipher in self.config.settings.get('tls_cipher_suites'):
        self._logger.debug(cipher)
    auth_suite_ciphers = self.auth_suite.ciphers.split(':')
    self._logger.debug(
        "Authentication suite ciphers to use: {0}".format(
            len(auth_suite_ciphers)
        )
    )
    for cipher in auth_suite_ciphers:
        self._logger.debug(cipher)
    self._socket = ssl.wrap_socket(
        self._socket,
        keyfile=self.config.settings.get('key_path'),
        certfile=self.config.settings.get('certificate_path'),
        server_side=True,
        cert_reqs=ssl.CERT_REQUIRED,
        ssl_version=self.auth_suite.protocol,
        ca_certs=self.config.settings.get('ca_path'),
        do_handshake_on_connect=False,
        suppress_ragged_eofs=True,
        ciphers=self.auth_suite.ciphers
    )
    try:
        self._socket.bind(
            (
                self.config.settings.get('hostname'),
                int(self.config.settings.get('port'))
            )
        )
    except Exception as e:
        self._logger.exception(e)
        raise exceptions.NetworkingError(
            "Server failed to bind socket handler to {0}:{1}".format(
                self.config.settings.get('hostname'),
                self.config.settings.get('port')
            )
        )
    else:
        self._logger.info(
            "Server successfully bound socket handler to {0}:{1}".format(
                self.config.settings.get('hostname'),
                self.config.settings.get('port')
            )
        )
        self._is_serving = True
Prepare the server to start serving connections. Configure the server socket handler and establish a TLS wrapping socket from which all client connections descend. Bind this TLS socket to the specified network address for the server. Raises: NetworkingError: Raised if the TLS socket cannot be bound to the network address.
14,805
def combining_search(self):
    start = (
        self.get_pair(),
        (
            self.cube["L"], self.cube["U"], self.cube["F"],
            self.cube["D"], self.cube["R"], self.cube["B"],
        ),
    )
    return sum(
        path_actions(a_star_search(start, self.combining_successors,
                                   lambda x: len(x), self.combining_goal)),
        Formula(),
    )
Search for the path that combines the pair.
14,806
def run_procedure(self, process_number, std_vs_mfg, params=b''):
    # NOTE: the default for params and the log/error message strings were
    # elided in the source; placeholders are used below.
    seqnum = random.randint(2, 254)
    self.logger.info(
        'initiating procedure ' + str(process_number) + ' (' + hex(process_number) +
        ') with sequence number ' + str(seqnum) + ' (' + hex(seqnum) + ')'
    )
    procedure_request = C1219ProcedureInit(
        self.c1219_endian, process_number, std_vs_mfg, 0, seqnum, params
    ).build()
    self.set_table_data(7, procedure_request)
    response = self.get_table_data(8)
    if response[:3] == procedure_request[:3]:
        return response[3], response[4:]
    else:
        self.logger.error('procedure response header mismatch')
        raise C1219ProcedureError()
Initiate a C1219 procedure, the request is written to table 7 and the response is read from table 8. :param int process_number: The numeric procedure identifier (0 <= process_number <= 2047). :param bool std_vs_mfg: Whether the procedure is manufacturer specified or not. True is manufacturer specified. :param bytes params: The parameters to pass to the procedure initiation request. :return: A tuple of the result code and the response data. :rtype: tuple
14,807
def _nonzero(self):
    nonzeros = np.nonzero(self.data)
    return tuple(Variable((dim,), nz) for nz, dim in zip(nonzeros, self.dims))
Equivalent to numpy's nonzero, but returns a tuple of Variables.
14,808
def println(msg):
    sys.stdout.write(msg)
    sys.stdout.flush()
    # the overwrite character was elided in the source; backspaces match
    # the single-line behavior described in the docstring
    sys.stdout.write('\b' * len(msg))
    sys.stdout.flush()
Convenience function to print messages on a single line in the terminal
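A tiny demo of the intended effect, assuming the reconstructed backspace behavior above (each message overwrites the previous one on the same terminal line):

import time

for i in range(5):
    println("step {}".format(i))
    time.sleep(0.2)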
14,809
def get_views_traffic(self, per=github.GithubObject.NotSet):
    assert per is github.GithubObject.NotSet or (
        isinstance(per, (str, unicode)) and per in ("day", "week")
    ), "per must be day or week, day by default"
    url_parameters = dict()
    if per is not github.GithubObject.NotSet:
        url_parameters["per"] = per
    headers, data = self._requester.requestJsonAndCheck(
        "GET", self.url + "/traffic/views", parameters=url_parameters
    )
    if isinstance(data, dict) and "views" in data and isinstance(data["views"], list):
        data["views"] = [
            github.View.View(self._requester, headers, item, completed=True)
            for item in data["views"]
        ]
    return data
:calls: `GET /repos/:owner/:repo/traffic/views <https://developer.github.com/v3/repos/traffic/>`_ :param per: string, must be one of day or week, day by default :rtype: None or list of :class:`github.View.View`
14,810
async def delete(self, turn_context: TurnContext) -> None:
    if turn_context is None:
        # the error message was elided in the source
        raise TypeError("turn_context cannot be None")
    turn_context.turn_state.pop(self._context_service_key)
    storage_key = self.get_storage_key(turn_context)
    await self._storage.delete({storage_key})
Delete any state currently stored in this state scope. :param turn_context: The context object for this turn. :return: None
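Hypothetical usage from inside an async bot turn handler (assumes `user_state` is an instance of the state class above):

await user_state.delete(turn_context)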
14,811
def dry_run_from_args(args: argparse.Namespace):
    parameter_path = args.param_path
    serialization_dir = args.serialization_dir
    overrides = args.overrides
    params = Params.from_file(parameter_path, overrides)
    dry_run_from_params(params, serialization_dir)
Just converts from an ``argparse.Namespace`` object to params.
14,812
def _compute_ll(self):
    self.fracs = []
    self.logP = []
    self.ll = []
    for i in range(self.width):
        # the four dict keys were elided in the source; 'A', 'C', 'G', 'T'
        # are assumed, matching a nucleotide count matrix
        Dll = {'A': 0, 'C': 0, 'G': 0, 'T': 0}
        Df = {'A': 0, 'C': 0, 'G': 0, 'T': 0}
        DlogP = {'A': 0, 'C': 0, 'G': 0, 'T': 0}
        for key in self.counts[i].keys():
            Pij = self.counts[i][key] / float(self.nseqs)
            Df[key] = Pij
            Dll[key] = (math.log(
                (self.counts[i][key] + self.bgscale * self.background[key]) /
                ((self.nseqs + self.bgscale) * self.background[key])
            ) / math.log(2))
            if Pij > 0:
                DlogP[key] = math.log(Pij) / math.log(2)
            else:
                DlogP[key] = -100
        self.fracs.append(Df)
        self.logP.append(DlogP)
        self.ll.append(Dll)
    self.P = self.fracs
    self._compute_bits()
    self._compute_ambig_ll()
    self._maxscore()
m._compute_ll() -- [utility] Compute the log-likelihood matrix from the count matrix
14,813
def set_options(self, **kw):
    for k, v in kw.iteritems():
        if k in self.__options:
            self.__options[k] = v
r"""Set Parser options. .. seealso:: ``kw`` argument have the same meaning as in :func:`lazyxml.loads`
14,814
def create_job_flow(self, job_flow_overrides):
    if not self.emr_conn_id:
        # the error message was elided in the source
        raise AirflowException('emr_conn_id must be set to create a job flow')
    emr_conn = self.get_connection(self.emr_conn_id)
    config = emr_conn.extra_dejson.copy()
    config.update(job_flow_overrides)
    response = self.get_conn().run_job_flow(**config)
    return response
Creates a job flow using the config from the EMR connection. Keys of the connection's JSON ``extra`` hash may be arguments to the boto3 ``run_job_flow`` method. Overrides for this config may be passed as ``job_flow_overrides``.
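A hypothetical Airflow usage sketch (assumes this method lives on an EMR hook class, here called EmrHook, and that a connection with id "emr_default" exists):

hook = EmrHook(emr_conn_id="emr_default")   # class name assumed
response = hook.create_job_flow({
    "Name": "example-cluster",
    "Instances": {"InstanceCount": 3},
})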
14,815
def check_py(self, version, name, original, loc, tokens):
    internal_assert(len(tokens) == 1, "invalid " + name + " tokens", tokens)
    if self.target_info < get_target_info(version):
        raise self.make_err(
            CoconutTargetError,
            "found Python " + ".".join(version) + " " + name,
            original, loc, target=version)
    else:
        return tokens[0]
Check for Python-version-specific syntax.
14,816
def default_sort_key(item, order=None):
    from sympy.core import S, Basic
    from sympy.core.sympify import sympify, SympifyError
    from sympy.core.compatibility import iterable

    if isinstance(item, Basic):
        return item.sort_key(order=order)

    if iterable(item, exclude=string_types):
        if isinstance(item, dict):
            args = item.items()
            unordered = True
        elif isinstance(item, set):
            args = item
            unordered = True
        else:
            args = list(item)
            unordered = False
        args = [default_sort_key(arg, order=order) for arg in args]
        if unordered:
            args = sorted(args)
        cls_index, args = 10, (len(args), tuple(args))
    else:
        if not isinstance(item, string_types):
            try:
                item = sympify(item)
            except SympifyError:
                pass
            else:
                if isinstance(item, Basic):
                    return default_sort_key(item)
        cls_index, args = 0, (1, (str(item),))

    return (cls_index, 0, item.__class__.__name__), args, S.One.sort_key(), S.One
Return a key that can be used for sorting. The key has the structure: (class_key, (len(args), args), exponent.sort_key(), coefficient) This key is supplied by the sort_key routine of Basic objects when ``item`` is a Basic object or an object (other than a string) that sympifies to a Basic object. Otherwise, this function produces the key. The ``order`` argument is passed along to the sort_key routine and is used to determine how the terms *within* an expression are ordered. (See examples below) ``order`` options are: 'lex', 'grlex', 'grevlex', and reversed values of the same (e.g. 'rev-lex'). The default order value is None (which translates to 'lex'). Examples ======== >>> from sympy import S, I, default_sort_key >>> from sympy.core.function import UndefinedFunction >>> from sympy.abc import x The following are equivalent ways of getting the key for an object: >>> x.sort_key() == default_sort_key(x) True Here are some examples of the key that is produced: >>> default_sort_key(UndefinedFunction('f')) ((0, 0, 'UndefinedFunction'), (1, ('f',)), ((1, 0, 'Number'), (0, ()), (), 1), 1) >>> default_sort_key('1') ((0, 0, 'str'), (1, ('1',)), ((1, 0, 'Number'), (0, ()), (), 1), 1) >>> default_sort_key(S.One) ((1, 0, 'Number'), (0, ()), (), 1) >>> default_sort_key(2) ((1, 0, 'Number'), (0, ()), (), 2) While sort_key is a method only defined for SymPy objects, default_sort_key will accept anything as an argument so it is more robust as a sorting key. For the following, using key= lambda i: i.sort_key() would fail because 2 doesn't have a sort_key method; that's why default_sort_key is used. Note, that it also handles sympification of non-string items likes ints: >>> a = [2, I, -I] >>> sorted(a, key=default_sort_key) [2, -I, I] The returned key can be used anywhere that a key can be specified for a function, e.g. sort, min, max, etc...: >>> a.sort(key=default_sort_key); a[0] 2 >>> min(a, key=default_sort_key) 2 Note ---- The key returned is useful for getting items into a canonical order that will be the same across platforms. It is not directly useful for sorting lists of expressions: >>> a, b = x, 1/x Since ``a`` has only 1 term, its value of sort_key is unaffected by ``order``: >>> a.sort_key() == a.sort_key('rev-lex') True If ``a`` and ``b`` are combined then the key will differ because there are terms that can be ordered: >>> eq = a + b >>> eq.sort_key() == eq.sort_key('rev-lex') False >>> eq.as_ordered_terms() [x, 1/x] >>> eq.as_ordered_terms('rev-lex') [1/x, x] But since the keys for each of these terms are independent of ``order``'s value, they don't sort differently when they appear separately in a list: >>> sorted(eq.args, key=default_sort_key) [1/x, x] >>> sorted(eq.args, key=lambda i: default_sort_key(i, order='rev-lex')) [1/x, x] The order of terms obtained when using these keys is the order that would be obtained if those terms were *factors* in a product. See Also ======== sympy.core.expr.as_ordered_factors, sympy.core.expr.as_ordered_terms
14,817
def show_condition_operators(self, condition):
    permitted_operators = self.savedsearch.conditions_operators.get(condition)
    permitted_operators_list = set(
        self.savedsearch.operators.get(op) for op in permitted_operators
    )
    return permitted_operators_list
Show available operators for a given saved search condition
14,818
def _create_at(self, timestamp=None, id=None, forced_identity=None, **kwargs):
    id = Versionable.uuid(id)
    if forced_identity:
        ident = Versionable.uuid(forced_identity)
    else:
        ident = id
    if timestamp is None:
        timestamp = get_utc_now()
    # the four kwarg keys were elided in the source; they are restored from
    # the variables assigned and the docstring below
    kwargs['id'] = id
    kwargs['identity'] = ident
    kwargs['version_start_date'] = timestamp
    kwargs['version_birth_date'] = timestamp
    return super(VersionManager, self).create(**kwargs)
WARNING: Only for internal use and testing. Create a Versionable having a version_start_date and version_birth_date set to some pre-defined timestamp :param timestamp: point in time at which the instance has to be created :param id: version 4 UUID unicode object. Usually this is not specified, it will be automatically created. :param forced_identity: version 4 UUID unicode object. For internal use only. :param kwargs: arguments needed for initializing the instance :return: an instance of the class
14,819
def logout(self):
    if self._logged_in is True:
        self.si.flush_cache()
        self.sc.sessionManager.Logout()
        self._logged_in = False
Logout of a vSphere server.
14,820
def extract(group_id, access_token, fields=None):
    # NOTE: the default field list, the URL template, and several string
    # literals were elided in the source; the reconstructions below follow
    # Facebook Graph API conventions and should be treated as placeholders.
    fields = fields or ['id', 'name']  # original default list elided
    assert set(fields).issubset(VALID_FIELDS)
    get_args = {'fields': ','.join(fields), 'access_token': access_token}
    get_args_str = '&'.join(
        ['{}={}'.format(x, y) for x, y in get_args.items()])
    base_url = 'https://graph.facebook.com/{}/members?{}'.format(
        group_id, get_args_str)  # URL template elided in source
    logr.debug('requesting {}'.format(base_url))
    response = rq.get(base_url)
    r_json = response.json()
    subscribers = r_json.get('data')
    return subscribers
FIXME: DOCS... Links: * https://developers.facebook.com/tools/explorer/
14,821
def remove_child_family(self, family_id, child_id):
    if self._catalog_session is not None:
        return self._catalog_session.remove_child_catalog(
            catalog_id=family_id, child_id=child_id)
    return self._hierarchy_session.remove_child(
        id_=family_id, child_id=child_id)
Removes a child from a family. arg: family_id (osid.id.Id): the ``Id`` of a family arg: child_id (osid.id.Id): the ``Id`` of the new child raise: NotFound - ``family_id`` not a parent of ``child_id`` raise: NullArgument - ``family_id`` or ``child_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
14,822
def apply_templates(toks, templates):
    # NOTE: the separator and format strings were elided in the source; the
    # conventional CRF-feature forms ('%s[%d]', '|', '=') are assumed. The
    # 'F' field name is confirmed by the docstring below.
    for template in templates:
        name = '|'.join(['%s[%d]' % (f, o) for f, o in template])
        for t in range(len(toks)):
            values_list = []
            for field, offset in template:
                p = t + offset
                if p < 0 or p >= len(toks):
                    values_list = []
                    break
                if field in toks[p]:
                    value = toks[p][field]
                    values_list.append(
                        value if isinstance(value, (set, list)) else [value])
            if len(template) == len(values_list):
                for values in product(*values_list):
                    toks[t]['F'].append('%s=%s' % (name, '|'.join(values)))
Generate features for an item sequence by applying feature templates. A feature template consists of a tuple of (name, offset) pairs, where name and offset specify a field name and offset from which the template extracts a feature value. Generated features are stored in the 'F' field of each item in the sequence.

Parameters
----------
toks: list of tokens
    A list of processed tokens.
templates: list of template tuples (str, int)
    A feature template consists of a tuple of (name, offset) pairs, where name and offset specify a field name and offset from which the template extracts a feature value.
14,823
def commit_branches(sha1):
    # the git command string was elided in the source; 'git branch
    # --contains' matches the documented behavior
    cmd = 'git branch --contains {}'.format(sha1)
    return shell.run(
        cmd, capture=True, never_pretend=True
    ).stdout.strip().split()
Get the name of the branches that this commit belongs to.
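Hypothetical usage (the SHA-1 below is made up; assumes a git checkout and the shell helper used above):

branches = commit_branches("4f0c1a2")
print(branches)   # raw `git branch` tokens, e.g. ['*', 'master', 'feature/x']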
14,824
def qhalfx(self):
    if self.__qhalfx is None:
        self.log("qhalfx")
        self.__qhalfx = self.qhalf * self.jco
        self.log("qhalfx")
    return self.__qhalfx
get the half normal matrix attribute. Create the attribute if it has not yet been created Returns ------- qhalfx : pyemu.Matrix
14,825
def get_end(pos, alt, category, snvend=None, svend=None, svlen=None):
    # NOTE: the category strings and the substring test were elided in the
    # source; 'snv'/'indel'/'cancer' vs 'sv' and a ':' breakend check are
    # assumed.
    end = pos
    if category in ('snv', 'indel', 'cancer'):
        end = snvend
    elif category == 'sv':
        end = svend
        if svend == pos:
            if svlen:
                end = pos + svlen
        if ':' in alt:
            match = BND_ALT_PATTERN.match(alt)
            if match:
                end = int(match.group(2))
    return end
Return the end coordinate for a variant Args: pos(int) alt(str) category(str) snvend(str) svend(int) svlen(int) Returns: end(int)
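A worked illustration under the category names assumed above:

get_end(pos=100, alt="T", category="snv", snvend=100)                # -> 100
get_end(pos=100, alt="<DEL>", category="sv", svend=100, svlen=250)   # -> 350 (svend == pos, so svlen is used)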
14,826
def create_or_update_record(data, pid_type, id_key, minter):
    # NOTE: the object_type string and the dict key deleted from both
    # copies were elided in the source; the reconstructions are marked.
    resolver = Resolver(
        pid_type=pid_type, object_type='rec', getter=Record.get_record)
    try:
        pid, record = resolver.resolve(data[id_key])
        data_c = deepcopy(data)
        del data_c['remote_modified']    # key name reconstructed, not certain
        record_c = deepcopy(record)
        del record_c['remote_modified']  # key name reconstructed, not certain
        if data_c != record_c:
            record.update(data)
            record.commit()
            record_id = record.id
            db.session.commit()
            RecordIndexer().index_by_id(str(record_id))
    except PIDDoesNotExistError:
        record = Record.create(data)
        record_id = record.id
        minter(record.id, data)
        db.session.commit()
        RecordIndexer().index_by_id(str(record_id))
Register a funder or grant.
14,827
def get_versioned_delete_collector_class():
    # the settings key was elided in the source; the name below is
    # reconstructed from this function's role
    key = 'VERSIONED_DELETE_COLLECTOR'
    try:
        cls = _cache[key]
    except KeyError:
        collector_class_string = getattr(settings, key)
        cls = import_from_string(collector_class_string, key)
        _cache[key] = cls
    return cls
Gets the class to use for deletion collection. :return: class
14,828
def check_marker_kwargs(self, kwargs):
    text = kwargs.get("text", "")
    if not isinstance(text, str) and text is not None:
        raise TypeError("text argument is not of str type")
    for color in (item for item in
                  (prefix + color
                   for prefix in ["active_", "hover_", ""]
                   for color in ["background", "foreground", "outline"])):
        value = kwargs.get(color, "")
        if value == "default":
            continue
        if not isinstance(value, str):
            raise TypeError("{} argument not of str type".format(color))
    font = kwargs.get("font", ("default", 10))
    if (not isinstance(font, tuple) or not len(font) > 0
            or not isinstance(font[0], str)) and font != "default":
        raise ValueError("font argument is not a valid font tuple")
    for border in (prefix + "border" for prefix in ["active_", "hover_", ""]):
        border_v = kwargs.get(border, 0)
        if border_v == "default":
            continue
        if not isinstance(border_v, int) or border_v < 0:
            raise ValueError("{} argument is not of int type or smaller than zero".format(border))
    iid = kwargs.get("iid", "-1")
    if not isinstance(iid, str):
        raise TypeError("iid argument not of str type")
    if iid == "":
        raise ValueError("iid argument empty string")
    for boolean_arg in ["move", "category_change", "allow_overlap", "snap_to_ticks"]:
        value = kwargs.get(boolean_arg, False)
        if value == "default":
            continue
        if not isinstance(value, bool):
            raise TypeError("{} argument is not of bool type".format(boolean_arg))
    tags = kwargs.get("tags", ())
    if not isinstance(tags, tuple):
        raise TypeError("tags argument is not of tuple type")
    for tag in tags:
        if not isinstance(tag, str):
            raise TypeError("one or more values in tags argument is not of str type")
        if tag not in self._tags:
            raise ValueError("unknown tag in tags argument")
Check the types of the keyword arguments for marker creation :param kwargs: dictionary of options for marker creation :type kwargs: dict :raises: TypeError, ValueError
14,829
def validate_extra_link(self, extra_link):
    if EXTRA_LINK_NAME_KEY not in extra_link or EXTRA_LINK_FORMATTER_KEY not in extra_link:
        # the quoted field names in the message were elided in the source;
        # they are restored from the two key constants checked above
        raise Exception("Invalid extra.links format. " +
                        "Extra link must include a 'name' and 'formatter' field")
    self.validated_formatter(extra_link[EXTRA_LINK_FORMATTER_KEY])
    return extra_link
validate extra link
14,830
def blogroll(request, btype):
    # NOTE: the template-name pattern, content type, Vary header, and cache
    # key strings were elided in the source; placeholders are used below.
    response, site, cachekey = initview(request)
    if response:
        return response[0]
    template = loader.get_template('feedjack/{}.html'.format(btype))  # pattern reconstructed
    ctx = dict()
    fjlib.get_extra_context(site, ctx)
    ctx = Context(ctx)
    response = HttpResponse(
        template.render(ctx), content_type='text/xml; charset=utf-8')
    patch_vary_headers(response, ['Host'])
    fjcache.cache_set(site, cachekey, (response, ctx_get(ctx, 'last_modified')))
    return response
View that handles the generation of blogrolls.
14,831
def visualize_detection(self, img, dets, classes=[], thresh=0.6):
    import matplotlib.pyplot as plt
    import random
    plt.imshow(img)
    height = img.shape[0]
    width = img.shape[1]
    colors = dict()
    for det in dets:
        (klass, score, x0, y0, x1, y1) = det
        if score < thresh:
            continue
        cls_id = int(klass)
        if cls_id not in colors:
            colors[cls_id] = (random.random(), random.random(), random.random())
        xmin = int(x0 * width)
        ymin = int(y0 * height)
        xmax = int(x1 * width)
        ymax = int(y1 * height)
        rect = plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin,
                             fill=False, edgecolor=colors[cls_id],
                             linewidth=3.5)
        plt.gca().add_patch(rect)
        class_name = str(cls_id)
        if classes and len(classes) > cls_id:
            class_name = classes[cls_id]
        # the label format string and text color were elided in the source
        plt.gca().text(xmin, ymin - 2,
                       '{:s} {:.3f}'.format(class_name, score),
                       bbox=dict(facecolor=colors[cls_id], alpha=0.5),
                       fontsize=12, color='white')
    plt.show()
visualize detections in one image Parameters: ---------- img : numpy.array image, in bgr format dets : numpy.array ssd detections, numpy.array([[id, score, x1, y1, x2, y2]...]) each row is one object classes : tuple or list of str class names thresh : float score threshold
14,832
def bgc(mag_file, dir_path=".", input_dir_path="",
        meas_file='measurements.txt', spec_file='specimens.txt',
        samp_file='samples.txt', site_file='sites.txt',
        loc_file='locations.txt', append=False, location="unknown",
        site="", samp_con='1', specnum=0, meth_code="LP-NO", volume=12,
        user="", timezone='US/Pacific', noave=False):
    # NOTE: many string literals (default file names, data-frame column
    # names, MagIC column names) were elided in the source. The defaults
    # are restored from the docstring below; column and record keys are
    # best-effort reconstructions following MagIC 3 conventions and should
    # be treated as placeholders.
    version_num = pmag.get_version()
    input_dir_path, output_dir_path = pmag.fix_directories(input_dir_path, dir_path)
    samp_con = str(samp_con)
    specnum = - int(specnum)
    volume *= 1e-6  # convert cc to m^3
    if "4" in samp_con:
        if "-" not in samp_con:
            print("option [4] must be in form 4-Z where Z is an integer")
            return False, "option [4] must be in form 4-Z where Z is an integer"
        else:
            Z = int(samp_con.split("-")[1])
            samp_con = "4"
    if "7" in samp_con:
        if "-" not in samp_con:
            print("option [7] must be in form 7-Z where Z is an integer")
            return False, "option [7] must be in form 7-Z where Z is an integer"
        else:
            Z = int(samp_con.split("-")[1])
            samp_con = "7"
    else:
        Z = 1

    mag_file = os.path.join(input_dir_path, mag_file)
    if not os.path.isfile(mag_file):
        print("%s is not a BGC file" % mag_file)
        return False, "%s is not a BGC file" % mag_file
    print('mag_file:', mag_file)

    # parse the specimen/orientation header lines
    pre_data = open(mag_file, 'r')
    line = pre_data.readline()
    line_items = line.split()
    specimen = line_items[2]
    specimen = specimen.replace('\n', '')  # stripped character reconstructed
    line = pre_data.readline()
    line = pre_data.readline()
    line_items = line.split()
    azimuth = float(line_items[1])
    dip = float(line_items[2])
    bed_dip = line_items[3]
    sample_bed_azimuth = line_items[4]
    lon = line_items[5]
    lat = line_items[6]
    tmp_volume = line_items[7]
    if tmp_volume != 0.0:
        volume = float(tmp_volume) * 1e-6
    pre_data.close()

    data = pd.read_csv(mag_file, sep='\t', header=3, index_col=False)
    # measurement column names below were elided in the source
    cart = np.array([data['X'], data['Y'], data['Z']]).transpose()
    direction = pmag.cart2dir(cart).transpose()
    data['dir_dec'] = direction[0]
    data['dir_inc'] = direction[1]
    data['magn_moment'] = direction[2] / 1000          # emu to Am^2
    data['magn_volume'] = (direction[2] / 1000) / volume

    MeasRecs, SpecRecs, SampRecs, SiteRecs, LocRecs = [], [], [], [], []
    for rowNum, row in data.iterrows():
        MeasRec, SpecRec, SampRec, SiteRec, LocRec = {}, {}, {}, {}, {}
        if specnum != 0:
            sample = specimen[:specnum]
        else:
            sample = specimen
        if site == '':
            site = pmag.parse_site(sample, samp_con, Z)
        if specimen != "" and specimen not in [x['specimen'] if 'specimen' in list(x.keys()) else "" for x in SpecRecs]:
            SpecRec['specimen'] = specimen
            SpecRec['sample'] = sample
            SpecRec['volume'] = volume
            SpecRec['analysts'] = user
            SpecRec['citations'] = 'This study'
            SpecRecs.append(SpecRec)
        if sample != "" and sample not in [x['sample'] if 'sample' in list(x.keys()) else "" for x in SampRecs]:
            SampRec['sample'] = sample
            SampRec['site'] = site
            SampRec['azimuth'] = azimuth
            SampRec['dip'] = dip
            SampRec['bed_dip_direction'] = sample_bed_azimuth
            SampRec['bed_dip'] = bed_dip
            SampRec['method_codes'] = meth_code
            SampRec['analysts'] = user
            SampRec['citations'] = 'This study'
            SampRecs.append(SampRec)
        if site != "" and site not in [x['site'] if 'site' in list(x.keys()) else "" for x in SiteRecs]:
            SiteRec['site'] = site
            SiteRec['location'] = location
            SiteRec['lat'] = lat
            SiteRec['lon'] = lon
            SiteRec['analysts'] = user
            SiteRec['citations'] = 'This study'
            SiteRecs.append(SiteRec)
        if location != "" and location not in [x['location'] if 'location' in list(x.keys()) else "" for x in LocRecs]:
            LocRec['location'] = location
            LocRec['analysts'] = user
            LocRec['citations'] = 'This study'
            LocRec['lat_n'] = lat
            LocRec['lon_e'] = lon
            LocRec['lat_s'] = lat
            LocRec['lon_w'] = lon
            LocRecs.append(LocRec)

        MeasRec['description'] = 'Date: ' + str(row['Date']) + ' Time: ' + str(row['Time'])
        if '-' in row['Date']:
            datelist = row['Date'].split('-')
        elif '/' in row['Date']:
            datelist = row['Date'].split('/')
        elif '.' in row['Date']:
            datelist = row['Date'].split('.')
        else:
            print("unrecognized date formatting on one of the measurement entries for specimen %s" % specimen)
            datelist = ['', '', '']
        if ':' in row['Time']:
            timelist = row['Time'].split(':')
        else:
            print("unrecognized time formatting on one of the measurement entries for specimen %s" % specimen)
            timelist = ['', '', '']
        datelist[2] = '20' + datelist[2] if len(datelist[2]) <= 2 else datelist[2]
        dt = ":".join([datelist[1], datelist[0], datelist[2],
                       timelist[0], timelist[1], timelist[2]])
        local = pytz.timezone(timezone)
        naive = datetime.datetime.strptime(dt, "%m:%d:%Y:%H:%M:%S")
        local_dt = local.localize(naive, is_dst=None)
        utc_dt = local_dt.astimezone(pytz.utc)
        timestamp = utc_dt.strftime("%Y-%m-%dT%H:%M:%S") + "Z"
        MeasRec["timestamp"] = timestamp
        MeasRec["citations"] = "This study"
        MeasRec['software_packages'] = version_num
        MeasRec["treat_temp"] = '%1.0f' % 273
        MeasRec["meas_temp"] = '%1.0f' % 273
        MeasRec["quality"] = 'g'
        MeasRec["standard"] = 'u'
        MeasRec["treat_step_num"] = rowNum
        MeasRec["specimen"] = specimen
        MeasRec["treat_ac_field"] = '0'
        # the demag column name was elided in the source; 'DM Val' is assumed
        if row['DM Val'] == '0':
            meas_type = "LT-NO"
        elif int(row['DM Val']) > 0.0:
            meas_type = "LT-AF-Z"
            treat = float(row['DM Val'])
            MeasRec["treat_ac_field"] = '%8.3e' % (treat * 1e-3)  # mT to T
        elif int(row['DM Val']) == -1:
            meas_type = "LT-T-Z"
            treat = float(row['DM Val'])
            MeasRec["treat_temp"] = '%8.3e' % (treat + 273.)  # C to K
        else:
            print("measurement type unknown:", row['DM Val'], " in row ", rowNum)
        MeasRec["magn_moment"] = str(row['magn_moment'])
        MeasRec["magn_volume"] = str(row['magn_volume'])
        MeasRec["dir_dec"] = str(row['dir_dec'])
        MeasRec["dir_inc"] = str(row['dir_inc'])
        MeasRec['method_codes'] = meas_type
        MeasRec['dir_csd'] = '0.0'      # key/value reconstructed
        MeasRec['meas_n_orient'] = '1'  # key/value reconstructed
        MeasRecs.append(MeasRec.copy())

    con = cb.Contribution(output_dir_path, read_tables=[])
    con.add_magic_table_from_data(dtype='specimens', data=SpecRecs)
    con.add_magic_table_from_data(dtype='samples', data=SampRecs)
    con.add_magic_table_from_data(dtype='sites', data=SiteRecs)
    con.add_magic_table_from_data(dtype='locations', data=LocRecs)
    MeasOuts = pmag.measurements_methods3(MeasRecs, noave)
    con.add_magic_table_from_data(dtype='measurements', data=MeasOuts)

    con.write_table_to_file('specimens', custom_name=spec_file, append=append)
    con.write_table_to_file('samples', custom_name=samp_file, append=append)
    con.write_table_to_file('sites', custom_name=site_file, append=append)
    con.write_table_to_file('locations', custom_name=loc_file, append=append)
    meas_file = con.write_table_to_file(
        'measurements', custom_name=meas_file, append=append)
    return True, meas_file
Convert BGC format file to MagIC file(s)

Parameters
----------
mag_file : str
    input file name
dir_path : str
    working directory, default "."
input_dir_path : str
    input file directory IF different from dir_path, default ""
meas_file : str
    output measurement file name, default "measurements.txt"
spec_file : str
    output specimen file name, default "specimens.txt"
samp_file: str
    output sample file name, default "samples.txt"
site_file : str
    output site file name, default "sites.txt"
loc_file : str
    output location file name, default "locations.txt"
append : bool
    append output files to existing files instead of overwrite, default False
location : str
    location name, default "unknown"
site : str
    site name, default ""
samp_con : str
    sample/site naming convention, default '1', see info below
specnum : int
    number of characters to designate a specimen, default 0
meth_code : str
    orientation method codes, default "LP-NO"
    e.g. [SO-MAG, SO-SUN, SO-SIGHT, ...]
volume : float
    volume in ccs, default 12.
user : str
    user name, default ""
timezone : str
    timezone in pytz library format, default "US/Pacific"
    list of timezones can be found at http://pytz.sourceforge.net/
noave : bool
    do not average duplicate measurements, default False (so by default, DO average)

Returns
---------
Tuple : (True or False indicating if conversion was successful, meas_file name written)

Info
--------
Sample naming convention:
    [1] XXXXY: where XXXX is an arbitrary length site designation and Y is the single character sample designation.  e.g., TG001a is the first sample from site TG001.    [default]
    [2] XXXX-YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
    [3] XXXX.YY: YY sample from site XXXX (XXXX, YY of arbitrary length)
    [4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
    [5] site name same as sample
    [6] site is entered under a separate column -- NOT CURRENTLY SUPPORTED
    [7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
14,833
def get_calc_id(db, datadir, job_id=None):
    calcs = datastore.get_calc_ids(datadir)
    calc_id = 0 if not calcs else calcs[-1]
    if job_id is None:
        try:
            # the SQL string was elided in the source; a max-id query over
            # the job table is assumed
            job_id = db('SELECT seq FROM sqlite_sequence WHERE name="job"',
                        scalar=True)
        except NotFound:
            job_id = 0
    return max(calc_id, job_id)
Return the latest calc_id by looking both at the datastore and the database. :param db: a :class:`openquake.server.dbapi.Db` instance :param datadir: the directory containing the datastores :param job_id: a job ID; if None, returns the latest job ID
14,834
def part(z, s):
    if sage_included:
        if s == 1:
            return np.real(z)
        elif s == -1:
            return np.imag(z)
        elif s == 0:
            return z
    else:
        if s == 1:
            return z.real
        elif s == -1:
            return z.imag
        elif s == 0:
            return z
r"""Get the real or imaginary part of a complex number.
14,835
def convert_basis(basis_dict, fmt, header=None):
    # NOTE: the dict keys and message strings below were elided in the
    # source and are reconstructed; treat them as placeholders.
    fmt = fmt.lower()
    if fmt not in _converter_map:
        raise RuntimeError('Unknown output format "{}"'.format(fmt))
    converter = _converter_map[fmt]
    if converter['valid'] is not None:
        ftypes = set(basis_dict['function_types'])
        if ftypes > converter['valid']:
            raise RuntimeError(
                'Converter {} does not support all function types: {}'.format(
                    fmt, str(ftypes)))
    ret_str = converter['function'](basis_dict)
    if header is not None and fmt != 'json':
        comment_str = _converter_map[fmt]['comment']
        header_str = comment_str + comment_str.join(header.splitlines(True))
        ret_str = header_str + '\n' + ret_str
    if fmt == 'dalton':
        types = basis_dict['function_types']
        harm_type = 'spherical' if 'gto_spherical' in types else 'cartesian'
        ret_str = harm_type + '\n' + ret_str
    return ret_str
Returns the basis set data as a string representing the data in the specified output format
14,836
def get_performance_signatures(self, project, **params):
    results = self._get_json(self.PERFORMANCE_SIGNATURES_ENDPOINT,
                             project, **params)
    return PerformanceSignatureCollection(results)
Gets a set of performance signatures associated with a project and time range
14,837
def args_to_inject(self, function, bindings, owner_key):
    # NOTE: the log/error format strings were elided in the source and are
    # reconstructed as placeholders.
    dependencies = {}
    key = (owner_key, function, tuple(sorted(bindings.items())))

    def repr_key(k):
        owner_key, function, bindings = k
        return '%s.%s(injecting %s)' % (
            tuple(map(_describe, k[:2])) + (dict(k[2]),))

    log.debug('%s: args_to_inject: bindings=%r function=%r',
              self._log_prefix, bindings, function)
    if key in self._stack:
        raise CircularDependency(
            'circular dependency detected: %s -> %s' % (
                ' -> '.join(map(repr_key, self._stack)), repr_key(key))
        )
    self._stack += (key,)
    try:
        for arg, key in bindings.items():
            try:
                instance = self.get(key.interface)
            except UnsatisfiedRequirement as e:
                if not e.args[0]:
                    e = UnsatisfiedRequirement(owner_key, e.args[1])
                raise e
            dependencies[arg] = instance
    finally:
        self._stack = tuple(self._stack[:-1])
    return dependencies
Inject arguments into a function. :param function: The function. :param bindings: Map of argument name to binding key to inject. :param owner_key: A key uniquely identifying the *scope* of this function. For a method this will be the owning class. :returns: Dictionary of resolved arguments.
14,838
def get_newsentry_meta_description(newsentry):
    if newsentry.meta_description:
        return newsentry.meta_description
    text = newsentry.get_description()
    if len(text) > 160:
        # the truncation suffix was elided in the source; an ellipsis is
        # assumed
        return u'{}...'.format(text[:160])
    return text
Returns the meta description for the given entry.
14,839
def main(search, query):
    url = search.search(query)
    print(url)
    search.open_page(url)
main function that does the search
14,840
def get_um(method_name, response=False):
    # NOTE: the regular expression and the name format string were lost in
    # extraction (everything from '#' onward was truncated); both are
    # reconstructed from the documented name form 'Interface.Method#Version'.
    key = (method_name, response)
    if key not in method_lookup:
        match = re.findall(r'^([a-z]+)\.([a-z]+)#(\d+)$', method_name, re.I)
        if not match:
            return None
        interface, method, version = match[0]
        if interface not in service_lookup:
            return None
        package = import_module(service_lookup[interface])
        service = getattr(package, interface, None)
        if service is None:
            return None
        for method_desc in service.GetDescriptor().methods:
            name = "%s.%s#%s" % (interface, method_desc.name, version)
            method_lookup[(name, False)] = getattr(
                package, method_desc.input_type.full_name, None)
            method_lookup[(name, True)] = getattr(
                package, method_desc.output_type.full_name, None)
    return method_lookup[key]
Get protobuf for given method name :param method_name: full method name (e.g. ``Player.GetGameBadgeLevels#1``) :type method_name: :class:`str` :param response: whether to return proto for response or request :type response: :class:`bool` :return: protobuf message
14,841
def thumbnail(self):
    if not isfile(self.thumb_path):
        # NOTE: the log messages, type strings, and settings keys were
        # elided in the source and are reconstructed as placeholders.
        self.logger.debug('Generating thumbnail for %r', self)
        path = (self.dst_path if os.path.exists(self.dst_path)
                else self.src_path)
        try:
            s = self.settings
            if self.type == 'image':
                image.generate_thumbnail(
                    path, self.thumb_path, s['thumb_size'],
                    fit=s['thumb_fit'])
            elif self.type == 'video':
                video.generate_thumbnail(
                    path, self.thumb_path, s['thumb_size'],
                    s['thumb_video_delay'], fit=s['thumb_fit'],
                    converter=s['video_converter'])
        except Exception as e:
            self.logger.error('Failed to generate thumbnail: %s', e)
            return
    return url_from_path(self.thumb_name)
Path to the thumbnail image (relative to the album directory).
14,842
def Parse(conditions):
    kind = rdf_file_finder.FileFinderCondition.Type
    classes = {
        kind.MODIFICATION_TIME: ModificationTimeCondition,
        kind.ACCESS_TIME: AccessTimeCondition,
        kind.INODE_CHANGE_TIME: InodeChangeTimeCondition,
        kind.SIZE: SizeCondition,
        kind.EXT_FLAGS: ExtFlagsCondition,
    }
    for condition in conditions:
        try:
            yield classes[condition.condition_type](condition)
        except KeyError:
            pass
Parses the file finder condition types into the condition objects. Args: conditions: An iterator over `FileFinderCondition` objects. Yields: `MetadataCondition` objects that correspond to the file-finder conditions.
14,843
def parse(self, data, extent):
    # NOTE: exception messages below were elided in the source and are
    # reconstructed.
    if self._initialized:
        raise pycdlibexception.PyCdlibInternalError('UDF Tag already initialized')
    (self.tag_ident, self.desc_version, tag_checksum, reserved,
     self.tag_serial_number, desc_crc, self.desc_crc_length,
     self.tag_location) = struct.unpack_from(self.FMT, data, 0)
    if reserved != 0:
        raise pycdlibexception.PyCdlibInvalidISO('Reserved field is not zero')
    if _compute_csum(data[:16]) != tag_checksum:
        raise pycdlibexception.PyCdlibInvalidISO('Tag checksum does not match')
    if self.tag_location != extent:
        # the original body of this branch was lost in extraction; it most
        # likely handled (or rejected) a tag-location/extent mismatch
        pass
    self._initialized = True
Parse the passed in data into a UDF Descriptor tag. Parameters: data - The data to parse. extent - The extent to compare against for the tag location. Returns: Nothing.
14,844
def GetPatternIdTripDict(self):
    d = {}
    for t in self._trips:
        d.setdefault(t.pattern_id, []).append(t)
    return d
Return a dictionary that maps pattern_id to a list of Trip objects.
14,845
def batch_get_documents(
    self,
    database,
    documents,
    mask=None,
    transaction=None,
    new_transaction=None,
    read_time=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    if "batch_get_documents" not in self._inner_api_calls:
        self._inner_api_calls[
            "batch_get_documents"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.batch_get_documents,
            default_retry=self._method_configs["BatchGetDocuments"].retry,
            default_timeout=self._method_configs["BatchGetDocuments"].timeout,
            client_info=self._client_info,
        )

    google.api_core.protobuf_helpers.check_oneof(
        transaction=transaction,
        new_transaction=new_transaction,
        read_time=read_time,
    )

    request = firestore_pb2.BatchGetDocumentsRequest(
        database=database,
        documents=documents,
        mask=mask,
        transaction=transaction,
        new_transaction=new_transaction,
        read_time=read_time,
    )
    if metadata is None:
        metadata = []
    metadata = list(metadata)
    try:
        routing_header = [("database", database)]
    except AttributeError:
        pass
    else:
        routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
            routing_header
        )
        metadata.append(routing_metadata)

    return self._inner_api_calls["batch_get_documents"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
Gets multiple documents. Documents returned by this method are not guaranteed to be returned in the same order that they were requested. Example: >>> from google.cloud import firestore_v1beta1 >>> >>> client = firestore_v1beta1.FirestoreClient() >>> >>> database = client.database_root_path('[PROJECT]', '[DATABASE]') >>> >>> # TODO: Initialize `documents`: >>> documents = [] >>> >>> for element in client.batch_get_documents(database, documents): ... # process element ... pass Args: database (str): The database name. In the format: ``projects/{project_id}/databases/{database_id}``. documents (list[str]): The names of the documents to retrieve. In the format: ``projects/{project_id}/databases/{database_id}/documents/{document_path}``. The request will fail if any of the document is not a child resource of the given ``database``. Duplicate names will be elided. mask (Union[dict, ~google.cloud.firestore_v1beta1.types.DocumentMask]): The fields to return. If not set, returns all fields. If a document has a field that is not present in this mask, that field will not be returned in the response. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.DocumentMask` transaction (bytes): Reads documents in a transaction. new_transaction (Union[dict, ~google.cloud.firestore_v1beta1.types.TransactionOptions]): Starts a new transaction and reads the documents. Defaults to a read-only transaction. The new transaction ID will be returned as the first response in the stream. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.TransactionOptions` read_time (Union[dict, ~google.cloud.firestore_v1beta1.types.Timestamp]): Reads documents as they were at the given time. This may not be older than 60 seconds. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.firestore_v1beta1.types.Timestamp` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: Iterable[~google.cloud.firestore_v1beta1.types.BatchGetDocumentsResponse]. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
14,846
def is_micropython_usb_device(port):
    # NOTE: the type name, the dict keys, the format string, and the
    # VID:PID test were elided in the source; the reconstruction below is
    # a best-effort sketch of the documented behavior.
    if type(port).__name__ == 'Device':
        # assume a pyudev-style device mapping
        if ('ID_BUS' not in port or port['ID_BUS'] != 'usb'
                or 'SUBSYSTEM' not in port or port['SUBSYSTEM'] != 'tty'):
            return False
        usb_id = 'usb vid:pid={}:{}'.format(port['ID_VENDOR_ID'],
                                            port['ID_MODEL_ID'])
    else:
        usb_id = port[2].lower()
    # the tested substring was elided; MicroPython boards use vendor id f055
    if 'f055' in usb_id:
        return True
    return False
Checks a USB device to see if it looks like a MicroPython device.
14,847
def print_validation_errors(result):
    # NOTE: the message and separator strings were elided in the source and
    # are reconstructed as placeholders.
    click.echo(red('Validation failed'))
    click.echo(red('-' * 40))
    messages = result.get_messages()
    for property in messages.keys():
        click.echo(yellow(property + ':'))
        for error in messages[property]:
            click.echo(red('  * ' + error))
    click.echo()
Accepts validation result object and prints report (in red)
14,848
def remove_widget(self, widget):
    button = self._buttons.pop(widget)
    self.layout().removeWidget(button)
    button.deleteLater()
Remove the given widget from the tooltip :param widget: the widget to remove :type widget: QtGui.QWidget :returns: None :rtype: None :raises: KeyError
14,849
def _traverse_repos(self, callback, repo_name=None):
    # NOTE: the opts key and the path/extension strings were elided in the
    # source; the names below follow Salt SPM conventions and should be
    # treated as reconstructions.
    repo_files = []
    if os.path.exists(self.opts['spm_repos_config']):
        repo_files.append(self.opts['spm_repos_config'])
    for (dirpath, dirnames, filenames) in salt.utils.path.os_walk(
            '{0}.d'.format(self.opts['spm_repos_config'])):
        for repo_file in filenames:
            if not repo_file.endswith('.repo'):
                continue
            repo_files.append(repo_file)
    for repo_file in repo_files:
        repo_path = '{0}.d/{1}'.format(self.opts['spm_repos_config'], repo_file)
        with salt.utils.files.fopen(repo_path) as rph:
            repo_data = salt.utils.yaml.safe_load(rph)
            for repo in repo_data:
                if repo_data[repo].get('enabled', True) is False:
                    continue
                if repo_name is not None and repo != repo_name:
                    continue
                callback(repo, repo_data[repo])
Traverse through all repo files and apply the functionality provided in the callback to them
14,850
def login_failures(user):
    # NOTE: the AIX command and grep pattern strings were elided in the
    # source; they are reconstructed to match the documented behavior
    # (3 or more failures).
    cmd = 'lsuser -a unsuccessful_login_count {0}'.format(user)
    cmd += " | grep -E 'unsuccessful_login_count=([3-9]|[0-9][0-9]+)'"
    out = __salt__['cmd.run_all'](cmd, output_loglevel='trace',
                                  python_shell=True)
    ret = []
    lines = out['stdout'].splitlines()
    for line in lines:
        ret.append(line.split()[0])
    return ret
Query for all accounts which have 3 or more login failures. CLI Example: .. code-block:: bash salt <minion_id> shadow.login_failures ALL
14,851
def lookup(self, mbid, include=()):
    if include:
        for included in include:
            if included not in self.available_includes:
                raise ValueError(
                    "{0!r} is not an includable entity for {1}".format(
                        included, self.path,
                    ),
                )
        query_string = "?" + urlencode([("inc", " ".join(include))])
    else:
        query_string = ""
    path = "{0}/{1}{2}".format(self.path, mbid, query_string)
    return self.client.request(path)
Lookup an entity directly from a specified :term:`MBID`\ .
14,852
def listDataTypes(self, datatype="", dataset=""):
    try:
        return self.dbsDataType.listDataType(dataType=datatype, dataset=dataset)
    except dbsException as de:
        dbsExceptionHandler(de.eCode, de.message, self.logger.exception,
                            de.serverError)
    except Exception as ex:
        sError = "DBSReaderModel/listDataTypes. %s\n. Exception trace: \n %s" \
            % (ex, traceback.format_exc())
        # the exception-code string was elided in the source;
        # 'dbsException-server-error' matches the handler's usage
        dbsExceptionHandler('dbsException-server-error',
                            dbsExceptionCode['dbsException-server-error'],
                            self.logger.exception, sError)
API to list data types known to dbs (when no parameter supplied). :param dataset: Returns data type (of primary dataset) of the dataset (Optional) :type dataset: str :param datatype: List specific data type :type datatype: str :returns: List of dictionaries containing the following keys (primary_ds_type_id, data_type) :rtype: list of dicts
14,853
def _freeze_relations(self, relations):
    if relations:
        sel = relations[0]
        sel.relations.extend(relations[1:])
        return ct.SelectorList([sel.freeze()])
    else:
        return ct.SelectorList()
Freeze relation.
14,854
def addports(self):
    timer = metrics.Timer()
    timer.start()
    for port in self.service.ports:
        p = self.findport(port)
        for op in port.binding.operations.values():
            m = p[0].method(op.name)
            binding = m.binding.input
            method = (m.name, binding.param_defs(m))
            p[1].append(method)
            metrics.log.debug("method created: %s", m.name, timer)
        p[1].sort()
    timer.stop()
Look through the list of service ports and construct a list of tuples where each tuple is used to describe a port and it's list of methods as: (port, [method]). Each method is tuple: (name, [pdef,..] where each pdef is a tuple: (param-name, type).
14,855
def discover(self, metafile):
    # NOTE: several string literals (the 'local' sentinel, file mode, csv
    # separator, status values, log messages, and the URL separator) were
    # elided in the source and are reconstructed as placeholders.
    for report in self.reports:
        if report.remote_location == 'local':
            meta_path = os.path.join(
                os.path.join(report.location, self.resource_path), metafile)
            if naarad.utils.is_valid_file(meta_path):
                with open(meta_path, 'r') as meta_file:
                    if metafile == CONSTANTS.STATS_CSV_LIST_FILE:
                        report.stats = meta_file.readlines()[0].split(',')
                    elif metafile == CONSTANTS.PLOTS_CSV_LIST_FILE:
                        report.datasource = meta_file.readlines()[0].split(',')
                    elif metafile == CONSTANTS.CDF_PLOTS_CSV_LIST_FILE:
                        report.cdf_datasource = meta_file.readlines()[0].split(',')
            else:
                report.status = 'error'
                self.status = 'error'
                logger.error('unable to locate metadata file for report %s',
                             report.label)
                return False
        else:
            stats_url = (report.remote_location + '/' + self.resource_path +
                         '/' + metafile)
            meta_file_data = naarad.httpdownload.stream_url(stats_url)
            if meta_file_data:
                if metafile == CONSTANTS.STATS_CSV_LIST_FILE:
                    report.stats = meta_file_data.split(',')
                elif metafile == CONSTANTS.PLOTS_CSV_LIST_FILE:
                    report.datasource = meta_file_data.split(',')
                elif metafile == CONSTANTS.CDF_PLOTS_CSV_LIST_FILE:
                    report.cdf_datasource = meta_file_data.split(',')
            else:
                report.status = 'error'
                self.status = 'error'
                logger.error('unable to download metadata file for report %s',
                             report.label)
                return False
    return True
Determine what summary stats, time series, and CDF csv exist for the reports that need to be diffed. :return: boolean: return whether the summary stats / time series / CDF csv summary was successfully located
14,856
def uimports(code):
    # the encoding and prefix literals were elided in the source; a 'u'
    # byte prefix matches the documented CPython -> MicroPython renaming
    for uimport in UIMPORTLIST:
        uimport = bytes(uimport, 'utf8')
        code = code.replace(uimport, b'u' + uimport)
    return code
converts CPython module names into MicroPython equivalents
14,857
def bios_image(self, bios_image):
    self._bios_image = self.manager.get_abs_image_path(bios_image)
    # the log format string was elided in the source
    log.info('QEMU VM "{name}" [{id}] has set the bios image path to '
             '{bios_image}'.format(name=self._name, id=self._id,
                                   bios_image=self._bios_image))
Sets the bios image for this QEMU VM. :param bios_image: QEMU bios image path
14,858
def names(self):
    # the attribute names were elided in the source; 'key' and 'aliases'
    # follow from the surrounding logic
    if getattr(self, 'key', None) is None:
        result = []
    else:
        result = [self.key]
    if hasattr(self, 'aliases'):
        result.extend(self.aliases)
    return result
Names, by which the instance can be retrieved.
14,859
def url(self):
    if self.parent is None:
        # the literal root path segments were elided in the source and
        # cannot be recovered; only the base URL is kept here
        pieces = [self.client.base_url]
    else:
        pieces = [self.parent.url]
    pieces.append(self.model_class.path)
    return '/'.join(pieces)  # '/' assumed as the URL separator
The url for this collection.
14,860
def translate_latex2unicode(text, kb_file=None):
    if kb_file is None:
        kb_file = get_kb_filename()
    try:
        text = decode_to_unicode(text)
    except UnicodeDecodeError:
        text = unicode(wash_for_utf8(text))
    if CFG_LATEX_UNICODE_TRANSLATION_CONST == {}:
        _load_latex2unicode_constants(kb_file)
    # the two dict keys below ('regexp_obj', 'table') were elided in the
    # source and are reconstructed from their usage
    for match in CFG_LATEX_UNICODE_TRANSLATION_CONST['regexp_obj'] \
            .finditer(text):
        text = re.sub("[\{\$]?%s[\}\$]?" % (re.escape(match.group()),),
                      CFG_LATEX_UNICODE_TRANSLATION_CONST['table'][match.group()],
                      text)
    return text
Translate latex text to unicode. This function will take given text, presumably containing LaTeX symbols, and attempts to translate it to Unicode using the given or default KB translation table located under CFG_ETCDIR/bibconvert/KB/latex-to-unicode.kb. The translated Unicode string will then be returned. If the translation table and compiled regular expression object is not previously generated in the current session, they will be. :param text: a text presumably containing LaTeX symbols. :type text: string :param kb_file: full path to file containing latex2unicode translations. Defaults to CFG_ETCDIR/bibconvert/KB/latex-to-unicode.kb :type kb_file: string :return: Unicode representation of translated text :rtype: unicode
14,861
def extendedEuclid(a, b):
    if a == 0:
        return b, 0, 1
    else:
        g, y, x = extendedEuclid(b % a, a)
        return g, x - (b // a) * y, y
return a tuple of three values: x, y and z, such that x is the GCD of a and b, and x = y * a + z * b
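A quick sanity check of the documented identity x = y*a + z*b (the identity holds by construction of the recursion):

g, y, z = extendedEuclid(240, 46)
assert g == 2
assert y * 240 + z * 46 == g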
14,862
def soma_points(self):
    db = self.data_block
    return db[db[:, COLS.TYPE] == POINT_TYPE.SOMA]
Get the soma points
14,863
def decrypt(self, ciphertext):
    cipherbytes = ciphertext.encode()
    try:
        combined = base64.b64decode(cipherbytes)
    except (base64.binascii.Error, TypeError) as e:
        raise DataIntegrityError("Cipher text is damaged: {}".format(e))
    # layout: 12-byte nonce | 16-byte GCM tag | ciphertext
    nonce = combined[:12]
    if len(nonce) != 12:
        raise DataIntegrityError("Cipher text is damaged: invalid nonce length")
    tag = combined[12:28]
    if len(tag) != 16:
        raise DataIntegrityError("Cipher text is damaged: invalid tag length")
    encrypted = combined[28:]
    cipher = AES.new(self.cipher_key, AES.MODE_GCM, nonce)
    try:
        compressed = cipher.decrypt_and_verify(encrypted, tag)
    except ValueError as e:
        raise DataIntegrityError("Cipher text is damaged: {}".format(e))
    plainbytes = zlib.decompress(compressed)
    plaintext = plainbytes.decode()
    return plaintext
Return plaintext for given ciphertext.
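For orientation, a matching encrypt() would look roughly like this (a hypothetical sketch using PyCryptodome; the class's real encrypt method is not shown above):

import base64, zlib
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes

def encrypt_sketch(cipher_key, plaintext):
    compressed = zlib.compress(plaintext.encode())
    nonce = get_random_bytes(12)
    cipher = AES.new(cipher_key, AES.MODE_GCM, nonce=nonce)
    encrypted, tag = cipher.encrypt_and_digest(compressed)
    # nonce | tag | ciphertext, matching the decrypt() parsing above
    return base64.b64encode(nonce + tag + encrypted).decode()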
14,864
def fetch_access_token_by_client_credentials(self):
    # NOTE: the string literals in this body were fused and garbled in
    # extraction; they are reconstructed from the visible fragments
    # ('%s:%s', 'utf-8', 'Authorization', 'Basic ', 'Content-type',
    # 'application/x-www-form-urlencoded', 'grant_type',
    # 'client_credentials', 'scope', 'user_profile user_territory').
    client_credential_base = '%s:%s' % (self.client_id, self.client_secret)
    client_credentials = base64.b64encode(
        client_credential_base.encode('utf-8')).decode('utf-8')
    headers = {'Authorization': 'Basic ' + client_credentials,
               'Content-type': 'application/x-www-form-urlencoded'}
    post_parameters = {'grant_type': 'client_credentials',
                       'scope': 'user_profile user_territory'}
    json_object = self.http._post_data(KKBOXOAuth.OAUTH_TOKEN_URL,
                                       post_parameters, headers)
    self.access_token = KKBOXAccessToken(**json_object)
    return self.access_token
There are three ways to let you start using KKBOX's Open/Partner API. The first way among them is to generate a client credential to fetch an access token to let KKBOX identify you. It allows you to access public data from KKBOX such as public albums, playlists and so on. However, you cannot use client credentials to access private data of a user. You have to let users to log-in into KKBOX and grant permissions for you to do so. You cannot use client credentials to do media playback either, since it requires a Premium Membership. :return: an access token :rtype: :class:`kkbox_sdk.KKBOXAccessToken` See `https://docs-en.kkbox.codes/docs/appendix-a`.
14,865
def pseudosection(self, column='r', filename=None, log10=False, **kwargs):
    # the default column name was elided in the source; 'r' is restored
    # from the docstring below
    fig, ax, cb = PS.plot_pseudosection_type2(
        self.data, column=column, log10=log10, **kwargs
    )
    if filename is not None:
        fig.savefig(filename, dpi=300)
    return fig, ax, cb
Plot a pseudosection of the given column. Note that this function only works with dipole-dipole data at the moment. Parameters ---------- column : string, optional Column to plot into the pseudosection, default: r filename : string, optional if not None, save the resulting figure directory to disc log10 : bool, optional if True, then plot values in log10, default: False **kwargs : dict all additional parameters are directly provided to :py:func:`reda.plotters.pseudoplots.PS.plot_pseudosection_type2` Returns ------- fig : :class:`matplotlib.Figure` matplotlib figure object ax : :class:`matplotlib.axes` matplotlib axes object cb : colorbar object matplotlib colorbar object
14,866
def content_children(self):
    text_types = {CT_RegularTextRun, CT_TextLineBreak, CT_TextField}
    return tuple(elm for elm in self if type(elm) in text_types)
A sequence containing the text-container child elements of this ``<a:p>`` element, i.e. (a:r|a:br|a:fld).
14,867
def get_pool_details(self, pool_id):
    # the URI template was elided in the source; the path below is a
    # placeholder reconstruction
    uri = 'api/pools/%s/' % pool_id
    return super(ApiPool, self).get(uri)
Method to return object pool by id Param pool_id: pool id Returns object pool
14,868
def MatrixTriangularSolve(a, rhs, lower, adj):
    trans = 0 if not adj else 2
    r = np.empty(rhs.shape).astype(a.dtype)
    for coord in np.ndindex(a.shape[:-2]):
        pos = coord + (Ellipsis,)
        r[pos] = sp.linalg.solve_triangular(
            a[pos] if not adj else np.conj(a[pos]), rhs[pos],
            trans=trans, lower=lower)
    return r,
Matrix triangular solve op.
14,869
def print_file(self, f=sys.stdout, file_format="mwtab"):
    # NOTE: the '#'-prefixed section strings were truncated in the source
    # (everything from '#' onward was lost); they are reconstructed to
    # match mwTab block conventions.
    if file_format == "mwtab":
        for key in self:
            if key == "SUBJECT_SAMPLE_FACTORS":
                print("#SUBJECT_SAMPLE_FACTORS", file=f)
            elif key == "METABOLOMICS WORKBENCH":
                print(self.header, file=f)
            else:
                print("#" + key, file=f)
            self.print_block(key, f=f, file_format=file_format)
        print("#END", file=f)
    elif file_format == "json":
        print(self._to_json(), file=f)
Print :class:`~mwtab.mwtab.MWTabFile` into a file or stdout. :param io.StringIO f: writable file-like stream. :param str file_format: Format to use: `mwtab` or `json`. :param f: Print to file or stdout. :param int tw: Tab width. :return: None :rtype: :py:obj:`None`
14,870
def unindex_template(self, tpl):
    # the attribute name and default were elided in the source; 'name'
    # with an empty default is assumed
    name = getattr(tpl, 'name', '')
    try:
        del self.name_to_template[name]
    except KeyError:
        pass
Unindex a template from the `templates` container. :param tpl: The template to un-index :type tpl: alignak.objects.item.Item :return: None
14,871
def avl_release_parent(node):
    parent = node.parent
    if parent is not None:
        if parent.right is node:
            parent.right = None
        elif parent.left is node:
            parent.left = None
        else:
            raise AssertionError()
        node.parent = None
        parent.balance = max(height(parent.right), height(parent.left)) + 1
    return node, parent
removes the parent of a child
14,872
def get_K(rho, z, alpha=1.0, zint=100.0, n2n1=0.95, get_hdet=False, K=1,
          Kprefactor=None, return_Kprefactor=False, npts=20, **kwargs):
    # the two ValueError messages were elided in the source and are
    # reconstructed
    if type(rho) != np.ndarray or type(z) != np.ndarray or (rho.shape != z.shape):
        raise ValueError('rho and z must be ndarrays of the same shape')
    pts, wts = np.polynomial.legendre.leggauss(npts)
    n1n2 = 1.0 / n2n1
    rr = np.ravel(rho)
    zr = np.ravel(z)
    cos_theta = 0.5*(1-np.cos(alpha))*pts + 0.5*(1+np.cos(alpha))
    if Kprefactor is None:
        Kprefactor = get_Kprefactor(z, cos_theta, zint=zint,
                                    n2n1=n2n1, get_hdet=get_hdet, **kwargs)
    if K == 1:
        part_1 = j0(np.outer(rr, np.sqrt(1-cos_theta**2))) * \
            np.outer(np.ones_like(rr),
                     0.5*(get_taus(cos_theta, n2n1=n2n1) +
                          get_taup(cos_theta, n2n1=n2n1) *
                          csqrt(1-n1n2**2*(1-cos_theta**2))))
        integrand = Kprefactor * part_1
    elif K == 2:
        part_2 = j2(np.outer(rr, np.sqrt(1-cos_theta**2))) * \
            np.outer(np.ones_like(rr),
                     0.5*(get_taus(cos_theta, n2n1=n2n1) -
                          get_taup(cos_theta, n2n1=n2n1) *
                          csqrt(1-n1n2**2*(1-cos_theta**2))))
        integrand = Kprefactor * part_2
    elif K == 3:
        part_3 = j1(np.outer(rho, np.sqrt(1-cos_theta**2))) * \
            np.outer(np.ones_like(rr),
                     n1n2*get_taup(cos_theta, n2n1=n2n1) *
                     np.sqrt(1-cos_theta**2))
        integrand = Kprefactor * part_3
    else:
        raise ValueError('K must be 1, 2, or 3')
    big_wts = np.outer(np.ones_like(rr), wts)
    kint = (big_wts*integrand).sum(axis=1) * 0.5*(1-np.cos(alpha))
    if return_Kprefactor:
        return kint.reshape(rho.shape), Kprefactor
    else:
        return kint.reshape(rho.shape)
Calculates one of three electric field integrals. Internal function for calculating point spread functions. Returns one of three electric field integrals that describe the electric field near the focus of a lens; these integrals appear in Hell's psf calculation. Parameters ---------- rho : numpy.ndarray Rho in cylindrical coordinates, in units of 1/k. z : numpy.ndarray Z in cylindrical coordinates, in units of 1/k. `rho` and `z` must be the same shape alpha : Float, optional The acceptance angle of the lens, on (0,pi/2). Default is 1. zint : Float, optional The distance of the len's unaberrated focal point from the optical interface, in units of 1/k. Default is 100. n2n1 : Float, optional The ratio n2/n1 of the index mismatch between the sample (index n2) and the optical train (index n1). Must be on [0,inf) but should be near 1. Default is 0.95 get_hdet : Bool, optional Set to True to get the detection portion of the psf; False to get the illumination portion of the psf. Default is True K : {1, 2, 3}, optional Which of the 3 integrals to evaluate. Default is 1 Kprefactor : numpy.ndarray or None This array is calculated internally and optionally returned; pass it back to avoid recalculation and increase speed. Default is None, i.e. calculate it internally. return_Kprefactor : Bool, optional Set to True to also return the Kprefactor (parameter above) to speed up the calculation for the next values of K. Default is False npts : Int, optional The number of points to use for Gauss-Legendre quadrature of the integral. Default is 20, which is a good number for x,y,z less than 100 or so. Returns ------- kint : numpy.ndarray The integral K_i; rho.shape numpy.array [, Kprefactor] : numpy.ndarray The prefactor that is independent of which integral is being calculated but does depend on the parameters; can be passed back to the function for speed. Notes ----- npts=20 gives double precision (no difference between 20, 30, and doing all the integrals with scipy.quad). The integrals are only over the acceptance angle of the lens, so for moderate x,y,z they don't vary too rapidly. For x,y,z, zint large compared to 100, a higher npts might be necessary.
14,873
def _process_state_embryo(self, job_record):
    uow, is_duplicate = self.insert_and_publish_uow(job_record, 0, 0)
    self.update_job(job_record, uow, job.STATE_IN_PROGRESS)
method that takes care of processing job records in STATE_EMBRYO state
14,874
def add(self, field, op=None, val=None):
    if field.has_subfield():
        self._fields[field.full_name] = 1
    else:
        self._fields[field.name] = 1
    if op and op.is_size() and not op.is_variable():
        self._slices[field.name] = val + 1
    if op and op.is_variable():
        self._fields[val] = 1
Update report fields to include a new one, if it isn't already included.

:param field: The field to include
:type field: Field
:param op: Operation
:type op: ConstraintOperator
:param val: Constraint value, used for size and variable constraints
:return: None
14,875
def month_days(year, month):
    if month > 13:
        raise ValueError("Incorrect month index")
    if month in (IYYAR, TAMMUZ, ELUL, TEVETH, VEADAR):
        return 29
    if month == HESHVAN and (year_days(year) % 10) != 5:
        return 29
    if month == KISLEV and (year_days(year) % 10) == 3:
        return 29
    return 30
How many days are in a given month of a given year
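Example (a small sketch of the month-length rules above; it assumes the module's month constants and year_days() are in scope, and the year value is arbitrary):

# Fixed 29-day months vs. months whose length depends on the year's type:
assert month_days(5779, IYYAR) == 29
print(month_days(5779, HESHVAN))  # 29 or 30, depending on year_days(5779) % 10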
14,876
def transforms(self) -> Mapping[Type, Iterable[Type]]:
    try:
        return getattr(self.__class__, "transform")._transforms
    except AttributeError:
        return {}
The available data transformers.
14,877
def get_template_names(self):
    if self.request.is_ajax():
        template = self.ajax_template_name
    else:
        template = self.template_name
    return template
Returns the template name to use for this request.
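Example (a hypothetical Django view using the override above; the class and template names are illustrative only):

from django.views.generic import TemplateView

class DashboardView(TemplateView):
    template_name = 'dashboard.html'             # full page
    ajax_template_name = 'dashboard_inner.html'  # fragment for AJAX requests

    def get_template_names(self):
        if self.request.is_ajax():
            return self.ajax_template_name
        return self.template_name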
14,878
def _slice_area_from_bbox(self, src_area, dst_area, ll_bbox=None, xy_bbox=None):
    if ll_bbox is not None:
        # the area name/description/projection literals were stripped in
        # extraction; the values below are best-effort assumptions
        dst_area = AreaDefinition(
            'crop_area', 'crop_area', 'crop_latlong',
            {'proj': 'latlong'}, 100, 100, ll_bbox)
    elif xy_bbox is not None:
        dst_area = AreaDefinition(
            'crop_area', 'crop_area', 'crop_xy',
            src_area.proj_dict, src_area.x_size, src_area.y_size,
            xy_bbox)
    x_slice, y_slice = src_area.get_area_slices(dst_area)
    return src_area[y_slice, x_slice], y_slice, x_slice
Slice the provided area using the bounds provided.
14,879
def configure(args, parser):
    # NOTE: many string literals in this record were stripped during
    # extraction; '...' below marks instructional text that could not be
    # recovered, and short literals (service names, dict keys, URLs) are
    # best-effort reconstructions.
    if not args.force and on_travis():
        parser.error(red("doctr appears to be running on Travis. Use "
                         "doctr configure --force to run anyway."))

    if not args.authenticate:
        args.upload_key = False

    if args.travis_tld:
        if args.travis_tld in ['com', '.com', 'travis-ci.com', 'https://travis-ci.com']:
            args.travis_tld = 'travis-ci.com'
        else:
            args.travis_tld = 'travis-ci.org'

    print(green(dedent('...')))  # welcome message (stripped)

    login_kwargs = {}
    if args.authenticate:
        while not login_kwargs:
            try:
                login_kwargs = GitHub_login()
            except AuthenticationFailed as e:
                print(red(e))
    else:
        login_kwargs = {'auth': None, 'headers': None}

    GitHub_token = None
    get_build_repo = False
    default_repo = guess_github_repo()
    while not get_build_repo:
        try:
            if default_repo:
                build_repo = input("What repo do you want to build the docs for? "
                                   "[{default_repo}] ".format(default_repo=blue(default_repo)))
                if not build_repo:
                    build_repo = default_repo
            else:
                build_repo = input("What repo do you want to build the docs for "
                                   "(org/reponame, like 'drdoctr/doctr')? ")

            is_private = check_repo_exists(build_repo, service='github',
                                           **login_kwargs)['private']
            if is_private and not args.authenticate:
                sys.exit(red("--no-authenticate is not supported for private repositories."))

            headers = {}
            travis_token = None
            if is_private:
                if args.token:
                    GitHub_token = generate_GitHub_token(
                        note="Doctr token for pushing to gh-pages from Travis (for {build_repo}).".format(build_repo=build_repo),
                        scopes=["read:org", "user:email", "repo"],
                        **login_kwargs)['token']
                travis_token = get_travis_token(GitHub_token=GitHub_token, **login_kwargs)
                headers['Authorization'] = "token {}".format(travis_token)

            service = args.travis_tld if args.travis_tld else 'travis'
            c = check_repo_exists(build_repo, service=service, ask=True, headers=headers)
            tld = c['service'][-4:]
            is_private = c['private'] or is_private
            if is_private and not args.authenticate:
                sys.exit(red("--no-authenticate is not supported for private repos."))

            get_build_repo = True
        except GitHubError:
            raise
        except RuntimeError as e:
            print(red('...'.format(e, 70)))  # wrapped error message (stripped)

    get_deploy_repo = False
    while not get_deploy_repo:
        try:
            deploy_repo = input("What repo do you want to deploy the docs to? "
                                "[{build_repo}] ".format(build_repo=blue(build_repo)))
            if not deploy_repo:
                deploy_repo = build_repo

            if deploy_repo != build_repo:
                check_repo_exists(deploy_repo, service='github', **login_kwargs)

            get_deploy_repo = True
        except GitHubError:
            raise
        except RuntimeError as e:
            print(red('...'.format(e, 70)))  # wrapped error message (stripped)

    N = IncrementingInt(1)

    header = green("\n================== You should now do the following ==================\n")

    if args.token:
        if not GitHub_token:
            GitHub_token = generate_GitHub_token(**login_kwargs)['token']
        encrypted_variable = encrypt_variable(
            "GH_TOKEN={GitHub_token}".format(GitHub_token=GitHub_token).encode('utf-8'),
            build_repo=build_repo, tld=tld, travis_token=travis_token, **login_kwargs)
        print(dedent('...'))  # token instructions (stripped)
        print(header)
    else:
        deploy_key_repo, env_name, keypath = get_deploy_key_repo(deploy_repo, args.key_path)
        private_ssh_key, public_ssh_key = generate_ssh_key()
        key = encrypt_to_file(private_ssh_key, keypath + '.enc')
        del private_ssh_key
        public_ssh_key = public_ssh_key.decode('ASCII')
        encrypted_variable = encrypt_variable(
            env_name.encode('utf-8') + b"=" + key,
            build_repo=build_repo, tld=tld, travis_token=travis_token, **login_kwargs)

        deploy_keys_url = 'https://github.com/{deploy_repo}/settings/keys'.format(deploy_repo=deploy_key_repo)

        if args.upload_key:
            upload_GitHub_deploy_key(deploy_key_repo, public_ssh_key, **login_kwargs)
            print(dedent('...'.format(deploy_repo=deploy_key_repo,
                                      deploy_keys_url=deploy_keys_url,
                                      keypath=keypath)))
            print(header)
        else:
            print(header)
            print(dedent('...'.format(ssh_key=public_ssh_key,
                                      deploy_keys_url=deploy_keys_url, N=N,
                                      BOLD_MAGENTA=BOLD_MAGENTA, RESET=RESET)))
            print(dedent('...'.format(keypath=keypath, N=N,
                                      BOLD_MAGENTA=BOLD_MAGENTA, RESET=RESET)))

    options = '...' + bold_black('...')
    if args.key_path:
        options += ' --key-path {keypath}'.format(keypath=keypath)
    if deploy_repo != build_repo:
        options += ' --deploy-repo {deploy_repo}'.format(deploy_repo=deploy_repo)
    key_type = "deploy key"
    if args.token:
        options += ' --token'
        key_type = "personal access token"

    print(dedent('...'.format(options=options, N=N, key_type=key_type,
                              encrypted_variable=encrypted_variable.decode('utf-8'),
                              deploy_repo=deploy_repo,
                              BOLD_MAGENTA=BOLD_MAGENTA,
                              BOLD_BLACK=BOLD_BLACK, RESET=RESET)))
    print(dedent('...'.format(BOLD_BLACK=BOLD_BLACK, RESET=RESET)))
    print(dedent('...'))
    print(dedent('...'.format(N=N, BOLD_MAGENTA=BOLD_MAGENTA, RESET=RESET)))

    print("See the documentation at https://drdoctr.github.io/ for more information.")
Color guide:
- red: Error and warning messages
- green: Welcome messages (use sparingly)
- blue: Default values
- bold_magenta: Action items
- bold_black: Parts of code to be run or copied that should be modified
14,880
def create_resource_quota(self, name, quota_json):
    url = self._build_k8s_url("resourcequotas/")
    response = self._post(url, data=json.dumps(quota_json),
                          headers={"Content-Type": "application/json"})
    if response.status_code == http_client.CONFLICT:
        url = self._build_k8s_url("resourcequotas/%s" % name)
        response = self._put(url, data=json.dumps(quota_json),
                             headers={"Content-Type": "application/json"})
    check_response(response)
    return response
Prevent builds being scheduled and wait for running builds to finish. :return:
14,881
def __roll(self, unrolled):
    rolled = []
    index = 0
    for count in range(len(self.__sizes) - 1):
        in_size = self.__sizes[count]
        out_size = self.__sizes[count + 1]
        theta_unrolled = np.matrix(unrolled[index:index + (in_size + 1) * out_size])
        theta_rolled = theta_unrolled.reshape((out_size, in_size + 1))
        rolled.append(theta_rolled)
        index += (in_size + 1) * out_size
    return rolled
Converts parameter array back into matrices.
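Example (an illustrative shape check, not tied to the class above): for layer sizes [4, 5, 3], the flat vector holds (4+1)*5 + (5+1)*3 = 43 parameters, which roll into weight matrices of shape (5, 5) and (3, 6), each with one bias column.

sizes = [4, 5, 3]
n_params = sum((sizes[i] + 1) * sizes[i + 1] for i in range(len(sizes) - 1))
assert n_params == 43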
14,882
def attribute(name, value, getter=None, setter=None, deleter=None,
              label=None, desc=None, meta=None):
    _annotate("attribute", name, value, getter=getter, setter=setter,
              deleter=deleter, label=label, desc=desc, meta=meta)
Annotates a model attribute. @param name: attribute name, unique for a model. @type name: str or unicode @param value: attribute type information. @type value: implementer of L{src.feat.models.interface.IValueInfo} @param getter: an effect or None if the attribute is write-only; the retrieved value that will be validated; see feat.models.call for effect information. @type getter: callable or None @param setter: an effect or None if the attribute is read-only; the new value will be validated, possibly converted and returned; see feat.models.call for effect information. @type setter: callable or None @param deleter: an effect or None if the attribute cannot be deleted; @type deleter: callable or None @param label: the attribute label or None. @type label: str or unicode or None @param desc: the description of the attribute or None if not documented. @type desc: str or unicode or None @param meta: model item metadata atoms. @type meta: list of tuple
14,883
def getMonitor(self):
    from .RegionMatching import Screen
    scr = self.getScreen()
    return scr if scr is not None else Screen(0)
Returns an instance of the ``Screen`` object this Location is inside. Returns the primary screen if the Location isn't positioned in any screen.
14,884
def remove_isolated_nodes(graph):
    nodes = list(nx.isolates(graph))
    graph.remove_nodes_from(nodes)
Remove isolated nodes from the network, in place. :param pybel.BELGraph graph: A BEL graph
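Example (a minimal demonstration with a plain networkx graph; a BELGraph behaves the same way for this purpose):

import networkx as nx

g = nx.Graph()
g.add_edge(1, 2)
g.add_node(3)  # isolated
remove_isolated_nodes(g)
assert set(g.nodes()) == {1, 2}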
14,885
def hasLogger(self, logger):
    if isinstance(logger, logging.Logger):
        logger = logger.name  # bug fix: was `logging.name`, which is not a logger name
    return logger in self._loggers
Returns whether or not the input logger is tracked by this widget.

:param logger | <str> || <logging.Logger>
14,886
def get_exception(self):
    self.lock.acquire()
    try:
        e = self.saved_exception
        self.saved_exception = None
        return e
    finally:
        self.lock.release()
Return any exception that happened during the last server request. This can be used to fetch more specific error information after using calls like `start_client`. The exception (if any) is cleared after this call. :return: an exception, or ``None`` if there is no stored exception. .. versionadded:: 1.1
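Example (the typical paramiko pattern this method supports; the host is a placeholder):

import socket
import paramiko

sock = socket.create_connection(('example.com', 22))
transport = paramiko.Transport(sock)
try:
    transport.start_client()
except paramiko.SSHException:
    # Fetch the more specific error saved during negotiation:
    print('negotiation failed:', transport.get_exception())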
14,887
def light_3d(self, r, kwargs_list, k=None):
    r = np.array(r, dtype=float)
    flux = np.zeros_like(r)
    for i, func in enumerate(self.func_list):
        if k is None or k == i:
            # the stripped literals below are assumptions: positional keys
            # are dropped, and only profiles known to support light_3d pass
            kwargs = {key: v for key, v in kwargs_list[i].items()
                      if key not in ['center_x', 'center_y']}
            if self.profile_type_list[i] in ['HERNQUIST', 'HERNQUIST_ELLIPSE',
                                             'PJAFFE', 'PJAFFE_ELLIPSE',
                                             'GAUSSIAN', 'GAUSSIAN_ELLIPSE',
                                             'MULTI_GAUSSIAN',
                                             'MULTI_GAUSSIAN_ELLIPSE', 'NIE']:
                flux += func.light_3d(r, **kwargs)
            else:
                raise ValueError('Light profile %s does not support light_3d()'
                                 % self.profile_type_list[i])
    return flux
Computes 3d density at radius r.

:param r: 3d radius in units of arcsec relative to the center of the image
:type r: float or 1d numpy array
:param kwargs_list: list of keyword-argument dicts, one per light profile
:param k: if given, only evaluate the k-th profile
14,888
def _delete(self, identifier=None):
    # the assertion message and index field name were stripped in
    # extraction; the literals below are assumptions
    assert identifier is not None, 'identifier must not be None'
    writer = self.index.writer()
    writer.delete_by_term('identifier', identifier)
    writer.commit()
Deletes given identifier from index. Args: identifier (str): identifier of the document to delete.
14,889
def cosine(brands, exemplars, weighted_avg=False, sqrt=False):
    scores = {}
    for brand, followers in brands:
        if weighted_avg:
            scores[brand] = np.average(
                [_cosine(followers, others) for others in exemplars.values()],
                weights=[1. / len(others) for others in exemplars.values()])
        else:
            scores[brand] = 1. * sum(_cosine(followers, others)
                                     for others in exemplars.values()) / len(exemplars)
    if sqrt:
        scores = dict([(b, math.sqrt(s)) for b, s in scores.items()])
    return scores
Return the cosine similarity between a brand's followers and the exemplars.
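Example (a hypothetical sketch; it assumes the follower collections are whatever set-like objects the module's _cosine helper expects, and the names are illustrative):

brands = [('acme', {'u1', 'u2', 'u3'})]
exemplars = {'ex1': {'u1', 'u2'}, 'ex2': {'u3', 'u4'}}
scores = cosine(brands, exemplars, weighted_avg=True, sqrt=True)
print(scores['acme'])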
14,890
def process_tokens(self, tokens): control_pragmas = {"disable", "enable"} for (tok_type, content, start, _, _) in tokens: if tok_type != tokenize.COMMENT: continue match = OPTION_RGX.search(content) if match is None: continue first_group = match.group(1) if ( first_group.strip() == "disable-all" or first_group.strip() == "skip-file" ): if first_group.strip() == "disable-all": self.add_message( "deprecated-pragma", line=start[0], args=("disable-all", "skip-file"), ) self.add_message("file-ignored", line=start[0]) self._ignore_file = True return try: opt, value = first_group.split("=", 1) except ValueError: self.add_message( "bad-inline-option", args=first_group.strip(), line=start[0] ) continue opt = opt.strip() if opt in self._options_methods or opt in self._bw_options_methods: try: meth = self._options_methods[opt] except KeyError: meth = self._bw_options_methods[opt] self.add_message( "deprecated-pragma", line=start[0], args=(opt, opt.replace("-msg", "")), ) for msgid in utils._splitstrip(value): if opt in control_pragmas: self._pragma_lineno[msgid] = start[0] try: if (opt, msgid) == ("disable", "all"): self.add_message( "deprecated-pragma", line=start[0], args=("disable=all", "skip-file"), ) self.add_message("file-ignored", line=start[0]) self._ignore_file = True return meth(msgid, "module", start[0]) except exceptions.UnknownMessageError: self.add_message("bad-option-value", args=msgid, line=start[0]) else: self.add_message("unrecognized-inline-option", args=opt, line=start[0])
process tokens from the current module to search for module/block level options
14,891
def get_group_target(self):
    return Surface._from_pointer(
        cairo.cairo_get_group_target(self._pointer), incref=True)
Returns the current destination surface for the context. This is either the original target surface as passed to :class:`Context` or the target surface for the current group as started by the most recent call to :meth:`push_group` or :meth:`push_group_with_content`.
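Example (the redirected-drawing pattern where this method is useful, using cairocffi; the surface size is illustrative):

import cairocffi as cairo

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 100, 100)
ctx = cairo.Context(surface)
ctx.push_group()
group_surface = ctx.get_group_target()  # the intermediate group surface
ctx.set_source_rgb(1, 0, 0)
ctx.paint()
ctx.pop_group_to_source()
ctx.paint()  # composite the group onto the original target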
14,892
def wsp(word):
    violations = 0
    unstressed = []
    for w in extract_words(word):
        # the '.' syllable-separator literals are assumptions; the regex
        # pattern below was stripped in extraction and is unrecoverable
        unstressed += w.split('.')[1::2]
        if w.count('.') % 2 == 0:
            unstressed += [w.rsplit('.', 1)[-1], ]
    for syll in unstressed:
        if re.search(r'...', syll, flags=FLAGS):  # superheavy-syllable pattern
            violations += 1
    return violations
Return the number of unstressed superheavy syllables.
14,893
def db_downgrade(version):
    v1 = get_db_version()
    migrate_api.downgrade(url=db_url, repository=db_repo, version=version)
    v2 = get_db_version()
    if v1 == v2:
        # message literals restored as assumptions; originals were stripped
        print 'No changes made.'
    else:
        print 'Downgraded: %s -> %s' % (v1, v2)
Downgrade the database
14,894
def _fixpath(self, p):
    return os.path.abspath(os.path.expanduser(p))
Apply tilde expansion and absolutization to a path.
14,895
def __select_builder(lxml_builder, libxml2_builder, cmdline_builder):
    if prefer_xsltproc:
        return cmdline_builder
    if not has_libxml2:
        if has_lxml:
            return lxml_builder
        else:
            return cmdline_builder
    return libxml2_builder
Selects a builder, based on which Python modules are present.
14,896
def hardmask(self):
    p = re.compile("a|c|g|t|n")
    for seq_id in self.fasta_dict.keys():
        self.fasta_dict[seq_id] = p.sub("N", self.fasta_dict[seq_id])
    return self
Mask all lowercase nucleotides with N's
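Example (illustrating the regex behavior above: lowercase, i.e. soft-masked, bases become 'N', while uppercase bases are untouched):

import re

p = re.compile("a|c|g|t|n")
print(p.sub("N", "ACGTacgtN n"))  # -> 'ACGTNNNNN N'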
14,897
def log_request_data_send(self, target_system, target_component, id, ofs, count,
                          force_mavlink1=False):
    return self.send(self.log_request_data_encode(target_system, target_component,
                                                  id, ofs, count),
                     force_mavlink1=force_mavlink1)
Request a chunk of a log target_system : System ID (uint8_t) target_component : Component ID (uint8_t) id : Log id (from LOG_ENTRY reply) (uint16_t) ofs : Offset into the log (uint32_t) count : Number of bytes (uint32_t)
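Example (a hypothetical pymavlink sketch; the connection string and log id are placeholders):

from pymavlink import mavutil

master = mavutil.mavlink_connection('udpin:0.0.0.0:14550')
master.wait_heartbeat()
# Request the first 90 bytes of log id 1:
master.mav.log_request_data_send(master.target_system,
                                 master.target_component,
                                 1, 0, 90)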
14,898
def jsonRender(self, def_buf):
    try:
        ret_dict = SerialBlock()
        ret_dict[Field.Meter_Address] = self.getMeterAddress()
        for fld in def_buf:
            compare_fld = fld.upper()
            if "RESERVED" not in compare_fld and "CRC" not in compare_fld:
                ret_dict[str(fld)] = def_buf[fld][MeterData.StringValue]
    except:
        ekm_log(traceback.format_exc(sys.exc_info()))
        return ""
    return json.dumps(ret_dict, indent=4)
Translate the passed serial block into string only JSON. Args: def_buf (SerialBlock): Any :class:`~ekmmeters.SerialBlock` object. Returns: str: JSON rendering of meter record.
14,899
def cleanup(self):
    keys = self.client.smembers(self.keys_container)
    for key in keys:
        entry = self.client.get(key)
        if entry:
            entry = pickle.loads(entry)
            if self._is_expired(entry, self.timeout):
                self.delete_entry(key)
Cleanup all the expired keys