Dataset columns: Unnamed: 0 (int64, 0 to 389k); code (string, lengths 26 to 79.6k); docstring (string, lengths 1 to 46.9k)
9,200
def featurecounts_stats_table(self): headers = OrderedDict() headers[] = { : , : , : 100, : 0, : , : } headers[] = { : .format(config.read_count_prefix), : .format(config.read_count_desc), : 0, : , : lambda x: float(x) * config.read_count_multiplier, : } self.general_stats_addcols(self.featurecounts_data, headers)
Take the parsed stats from the featureCounts report and add them to the basic stats table at the top of the report
9,201
def argument_kind(args): kinds = set(arg.kind for arg in args) if len(kinds) != 1: return None return kinds.pop()
Return the kind of an argument, based on one or more descriptions of the argument. Return None if every item does not have the same kind.
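A minimal, self-contained sketch of the same check; the `Arg` namedtuple here is a hypothetical stand-in for whatever argument-description objects the original operates on:

```python
from collections import namedtuple

# Hypothetical stand-in for the argument descriptions the original receives.
Arg = namedtuple("Arg", ["name", "kind"])

def argument_kind(args):
    # Collect every description's kind; exactly one surviving element means
    # all descriptions agree, otherwise the kind is ambiguous.
    kinds = set(arg.kind for arg in args)
    if len(kinds) != 1:
        return None
    return kinds.pop()

print(argument_kind([Arg("x", "positional"), Arg("y", "positional")]))  # positional
print(argument_kind([Arg("x", "positional"), Arg("y", "keyword")]))     # None
```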
9,202
def slackbuild(self, name, sbo_file): return URL(self.sbo_url + name + sbo_file).reading()
Read SlackBuild file
9,203
def _import_protobuf_from_file(grpc_pyfile, method_name, service_name = None): prefix = grpc_pyfile[:-12] pb2 = __import__("%s_pb2"%prefix) pb2_grpc = __import__("%s_pb2_grpc"%prefix) all_service_names = [stub_name[:-4] for stub_name in dir(pb2_grpc) if stub_name.endswith("Stub")] if (service_name): if (service_name not in all_service_names): return False, None all_service_names = [service_name] found_services = [] for service_name in all_service_names: service_descriptor = getattr(pb2, "DESCRIPTOR").services_by_name[service_name] for method in service_descriptor.methods: if(method.name == method_name): request_class = method.input_type._concrete_class response_class = method.output_type._concrete_class stub_class = getattr(pb2_grpc, "%sStub"%service_name) found_services.append(service_name) if (len(found_services) == 0): return False, None if (len(found_services) > 1): raise Exception("Error while loading protobuf. We found methods %s in multiply services [%s]." " You should specify service_name."%(method_name, ", ".join(found_services))) return True, (stub_class, request_class, response_class)
Helper function which tries to import a method from the given _pb2_grpc.py file. service_name should be provided only in case of a name conflict. Returns (False, None) in case of failure and (True, (stub_class, request_class, response_class)) in case of success.
9,204
def get(self, request): try: consent_record = self.get_consent_record(request) if consent_record is None: return self.get_no_record_response(request) except ConsentAPIRequestError as invalid_request: return Response({: str(invalid_request)}, status=HTTP_400_BAD_REQUEST) return Response(consent_record.serialize(), status=HTTP_200_OK)
GET /consent/api/v1/data_sharing_consent?username=bob&course_id=id&enterprise_customer_uuid=uuid *username* The edX username from whom to get consent. *course_id* The course for which consent is granted. *enterprise_customer_uuid* The UUID of the enterprise customer that requires consent.
9,205
def _parse_proc_mount(self): for line in fileops.readlines(): if not in line: continue items = line.split() path = items[1] opts = items[3].split() name = None for opt in opts: if opt in self: name = opt self.paths[name] = path if in opt: name = opt self.paths[name] = path self[name] = {} self[name][] = name self[name][] = True self[name][] = 0 self[name][] = 0 for opt in opts: if in opt: self[name][] = opt.replace(, )
Parse /proc/mounts
9,206
def _is_and_or_ternary(node): return ( isinstance(node, astroid.BoolOp) and node.op == "or" and len(node.values) == 2 and isinstance(node.values[0], astroid.BoolOp) and not isinstance(node.values[1], astroid.BoolOp) and node.values[0].op == "and" and not isinstance(node.values[0].values[1], astroid.BoolOp) and len(node.values[0].values) == 2 )
Returns True if the node is of the 'condition and true_value or false_value' form. None of condition, true_value and false_value should be a complex boolean expression.
9,207
def sort_layout(thread, listfile, column=0): from jcvi.formats.base import DictFile outfile = listfile.rsplit(".", 1)[0] + ".sorted.list" threadorder = thread.order fw = open(outfile, "w") lt = DictFile(listfile, keypos=column, valuepos=None) threaded = [] imported = set() for t in thread: accn = t.accn if accn not in lt: continue imported.add(accn) atoms = lt[accn] threaded.append(atoms) assert len(threaded) == len(imported) total = sum(1 for x in open(listfile)) logging.debug("Total: {0}, currently threaded: {1}".format(total, len(threaded))) fp = open(listfile) for row in fp: atoms = row.split() accn = atoms[0] if accn in imported: continue insert_into_threaded(atoms, threaded, threadorder) for atoms in threaded: print("\t".join(atoms), file=fw) fw.close() logging.debug("File `{0}` sorted to `{1}`.".format(outfile, thread.filename))
Sort the syntelog table according to chromosomal positions. First orient the contents against threadbed, then insert the contents not in threadbed next to their nearest neighbors.
9,208
def construct_formset(self): formset_class = self.get_formset() if hasattr(self, ): klass = type(self).__name__ raise DeprecationWarning( .format(klass), ) return formset_class(**self.get_formset_kwargs())
Returns an instance of the formset
9,209
def list_patterns(refresh=False, root=None): * if refresh: refresh_db(root) return _get_patterns(root=root)
List all known patterns from available repos. refresh force a refresh if set to True. If set to False (default) it depends on zypper if a refresh is executed. root operate on a different root directory. CLI Examples: .. code-block:: bash salt '*' pkg.list_patterns
9,210
def attrs(self): ret = dict(self.__dict__) del ret["_matches"] if self.type != c.COMPUTER: del ret["difficulty"] return ret
provide a copy of this player's attributes as a dictionary
9,211
def parse(expected, query): return dict( (key, parser(query.get(key, []))) for key, parser in expected.items())
Parse query parameters. :type expected: `dict` mapping `bytes` to `callable` :param expected: Mapping of query argument names to argument parsing callables. :type query: `dict` mapping `bytes` to `list` of `bytes` :param query: Mapping of query argument names to lists of argument values, this is the form that Twisted Web's `IRequest.args <twisted:twisted.web.iweb.IRequest.args>` value takes. :rtype: `dict` mapping `bytes` to `object` :return: Mapping of query argument names to parsed argument values.
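A small self-contained sketch of the same idea, using plain str keys instead of the bytes keys Twisted Web would supply:

```python
def parse(expected, query):
    # Apply each parser to the list of raw values for its key,
    # defaulting to an empty list when the key is absent from the query.
    return dict(
        (key, parser(query.get(key, [])))
        for key, parser in expected.items())

expected = {
    "page": lambda vals: int(vals[0]) if vals else 1,
    "tags": lambda vals: [v.lower() for v in vals],
}
print(parse(expected, {"page": ["3"], "tags": ["Foo", "BAR"]}))
# {'page': 3, 'tags': ['foo', 'bar']}
print(parse(expected, {}))
# {'page': 1, 'tags': []}
```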
9,212
def _linux_brdel(br): brctl = _tool_path() return __salt__[](.format(brctl, br), python_shell=False)
Internal, deletes the bridge
9,213
def reset(self): fetches = [] for processor in self.preprocessors: fetches.extend(processor.reset() or []) return fetches
Calls `reset` on all our Preprocessor objects. Returns: A list of tensors to be fetched.
9,214
def load_blotter_args(blotter_name=None, logger=None): if logger is None: logger = tools.createLogger(__name__, logging.WARNING) if blotter_name is not None: args_cache_file = tempfile.gettempdir() + "/" + blotter_name.lower() + ".qtpylib" if not os.path.exists(args_cache_file): logger.critical( "Cannot connect to running Blotter [%s]", blotter_name) if os.isatty(0): sys.exit(0) return [] else: blotter_files = sorted( glob.glob(tempfile.gettempdir() + "/*.qtpylib"), key=os.path.getmtime) if not blotter_files: logger.critical( "Cannot connect to running Blotter [%s]", blotter_name) if os.isatty(0): sys.exit(0) return [] args_cache_file = blotter_files[-1] args = pickle.load(open(args_cache_file, "rb")) args[] = True return args
Load running blotter's settings (used by clients) :Parameters: blotter_name : str Running Blotter's name (defaults to "auto-detect") logger : object Logger to be use (defaults to Blotter's) :Returns: args : dict Running Blotter's arguments
9,215
def nlargest(n, mapping): try: it = mapping.iteritems() except AttributeError: it = iter(mapping.items()) pq = minpq() try: for i in range(n): pq.additem(*next(it)) except StopIteration: pass try: while it: pq.pushpopitem(*next(it)) except StopIteration: pass out = list(pq.popkeys()) out.reverse() return out
Takes a mapping and returns the n keys associated with the largest values in descending order. If the mapping has fewer than n items, all its keys are returned. Equivalent to: ``next(zip(*heapq.nlargest(n, mapping.items(), key=lambda x: x[1])))`` Returns ------- list of up to n keys from the mapping
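The documented behaviour can be sanity-checked against the plain `heapq` equivalent (the version above streams the mapping through a bounded priority queue instead of materialising all items):

```python
import heapq

mapping = {"a": 1, "b": 5, "c": 3, "d": 4}

# Keys of the 3 largest values, in descending order of value.
top_keys = [k for k, v in heapq.nlargest(3, mapping.items(), key=lambda kv: kv[1])]
print(top_keys)  # ['b', 'd', 'c']
```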
9,216
def get_edge_type(self, edge_type): edges = [] for e in self.edges(): if self.adj[e[0]][e[1]].get() == edge_type: edges.append(e) return edges
Returns all edges with the specified edge type. Parameters ---------- edge_type : int An integer specifying what type of edges to return. Returns ------- out : list of 2-tuples A list of 2-tuples representing the edges in the graph with the specified edge type. Examples -------- Lets get type 2 edges from the following graph >>> import queueing_tool as qt >>> adjacency = { ... 0: {1: {'edge_type': 2}}, ... 1: {2: {'edge_type': 1}, ... 3: {'edge_type': 4}}, ... 2: {0: {'edge_type': 2}}, ... 3: {3: {'edge_type': 0}} ... } >>> G = qt.QueueNetworkDiGraph(adjacency) >>> ans = G.get_edge_type(2) >>> ans.sort() >>> ans [(0, 1), (2, 0)]
9,217
def get_hdulist_idx(self, ccdnum): for (extno, hdu) in enumerate(self.hdulist): if ccdnum == int(hdu.header.get(, -1)) or str(ccdnum) in hdu.header.get(, ): return extno raise ValueError("Failed to find requested CCD Number {} in cutout {}".format(ccdnum, self))
The SourceCutout is a list of HDUs; this method returns the index of the HDU that corresponds to the given CCD number. CCDs are numbered from 0, but the first CCD (CCDNUM=0) is often in extension 1 of an MEF. @param ccdnum: the number of the CCD in the MEF that is being referenced. @return: the index in self.hdulist that corresponds to the given CCD number.
9,218
def plot_cdf(fignum, data, xlab, sym, title, **kwargs): fig = plt.figure(num=fignum) sdata = [] for d in data: sdata.append(d) sdata.sort() X, Y = [], [] color = "" for j in range(len(sdata)): Y.append(old_div(float(j), float(len(sdata)))) X.append(sdata[j]) if in list(kwargs.keys()): color = kwargs[] if in list(kwargs.keys()): lw = kwargs[] else: lw = 1 if color != "": plt.plot(X, Y, color=sym, linewidth=lw) else: plt.plot(X, Y, sym, linewidth=lw) plt.xlabel(xlab) plt.ylabel() plt.title(title) return X, Y
Makes a plot of the cumulative distribution function. Parameters ---------- fignum : matplotlib figure number data : list of data to be plotted - doesn't need to be sorted xlab : label for the x axis sym : matplotlib symbol for plotting, e.g., 'r--' for a red dashed line title : title of the plot **kwargs : optional dictionary with {'color': color, 'linewidth': linewidth} Returns ------- x : sorted list of data y : fraction of cdf
9,219
def parse_django_adminopt_node(env, sig, signode): from sphinx.domains.std import option_desc_re count = 0 firstname = for m in option_desc_re.finditer(sig): optname, args = m.groups() if count: signode += addnodes.desc_addname(, ) signode += addnodes.desc_name(optname, optname) signode += addnodes.desc_addname(args, args) if not count: firstname = optname count += 1 if not count: for m in simple_option_desc_re.finditer(sig): optname, args = m.groups() if count: signode += addnodes.desc_addname(, ) signode += addnodes.desc_name(optname, optname) signode += addnodes.desc_addname(args, args) if not count: firstname = optname count += 1 if not firstname: raise ValueError return firstname
A copy of sphinx.directives.CmdoptionDesc.parse_signature()
9,220
def prime(self, key, value): cache_key = self.get_cache_key(key) if cache_key not in self._promise_cache: if isinstance(value, Exception): promise = Promise.reject(value) else: promise = Promise.resolve(value) self._promise_cache[cache_key] = promise return self
Adds the provided key and value to the cache. If the key already exists, no change is made. Returns itself for method chaining.
9,221
def content(self): answer_wrap = self.soup.find(, id=) content = answer_wrap.find(, class_=) content = answer_content_process(content) return content
Return the answer content as processed HTML code. :return: answer content :rtype: str
9,222
def enable(self): logger.debug() self.options.enabled = True logger.info()
(Re)enable the cache
9,223
def fail(message, exception_data=None): print(message, file=sys.stderr) if exception_data: print(repr(exception_data)) sys.exit(1)
Print a failure message and exit nonzero
9,224
def get_installed_distributions(local_only=True, skip=stdlib_pkgs, include_editables=True, editables_only=False, user_only=False): if local_only: local_test = dist_is_local else: def local_test(d): return True if include_editables: def editable_test(d): return True else: def editable_test(d): return not dist_is_editable(d) if editables_only: def editables_only_test(d): return dist_is_editable(d) else: def editables_only_test(d): return True if user_only: user_test = dist_in_usersite else: def user_test(d): return True return [d for d in pkg_resources.working_set if local_test(d) and d.key not in skip and editable_test(d) and editables_only_test(d) and user_test(d) ]
Return a list of installed Distribution objects. If ``local_only`` is True (default), only return installations local to the current virtualenv, if in a virtualenv. ``skip`` argument is an iterable of lower-case project names to ignore; defaults to stdlib_pkgs If ``include_editables`` is False, don't report editables. If ``editables_only`` is True , only report editables. If ``user_only`` is True , only report installations in the user site directory.
9,225
def _fill_empty_sessions(self, fill_subjects, fill_visits): if fill_subjects is None: fill_subjects = [s.id for s in self.subjects] if fill_visits is None: fill_visits = [v.id for v in self.complete_visits] for subject_id in fill_subjects: try: subject = self.subject(subject_id) except ArcanaNameError: subject = self._subjects[subject_id] = Subject( subject_id, [], [], []) for visit_id in fill_visits: try: subject.session(visit_id) except ArcanaNameError: session = Session(subject_id, visit_id, [], []) subject._sessions[visit_id] = session try: visit = self.visit(visit_id) except ArcanaNameError: visit = self._visits[visit_id] = Visit( visit_id, [], [], []) visit._sessions[subject_id] = session
Fill in tree with additional empty subjects and/or visits to allow the study to pull its inputs from external repositories
9,226
def _build_response(self): responses = [] self.event_urls = [] for index, event in enumerate(self.events): self.py3.threshold_get_color(index + 1, "event") self.py3.threshold_get_color(index + 1, "time") event_dict = {} event_dict["summary"] = event.get("summary") event_dict["location"] = event.get("location") event_dict["description"] = event.get("description") self.event_urls.append(event["htmlLink"]) if event["start"].get("date") is not None: start_dt = self._gstr_to_date(event["start"].get("date")) end_dt = self._gstr_to_date(event["end"].get("date")) else: start_dt = self._gstr_to_datetime(event["start"].get("dateTime")) end_dt = self._gstr_to_datetime(event["end"].get("dateTime")) if end_dt < datetime.datetime.now(tzlocal()): continue event_dict["start_time"] = self._datetime_to_str(start_dt, self.format_time) event_dict["end_time"] = self._datetime_to_str(end_dt, self.format_time) event_dict["start_date"] = self._datetime_to_str(start_dt, self.format_date) event_dict["end_date"] = self._datetime_to_str(end_dt, self.format_date) time_delta = self._delta_time(start_dt) if time_delta["days"] < 0: time_delta = self._delta_time(end_dt) is_current = True else: is_current = False event_dict["format_timer"] = self._format_timedelta( index, time_delta, is_current ) if self.warn_threshold > 0: self._check_warn_threshold(time_delta, event_dict) event_formatted = self.py3.safe_format( self.format_event, { "is_toggled": self.button_states[index], "summary": event_dict["summary"], "location": event_dict["location"], "description": event_dict["description"], "start_time": event_dict["start_time"], "end_time": event_dict["end_time"], "start_date": event_dict["start_date"], "end_date": event_dict["end_date"], "format_timer": event_dict["format_timer"], }, ) self.py3.composite_update(event_formatted, {"index": index}) responses.append(event_formatted) self.no_update = False format_separator = self.py3.safe_format(self.format_separator) self.py3.composite_update(format_separator, {"index": "sep"}) responses = self.py3.composite_join(format_separator, responses) return {"events": responses}
Builds the composite response to be output by the module by looping through all events and formatting the necessary strings. Returns: A composite containing the individual response for each event.
9,227
def _source_info(): ofi = inspect.getouterframes(inspect.currentframe())[2] try: calling_class = ofi[0].f_locals[].__class__ except KeyError: calling_class = None return ofi[1], ofi[2], calling_class, ofi[3]
Get information from the user's code (two frames up) to leave breadcrumbs for file, line, class and function.
9,228
def awd_lstm_lm_1150(dataset_name=None, vocab=None, pretrained=False, ctx=cpu(), root=os.path.join(get_home_dir(), ), **kwargs): r predefined_args = {: 400, : 1150, : , : 3, : True, : 0.4, : 0.5, : 0.2, : 0.65, : 0.1} mutable_args = frozenset([, , , , ]) assert all((k not in kwargs or k in mutable_args) for k in predefined_args), \ predefined_args.update(kwargs) return _get_rnn_model(AWDRNN, , dataset_name, vocab, pretrained, ctx, root, **predefined_args)
r"""3-layer LSTM language model with weight-drop, variational dropout, and tied weights. Embedding size is 400, and hidden layer size is 1150. Parameters ---------- dataset_name : str or None, default None The dataset name on which the pre-trained model is trained. Options are 'wikitext-2'. If specified, then the returned vocabulary is extracted from the training set of the dataset. If None, then vocab is required, for specifying embedding weight size, and is directly returned. The pre-trained model achieves 73.32/69.74 ppl on Val and Test of wikitext-2 respectively. vocab : gluonnlp.Vocab or None, default None Vocab object to be used with the language model. Required when dataset_name is not specified. pretrained : bool, default False Whether to load the pre-trained weights for model. ctx : Context, default CPU The context in which to load the pre-trained weights. root : str, default '$MXNET_HOME/models' Location for keeping the model parameters. MXNET_HOME defaults to '~/.mxnet'. Returns ------- gluon.Block, gluonnlp.Vocab
9,229
def workflow_script_reject(self): if skip(self, "reject"): return workflow = self.portal_workflow def copy_src_fields_to_dst(src, dst): ignore_fields = [ , , , , , , , , , , , , , , , , , ] fields = src.Schema().fields() for field in fields: fieldname = field.getName() if fieldname in ignore_fields: continue getter = getattr(src, + fieldname, src.Schema().getField(fieldname).getAccessor(src)) setter = getattr(dst, + fieldname, dst.Schema().getField(fieldname).getMutator(dst)) if getter is None or setter is None: continue setter(getter()) analysis_positions = {} for item in self.getLayout(): analysis_positions[item[]] = item[] old_layout = [] new_layout = [] worksheets = self.aq_parent new_ws = _createObjectByType(, worksheets, tmpID()) new_ws.unmarkCreationFlag() new_ws_id = renameAfterCreation(new_ws) copy_src_fields_to_dst(self, new_ws) new_ws.edit( Number=new_ws_id, Remarks=self.getRemarks() ) self.REQUEST[] = new_ws.UID() analyses = self.getAnalyses() new_ws_analyses = [] old_ws_analyses = [] for analysis in analyses: review_state = workflow.getInfoFor(analysis, , ) if review_state in [, , ]: old_ws_analyses.append(analysis.UID()) old_layout.append({: position, : , : analysis.UID(), : analysis.aq_parent.UID()}) continue if analysis.portal_type == : reject = _createObjectByType(, self, tmpID()) reject.unmarkCreationFlag() copy_src_fields_to_dst(analysis, reject) reject.setAnalysis(analysis) reject.reindexObject() analysis.edit( Result=None, Retested=True, ) analysis.reindexObject() position = analysis_positions[analysis.UID()] old_ws_analyses.append(reject.UID()) old_layout.append({: position, : , : reject.UID(), : self.UID()}) new_ws_analyses.append(analysis.UID()) new_layout.append({: position, : , : analysis.UID(), : analysis.aq_parent.UID()}) if analysis.portal_type == : service_uid = analysis.getServiceUID() reference = analysis.aq_parent new_reference = reference.addReferenceAnalysis(service_uid) reference_type = new_reference.getReferenceType() new_analysis_uid = api.get_uid(new_reference) position = analysis_positions[analysis.UID()] old_ws_analyses.append(analysis.UID()) old_layout.append({: position, : reference_type, : analysis.UID(), : reference.UID()}) new_ws_analyses.append(new_analysis_uid) new_layout.append({: position, : reference_type, : new_analysis_uid, : reference.UID()}) workflow.doActionFor(analysis, ) analysis.reindexObject() if analysis.portal_type == : duplicate_id = new_ws.generateUniqueId() new_duplicate = _createObjectByType(, new_ws, duplicate_id) new_duplicate.unmarkCreationFlag() copy_src_fields_to_dst(analysis, new_duplicate) new_duplicate.reindexObject() position = analysis_positions[analysis.UID()] old_ws_analyses.append(analysis.UID()) old_layout.append({: position, : , : analysis.UID(), : self.UID()}) new_ws_analyses.append(new_duplicate.UID()) new_layout.append({: position, : , : new_duplicate.UID(), : new_ws.UID()}) workflow.doActionFor(analysis, ) analysis.reindexObject() new_ws.setAnalyses(new_ws_analyses) new_ws.setLayout(new_layout) new_ws.replaces_rejected_worksheet = self.UID() for analysis in new_ws.getAnalyses(): review_state = workflow.getInfoFor(analysis, , ) if review_state == : changeWorkflowState(analysis, "bika_analysis_workflow", "assigned") self.REQUEST[] = self.UID() self.setLayout(old_layout) self.setAnalyses(old_ws_analyses) self.replaced_by = new_ws.UID()
Copy real analyses to RejectAnalysis objects, with a link to the real analysis. Create a new worksheet with the original analyses, and new duplicates and references to match the rejected worksheet.
9,230
def multiglob_compile(globs, prefix=False): if not globs: return re.compile() elif prefix: globs = [x + for x in globs] return re.compile(.join(fnmatch.translate(x) for x in globs))
Generate a single "A or B or C" regex from a list of shell globs. :param globs: Patterns to be processed by :mod:`fnmatch`. :type globs: iterable of :class:`~__builtins__.str` :param prefix: If ``True``, then :meth:`~re.RegexObject.match` will perform prefix matching rather than exact string matching. :type prefix: :class:`~__builtins__.bool` :rtype: :class:`re.RegexObject`
9,231
def _compute_mean(self, C, mag, r): mean = (C[] + self._compute_term1(C, mag) + self._compute_term2(C, mag, r)) return mean
Compute mean value according to equation 30, page 1021.
9,232
def updateUserTone(conversationPayload, toneAnalyzerPayload, maintainHistory): emotionTone = None writingTone = None socialTone = None if not in conversationPayload: conversationPayload[] = {} if not in conversationPayload[]: conversationPayload[] = initUser() user = conversationPayload[][] if toneAnalyzerPayload and toneAnalyzerPayload[]: for toneCategory in toneAnalyzerPayload[][]: if toneCategory[] == EMOTION_TONE_LABEL: emotionTone = toneCategory if toneCategory[] == LANGUAGE_TONE_LABEL: writingTone = toneCategory if toneCategory[] == SOCIAL_TONE_LABEL: socialTone = toneCategory updateEmotionTone(user, emotionTone, maintainHistory) updateWritingTone(user, writingTone, maintainHistory) updateSocialTone(user, socialTone, maintainHistory) conversationPayload[][] = user return conversationPayload
updateUserTone processes the Tone Analyzer payload to pull out the emotion, writing and social tones, and identify the meaningful tones (i.e., those tones that meet the specified thresholds). The conversationPayload json object is updated to include these tones. @param conversationPayload json object returned by the Watson Conversation Service @param toneAnalyzerPayload json object returned by the Watson Tone Analyzer Service @returns conversationPayload where the user object has been updated with tone information from the toneAnalyzerPayload
9,233
def set_rich_menu_image(self, rich_menu_id, content_type, content, timeout=None): self._post( .format(rich_menu_id=rich_menu_id), data=content, headers={: content_type}, timeout=timeout )
Call upload rich menu image API. https://developers.line.me/en/docs/messaging-api/reference/#upload-rich-menu-image Uploads and attaches an image to a rich menu. :param str rich_menu_id: IDs of the richmenu :param str content_type: image/jpeg or image/png :param content: image content as bytes, or file-like object :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a (connect timeout, read timeout) float tuple. Default is self.http_client.timeout :type timeout: float | tuple(float, float)
9,234
def setRelay(self, seconds, relay, status, password="00000000"): result = False self.setContext("setRelay") try: self.clearCmdMsg() if len(password) != 8: self.writeCmdMsg("Invalid password length.") self.setContext("") return result if seconds < 0 or seconds > 9999: self.writeCmdMsg("Relay duration must be between 0 and 9999.") self.setContext("") return result if not self.requestA(): self.writeCmdMsg("Bad read CRC on setting") else: if not self.serialCmdPwdAuth(password): self.writeCmdMsg("Password failure") else: req_str = "" req_str = ("01573102303038" + binascii.hexlify(str(relay)).zfill(2) + "28" + binascii.hexlify(str(status)).zfill(2) + binascii.hexlify(str(seconds).zfill(4)) + "2903") req_str += self.calc_crc16(req_str[2:].decode("hex")) self.m_serial_port.write(req_str.decode("hex")) if self.m_serial_port.getResponse(self.getContext()).encode("hex") == "06": self.writeCmdMsg("Success: 06 returned.") result = True self.serialPostEnd() except: ekm_log(traceback.format_exc(sys.exc_info())) self.setContext("") return result
Serial call to set relay. Args: seconds (int): Seconds to hold; zero is hold forever. See :class:`~ekmmeters.RelayInterval`. relay (int): Selected relay, see :class:`~ekmmeters.Relay`. status (int): Status to set, see :class:`~ekmmeters.RelayState` password (str): Optional password Returns: bool: True on completion and ACK.
9,235
def resolve_remote(self, uri): try: return super(LocalRefResolver, self).resolve_remote(uri) except ValueError: return super(LocalRefResolver, self).resolve_remote( + get_schema_path(uri.rsplit(, 1)[0]) )
Resolve a uri or relative path to a schema.
9,236
def _upload(param_dict, timeout, data): param_dict[] = param_dict[] = param_dict[] = result = util.callm(, param_dict, POST = True, socket_timeout = 300, data = data) return _track_from_response(result, timeout)
Calls upload either with a local audio file, or a url. Returns a track object.
9,237
def blame_incremental(self, rev, file, **kwargs): data = self.git.blame(rev, , file, p=True, incremental=True, stdout_as_string=False, **kwargs) commits = {} stream = (line for line in data.split(b) if line) while True: try: line = next(stream) except StopIteration: return hexsha, orig_lineno, lineno, num_lines = line.split() lineno = int(lineno) num_lines = int(num_lines) orig_lineno = int(orig_lineno) if hexsha not in commits: props = {} while True: try: line = next(stream) except StopIteration: return if line == b: continue tag, value = line.split(b, 1) props[tag] = value if tag == b: orig_filename = value break c = Commit(self, hex_to_bin(hexsha), author=Actor(safe_decode(props[b]), safe_decode(props[b].lstrip(b).rstrip(b))), authored_date=int(props[b]), committer=Actor(safe_decode(props[b]), safe_decode(props[b].lstrip(b).rstrip(b))), committed_date=int(props[b])) commits[hexsha] = c else: while True: try: line = next(stream) except StopIteration: return tag, value = line.split(b, 1) if tag == b: orig_filename = value break yield BlameEntry(commits[hexsha], range(lineno, lineno + num_lines), safe_decode(orig_filename), range(orig_lineno, orig_lineno + num_lines))
Iterator for blame information for the given file at the given revision. Unlike .blame(), this does not return the actual file's contents, only a stream of BlameEntry tuples. :param rev: revision specifier, see git-rev-parse for viable options. :return: lazy iterator of BlameEntry tuples, where the commit indicates the commit to blame for the line, and range indicates a span of line numbers in the resulting file. If you combine all line number ranges outputted by this command, you should get a continuous range spanning all line numbers in the file.
9,238
def gen_image(img, width, height, outfile, img_type=): assert len(img) == width * height or len(img) == width * height * 3 if img_type == : misc.imsave(outfile, img.reshape(width, height)) elif img_type == : misc.imsave(outfile, img.reshape(3, width, height))
Save an image with the given parameters.
9,239
def sign(self, storepass=None, keypass=None, keystore=None, apk=None, alias=None, name=): target = self.get_target() build_tool = android_helper.get_highest_build_tool(target.split()[1]) if keystore is None: (keystore, storepass, keypass, alias) = android_helper.get_default_keystore() dist = % (.join(apk.split()[:-1]), name) android_helper.jarsign(storepass, keypass, keystore, apk, alias, path=self.path) android_helper.zipalign(apk, dist, build_tool=build_tool, path=self.path)
Signs (jarsign and zipalign) a target apk file based on keystore information, uses default debug keystore file by default. :param storepass(str): keystore file storepass :param keypass(str): keystore file keypass :param keystore(str): keystore file path :param apk(str): apk file path to be signed :param alias(str): keystore file alias :param name(str): signed apk name to be used by zipalign
9,240
def validate(tool_class, model_class): if not hasattr(tool_class, ): raise ImproperlyConfigured("No attribute found for tool %s." % ( tool_class.__name__ )) if not hasattr(tool_class, ): raise ImproperlyConfigured("No attribute found for tool %s." % ( tool_class.__name__ )) if not hasattr(tool_class, ): raise NotImplementedError("No method found for tool %s." % ( tool_class.__name__ ))
Does basic ObjectTool option validation.
9,241
def run(self): try: language = self.arguments[0] except IndexError: language = code = .join(self.content) literal = docutils.nodes.literal_block(code, code) literal[].append() literal[] = language return [literal]
Run directive.
9,242
def _setup_metric_group_definitions(self): metric_group_definitions = dict() for mg_info in self.properties[]: mg_name = mg_info[] mg_def = MetricGroupDefinition( name=mg_name, resource_class=_resource_class_from_group(mg_name), metric_definitions=dict()) for i, m_info in enumerate(mg_info[]): m_name = m_info[] m_def = MetricDefinition( index=i, name=m_name, type=_metric_type(m_info[]), unit=_metric_unit_from_name(m_name)) mg_def.metric_definitions[m_name] = m_def metric_group_definitions[mg_name] = mg_def return metric_group_definitions
Return the dict of MetricGroupDefinition objects for this metrics context, by processing its 'metric-group-infos' property.
9,243
def register(device, data, facet): if isinstance(data, string_types): data = json.loads(data) if data[] != VERSION: raise ValueError( % data[]) app_id = data.get(, facet) verify_facet(app_id, facet) app_param = sha256(app_id.encode()).digest() client_data = { : , : data[], : facet } client_data = json.dumps(client_data) client_param = sha256(client_data.encode()).digest() request = client_param + app_param p1 = 0x03 p2 = 0 response = device.send_apdu(INS_ENROLL, p1, p2, request) return { : websafe_encode(response), : websafe_encode(client_data) }
Register a U2F device data = { "version": "U2F_V2", "challenge": string, //b64 encoded challenge "appId": string, //app_id }
9,244
def convert_uen(pinyin): return UN_RE.sub(lambda m: m.group(1) + UN_MAP[m.group(2)], pinyin)
uen conversion: restore the original final. When iou, uei and uen follow an initial they are written as iu, ui and un, e.g. niu (牛), gui (归), lun (论).
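A hypothetical reconstruction of the UN_RE/UN_MAP tables the function relies on (the real module defines its own; this only illustrates the "initial + un becomes initial + uen" rewrite):

```python
import re

# Hypothetical tables: map the contracted final back to its full form.
UN_MAP = {"un": "uen", "ūn": "uēn", "ún": "uén", "ǔn": "uěn", "ùn": "uèn"}
# Initials after which 'un' stands for 'uen' (j/q/x/y are excluded, since
# there 'un' represents 'ün' instead).
UN_RE = re.compile(r"([bpmfdtnlgkhzcsrw]h?)({})".format("|".join(UN_MAP)))

def convert_uen(pinyin):
    return UN_RE.sub(lambda m: m.group(1) + UN_MAP[m.group(2)], pinyin)

print(convert_uen("lun"))   # luen (论)
print(convert_uen("chun"))  # chuen
print(convert_uen("niu"))   # niu (not a 'un' final, unchanged)
```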
9,245
def _buffer_decode(self, input, errors, final): decoded_segments = [] position = 0 while True: decoded, consumed = self._buffer_decode_step( input[position:], errors, final ) if consumed == 0: break decoded_segments.append(decoded) position += consumed if final: assert position == len(input) return .join(decoded_segments), position
Decode bytes that may be arriving in a stream, following the Codecs API. `input` is the incoming sequence of bytes. `errors` tells us how to handle errors, though we delegate all error-handling cases to the real UTF-8 decoder to ensure correct behavior. `final` indicates whether this is the end of the sequence, in which case we should raise an error given incomplete input. Returns as much decoded text as possible, and the number of bytes consumed.
9,246
def staticproperty(func): doc = func.__doc__ if not isinstance(func, staticmethod): func = staticmethod(func) return ClassPropertyDescriptor(func, doc)
Use as a decorator on a method definition to make it a class-level attribute (without binding). This decorator can be applied to a method or a staticmethod. This decorator does not bind any arguments. Usage: >>> other_x = 'value' >>> class Foo(object): ... @staticproperty ... def x(): ... return other_x ... >>> Foo.x 'value' Setting or deleting the attribute of this name will overwrite this property. The docstring of the classproperty `x` for a class `C` can be obtained by `C.__dict__['x'].__doc__`.
9,247
def get(company=, company_uri=): if not company and not company_uri: raise Exception("glassdoor.gd.get(company=, company_uri=): "\ " company or company_uri required") payload = {} if not company_uri: payload.update({: , : company }) uri = % (GLASSDOOR_API, REVIEWS_URL) else: uri = % (GLASSDOOR_API, company_uri) r = requests.get(uri, params=payload) soup = BeautifulSoup(r.content) results = parse(soup) return results
Performs an HTTP GET for a glassdoor page and returns json
9,248
def from_df(cls, ratings:DataFrame, valid_pct:float=0.2, user_name:Optional[str]=None, item_name:Optional[str]=None, rating_name:Optional[str]=None, test:DataFrame=None, seed:int=None, path:PathOrStr=, bs:int=64, val_bs:int=None, num_workers:int=defaults.cpus, dl_tfms:Optional[Collection[Callable]]=None, device:torch.device=None, collate_fn:Callable=data_collate, no_check:bool=False) -> : "Create a `DataBunch` suitable for collaborative filtering from `ratings`." user_name = ifnone(user_name, ratings.columns[0]) item_name = ifnone(item_name, ratings.columns[1]) rating_name = ifnone(rating_name,ratings.columns[2]) cat_names = [user_name,item_name] src = (CollabList.from_df(ratings, cat_names=cat_names, procs=Categorify) .split_by_rand_pct(valid_pct=valid_pct, seed=seed).label_from_df(cols=rating_name)) if test is not None: src.add_test(CollabList.from_df(test, cat_names=cat_names)) return src.databunch(path=path, bs=bs, val_bs=val_bs, num_workers=num_workers, device=device, collate_fn=collate_fn, no_check=no_check)
Create a `DataBunch` suitable for collaborative filtering from `ratings`.
9,249
def query_subdevice2index(self, ncfile) -> Subdevice2Index: subdevices = self.query_subdevices(ncfile) self._test_duplicate_exists(ncfile, subdevices) subdev2index = {subdev: idx for (idx, subdev) in enumerate(subdevices)} return Subdevice2Index(subdev2index, self.name, get_filepath(ncfile))
Return a |Subdevice2Index| that maps the (sub)device names to their position within the given NetCDF file. Method |NetCDFVariableBase.query_subdevice2index| is based on |NetCDFVariableBase.query_subdevices|. The returned |Subdevice2Index| object remembers the NetCDF file the (sub)device names stem from, allowing for clear error messages: >>> from hydpy.core.netcdftools import NetCDFVariableBase, str2chars >>> from hydpy import make_abc_testable, TestIO >>> from hydpy.core.netcdftools import netcdf4 >>> with TestIO(): ... ncfile = netcdf4.Dataset('model.nc', 'w') >>> Var = make_abc_testable(NetCDFVariableBase) >>> Var.subdevicenames = [ ... 'element3', 'element1', 'element1_1', 'element2'] >>> var = Var('flux_prec', isolate=True, timeaxis=1) >>> var.insert_subdevices(ncfile) >>> subdevice2index = var.query_subdevice2index(ncfile) >>> subdevice2index.get_index('element1_1') 2 >>> subdevice2index.get_index('element3') 0 >>> subdevice2index.get_index('element5') Traceback (most recent call last): ... OSError: No data for sequence `flux_prec` and (sub)device \ `element5` in NetCDF file `model.nc` available. Additionally, |NetCDFVariableBase.query_subdevice2index| checks for duplicates: >>> ncfile['station_id'][:] = str2chars( ... ['element3', 'element1', 'element1_1', 'element1']) >>> var.query_subdevice2index(ncfile) Traceback (most recent call last): ... OSError: The NetCDF file `model.nc` contains duplicate (sub)device \ names for variable `flux_prec` (the first found duplicate is `element1`). >>> ncfile.close()
9,250
def set_levels(self, levels): assert_is_type(levels, [str]) return H2OFrame._expr(expr=ExprNode("setDomain", self, False, levels), cache=self._ex._cache)
Replace the levels of a categorical column. New levels must be aligned with the old domain. This call has copy-on-write semantics. :param List[str] levels: A list of strings specifying the new levels. The number of new levels must match the number of old levels. :returns: A single-column H2OFrame with the desired levels.
9,251
def _to_power_basis_degree8(nodes1, nodes2): r evaluated = [ eval_intersection_polynomial(nodes1, nodes2, t_val) for t_val in _CHEB9 ] return polynomial.polyfit(_CHEB9, evaluated, 8)
r"""Compute the coefficients of an **intersection polynomial**. Helper for :func:`to_power_basis` in the case that B |eacute| zout's `theorem`_ tells us the **intersection polynomial** is degree :math:`8`. This happens if the two curves have degrees one and eight or have degrees two and four. .. note:: This uses a least-squares fit to the function evaluated at the Chebyshev nodes (scaled and shifted onto ``[0, 1]``). Hence, the coefficients may be less stable than those produced for smaller degrees. Args: nodes1 (numpy.ndarray): The nodes in the first curve. nodes2 (numpy.ndarray): The nodes in the second curve. Returns: numpy.ndarray: ``9``-array of coefficients.
9,252
def _raise_error_routes(iface, option, expected): msg = _error_msg_routes(iface, option, expected) log.error(msg) raise AttributeError(msg)
Log and raise an error with a logical formatted message.
9,253
def convert_coordinates(coords, origin, wgs84, wrapped): if isinstance(coords, list) or isinstance(coords, tuple): try: if isinstance(coords[0], list) or isinstance(coords[0], tuple): return [convert_coordinates(list(c), origin, wgs84, wrapped) for c in coords] elif isinstance(coords[0], float): c = list(transform(origin, wgs84, *coords)) if wrapped and c[0] < -170: c[0] = c[0] + 360 return c except IndexError: pass return None
Convert coordinates from one crs to another
9,254
def stringify_summary(summary): for index, suite_summary in enumerate(summary["details"]): if not suite_summary.get("name"): suite_summary["name"] = "testcase {}".format(index) for record in suite_summary.get("records"): meta_datas = record[] __stringify_meta_datas(meta_datas) meta_datas_expanded = [] __expand_meta_datas(meta_datas, meta_datas_expanded) record["meta_datas_expanded"] = meta_datas_expanded record["response_time"] = __get_total_response_time(meta_datas_expanded)
Stringify the summary, in order to dump a json file and generate an html report.
9,255
def __rst2graph(self, rs3_xml_tree): doc_root = rs3_xml_tree.getroot() for segment in doc_root.iter(): self.__add_segment(segment) for group in doc_root.iter(): self.__add_group(group)
Reads an RST tree (from an ElementTree representation of an RS3 XML file) and adds all segments (nodes representing text) and groups (nonterminal nodes in an RST tree) as well as the relationships that hold between them (typed edges) to this RSTGraph. Parameters ---------- rs3_xml_tree : lxml.etree._ElementTree lxml ElementTree representation of an RS3 XML file tokenize : bool If True, the RST segments (i.e. nuclei and satellites) will be tokenized and added as additional token nodes to the document graph (with edges from the respective RST segments). If False, each RST segment will be labeled with the text it represents.
9,256
def _merge_meta(self, encoded_meta, meta): new_meta = None if meta: _meta = self._decode_meta(encoded_meta) for key, value in six.iteritems(meta): if value is None: _meta.pop(key, None) else: _meta[key] = value new_meta = self._encode_meta(_meta) return new_meta
Merge new meta dict into encoded meta. Returns new encoded meta.
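A plain-dict sketch of the merge semantics (a value of None deletes the key, anything else overwrites); json encoding stands in for the object's real _encode_meta/_decode_meta helpers:

```python
import json

def merge_meta(encoded_meta, meta):
    # Returns a new encoded meta, or None when there is nothing to merge.
    if not meta:
        return None
    current = json.loads(encoded_meta)
    for key, value in meta.items():
        if value is None:
            current.pop(key, None)   # None means "remove this key"
        else:
            current[key] = value
    return json.dumps(current)

print(merge_meta('{"a": 1, "b": 2}', {"b": None, "c": 3}))
# {"a": 1, "c": 3}
```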
9,257
def modify(self, modification, parameters): for src in self: src.modify(modification, parameters)
Apply a modification to the underlying point sources, with the same parameters for all sources
9,258
def insert(self, table, columns, values, execute=True): cols, vals = get_col_val_str(columns) statement = "INSERT INTO {0} ({1}) VALUES ({2})".format(wrap(table), cols, vals) if execute: self._cursor.execute(statement, values) self._commit() self._printer(.format(table)) else: return statement
Insert a single row into a table.
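A self-contained sketch of the same pattern against sqlite3; the original's `wrap`/`get_col_val_str` helpers are replaced by inline string building, and '?' placeholders keep the values parameterised:

```python
import sqlite3

def insert(cursor, table, columns, values):
    # Build "INSERT INTO table (c1, c2) VALUES (?, ?)" and bind the values.
    cols = ", ".join(columns)
    placeholders = ", ".join("?" for _ in columns)
    statement = "INSERT INTO {0} ({1}) VALUES ({2})".format(table, cols, placeholders)
    cursor.execute(statement, values)

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE users (name TEXT, age INTEGER)")
insert(cur, "users", ["name", "age"], ("Alice", 30))
conn.commit()
print(cur.execute("SELECT * FROM users").fetchall())  # [('Alice', 30)]
```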
9,259
def get_random_user(self): from provider.models import User u = User.objects.order_by()[0] return {"username": u.username, "password": u.password, "fullname": u.fullname}
Gets a random user from the provider :returns: Dictionary
9,260
def pre_save(self, model_instance, add): value = super(UserField, self).pre_save(model_instance, add) if not value and not add: value = self.get_os_username() setattr(model_instance, self.attname, value) return value return value
Updates username created on ADD only.
9,261
def ci(data, statfunction=None, alpha=0.05, n_samples=10000, method=, output=, epsilon=0.001, multi=None, _iter=True): if np.iterable(alpha): alphas = np.array(alpha) else: alphas = np.array([alpha/2, 1-alpha/2]) if multi is None: if isinstance(data, tuple): multi = True else: multi = False if statfunction is None: if _iter: statfunction = np.average else: def statfunc_wrapper(x, *args, **kwargs): return np.average(x, axis=-1, *args, **kwargs) statfunction = statfunc_wrapper if method == : n = tdata[0].shape[0]*1.0 nn = tdata[0].shape[0] I = np.identity(nn) ep = epsilon / n*1.0 p0 = np.repeat(1.0/n,nn) try: t0 = statfunction(*tdata,weights=p0) except TypeError as e: raise TypeError("statfunction does not accept correct arguments for ABC ({0})".format(e.message)) di_full = I - p0 tp = np.fromiter((statfunction(*tdata, weights=p0+ep*di) for di in di_full), dtype=np.float) tm = np.fromiter((statfunction(*tdata, weights=p0-ep*di) for di in di_full), dtype=np.float) t1 = (tp-tm)/(2*ep) t2 = (tp-2*t0+tm)/ep**2 sighat = np.sqrt(np.sum(t1**2))/n a = (np.sum(t1**3))/(6*n**3*sighat**3) delta = t1/(n**2*sighat) cq = (statfunction(*tdata,weights=p0+ep*delta)-2*t0+statfunction(*tdata,weights=p0-ep*delta))/(2*sighat*ep**2) bhat = np.sum(t2)/(2*n**2) curv = bhat/sighat-cq z0 = nppf(2*ncdf(a)*ncdf(-curv)) Z = z0+nppf(alphas) za = Z/(1-a*Z)**2 abc = np.zeros_like(alphas) for i in range(0,len(alphas)): abc[i] = statfunction(*tdata,weights=p0+za[i]*delta) if output == : return abc elif output == : return abs(abc-statfunction(tdata))[np.newaxis].T else: raise ValueError("Output option {0} is not supported.".format(output)) if nvals.ndim == 1: return abs(statfunction(data)-stat[nvals])[np.newaxis].T else: return abs(statfunction(data)-stat[(nvals, np.indices(nvals.shape)[1:])])[np.newaxis].T else: raise ValueError("Output option {0} is not supported.".format(output))
Given a set of data ``data``, and a statistics function ``statfunction`` that applies to that data, computes the bootstrap confidence interval for ``statfunction`` on that data. Data points are assumed to be delineated by axis 0. Parameters ---------- data: array_like, shape (N, ...) OR tuple of array_like all with shape (N, ...) Input data. Data points are assumed to be delineated by axis 0. Beyond this, the shape doesn't matter, so long as ``statfunction`` can be applied to the array. If a tuple of array_likes is passed, then samples from each array (along axis 0) are passed in order as separate parameters to the statfunction. The type of data (single array or tuple of arrays) can be explicitly specified by the multi parameter. statfunction: function (data, weights=(weights, optional)) -> value This function should accept samples of data from ``data``. It is applied to these samples individually. If using the ABC method, the function _must_ accept a named ``weights`` parameter which will be an array_like with weights for each sample, and must return a _weighted_ result. Otherwise this parameter is not used or required. Note that numpy's np.average accepts this. (default=np.average) alpha: float or iterable, optional The percentiles to use for the confidence interval (default=0.05). If this is a float, the returned values are (alpha/2, 1-alpha/2) percentile confidence intervals. If it is an iterable, alpha is assumed to be an iterable of each desired percentile. n_samples: float, optional The number of bootstrap samples to use (default=10000) method: string, optional The method to use: one of 'pi', 'bca', or 'abc' (default='bca') output: string, optional The format of the output. 'lowhigh' gives low and high confidence interval values. 'errorbar' gives transposed abs(value-confidence interval value) values that are suitable for use with matplotlib's errorbar function. (default='lowhigh') epsilon: float, optional (only for ABC method) The step size for finite difference calculations in the ABC method. Ignored for all other methods. (default=0.001) multi: boolean, optional If False, assume data is a single array. If True, assume data is a tuple/other iterable of arrays of the same length that should be sampled together. If None, decide based on whether the data is an actual tuple. (default=None) Returns ------- confidences: tuple of floats The confidence percentiles specified by alpha Calculation Methods ------------------- 'pi': Percentile Interval (Efron 13.3) The percentile interval method simply returns the 100*alphath bootstrap sample's values for the statistic. This is an extremely simple method of confidence interval calculation. However, it has several disadvantages compared to the bias-corrected accelerated method, which is the default. 'bca': Bias-Corrected Accelerated (BCa) Non-Parametric (Efron 14.3) (default) This method is much more complex to explain. However, it gives considerably better results, and is generally recommended for normal situations. Note that in cases where the statistic is smooth, and can be expressed with weights, the ABC method will give approximated results much, much faster. Note that in a case where the statfunction results in equal output for every bootstrap sample, the BCa confidence interval is technically undefined, as the acceleration value is undefined. 
To match the percentile interval method and give reasonable output, the implementation of this method returns a confidence interval of zero width using the 0th bootstrap sample in this case, and warns the user. 'abc': Approximate Bootstrap Confidence (Efron 14.4, 22.6) This method provides approximated bootstrap confidence intervals without actually taking bootstrap samples. This requires that the statistic be smooth, and allow for weighting of individual points with a weights= parameter (note that np.average allows this). This is _much_ faster than all other methods for situations where it can be used. Examples -------- To calculate the confidence intervals for the mean of some numbers: >> boot.ci( np.randn(100), np.average ) Given some data points in arrays x and y calculate the confidence intervals for all linear regression coefficients simultaneously: >> boot.ci( (x,y), scipy.stats.linregress ) References ---------- Efron, An Introduction to the Bootstrap. Chapman & Hall 1993
9,262
def show(self): off = 0 for n, i in enumerate(self.get_instructions()): print("{:8d} (0x{:08x}) {:04x} {:30} {}".format(n, off, i.get_op_value(), i.get_name(), i.get_output(self.idx))) off += i.get_length()
Display (with a pretty print) this object
9,263
def get_worksheet(self, index): sheet_data = self.fetch_sheet_metadata() try: properties = sheet_data[][index][] return Worksheet(self, properties) except (KeyError, IndexError): return None
Returns a worksheet with specified `index`. :param index: An index of a worksheet. Indexes start from zero. :type index: int :returns: an instance of :class:`gspread.models.Worksheet` or `None` if the worksheet is not found. Example. To get first worksheet of a spreadsheet: >>> sht = client.open('My fancy spreadsheet') >>> worksheet = sht.get_worksheet(0)
9,264
def as_params(self): params = {} if self.has_filters: params[] = self.get_filters() if self.has_order: params[] = self.get_order() if self.has_selects: params[] = self.get_selects() if self.has_expands: params[] = self.get_expands() if self._search: params[] = self._search params.pop(, None) params.pop(, None) return params
Returns the filters, orders, select, expands and search as query parameters :rtype: dict
9,265
def convert_sed_cols(tab): for colname in list(tab.columns.keys()): newname = colname.lower() newname = newname.replace(, ) if tab.columns[colname].name == newname: continue tab.columns[colname].name = newname return tab
Cast SED column names to lowercase.
9,266
def add(self, search): ms = self._clone() ms._searches.append(search) return ms
Adds a new :class:`~elasticsearch_dsl.Search` object to the request:: ms = MultiSearch(index='my-index') ms = ms.add(Search(doc_type=Category).filter('term', category='python')) ms = ms.add(Search(doc_type=Blog))
9,267
def _get_goid2dbids(associations): go2ids = cx.defaultdict(set) for ntd in associations: go2ids[ntd.GO_ID].add(ntd.DB_ID) return dict(go2ids)
Return a dict mapping each GO ID to the set of DB IDs annotated with it in the given associations.
9,268
def _close_prepared_statement(self): self.prepared_sql = None self.flush_to_query_ready() self.connection.write(messages.Close(, self.prepared_name)) self.connection.write(messages.Flush()) self._message = self.connection.read_expected_message(messages.CloseComplete) self.connection.write(messages.Sync())
Close the prepared statement on the server.
9,269
def delete_managed_disk(call=None, kwargs=None): compconn = get_conn(client_type=) try: compconn.disks.delete(kwargs[], kwargs[]) except Exception as exc: log.error(, kwargs.get(), six.text_type(exc)) return False return True
Delete a managed disk from a resource group.
9,270
def distVersion(): from pkg_resources import parse_version build_number = buildNumber() parsedBaseVersion = parse_version(baseVersion) if isinstance(parsedBaseVersion, tuple): raise RuntimeError("Setuptools version 8.0 or newer required. Update by running " "") if build_number is not None and parsedBaseVersion.is_prerelease: return baseVersion + + build_number else: return baseVersion
The distribution version identifying a published release on PyPI.
9,271
def region_size(im): r if im.dtype == bool: im = spim.label(im)[0] counts = sp.bincount(im.flatten()) counts[0] = 0 chords = counts[im] return chords
r""" Replace each voxel with size of region to which it belongs Parameters ---------- im : ND-array Either a boolean image wtih ``True`` indicating the features of interest, in which case ``scipy.ndimage.label`` will be applied to find regions, or a greyscale image with integer values indicating regions. Returns ------- image : ND-array A copy of ``im`` with each voxel value indicating the size of the region to which it belongs. This is particularly useful for finding chord sizes on the image produced by ``apply_chords``.
9,272
def normalize_curves_eb(curves): non_zero_curves = [(losses, poes) for losses, poes in curves if losses[-1] > 0] if not non_zero_curves: return curves[0][0], numpy.array([poes for _losses, poes in curves]) else: max_losses = [losses[-1] for losses, _poes in non_zero_curves] reference_curve = non_zero_curves[numpy.argmax(max_losses)] loss_ratios = reference_curve[0] curves_poes = [interpolate.interp1d( losses, poes, bounds_error=False, fill_value=0)(loss_ratios) for losses, poes in curves] for cp in curves_poes: if numpy.isnan(cp[0]): cp[0] = 0 return loss_ratios, numpy.array(curves_poes)
A more sophisticated version of normalize_curves, used in the event based calculator. :param curves: a list of pairs (losses, poes) :returns: first losses, all_poes
9,273
def remote_mgmt_addr_uneq_store(self, remote_mgmt_addr): if remote_mgmt_addr != self.remote_mgmt_addr: self.remote_mgmt_addr = remote_mgmt_addr return True return False
This function saves the MGMT address, if different from stored.
9,274
def run(*steps): if not steps: return task = None run._sigint = False loop = asyncio.get_event_loop() def abort(): task.cancel() run._sigint = True added = False try: loop.add_signal_handler(signal.SIGINT, abort) added = True except (ValueError, OSError, RuntimeError) as e: raise try: for step in steps: task = loop.create_task(step) loop.run_until_complete(asyncio.wait([task], loop=loop)) if run._sigint: raise KeyboardInterrupt() if task.exception(): raise task.exception() return task.result() finally: if added: loop.remove_signal_handler(signal.SIGINT)
Helper to run one or more async functions synchronously, with graceful handling of SIGINT / Ctrl-C. Returns the return value of the last function.
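A usage sketch against a simplified stand-in for the helper (the SIGINT plumbing is omitted; the point is that each coroutine runs to completion in order and the last result is returned):

```python
import asyncio

def run(*steps):
    # Simplified stand-in: run each coroutine on one event loop, in order,
    # and return the result of the last one.
    loop = asyncio.new_event_loop()
    try:
        result = None
        for step in steps:
            result = loop.run_until_complete(step)
        return result
    finally:
        loop.close()

async def greet(name):
    await asyncio.sleep(0.01)
    return "hello " + name

print(run(greet("a"), greet("b")))  # hello b
```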
9,275
def NewOutputModule(cls, name, output_mediator): output_class = cls.GetOutputClass(name) return output_class(output_mediator)
Creates a new output module object for the specified output format. Args: name (str): name of the output module. output_mediator (OutputMediator): output mediator. Returns: OutputModule: output module. Raises: KeyError: if there is no output class found with the supplied name. ValueError: if name is not a string.
9,276
def meth_set_acl(args): acl_updates = [{"user": user, "role": args.role} \ for user in set(expand_fc_groups(args.users)) \ if user != fapi.whoami()] id = args.snapshot_id if not id: r = fapi.list_repository_methods(namespace=args.namespace, name=args.method) fapi._check_response_code(r, 200) versions = r.json() if len(versions) == 0: if fcconfig.verbosity: eprint("method {0}/{1} not found".format(args.namespace, args.method)) return 1 latest = sorted(versions, key=lambda m: m[])[-1] id = latest[] r = fapi.update_repository_method_acl(args.namespace, args.method, id, acl_updates) fapi._check_response_code(r, 200) if fcconfig.verbosity: print("Updated ACL for {0}/{1}:{2}".format(args.namespace, args.method, id)) return 0
Assign an ACL role to a list of users for a workflow.
9,277
def p_example_multiline(self, p): p[0] = AstExampleField( self.path, p.lineno(1), p.lexpos(1), p[1], p[5])
example_field : ID EQ NL INDENT ex_map NL DEDENT
9,278
def _CSI(self, cmd): sys.stdout.write() sys.stdout.write(cmd)
Control sequence introducer
9,279
def get_args_parser(): parser = argparse.ArgumentParser( description=) parser.add_argument(, , action=EnvDefault, envvar=, required=True, help=) parser.add_argument(, , action=EnvDefault, envvar=, required=True, help="Odoo--db-user-uMARABUNTA_DB_USERs database user") parser.add_argument(, , action=EnvDefault, envvar=, required=True, help="Odoo--db-port-pMARABUNTA_DB_PORTs database port") parser.add_argument(, , default=os.environ.get(, ), help="Odoo--modeMARABUNTA_MODEdemoprod--allow-serieMARABUNTA_ALLOW_SERIEAllow to run more than 1 version upgrade at a time.--force-versionMARABUNTA_FORCE_VERSIONForce upgrade of a version, even if it has already been applied.WebConfiguration related to the internal web server, used to publish a maintenance page during the migration.--web-hostMARABUNTA_WEB_HOST0.0.0.0Host for the web server--web-portMARABUNTA_WEB_PORTPort for the web server--web-custom-htmlMARABUNTA_WEB_CUSTOM_HTMLPath to a custom html file to publish') return parser
Return a parser for command line options.
9,280
def find_near_matches_no_deletions_ngrams(subsequence, sequence, search_params): if not subsequence: raise ValueError() max_substitutions, max_insertions, max_deletions, max_l_dist = search_params.unpacked max_substitutions = min(max_substitutions, max_l_dist) max_insertions = min(max_insertions, max_l_dist) subseq_len = len(subsequence) seq_len = len(sequence) ngram_len = subseq_len // (max_substitutions + max_insertions + 1) if ngram_len == 0: raise ValueError( "The subsequence's length must be greater than max_subs + max_ins!" ) matches = [] matched_indexes = set() for ngram_start in range(0, len(subsequence) - ngram_len + 1, ngram_len): ngram_end = ngram_start + ngram_len subseq_before = subsequence[:ngram_start] subseq_before_reversed = subseq_before[::-1] subseq_after = subsequence[ngram_end:] start_index = max(0, ngram_start - max_insertions) end_index = min(seq_len, seq_len - (subseq_len - ngram_end) + max_insertions) for index in search_exact( subsequence[ngram_start:ngram_end], sequence, start_index, end_index, ): if index - ngram_start in matched_indexes: continue seq_after = sequence[index + ngram_len:index + subseq_len - ngram_start + max_insertions] if seq_after.startswith(subseq_after): matches_after = [(0, 0)] else: matches_after = _expand(subseq_after, seq_after, max_substitutions, max_insertions, max_l_dist) if not matches_after: continue _max_substitutions = max_substitutions - min(m[0] for m in matches_after) _max_insertions = max_insertions - min(m[1] for m in matches_after) _max_l_dist = max_l_dist - min(m[0] + m[1] for m in matches_after) seq_before = sequence[index - ngram_start - _max_insertions:index] if seq_before.endswith(subseq_before): matches_before = [(0, 0)] else: matches_before = _expand( subseq_before_reversed, seq_before[::-1], _max_substitutions, _max_insertions, _max_l_dist, ) for (subs_before, ins_before) in matches_before: for (subs_after, ins_after) in matches_after: if ( subs_before + subs_after <= max_substitutions and ins_before + ins_after <= max_insertions and subs_before + subs_after + ins_before + ins_after <= max_l_dist ): matches.append(Match( start=index - ngram_start - ins_before, end=index - ngram_start + subseq_len + ins_after, dist=subs_before + subs_after + ins_before + ins_after, )) matched_indexes |= set(range( index - ngram_start - ins_before, index - ngram_start - ins_before + max_insertions + 1, )) return sorted(matches, key=lambda match: match.start)
Search for near-matches of subsequence in sequence.

This searches for near-matches, where the nearly-matching parts of the sequence must meet the following limitations (relative to the subsequence):

* the maximum allowed number of character substitutions
* the maximum allowed number of new characters inserted
* no deletions are allowed
* the total number of substitutions, insertions and deletions is capped by max_l_dist
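A short usage sketch through fuzzysearch's public find_near_matches entry point, assuming the package is installed; the inputs are illustrative.

from fuzzysearch import find_near_matches

# One substitution (N -> M), no deletions allowed.
matches = find_near_matches('PATTERN', 'xxxPATTERMxxx',
                            max_substitutions=1,
                            max_insertions=1,
                            max_deletions=0,
                            max_l_dist=1)
for m in matches:
    print(m.start, m.end, m.dist)   # e.g. 3 10 1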
9,281
def taskGroupCreationRequested(self, *args, **kwargs):
    ref = {
        'exchange': 'task-group-creation-requested',
        'name': 'taskGroupCreationRequested',
        # Routing key parts: routingKeyKind (constant "primary"), organization, repository.
        'routingKey': [
            {
                'constant': 'primary',
                'multipleWords': False,
                'name': 'routingKeyKind',
            },
            {
                'multipleWords': False,
                'name': 'organization',
            },
            {
                'multipleWords': False,
                'name': 'repository',
            },
        ],
        'schema': 'v1/task-group-creation-requested.json#',
    }
    return self._makeTopicExchange(ref, *args, **kwargs)
tc-gh requested the Queue service to create all the tasks in a group. This is supposed to signal that the taskCreate API has been called for every task in the task group for this particular repo and organization; it is currently used for creating initial status indicators in the GitHub UI using the Statuses API. This particular exchange can also be bound to RabbitMQ queues by custom routes - for that, pass in the array of routes as a second argument to the publish method. Currently, we do use the statuses routes to bind the handler that creates the initial status.

This exchange outputs: ``v1/task-group-creation-requested.json#``

This exchange takes the following keys:

* routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)

* organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)

* repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
9,282
def pretty_print(self):
    print colored.blue("-" * 40)
    print colored.red("datacats: problem was encountered:")
    print self.message
    print colored.blue("-" * 40)
Print the error message to stdout with colors and borders
9,283
def unix_time(self, dt):
    epoch = datetime.utcfromtimestamp(0)
    delta = dt - epoch
    return int(delta.total_seconds())
Returns the number of seconds since the UNIX epoch for the given datetime (dt). PARAMETERS: dt -- datetime
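A quick standalone example of the same conversion; the values are illustrative.

from datetime import datetime, timezone

epoch = datetime.utcfromtimestamp(0)
dt = datetime(2020, 1, 1)
print(int((dt - epoch).total_seconds()))                           # 1577836800
# For timezone-aware datetimes the standard library's .timestamp() gives the same result:
print(int(datetime(2020, 1, 1, tzinfo=timezone.utc).timestamp()))  # 1577836800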
9,284
def ipv6_link_local(self, **kwargs): int_type = kwargs.pop().lower() ve_name = kwargs.pop() rbridge_id = kwargs.pop(, ) callback = kwargs.pop(, self._callback) valid_int_types = [, ] if int_type not in valid_int_types: raise ValueError( % repr(valid_int_types)) link_args = dict(name=ve_name, rbridge_id=rbridge_id, int_type=int_type) method_name = \ % int_type method_class = self._rbridge v6_link_local = getattr(method_class, method_name) config = v6_link_local(**link_args) if kwargs.pop(, False): output = callback(config, handler=) item = output.data.find() if item is not None: return True if kwargs.pop(, False): config.find().set(, ) return callback(config)
Configure ipv6 link local address on interfaces on vdx switches Args: int_type: Interface type on which the ipv6 link local needs to be configured. name: 'Ve' or 'loopback' interface name. rbridge_id (str): rbridge-id for device. get (bool): Get config instead of editing config. (True, False) delete (bool): True, delete the mac-learning. (True, False) callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `int_type`, `name` is not passed. ValueError: if `int_type`, `name` is invalid. Examples: >>> import pynos.device >>> conn = ('10.24.39.211', '22') >>> auth = ('admin', 'password') >>> with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.interface.ipv6_link_local(name='500', ... int_type='ve',rbridge_id='1') ... output = dev.interface.ipv6_link_local(get=True,name='500', ... int_type='ve',rbridge_id='1') ... output = dev.interface.ipv6_link_local(delete=True, ... name='500', int_type='ve', rbridge_id='1')
9,285
def getIdent(self, node):
    ident = self.getRawIdent(node)
    if ident is not None:
        return ident
    node = self.findNode(node)
    if node is None:
        return None
    return node.graphident
Get the graph identifier for a node
9,286
def bbox_to_resolution(bbox, width, height):
    utm_bbox = to_utm_bbox(bbox)
    east1, north1 = utm_bbox.lower_left
    east2, north2 = utm_bbox.upper_right
    return abs(east2 - east1) / width, abs(north2 - north1) / height
Calculates pixel resolution in meters for a given bbox of a given width and height. :param bbox: bounding box :type bbox: geometry.BBox :param width: width of bounding box in pixels :type width: int :param height: height of bounding box in pixels :type height: int :return: resolution east-west at north and south, and resolution north-south in meters for given CRS :rtype: float, float :raises: ValueError if CRS is not supported
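A worked example of the underlying arithmetic, using made-up UTM corner coordinates in metres instead of the sentinelhub BBox machinery.

# Lower-left and upper-right corners of a UTM bounding box, in metres (illustrative values).
east1, north1 = 500000.0, 5000000.0
east2, north2 = 501280.0, 5001280.0
width, height = 512, 512              # requested image size in pixels

res_x = abs(east2 - east1) / width    # east-west resolution: 2.5 m per pixel
res_y = abs(north2 - north1) / height # north-south resolution: 2.5 m per pixel
print(res_x, res_y)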
9,287
def mark_offer_as_lose(self, offer_id):
    return self._create_put_request(
        resource=OFFERS,
        billomat_id=offer_id,
        command=LOSE,
    )
Mark offer as lose :param offer_id: the offer id :return Response
9,288
def run(self): logger.debug("Starting execution of {0}{1}".format(self, " (backwards)" if self.backward_execution else "")) self.setup_run() try: concurrency_history_item = self.setup_forward_or_backward_execution() concurrency_queue = self.start_child_states(concurrency_history_item) finished_thread_id = concurrency_queue.get() finisher_state = self.states[finished_thread_id] finisher_state.join() if not self.backward_execution: for state_id, state in self.states.items(): state.recursively_preempt_states() for history_index, state in enumerate(self.states.values()): self.join_state(state, history_index, concurrency_history_item) self.add_state_execution_output_to_scoped_data(state.output_data, state) self.update_scoped_variables_with_output_dictionary(state.output_data, state) self.add_state_execution_output_to_scoped_data(finisher_state.output_data, finisher_state) self.update_scoped_variables_with_output_dictionary(finisher_state.output_data, finisher_state) if self.states[finished_thread_id].backward_execution: return self.finalize_backward_execution() else: self.backward_execution = False transition = self.get_transition_for_outcome(self.states[finished_thread_id], self.states[finished_thread_id].final_outcome) if transition is None: transition = self.handle_no_transition(self.states[finished_thread_id]) if transition is None: self.output_data["error"] = RuntimeError("state aborted") else: if in self.states[finished_thread_id].output_data: self.output_data["error"] = self.states[finished_thread_id].output_data[] self.final_outcome = self.outcomes[transition.to_outcome] return self.finalize_concurrency_state(self.final_outcome) except Exception as e: logger.error("{0} had an internal error: {1}\n{2}".format(self, str(e), str(traceback.format_exc()))) self.output_data["error"] = e self.state_execution_status = StateExecutionStatus.WAIT_FOR_NEXT_STATE return self.finalize(Outcome(-1, "aborted"))
This defines the sequence of actions that are taken when the preemptive concurrency state is executed :return:
9,289
def taskdir(self): return os.path.join(self.BASE, self.TAG, self.task_family)
Return the directory under which all artefacts are stored.
9,290
def get_required(self, name):
    locator = self._locate(name)
    if locator is None:
        raise ReferenceException(None, name)
    return self._references.get_required(locator)
Gets all required dependencies by their name. At least one dependency must be present. If no dependencies were found, it throws a [[ReferenceException]]. :param name: the dependency name to locate. :return: a list with found dependencies.
9,291
def get_grouped_opcodes(self, n=3):
    codes = self.get_opcodes()
    if not codes:
        codes = [("equal", 0, 1, 0, 1)]
    # Shrink leading and trailing "equal" blocks to at most n lines of context.
    if codes[0][0] == 'equal':
        tag, i1, i2, j1, j2 = codes[0]
        codes[0] = tag, max(i1, i2-n), i2, max(j1, j2-n), j2
    if codes[-1][0] == 'equal':
        tag, i1, i2, j1, j2 = codes[-1]
        codes[-1] = tag, i1, min(i2, i1+n), j1, min(j2, j1+n)
    nn = n + n
    group = []
    for tag, i1, i2, j1, j2 in codes:
        # End the current group and start a new one whenever there is a
        # large range with no changes.
        if tag == 'equal' and i2-i1 > nn:
            group.append((tag, i1, min(i2, i1+n), j1, min(j2, j1+n)))
            yield group
            group = []
            i1, j1 = max(i1, i2-n), max(j1, j2-n)
        group.append((tag, i1, i2, j1, j2))
    if group and not (len(group) == 1 and group[0][0] == 'equal'):
        yield group
Isolate change clusters by eliminating ranges with no changes. Return a generator of groups with up to n lines of context. Each group is in the same format as returned by get_opcodes(). >>> from pprint import pprint >>> a = map(str, range(1,40)) >>> b = a[:] >>> b[8:8] = ['i'] # Make an insertion >>> b[20] += 'x' # Make a replacement >>> b[23:28] = [] # Make a deletion >>> b[30] += 'y' # Make another replacement >>> pprint(list(SequenceMatcher(None,a,b).get_grouped_opcodes())) [[('equal', 5, 8, 5, 8), ('insert', 8, 8, 8, 9), ('equal', 8, 11, 9, 12)], [('equal', 16, 19, 17, 20), ('replace', 19, 20, 20, 21), ('equal', 20, 22, 21, 23), ('delete', 22, 27, 23, 23), ('equal', 27, 30, 23, 26)], [('equal', 31, 34, 27, 30), ('replace', 34, 35, 30, 31), ('equal', 35, 38, 31, 34)]]
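The same method exists on the standard library's difflib.SequenceMatcher, so the grouping behaviour can be tried directly; the input lists are illustrative.

import difflib

a = [str(i) for i in range(1, 40)]
b = list(a)
b[8:8] = ['i']     # insertion
b[20] += 'x'       # replacement

for group in difflib.SequenceMatcher(None, a, b).get_grouped_opcodes(n=3):
    print(group)   # each group keeps up to 3 lines of unchanged context around the changes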
9,292
def get_managed( name, template, source, source_hash, source_hash_name, user, group, mode, attrs, saltenv, context, defaults, skip_verify=False, **kwargs): *{hash_type: , : <md5sum>}755 sfn = source_sum = {} def _get_local_file_source_sum(path): return {: get_hash(path, form=), : } if not sfn or cache_refetch: try: sfn = __salt__[]( source, saltenv, source_hash=source_sum.get()) except Exception as exc: _source = salt.utils.url.redact_http_basic_auth(source) return , {}, .format(_source, exc) if not sfn or not os.path.exists(sfn): _source = salt.utils.url.redact_http_basic_auth(source) return sfn, {}, {0}\.format(_source) if sfn == name: raise SaltInvocationError( ) if template: if template in salt.utils.templates.TEMPLATE_REGISTRY: context_dict = defaults if defaults else {} if context: context_dict = salt.utils.dictupdate.merge(context_dict, context) data = salt.utils.templates.TEMPLATE_REGISTRY[template]( sfn, name=name, source=source, user=user, group=group, mode=mode, attrs=attrs, saltenv=saltenv, context=context_dict, salt=__salt__, pillar=__pillar__, grains=__opts__[], opts=__opts__, **kwargs) else: return sfn, {}, ( ).format(template) if data[]: sfn = data[] hsum = get_hash(sfn, form=) source_sum = {: , : hsum} else: __clean_tmp(sfn) return sfn, {}, data[] return sfn, source_sum,
Return the managed file data for file.managed name location where the file lives on the server template template format source managed source file source_hash hash of the source file source_hash_name When ``source_hash`` refers to a remote file, this specifies the filename to look for in that file. .. versionadded:: 2016.3.5 user Owner of file group Group owner of file mode Permissions of file attrs Attributes of file .. versionadded:: 2018.3.0 context Variables to add to the template context defaults Default values of for context_dict skip_verify If ``True``, hash verification of remote file sources (``http://``, ``https://``, ``ftp://``) will be skipped, and the ``source_hash`` argument will be ignored. .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' file.get_managed /etc/httpd/conf.d/httpd.conf jinja salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' None root root '755' base None None
9,293
def load(self, name):
    if self.reload:
        self._maybe_purge_cache()
    template = self.cache.get(name)
    if template:
        return template
    path = self.resolve(name)
    if not path:
        raise OSError(errno.ENOENT, "File not found: %s" % name)
    with codecs.open(path, 'r', encoding='utf-8') as f:  # UTF-8 encoding assumed
        contents = f.read()
        mtime = os.fstat(f.fileno()).st_mtime
    template = self.load_string(contents, filename=path)
    template.mtime = mtime
    template.path = path
    self.cache[name] = template
    return template
If not yet in the cache, load the named template, compile it, and place it into the cache. If in the cache, return the cached template.
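A minimal sketch of the mtime-based caching pattern this loader relies on; the class and method names here are illustrative, not the original loader.

import os

class FileCache:
    def __init__(self):
        self._cache = {}   # path -> (mtime, contents)

    def load(self, path):
        mtime = os.stat(path).st_mtime
        cached = self._cache.get(path)
        if cached is not None and cached[0] == mtime:
            return cached[1]                  # unchanged on disk: reuse the cached copy
        with open(path, encoding='utf-8') as f:
            contents = f.read()
        self._cache[path] = (mtime, contents)
        return contents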
9,294
def rst2md(text): top_heading = re.compile(r, flags=re.M) text = re.sub(top_heading, r, text) math_eq = re.compile(r, flags=re.M) text = re.sub(math_eq, lambda match: r.format(match.group(1).strip()), text) inline_math = re.compile(r) text = re.sub(inline_math, r, text) return text
Converts the RST text from the examples' docstrings and comments into markdown text for the IPython notebooks
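An illustrative sketch of this kind of regex-driven conversion; the pattern below handles only inline math and is an example, not the stripped original patterns.

import re

def rst_inline_math_to_md(text):
    # :math:`x^2`  ->  $x^2$
    return re.sub(r':math:`(.+?)`', r'$\1$', text)

print(rst_inline_math_to_md('The area is :math:`\\pi r^2`.'))
# The area is $\pi r^2$.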
9,295
def field2choices(self, field, **kwargs): attributes = {} comparable = [ validator.comparable for validator in field.validators if hasattr(validator, "comparable") ] if comparable: attributes["enum"] = comparable else: choices = [ OrderedSet(validator.choices) for validator in field.validators if hasattr(validator, "choices") ] if choices: attributes["enum"] = list(functools.reduce(operator.and_, choices)) return attributes
Return the dictionary of OpenAPI field attributes for valid choices definition :param Field field: A marshmallow field. :rtype: dict
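For context, a sketch of the kind of field this method inspects, using marshmallow's OneOf validator (the field and its choices are illustrative, and marshmallow is assumed to be installed):

from marshmallow import fields, validate

status = fields.String(validate=validate.OneOf(["new", "open", "closed"]))

# Validators that expose a `choices` attribute are what field2choices collects
# into the OpenAPI "enum"; here the expected result is
# {"enum": ["new", "open", "closed"]}.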
9,296
def md5sum(filename, blocksize=8192):
    # Read in binary mode so the digest reflects the exact on-disk bytes.
    with open(filename, 'rb') as fh:
        m = hashlib.md5()
        while True:
            data = fh.read(blocksize)
            if not data:
                break
            m.update(data)
    return m.hexdigest()
Get the MD5 checksum of a file.
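A quick usage sketch; the file name and contents are illustrative.

import hashlib

data = b"hello world\n"
with open("example.txt", "wb") as fh:
    fh.write(data)

print(md5sum("example.txt"))
print(hashlib.md5(data).hexdigest())   # same digest computed in memory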
9,297
def init_default_config(self, path): if not (os.path.exists(path) and os.path.isfile(path)): raise AppConfigValueException( .format(path)) cfl = open(path, ) data = json.load(cfl) cfl.close() for key in data.keys(): if == key: self.application_name = data[key].lower() continue if == key: self.application_author = data[key].lower() continue if == key: self.application_version = data[key].lower() continue self._add_section_default(key, data[key])
Initialize the config object and load the default configuration. The path to the config file must be provided. The name of the application is read from the config file. The config file stores the description and the default values for all configurations, including the application name. @param path: The path to the config file.
9,298
def check(text):
    err = "airlinese.misc"
    msg = u" is airlinese."
    airlinese = [
        "enplan(?:e|ed|ing|ement)",
        "deplan(?:e|ed|ing|ement)",
        "taking off momentarily",
    ]
    return existence_check(text, airlinese, err, msg)
Check the text.
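A minimal standalone sketch of the existence-check idea (scan a list of regexes over the text); the helper below is an assumption for illustration, not proselint's implementation.

import re

def existence_check(text, patterns, err, msg):
    # Return an (error_code, message, matched_text) triple for every hit.
    results = []
    for pattern in patterns:
        for match in re.finditer(pattern, text, flags=re.IGNORECASE):
            results.append((err, msg, match.group(0)))
    return results

text = "Passengers will be enplaning momentarily."
print(existence_check(text, ["enplan(?:e|ed|ing|ement)"], "airlinese.misc", "is airlinese."))
# [('airlinese.misc', 'is airlinese.', 'enplaning')]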
9,299
def create_sqlite_backup_db(audit_tables): try: Popen("rm %s"%(config.get(, )), shell=True) logging.warn("Old sqlite backup DB removed") except Exception as e: logging.warn(e) try: aux_dir = config.get(, ) os.mkdir(aux_dir) logging.warn("%s created", aux_dir) except Exception as e: logging.warn(e) try: backup_dir = config.get(, ) os.mkdir(backup_dir) logging.warn("%s created", backup_dir) except Exception as e: logging.warn(e) db = create_engine(sqlite_engine, echo=True) db.connect() metadata = MetaData(db) for main_audit_table in audit_tables: cols = [] for c in main_audit_table.columns: col = c.copy() if col.type.python_type == Decimal: col.type = DECIMAL() cols.append(col) Table(main_audit_table.name, metadata, *cols, sqlite_autoincrement=True) metadata.create_all(db)
return an inspector object