Unnamed: 0: int64, values 0 to 389k
code: string, lengths 26 to 79.6k
docstring: string, lengths 1 to 46.9k
381,900
def offsets_for_max_size( max_size ):
    for i, max in enumerate( reversed( BIN_OFFSETS_MAX ) ):
        if max_size < max:
            break
    else:
        raise Exception( "%d is larger than the maximum possible size (%d)" % ( max_size, BIN_OFFSETS_MAX[0] ) )
    return BIN_OFFSETS[ ( len(BIN_OFFSETS) - i - 1 ) : ]
Return the subset of offsets needed to contain intervals over (0,max_size)
381,901
def apply_filters(target, lines):
    filters = get_filters(target)
    if filters:
        for l in lines:
            if any(f in l for f in filters):
                yield l
    else:
        for l in lines:
            yield l
Applies filters to the lines of a datasource. This function is used only in integration tests. Filters are applied in an equivalent but more performant way at run time.
381,902
def _set_error_disable_timeout(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=error_disable_timeout.error_disable_timeout, is_container=, presence=False, yang_name="error-disable-timeout", rest_name="error-disable-timeout", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "container", : , }) self.__error_disable_timeout = t if hasattr(self, ): self._set()
Setter method for error_disable_timeout, mapped from YANG variable /protocol/spanning_tree/rpvst/error_disable_timeout (container) If this variable is read-only (config: false) in the source YANG file, then _set_error_disable_timeout is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_error_disable_timeout() directly.
381,903
def start_listener_thread(self, timeout_ms=30000, exception_handler=None):
    try:
        thread = Thread(target=self.listen_forever, args=(timeout_ms, exception_handler))
        thread.daemon = True
        self.sync_thread = thread
        self.should_listen = True
        thread.start()
    except RuntimeError:
        e = sys.exc_info()[0]
        logger.error("Error: unable to start thread. %s", str(e))
Start a listener thread to listen for events in the background. Args: timeout (int): How long to poll the Home Server for before retrying. exception_handler (func(exception)): Optional exception handler function which can be used to handle exceptions in the caller thread.
381,904
def _headers(self, name, is_file=False):
    value = self._files[name] if is_file else self._data[name]
    _boundary = self.boundary.encode("utf-8") if isinstance(self.boundary, unicode) else urllib.quote_plus(self.boundary)
    headers = ["--%s" % _boundary]
    if is_file:
        disposition = 'form-data; name="%s"; filename="%s"' % (name, os.path.basename(value))
    else:
        disposition = 'form-data; name="%s"' % name
    headers.append("Content-Disposition: %s" % disposition)
    if is_file:
        file_type = self._file_type(name)
    else:
        file_type = "text/plain; charset=utf-8"
    headers.append("Content-Type: %s" % file_type)
    if is_file:
        headers.append("Content-Length: %i" % self._file_size(name))
    else:
        headers.append("Content-Length: %i" % len(value))
    headers.append("")
    headers.append("")
    return "\r\n".join(headers)
Returns the header of the encoding of this parameter. Args: name (str): Field name Kwargs: is_file (bool): If true, this is a file field Returns: array. Headers
381,905
async def executor(func, *args, **kwargs):

    def syncfunc():
        return func(*args, **kwargs)

    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(None, syncfunc)
Execute a function in an executor thread. Args: todo ((func,args,kwargs)): A todo tuple.
381,906
def listlike(obj): return hasattr(obj, "__iter__") \ and not issubclass(type(obj), str)\ and not issubclass(type(obj), unicode)
Is an object iterable like a list (and not a string)?
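A tiny illustrative check (not part of the record; assumes the helper above is defined, and is written with Python 2 in mind since the code references `unicode`):

print(listlike([1, 2, 3]))  # True: iterable and not a string
print(listlike((1, 2)))     # True
print(listlike("abc"))      # False: strings are excluded
print(listlike(42))         # False: not iterable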
381,907
def check_lazy_load_straat(f): def wrapper(*args): straat = args[0] if ( straat._namen is None or straat._metadata is None ): log.debug(, straat.id) straat.check_gateway() s = straat.gateway.get_straat_by_id(straat.id) straat._namen = s._namen straat._metadata = s._metadata return f(*args) return wrapper
Decorator function to lazy load a :class:`Straat`.
381,908
def alert_statuses(self, alert_statuses):
    if alert_statuses is None:
        raise ValueError("Invalid value for `alert_statuses`, must not be `None`")
    allowed_values = ["VISIBLE", "HIDDEN", "NOT_LOADED"]
    if not set(alert_statuses.keys()).issubset(set(allowed_values)):
        raise ValueError(
            "Invalid keys in `alert_statuses` [{0}], must be a subset of [{1}]"
            .format(", ".join(map(str, set(alert_statuses.keys()) - set(allowed_values))),
                    ", ".join(map(str, allowed_values)))
        )
    self._alert_statuses = alert_statuses
Sets the alert_statuses of this IntegrationStatus. A Map from the ids of the alerts contained in this integration to their install status. The install status can take on one of three values, `VISIBLE`, `HIDDEN`, and `NOT_LOADED` # noqa: E501 :param alert_statuses: The alert_statuses of this IntegrationStatus. # noqa: E501 :type: dict(str, str)
381,909
def get_cursor_position(self):
    in_stream = self.in_stream
    query_cursor_position = u"\x1b[6n"
    self.write(query_cursor_position)

    def retrying_read():
        while True:
            try:
                c = in_stream.read(1)
                if c == '':
                    raise ValueError("Stream should be blocking - shouldn't return ''")
                return c
            except IOError:
                raise ValueError('cursor get pos response read interrupted')

    resp = ''
    while True:
        resp += retrying_read()
        m = re.search('(?P<extra>.*)'
                      '(?P<CSI>\x1b\[|\x9b)'
                      '(?P<row>\\d+);(?P<column>\\d+)R', resp, re.DOTALL)
        if m:
            break
    row = int(m.groupdict()['row'])
    col = int(m.groupdict()['column'])
    extra = m.groupdict()['extra']
    if extra:
        if self.extra_bytes_callback:
            self.extra_bytes_callback(
                extra.encode(in_stream.encoding)
            )
        else:
            raise ValueError(("Bytes preceding cursor position "
                              "query response thrown out:\n%r\n"
                              "Pass an extra_bytes_callback to "
                              "CursorAwareWindow to prevent this") % (extra,))
    return (row - 1, col - 1)
Returns the terminal (row, column) of the cursor 0-indexed, like blessings cursor positions
381,910
def pos_tag(self): self._require_tokens() self._require_no_ngrams_as_tokens() self._invalidate_workers_tokens() logger.info() self._send_task_to_workers() self.pos_tagged = True return self
Apply Part-of-Speech (POS) tagging on each token. Uses the default NLTK tagger if no language-specific tagger could be loaded (English is assumed then as language). The default NLTK tagger uses Penn Treebank tagset (https://ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html). The default German tagger based on TIGER corpus uses the STTS tagset (http://www.ims.uni-stuttgart.de/forschung/ressourcen/lexika/TagSets/stts-table.html).
381,911
def format_installed_dap_list(simple=False): lines = [] if simple: for pkg in sorted(get_installed_daps()): lines.append(pkg) else: for pkg, instances in sorted(get_installed_daps_detailed().items()): versions = [] for instance in instances: location = utils.unexpanduser(instance[]) version = instance[] if not versions: version = utils.bold(version) versions.append(.format(v=version, p=location)) pkg = utils.bold(pkg) lines.append(.format(pkg=pkg, versions=.join(versions))) return lines
Formats all installed DAPs in a human readable form to list of lines
381,912
def get_series_episode(series_id, season, episode): result = tvdb_client.query_series_episodes(series_id, aired_season=season, aired_episode=episode) if result: return tvdb_client.get_episode(result[][0][])
Get an episode of a series. :param int series_id: id of the series. :param int season: season number of the episode. :param int episode: episode number of the episode. :return: the episode data. :rtype: dict
381,913
def close(self): if not self._process: return if self._process.returncode is not None: return _logger.debug() try: self._process.terminate() except OSError as error: if error.errno != errno.ESRCH: raise for dummy in range(10): if self._process.returncode is not None: return time.sleep(0.05) _logger.debug() try: self._process.kill() except OSError as error: if error.errno != errno.ESRCH: raise
Terminate or kill the subprocess. This function is blocking.
381,914
def _decimal_to_xsd_format(value):
    value = XDecimal._decimal_canonical(value)
    negative, digits, exponent = value.as_tuple()
    assert digits
    assert digits[0] != 0 or len(digits) == 1
    result = []
    if negative:
        result.append("-")
    if exponent >= 0:
        result.extend(str(x) for x in digits)
        result.extend("0" * exponent)
        return "".join(result)
    digit_count = len(digits)
    point_offset = digit_count + exponent
    fractional_digit_count = min(digit_count, -exponent)
    while fractional_digit_count and digits[digit_count - 1] == 0:
        digit_count -= 1
        fractional_digit_count -= 1
    if point_offset <= 0:
        result.append("0")
        if digit_count > 0:
            result.append(".")
            result.append("0" * -point_offset)
            result.extend(str(x) for x in digits[:digit_count])
    else:
        result.extend(str(x) for x in digits[:point_offset])
        if point_offset < digit_count:
            result.append(".")
            result.extend(str(x) for x in digits[point_offset:digit_count])
    return "".join(result)
Converts a decimal.Decimal value to its XSD decimal type value. Result is a string containing the XSD decimal type's lexical value representation. The conversion is done without any precision loss. Note that Python's native decimal.Decimal string representation will not do here as the lexical representation desired here does not allow representing decimal values using float-like `<mantissa>E<exponent>' format, e.g. 12E+30 or 0.10006E-12.
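A small illustrative comparison (not from the source; uses only the standard library) of why the native string form is unsuitable as the XSD lexical value:

from decimal import Decimal

# Python's native representation keeps a float-like exponent form...
print(str(Decimal("12E+30")))       # -> 1.2E+31
print(str(Decimal("0.10006E-12")))  # -> 1.0006E-13

# ...whereas the XSD decimal lexical form expands the digits, e.g.
# 12E+30      -> "12" followed by 30 zeros
# 0.10006E-12 -> "0.00000000000010006"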
381,915
def get_policies(self):
    prefix = _IDENTITY_NS + _POLICY_NS
    policylist_list = [
        _create_from_bytes(d, identity_pb2.PolicyList)
        for _, d in self._state_view.leaves(prefix=prefix)
    ]
    policies = []
    for policy_list in policylist_list:
        for policy in policy_list.policies:
            policies.append(policy)
    return sorted(policies, key=lambda p: p.name)
Returns all the Policies under the Identity namespace. Returns: (list): A list containing all the Policies under the Identity namespace.
381,916
def parse(self,fileName,offset): p = Parser() p.file = open(fileName, ) a = p.parse_synset(offset=offset) p.file.close() self.__dict__.update(a.__dict__)
Parses synset from file <fileName> from offset <offset>
381,917
def isorbit_record(self):
    import re
    # NAIF comet orbit record numbers are 6 digits starting with '9'
    test = re.match('^9\d{5}$', self.targetname.strip()) is not None
    return test
`True` if `targetname` appears to be a comet orbit record number. NAIF record numbers are 6 digits, begin with a '9' and can change at any time.
381,918
def match_and(self, tokens, item):
    for match in tokens:
        self.match(match, item)
Matches and.
381,919
def replace_uri(rdf, fromuri, touri):
    replace_subject(rdf, fromuri, touri)
    replace_predicate(rdf, fromuri, touri)
    replace_object(rdf, fromuri, touri)
Replace all occurrences of fromuri with touri in the given model. If touri is a list or tuple of URIRef, all values will be inserted. If touri=None, will delete all occurrences of fromuri instead.
381,920
def read_validate_params(self, request):
    self.client = self.client_authenticator.by_identifier_secret(request)
    self.password = request.post_param("password")
    self.username = request.post_param("username")
    self.scope_handler.parse(request=request, source="body")
    return True
Checks if all incoming parameters meet the expected values.
381,921
def connect_ssl(cls, user, password, endpoints, ca_certs=None, validate=None):
    if isinstance(endpoints, basestring):
        endpoints = [endpoints]
    transport = SingleEndpointTransport(
        SocketTransport.connect_ssl, endpoints,
        ca_certs=ca_certs, validate=validate)
    return cls(transport, user, password)
Creates an SSL transport to the first endpoint (aserver) to which we successfully connect
381,922
def addcols(X, cols, names=None): if isinstance(names,str): names = [n.strip() for n in names.split()] if isinstance(cols, list): if any([isinstance(x,np.ndarray) or isinstance(x,list) or \ isinstance(x,tuple) for x in cols]): assert all([len(x) == len(X) for x in cols]), \ assert names != None and len(cols) == len(names), \ cols = utils.fromarrays(cols,type=np.ndarray,names = names) else: assert len(cols) == len(X), cols = utils.fromarrays([cols], type=np.ndarray,names=names) else: assert isinstance(cols, np.ndarray) if cols.dtype.names == None: cols = utils.fromarrays([cols],type=np.ndarray, names=names) Replacements = [a for a in cols.dtype.names if a in X.dtype.names] if len(Replacements) > 0: print(, [a for a in cols.dtype.names if a in X.dtype.names]) return utils.fromarrays( [X[a] if a not in cols.dtype.names else cols[a] for a in X.dtype.names] + [cols[a] for a in cols.dtype.names if a not in X.dtype.names], type=np.ndarray, names=list(X.dtype.names) + [a for a in cols.dtype.names if a not in X.dtype.names])
Add one or more columns to a numpy ndarray. Technical dependency of :func:`tabular.spreadsheet.aggregate_in`. Implemented by the tabarray method :func:`tabular.tab.tabarray.addcols`. **Parameters** **X** : numpy ndarray with structured dtype or recarray The recarray to add columns to. **cols** : numpy ndarray, or list of arrays of columns Column(s) to add. **names**: list of strings, optional Names of the new columns. Only applicable when `cols` is a list of arrays. **Returns** **out** : numpy ndarray with structured dtype New numpy array made up of `X` plus the new columns. **See also:** :func:`tabular.spreadsheet.colstack`
381,923
def power(self): power = self._state[] return PowerUsage(power.get(), power.get(), power.get(), power.get(), power.get(), power.get(), power.get(), power.get(), power.get())
:return: A power object modeled as a named tuple
381,924
def build_backend(self, conn_string): backend_name, _ = conn_string.split(, 1) backend_path = .format(backend_name) client_class = import_attr(backend_path, ) return client_class(conn_string)
Given a DSN, returns an instantiated backend class. Ex:: backend = gator.build_backend('locmem://') # ...or... backend = gator.build_backend('redis://127.0.0.1:6379/0') :param conn_string: A DSN for connecting to the queue. Passed along to the backend. :type conn_string: string :returns: A backend ``Client`` instance
381,925
def update_enterprise_courses(self, enterprise_customer, course_container_key=, **kwargs): enterprise_context = { : enterprise_customer and enterprise_customer.identity_provider, : enterprise_customer and str(enterprise_customer.uuid), } enterprise_context.update(**kwargs) courses = [] for course in self.data[course_container_key]: courses.append( self.update_course(course, enterprise_customer, enterprise_context) ) self.data[course_container_key] = courses
This method adds enterprise-specific metadata for each course. We are adding following field in all the courses. tpa_hint: a string for identifying Identity Provider. enterprise_id: the UUID of the enterprise **kwargs: any additional data one would like to add on a per-use basis. Arguments: enterprise_customer: The customer whose data will be used to fill the enterprise context. course_container_key: The key used to find the container for courses in the serializer's data dictionary.
381,926
def add_tab(self, widget):
    self.clients.append(widget)
    index = self.tabwidget.addTab(widget, widget.get_short_name())
    self.tabwidget.setCurrentIndex(index)
    self.tabwidget.setTabToolTip(index, widget.get_filename())
    if self.dockwidget and not self.ismaximized:
        self.dockwidget.setVisible(True)
        self.dockwidget.raise_()
    self.activateWindow()
    widget.notebookwidget.setFocus()
Add tab.
381,927
async def finalize_websocket( self, result: ResponseReturnValue, websocket_context: Optional[WebsocketContext]=None, from_error_handler: bool=False, ) -> Optional[Response]: if result is not None: response = await self.make_response(result) else: response = None try: response = await self.postprocess_websocket(response, websocket_context) await websocket_finished.send(self, response=response) except Exception: if not from_error_handler: raise self.logger.exception() return response
Turns the view response return value into a response. Arguments: result: The result of the websocket to finalize into a response. websocket_context: The websocket context, optional as Flask omits this argument.
381,928
def do_query(self, line): table, line = self.get_table_params(line) args = self.getargs(line) condition = None count = False as_array = False max_size = None batch_size = None start = None if in args: asc = False args.remove() else: asc = True while args: arg = args[0] if arg[0] == and arg[1:].isdigit(): max_size = int(arg[1:]) args.pop(0) elif args[0].startswith(): arg = args.pop(0) max_size = int(arg[6:]) elif arg in [, ]: count = True args.pop(0) elif arg in [, ]: as_array = True args.pop(0) elif args[0].startswith(): arg = args.pop(0) batch_size = int(arg[8:]) elif args[0].startswith(): arg = args.pop(0) start = (arg[8:], ) elif args[0] == : arg = args.pop(0) if self.next_key: start = self.next_key else: print "no next" return elif arg.startswith("--begin="): condition = BEGINS_WITH(self.get_typed_key_value(table, arg[8:], False)) args.pop(0) elif arg.startswith("--eq="): condition = EQ(self.get_typed_key_value(table, arg[5:], False)) args.pop(0) elif arg.startswith("--ne="): condition = NE(self.get_typed_key_value(table, arg[5:], False)) args.pop(0) elif arg.startswith("--le="): condition = LE(self.get_typed_key_value(table, arg[5:], False)) args.pop(0) elif arg.startswith("--lt="): condition = LT(self.get_typed_key_value(table, arg[5:], False)) args.pop(0) elif arg.startswith("--ge="): condition = GE(self.get_typed_key_value(table, arg[5:], False)) args.pop(0) elif arg.startswith("--gt="): condition = GT(self.get_typed_key_value(table, arg[5:], False)) args.pop(0) elif arg == "--exists": condition = NOT_NULL() args.pop(0) elif arg == "--nexists": condition = NULL() args.pop(0) elif arg.startswith("--contains="): condition = CONTAINS(self.get_typed_key_value(table, arg[11:], False)) args.pop(0) elif arg.startswith("--between="): parts = arg[10:].split(",", 1) condition = BETWEEN(self.get_typed_key_value(table, parts[0], True), self.get_typed_key_value(table, parts[1], False)) args.pop(0) else: break hkey = self.get_typed_key_value(table, args.pop(0)) attr_keys = args[0].split(",") if args else None attrs = list(set(attr_keys)) if attr_keys else None result = table.query(hkey, range_key_condition=condition, attributes_to_get=attrs, scan_index_forward=asc, request_limit=batch_size, max_results=max_size, count=count, exclusive_start_key=start) if count: print "count: %s/%s" % (result.scanned_count, result.count) self.next_key = None else: if as_array and attr_keys: self.print_iterator_array(result, attr_keys) else: self.print_iterator(result) self.next_key = result.last_evaluated_key if self.consumed: print "consumed units:", result.consumed_units
query [:tablename] [-r] [--count|-c] [--array|-a] [-{max}] [{rkey-condition}] hkey [attributes,...] where rkey-condition: --eq={key} (equal key) --ne={key} (not equal key) --le={key} (less or equal than key) --lt={key} (less than key) --ge={key} (greater or equal than key) --gt={key} (greater than key) --exists (key exists) --nexists (key does not exists) --contains={key} (contains key) --ncontains={key} (does not contains key) --begin={startkey} (rkey begins with startkey) --between={firstkey},{lastkey} (between firstkey and lastkey)
381,929
def profile(script, argv, profiler_factory, pickle_protocol, dump_filename, mono):
    filename, code, globals_ = script
    sys.argv[:] = [filename] + list(argv)
    __profile__(filename, code, globals_, profiler_factory,
                pickle_protocol=pickle_protocol, dump_filename=dump_filename,
                mono=mono)
Profile a Python script.
381,930
def minimize(f, start=None, smooth=False, log=None, array=False, **vargs): if start is None: assert not array, "Please pass starting values explicitly when array=True" arg_count = f.__code__.co_argcount assert arg_count > 0, "Please pass starting values explicitly for variadic functions" start = [0] * arg_count if not hasattr(start, ): start = [start] if array: objective = f else: @functools.wraps(f) def objective(args): return f(*args) if not smooth and not in vargs: vargs[] = result = optimize.minimize(objective, start, **vargs) if log is not None: log(result) if len(start) == 1: return result.x.item(0) else: return result.x
Minimize a function f of one or more arguments. Args: f: A function that takes numbers and returns a number start: A starting value or list of starting values smooth: Whether to assume that f is smooth and use first-order info log: Logging function called on the result of optimization (e.g. print) vargs: Other named arguments passed to scipy.optimize.minimize Returns either: (a) the minimizing argument of a one-argument function (b) an array of minimizing arguments of a multi-argument function
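An illustrative sketch (hypothetical; assumes the helper above is importable as `minimize`) of the two call styles the docstring describes:

# One-argument function: returns the single minimizing value (about 2.0).
best_x = minimize(lambda x: (x - 2) ** 2)

# Multi-argument function: returns an array of minimizing arguments (about [1.0, 3.0]).
best_xy = minimize(lambda x, y: (x - 1) ** 2 + (y - 3) ** 2)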
381,931
def _to_dict(self, node, fast_access=True, short_names=False, nested=False, copy=True, with_links=True): if (fast_access or short_names or nested) and not copy: raise ValueError( ) if nested and short_names: raise ValueError( ) return result_dict
Returns a dictionary with pairings of (full) names as keys and instances as values. :param fast_access: If true parameter or result values are returned instead of the instances. :param short_names: If true keys are not full names but only the names. Raises a ValueError if the names are not unique. :param nested: If true returns a nested dictionary. :param with_links: If links should be considered :return: dictionary :raises: ValueError
381,932
def home(): return dict(links=dict(api=.format(request.url, PREFIX[1:]))), \ HTTPStatus.OK
Temporary helper function to link to the API routes
381,933
def build_routename(cls, name, routename_prefix=None):
    if routename_prefix is None:
        routename_prefix = 'api_{}'.format(
            cls.__name__.replace('Resource', '').lower()
        )
    routename_prefix = routename_prefix.rstrip('_')
    return '_'.join([routename_prefix, name])
Given a ``name`` & an optional ``routename_prefix``, this generates a name for a URL. :param name: The name for the URL (ex. 'detail') :type name: string :param routename_prefix: (Optional) A prefix for the URL's name (for resolving). The default is ``None``, which will autocreate a prefix based on the class name. Ex: ``BlogPostResource`` -> ``api_blogpost_list`` :type routename_prefix: string :returns: The final name :rtype: string
381,934
def custom_indicator_class_factory(indicator_type, base_class, class_dict, value_fields):
    value_count = len(value_fields)

    def init_1(self, tcex, value1, xid, **kwargs):
        summary = self.build_summary(value1)
        base_class.__init__(self, tcex, indicator_type, summary, xid, **kwargs)
        for k, v in class_dict.items():
            setattr(self, k, v)

    def init_2(self, tcex, value1, value2, xid, **kwargs):
        summary = self.build_summary(value1, value2)
        base_class.__init__(self, tcex, indicator_type, summary, xid, **kwargs)
        for k, v in class_dict.items():
            setattr(self, k, v)

    def init_3(self, tcex, value1, value2, value3, xid, **kwargs):
        summary = self.build_summary(value1, value2, value3)
        base_class.__init__(self, tcex, indicator_type, summary, xid, **kwargs)
        for k, v in class_dict.items():
            setattr(self, k, v)

    class_name = indicator_type.replace(' ', '')
    init_method = locals()['init_{}'.format(value_count)]
    newclass = type(str(class_name), (base_class,), {'__init__': init_method})
    return newclass
Internal method for dynamically building Custom Indicator Class.
381,935
def metar(wxdata: MetarData, units: Units) -> MetarTrans: translations = shared(wxdata, units) translations[] = wind(wxdata.wind_direction, wxdata.wind_speed, wxdata.wind_gust, wxdata.wind_variable_direction, units.wind_speed) translations[] = temperature(wxdata.temperature, units.temperature) translations[] = temperature(wxdata.dewpoint, units.temperature) translations[] = remarks.translate(wxdata.remarks) return MetarTrans(**translations)
Translate the results of metar.parse Keys: Wind, Visibility, Clouds, Temperature, Dewpoint, Altimeter, Other
381,936
def fdr(pvals, alpha=0.05, method='fdr_bh'):
    assert method.lower() in ['fdr_bh', 'fdr_by']
    pvals = np.asarray(pvals)
    shape_init = pvals.shape
    pvals = pvals.ravel()
    num_nan = np.isnan(pvals).sum()
    pvals_sortind = np.argsort(pvals)
    pvals_sorted = pvals[pvals_sortind]
    sortrevind = pvals_sortind.argsort()
    ntests = pvals.size - num_nan
    ecdffactor = np.arange(1, ntests + 1) / float(ntests)
    if method.lower() == 'fdr_by':
        cm = np.sum(1. / np.arange(1, ntests + 1))
        ecdffactor /= cm
    pvals_corr = np.diag(pvals_sorted / ecdffactor[..., None])
    pvals_corr = np.minimum.accumulate(pvals_corr[::-1])[::-1]
    pvals_corr = np.clip(pvals_corr, None, 1)
    pvals_corr = np.append(pvals_corr, np.full(num_nan, np.nan))
    pvals_corrected = pvals_corr[sortrevind].reshape(shape_init)
    with np.errstate(invalid='ignore'):
        reject = np.less(pvals_corrected, alpha)
    return reject, pvals_corrected
P-values FDR correction with Benjamini/Hochberg and Benjamini/Yekutieli procedure. This covers Benjamini/Hochberg for independent or positively correlated and Benjamini/Yekutieli for general or negatively correlated tests. Parameters ---------- pvals : array_like Array of p-values of the individual tests. alpha : float Error rate (= alpha level). method : str FDR correction methods :: 'fdr_bh' : Benjamini/Hochberg for independent / posit correlated tests 'fdr_by' : Benjamini/Yekutieli for negatively correlated tests Returns ------- reject : array, bool True if a hypothesis is rejected, False if not pval_corrected : array P-values adjusted for multiple hypothesis testing using the BH or BY correction. See also -------- bonf : Bonferroni correction holm : Holm-Bonferroni correction Notes ----- From Wikipedia: The **Benjamini–Hochberg** procedure (BH step-up procedure) controls the false discovery rate (FDR) at level :math:`\\alpha`. It works as follows: 1. For a given :math:`\\alpha`, find the largest :math:`k` such that :math:`P_{(k)}\\leq \\frac {k}{m}\\alpha.` 2. Reject the null hypothesis (i.e., declare discoveries) for all :math:`H_{(i)}` for :math:`i = 1, \\ldots, k`. The BH procedure is valid when the m tests are independent, and also in various scenarios of dependence, but is not universally valid. The **Benjamini–Yekutieli** procedure (BY) controls the FDR under arbitrary dependence assumptions. This refinement modifies the threshold and finds the largest :math:`k` such that: .. math:: P_{(k)} \\leq \\frac{k}{m \\cdot c(m)} \\alpha References ---------- - Benjamini, Y., and Hochberg, Y. (1995). Controlling the false discovery rate: a practical and powerful approach to multiple testing. Journal of the Royal Statistical Society Series B, 57, 289–300. - Benjamini, Y., and Yekutieli, D. (2001). The control of the false discovery rate in multiple testing under dependency. Annals of Statistics, 29, 1165–1188. - https://en.wikipedia.org/wiki/False_discovery_rate Examples -------- FDR correction of an array of p-values >>> from pingouin import fdr >>> pvals = [.50, .003, .32, .054, .0003] >>> reject, pvals_corr = fdr(pvals, alpha=.05) >>> print(reject, pvals_corr) [False True False False True] [0.5 0.0075 0.4 0.09 0.0015]
381,937
def preprocess_legislation(legislation_json): import os import pkg_resources import pandas as pd default_config_files_directory = os.path.join( pkg_resources.get_distribution().location) prix_annuel_carburants = pd.read_csv( os.path.join( default_config_files_directory, , , , ), sep = ) prix_annuel_carburants[] = prix_annuel_carburants[].astype(int) prix_annuel_carburants = prix_annuel_carburants.set_index() all_values = {} prix_carburants = { "@type": "Node", "description": "prix des carburants en euros par hectolitre", "children": {}, } prix_annuel = prix_annuel_carburants[] all_values[] = [] for year in range(1990, 2009): values1 = dict() values1[] = u.format(year) values1[] = u.format(year) values1[] = prix_annuel.loc[year] * 100 all_values[].append(values1) prix_annuel = prix_annuel_carburants[] for year in range(2009, 2013): values2 = dict() values2[] = u.format(year) values2[] = u.format(year) values2[] = prix_annuel.loc[year] * 100 all_values[].append(values2) prix_annuel = prix_annuel_carburants[] for year in range(2013, 2015): values3 = dict() values3[] = u.format(year) values3[] = u.format(year) values3[] = prix_annuel.loc[year] * 100 all_values[].append(values3) prix_carburants[][] = { "@type": "Parameter", "description": .replace(, ), "format": "float", "values": all_values[] } for element in [, , , , , , , , , , ]: assert element in prix_annuel_carburants.columns prix_annuel = prix_annuel_carburants[element] all_values[element] = [] for year in range(1990, 2015): values = dict() values[] = u.format(year) values[] = u.format(year) values[] = prix_annuel.loc[year] * 100 all_values[element].append(values) prix_carburants[][element] = { "@type": "Parameter", "description": element.replace(, ), "format": "float", "values": all_values[element] } legislation_json[][][][] = prix_carburants default_config_files_directory = os.path.join( pkg_resources.get_distribution().location) parc_annuel_moyen_vp = pd.read_csv( os.path.join( default_config_files_directory, , , , ), sep = ) parc_annuel_moyen_vp = parc_annuel_moyen_vp.set_index() values_parc = {} parc_vp = { "@type": "Node", "description": "taille moyenne du parc automobile en France métropolitaine en milliers de véhicules", "children": {}, } for element in [, ]: taille_parc = parc_annuel_moyen_vp[element] values_parc[element] = [] for year in range(1990, 2014): values = dict() values[] = u.format(year) values[] = u.format(year) values[] = taille_parc.loc[year] values_parc[element].append(values) parc_vp[][element] = { "@type": "Parameter", "description": "nombre de véhicules particuliers immatriculés en France à motorisation " + element, "format": "float", "values": values_parc[element] } legislation_json[][][][] = parc_vp default_config_files_directory = os.path.join( pkg_resources.get_distribution().location) quantite_carbu_vp_france = pd.read_csv( os.path.join( default_config_files_directory, , , , ), sep = ) quantite_carbu_vp_france = quantite_carbu_vp_france.set_index() values_quantite = {} quantite_carbu_vp = { "@type": "Node", "description": "quantite de carburants consommés en France métropolitaine", "children": {}, } for element in [, ]: quantite_carburants = quantite_carbu_vp_france[element] values_quantite[element] = [] for year in range(1990, 2014): values = dict() values[] = u.format(year) values[] = u.format(year) values[] = quantite_carburants.loc[year] values_quantite[element].append(values) quantite_carbu_vp[][element] = { "@type": "Parameter", "description": "consommation totale de " + element + " en France", 
"format": "float", "values": values_quantite[element] } legislation_json[][][][] = quantite_carbu_vp default_config_files_directory = os.path.join( pkg_resources.get_distribution().location) part_des_types_de_supercarburants = pd.read_csv( os.path.join( default_config_files_directory, , , ), sep = ) del part_des_types_de_supercarburants[] part_des_types_de_supercarburants = \ part_des_types_de_supercarburants[part_des_types_de_supercarburants[] > 0].copy() part_des_types_de_supercarburants[] = part_des_types_de_supercarburants[].astype(int) part_des_types_de_supercarburants = part_des_types_de_supercarburants.set_index() cols = part_des_types_de_supercarburants.columns for element in cols: part_des_types_de_supercarburants[element] = ( part_des_types_de_supercarburants[element] / (part_des_types_de_supercarburants[] - part_des_types_de_supercarburants[]) ) del part_des_types_de_supercarburants[] del part_des_types_de_supercarburants[] cols = part_des_types_de_supercarburants.columns part_des_types_de_supercarburants[] = 0 for element in cols: part_des_types_de_supercarburants[] += part_des_types_de_supercarburants[element] assert (part_des_types_de_supercarburants[] == 1).any(), "The weighting of the shares did not work" values_part_supercarburants = {} part_type_supercaburant = { "@type": "Node", "description": "part de la consommation totale dsuper_plombesp_95sp_98sp_e10start{}-01-01stop{}-12-31valuechildrenessences", "format": "float", "values": values_part_supercarburants[element] } legislation_json[][][][] = \ part_type_supercaburant alcool_conso_et_vin = { "@type": "Node", "description": "alcools", "children": {}, } alcool_conso_et_vin[][] = { "@type": "Node", "description": "Pour calculer le taux de taxation implicite sur le vin", "children": { "droit_cn_vin": { "@type": "Parameter", "description": u"Masse droit vin, vin mousseux, cidres et poirés selon comptabilité nationale", "format": "float", "values": [ {: u, : u, : 129}, {: u, : u, : 130}, {: u, : u, : 129}, {: u, : u, : 132}, {: u, : u, : 133}, {: u, : u, : 127}, {: u, : u, : 127}, {: u, : u, : 127}, {: u, : u, : 127}, {: u, : u, : 125}, {: u, : u, : 117}, {: u, : u, : 119}, {: u, : u, : 117}, {: u, : u, : 114}, {: u, : u, : 117}, {: u, : u, : 119}, {: u, : u, : 118}, {: u, : u, : 120}, {: u, : u, : 122}, ], }, "masse_conso_cn_vin": { "@type": "Parameter", "description": u"Masse consommation vin, vin mousseux, cidres et poirés selon comptabilité nationale", "format": "float", "values": [ {: u, : u, : 7191}, {: u, : u, : 7419}, {: u, : u, : 7636}, {: u, : u, : 8025}, {: u, : u, : 8451}, {: u, : u, : 8854}, {: u, : u, : 9168}, {: u, : u, : 9476}, {: u, : u, : 9695}, {: u, : u, : 9985}, {: u, : u, : 9933}, {: u, : u, : 10002}, {: u, : u, : 10345}, {: u, : u, : 10461}, {: u, : u, : 10728}, {: u, : u, : 11002}, {: u, : u, : 11387}, {: u, : u, : 11407}, {: u, : u, : 11515}, ], }, }, } alcool_conso_et_vin[][] = { "@type": "Node", "description": "Pour calculer le taux de taxation implicite sur la bière", "children": { "droit_cn_biere": { "@type": "Parameter", "description": "Masse droit biere selon comptabilité nationale", "format": "float", "values": [ {: u, : u, : 361}, {: u, : u, : 366}, {: u, : u, : 364}, {: u, : u, : 365}, {: u, : u, : 380}, {: u, : u, : 359}, {: u, : u, : 364}, {: u, : u, : 361}, {: u, : u, : 370}, {: u, : u, : 378}, {: u, : u, : 364}, {: u, : u, : 396}, {: u, : u, : 382}, {: u, : u, : 375}, {: u, : u, : 376}, {: u, : u, : 375}, {: u, : u, : 393}, {: u, : u, : 783}, {: u, : u, : 897}, ], }, 
"masse_conso_cn_biere": { "@type": "Parameter", "description": u"Masse consommation biere selon comptabilité nationale", "format": "float", "values": [ {: u, : u, : 2111}, {: u, : u, : 2144}, {: u, : u, : 2186}, {: u, : u, : 2291}, {: u, : u, : 2334}, {: u, : u, : 2290}, {: u, : u, : 2327}, {: u, : u, : 2405}, {: u, : u, : 2554}, {: u, : u, : 2484}, {: u, : u, : 2466}, {: u, : u, : 2486}, {: u, : u, : 2458}, {: u, : u, : 2287}, {: u, : u, : 2375}, {: u, : u, : 2461}, {: u, : u, : 2769}, {: u, : u, : 2868}, {: u, : u, : 3321}, ], }, }, } alcool_conso_et_vin[][] = { "@type": "Node", "description": "Pour calculer le taux de taxation implicite sur alcools forts", "children": { "droit_cn_alcools": { "@type": "Parameter", "description": "Masse droit alcool selon comptabilité nationale sans droits sur les produits intermediaires et cotisation spéciale alcool fort", "format": "float", "values": [ {: u, : u, : 1872}, {: u, : u, : 1957}, {: u, : u, : 1932}, {: u, : u, : 1891}, {: u, : u, : 1908}, {: u, : u, : 1842}, {: u, : u, : 1954}, {: u, : u, : 1990}, {: u, : u, : 2005}, {: u, : u, : 2031}, {: u, : u, : 2111}, {: u, : u, : 2150}, {: u, : u, : 2225}, ], }, "droit_cn_alcools_total": { "@type": "Parameter", "description": u"Masse droit alcool selon comptabilité nationale avec les differents droits", "format": "float", "values": [ {: u, : u, : 2337}, {: u, : u, : 2350}, {: u, : u, : 2366}, {: u, : u, : 2369}, {: u, : u, : 2385}, {: u, : u, : 2416}, {: u, : u, : 2514}, {: u, : u, : 2503}, {: u, : u, : 2453}, {: u, : u, : 2409}, {: u, : u, : 2352}, {: u, : u, : 2477}, {: u, : u, : 2516}, {: u, : u, : 2528}, {: u, : u, : 2629}, {: u, : u, : 2734}, {: u, : u, : 3078}, {: u, : u, : 2718}, {: u, : u, : 3022}, ], }, "masse_conso_cn_alcools": { "@type": "Parameter", "description": u"Masse consommation alcool selon comptabilité nationale", "format": "float", "values": [ {: u, : u, : 4893}, {: u, : u, : 5075}, {: u, : u, : 5065}, {: u, : u, : 5123}, {: u, : u, : 5234}, {: u, : u, : 5558}, {: u, : u, : 5721}, {: u, : u, : 5932}, {: u, : u, : 5895}, {: u, : u, : 5967}, {: u, : u, : 5960}, {: u, : u, : 6106}, {: u, : u, : 6142}, {: u, : u, : 6147}, {: u, : u, : 6342}, {: u, : u, : 6618}, {: u, : u, : 6680}, {: u, : u, : 6996}, {: u, : u, : 7022}, ], }, }, } legislation_json[][][][] = alcool_conso_et_vin keys_ticpe = legislation_json[][][][][].keys() for element in keys_ticpe: get_values = \ legislation_json[][][][][][element][] for each_value in get_values: get_character = .format(each_value[]) year = int(get_character[:4]) if year < 2002: each_value[] = each_value[] / 6.55957 else: each_value[] = each_value[] return legislation_json
Preprocess the legislation parameters to add prices and amounts from national accounts
381,938
def get_parameter_p_value_too_high_warning(
    model_type, model_params, parameter, p_value, maximum_p_value
):
    warnings = []
    if p_value > maximum_p_value:
        data = {
            "{}_p_value".format(parameter): p_value,
            "{}_maximum_p_value".format(parameter): maximum_p_value,
        }
        data.update(model_params)
        warnings.append(
            EEMeterWarning(
                qualified_name=(
                    "eemeter.caltrack_daily.{model_type}.{parameter}_p_value_too_high".format(
                        model_type=model_type, parameter=parameter
                    )
                ),
                description=(
                    "Model fit {parameter} p-value is too high. Candidate model rejected.".format(
                        parameter=parameter
                    )
                ),
                data=data,
            )
        )
    return warnings
Return an empty list or a single warning wrapped in a list indicating whether model parameter p-value is too high. Parameters ---------- model_type : :any:`str` Model type (e.g., ``'cdd_hdd'``). model_params : :any:`dict` Parameters as stored in :any:`eemeter.CalTRACKUsagePerDayCandidateModel.model_params`. parameter : :any:`str` The name of the parameter, e.g., ``'intercept'``. p_value : :any:`float` The p-value of the parameter. maximum_p_value : :any:`float` The maximum allowable p-value of the parameter. Returns ------- warnings : :any:`list` of :any:`eemeter.EEMeterWarning` Empty list or list of single warning.
381,939
def statuses(self):
    r_json = self._get_json()
    statuses = [Status(self._options, self._session, raw_stat_json)
                for raw_stat_json in r_json]
    return statuses
Get a list of status Resources from the server. :rtype: List[Status]
381,940
def append_seeding_annotation(self, annotation: str, values: Set[str]) -> Seeding: return self.seeding.append_annotation(annotation, values)
Add a seed induction method for single annotation's values. :param annotation: The annotation to filter by :param values: The values of the annotation to keep
381,941
def p_file_type_1(self, p): try: self.builder.set_file_type(self.document, p[2]) except OrderError: self.order_error(, , p.lineno(1)) except CardinalityError: self.more_than_one_error(, p.lineno(1))
file_type : FILE_TYPE file_type_value
381,942
def process_m2m_through_save(self, obj, created=False, **kwargs): if not created: return self._process_m2m_through(obj, )
Process M2M post save for custom through model.
381,943
def setAndUpdateValues(self, solution_next, IncomeDstn, LivPrb, DiscFac):
    self.DiscFacEff = DiscFac*LivPrb
    self.ShkPrbsNext = IncomeDstn[0]
    self.PermShkValsNext = IncomeDstn[1]
    self.TranShkValsNext = IncomeDstn[2]
    self.PermShkMinNext = np.min(self.PermShkValsNext)
    self.TranShkMinNext = np.min(self.TranShkValsNext)
    self.vPfuncNext = solution_next.vPfunc
    self.WorstIncPrb = np.sum(self.ShkPrbsNext[
        (self.PermShkValsNext*self.TranShkValsNext) ==
        (self.PermShkMinNext*self.TranShkMinNext)])
    if self.CubicBool:
        self.vPPfuncNext = solution_next.vPPfunc
    if self.vFuncBool:
        self.vFuncNext = solution_next.vFunc
    self.PatFac = ((self.Rfree*self.DiscFacEff)**(1.0/self.CRRA))/self.Rfree
    self.MPCminNow = 1.0/(1.0 + self.PatFac/solution_next.MPCmin)
    self.ExIncNext = np.dot(self.ShkPrbsNext, self.TranShkValsNext*self.PermShkValsNext)
    self.hNrmNow = self.PermGroFac/self.Rfree*(self.ExIncNext + solution_next.hNrm)
    self.MPCmaxNow = 1.0/(1.0 + (self.WorstIncPrb**(1.0/self.CRRA))*
                          self.PatFac/solution_next.MPCmax)
Unpacks some of the inputs (and calculates simple objects based on them), storing the results in self for use by other methods. These include: income shocks and probabilities, next period's marginal value function (etc), the probability of getting the worst income shock next period, the patience factor, human wealth, and the bounding MPCs. Parameters ---------- solution_next : ConsumerSolution The solution to next period's one period problem. IncomeDstn : [np.array] A list containing three arrays of floats, representing a discrete approximation to the income process between the period being solved and the one immediately following (in solution_next). Order: event probabilities, permanent shocks, transitory shocks. LivPrb : float Survival probability; likelihood of being alive at the beginning of the succeeding period. DiscFac : float Intertemporal discount factor for future utility. Returns ------- None
381,944
def trc(postfix: Optional[str] = None, *, depth=1) -> logging.Logger:
    x = inspect.stack()[depth]
    code = x[0].f_code
    func = [obj for obj in gc.get_referrers(code) if inspect.isfunction(obj)][0]
    mod = inspect.getmodule(x.frame)
    parts = (mod.__name__, func.__qualname__)
    if postfix:
        parts += (postfix,)
    logger_name = '.'.join(parts)
    return logging.getLogger(logger_name)
Automatically generate a logger from the calling function :param postfix: append another logger name on top this :param depth: depth of the call stack at which to capture the caller name :return: instance of a logger with a correct path to a current caller
381,945
def remove_independent_variable(self, variable_name):
    self._remove_child(variable_name)
    self._independent_variables.pop(variable_name)
Remove an independent variable which was added with add_independent_variable :param variable_name: name of variable to remove :return:
381,946
def run(self):
    if not self._queue:
        raise Exception("No queue available to send messages")
    factory = LiveStreamFactory(self)
    self._reactor.connectSSL("streaming.campfirenow.com", 443, factory, ssl.ClientContextFactory())
    self._reactor.run()
Called by the process, it runs it. NEVER call this method directly. Instead call start() to start the separate process. If you don't want to use a second process, then call fetch() directly on this istance. To stop, call terminate()
381,947
def on_message(self, msg): msg = json.loads(msg) psession = self.funcserver.pysessions.get(self.pysession_id, None) if psession is None: interpreter = PyInterpreter(self.funcserver.define_python_namespace()) psession = dict(interpreter=interpreter, socks=set([self.id])) self.funcserver.pysessions[self.pysession_id] = psession else: interpreter = psession[] psession[].add(self.id) code = msg[] msg_id = msg[] stdout = sys.stdout try: sys.stdout = cStringIO.StringIO() interpreter.runsource(code) output = sys.stdout.getvalue() or interpreter.output if isinstance(output, list): output = .join(output) interpreter.output = [] finally: sys.stdout = stdout msg = {: MSG_TYPE_CONSOLE, : msg_id, : output} self.send_message(msg)
Called when client sends a message. Supports a python debugging console. This forms the "eval" part of a standard read-eval-print loop. Currently the only implementation of the python console is in the WebUI but the implementation of a terminal based console is planned.
381,948
def isasteroid(self):
    if self.asteroid is not None:
        return self.asteroid
    elif self.comet is not None:
        return not self.comet
    else:
        return any(self.parse_asteroid()) is not None
`True` if `targetname` appears to be an asteroid.
381,949
def get_compound_ids(self): cursor = self.conn.cursor() cursor.execute() self.conn.commit() for row in cursor: if not row[0] in self.compound_ids: self.compound_ids.append(row[0])
Extract the current compound ids in the database. Updates the self.compound_ids list
381,950
def add_message(self, id, body, tags=False):
    if not tags:
        tags = {}
    try:
        self._tx_queue_lock.acquire()
        self._tx_queue.append(
            EventHub_pb2.Message(id=id, body=body, tags=tags,
                                 zone_id=self.eventhub_client.zone_id))
    finally:
        self._tx_queue_lock.release()
    return self
add messages to the rx_queue :param id: str message Id :param body: str the message body :param tags: dict[string->string] tags to be associated with the message :return: self
381,951
def _delete(self, url, data, scope):
    self._create_session(scope)
    response = self.session.delete(url, data=data)
    return response.status_code, response.text
Make a DELETE request using the session object to a Degreed endpoint. Args: url (str): The url to send a DELETE request to. data (str): The json encoded payload to DELETE. scope (str): Must be one of the scopes Degreed expects: - `CONTENT_PROVIDER_SCOPE` - `COMPLETION_PROVIDER_SCOPE`
381,952
def encoded_dict(in_dict): out_dict = {} for k, v in in_dict.items(): if isinstance(v, unicode): if sys.version_info < (3, 0): v = v.encode() elif isinstance(v, str): if sys.version_info < (3, 0): v.decode() out_dict[k] = v return out_dict
Encode every value of a dict to UTF-8. Useful for POSTing requests on the 'data' parameter of urlencode.
381,953
def _verify_configs(configs): if configs: scenario_names = [c.scenario.name for c in configs] for scenario_name, n in collections.Counter(scenario_names).items(): if n > 1: msg = ("Duplicate scenario name found. " ).format(scenario_name) util.sysexit_with_message(msg) else: msg = " glob failed. Exiting.".format(MOLECULE_GLOB) util.sysexit_with_message(msg)
Verify a Molecule config was found and returns None. :param configs: A list containing absolute paths to Molecule config files. :return: None
381,954
def update(name, connection_uri="", id_file="", o=[], config=None): storm_ = get_storm_instance(config) settings = {} if id_file != "": settings[] = id_file for option in o: k, v = option.split("=") settings[k] = v try: storm_.update_entry(name, **settings) print(get_formatted_message( .format( name ), )) except ValueError as error: print(get_formatted_message(error, ), file=sys.stderr) sys.exit(1)
Enhanced version of the edit command featuring multiple edits using regular expressions to match entries
381,955
def setUp(self, port, soc, input): name = soc.getOperationName() bop = port.getBinding().operations.get(name) op = port.getBinding().getPortType().operations.get(name) assert op is not None, %name assert bop is not None, %name self.input = input self.op = op self.bop = bop
Instance Data: op -- WSDLTools Operation instance bop -- WSDLTools BindingOperation instance input -- boolean input/output
381,956
def delta_e_cie2000(lab_color_vector, lab_color_matrix, Kl=1, Kc=1, Kh=1): L, a, b = lab_color_vector avg_Lp = (L + lab_color_matrix[:, 0]) / 2.0 C1 = numpy.sqrt(numpy.sum(numpy.power(lab_color_vector[1:], 2))) C2 = numpy.sqrt(numpy.sum(numpy.power(lab_color_matrix[:, 1:], 2), axis=1)) avg_C1_C2 = (C1 + C2) / 2.0 G = 0.5 * (1 - numpy.sqrt(numpy.power(avg_C1_C2, 7.0) / (numpy.power(avg_C1_C2, 7.0) + numpy.power(25.0, 7.0)))) a1p = (1.0 + G) * a a2p = (1.0 + G) * lab_color_matrix[:, 1] C1p = numpy.sqrt(numpy.power(a1p, 2) + numpy.power(b, 2)) C2p = numpy.sqrt(numpy.power(a2p, 2) + numpy.power(lab_color_matrix[:, 2], 2)) avg_C1p_C2p = (C1p + C2p) / 2.0 h1p = numpy.degrees(numpy.arctan2(b, a1p)) h1p += (h1p < 0) * 360 h2p = numpy.degrees(numpy.arctan2(lab_color_matrix[:, 2], a2p)) h2p += (h2p < 0) * 360 avg_Hp = (((numpy.fabs(h1p - h2p) > 180) * 360) + h1p + h2p) / 2.0 T = 1 - 0.17 * numpy.cos(numpy.radians(avg_Hp - 30)) + \ 0.24 * numpy.cos(numpy.radians(2 * avg_Hp)) + \ 0.32 * numpy.cos(numpy.radians(3 * avg_Hp + 6)) - \ 0.2 * numpy.cos(numpy.radians(4 * avg_Hp - 63)) diff_h2p_h1p = h2p - h1p delta_hp = diff_h2p_h1p + (numpy.fabs(diff_h2p_h1p) > 180) * 360 delta_hp -= (h2p > h1p) * 720 delta_Lp = lab_color_matrix[:, 0] - L delta_Cp = C2p - C1p delta_Hp = 2 * numpy.sqrt(C2p * C1p) * numpy.sin(numpy.radians(delta_hp) / 2.0) S_L = 1 + ((0.015 * numpy.power(avg_Lp - 50, 2)) / numpy.sqrt(20 + numpy.power(avg_Lp - 50, 2.0))) S_C = 1 + 0.045 * avg_C1p_C2p S_H = 1 + 0.015 * avg_C1p_C2p * T delta_ro = 30 * numpy.exp(-(numpy.power(((avg_Hp - 275) / 25), 2.0))) R_C = numpy.sqrt((numpy.power(avg_C1p_C2p, 7.0)) / (numpy.power(avg_C1p_C2p, 7.0) + numpy.power(25.0, 7.0))) R_T = -2 * R_C * numpy.sin(2 * numpy.radians(delta_ro)) return numpy.sqrt( numpy.power(delta_Lp / (S_L * Kl), 2) + numpy.power(delta_Cp / (S_C * Kc), 2) + numpy.power(delta_Hp / (S_H * Kh), 2) + R_T * (delta_Cp / (S_C * Kc)) * (delta_Hp / (S_H * Kh)))
Calculates the Delta E (CIE2000) of two colors.
381,957
def whichEncoding(self):
    if self.request.mode in BROWSER_REQUEST_MODES:
        if self.fields.getOpenIDNamespace() == OPENID2_NS and \
           len(self.encodeToURL()) > OPENID1_URL_LIMIT:
            return ENCODE_HTML_FORM
        else:
            return ENCODE_URL
    else:
        return ENCODE_KVFORM
How should I be encoded? @returns: one of ENCODE_URL, ENCODE_HTML_FORM, or ENCODE_KVFORM. @change: 2.1.0 added the ENCODE_HTML_FORM response.
381,958
def process_request(self, request):
    request.token = get_token(request)
    request.user = SimpleLazyObject(lambda: get_user(request))
    request._dont_enforce_csrf_checks = dont_enforce_csrf_checks(request)
Lazy set user and token
381,959
def _render_item(self, depth, key, value = None, **settings): strptrn = self.INDENT * depth lchar = self.lchar(settings[self.SETTING_LIST_STYLE]) s = self._es_text(settings, settings[self.SETTING_LIST_FORMATING]) lchar = self.fmt_text(lchar, **s) strptrn = "{}" if value is not None: strptrn += ": {}" s = self._es_text(settings, settings[self.SETTING_TEXT_FORMATING]) strptrn = self.fmt_text(strptrn.format(key, value), **s) return .format(self.INDENT * depth, lchar, strptrn)
Format single list item.
381,960
def compute_diff(dir_base, dir_cmp): data = {} data[] = list(set(dir_cmp[]) - set(dir_base[])) data[] = list(set(dir_base[]) - set(dir_cmp[])) data[] = [] data[] = list(set(dir_cmp[]) - set(dir_base[])) for f in set(dir_cmp[]).intersection(set(dir_base[])): if dir_base[][f] != dir_cmp[][f]: data[].append(f) return data
Compare `dir_base' and `dir_cmp' and returns a list with the following keys: - deleted files `deleted' - created files `created' - updated files `updated' - deleted directories `deleted_dirs'
381,961
def deal_with_changeset_stack_policy(self, fqn, stack_policy):
    if stack_policy:
        kwargs = generate_stack_policy_args(stack_policy)
        kwargs["StackName"] = fqn
        logger.debug("Setting stack policy on %s.", fqn)
        self.cloudformation.set_stack_policy(**kwargs)
Set a stack policy when using changesets. ChangeSets don't allow you to set stack policies in the same call to update them. This sets it before executing the changeset if the stack policy is passed in. Args: stack_policy (:class:`stacker.providers.base.Template`): A template object representing a stack policy.
381,962
def find_root(filename, target=): lg.debug(f) if target == and (filename / ).exists(): return filename elif filename.is_dir(): pattern = target[:3] + if filename.stem.startswith(pattern): return filename return find_root(filename.parent, target)
Find base directory (root) for a filename. Parameters ---------- filename : instance of Path search the root for this file target: str 'bids' (the directory containing 'participants.tsv'), 'subject' (the directory starting with 'sub-'), 'session' (the directory starting with 'ses-') Returns ------- Path path of the target directory
381,963
def service_execution(self, name=None, pk=None, scope=None, service=None, **kwargs):
    _service_executions = self.service_executions(name=name, pk=pk, scope=scope,
                                                  service=service, **kwargs)
    if len(_service_executions) == 0:
        raise NotFoundError("No service execution fits criteria")
    if len(_service_executions) != 1:
        raise MultipleFoundError("Multiple service executions fit criteria")
    return _service_executions[0]
Retrieve single KE-chain ServiceExecution. Uses the same interface as the :func:`service_executions` method but returns only a single pykechain :class:`models.ServiceExecution` instance. If additional `keyword=value` arguments are provided, these are added to the request parameters. Please refer to the documentation of the KE-chain API for additional query parameters. :param name: (optional) name to limit the search for :type name: basestring or None :param pk: (optional) primary key or id (UUID) of the service to search for :type pk: basestring or None :param scope: (optional) id (UUID) of the scope to search in :type scope: basestring or None :param kwargs: (optional) additional search keyword arguments :type kwargs: dict or None :return: a single :class:`models.ServiceExecution` object :raises NotFoundError: When no `ServiceExecution` object is found :raises MultipleFoundError: When more than a single `ServiceExecution` object is found
381,964
def lookupGeoInfo(positions): list_data=[] oldlat=0 oldlon=0 d={} for pos in positions: diff_lat=abs(float(pos[])-oldlat) diff_lon=abs(float(pos[])-oldlon) if (diff_lat>POS_THRESHOLD_DEG) or\ (diff_lon>POS_THRESHOLD_DEG): d=lookup_by_latlon(pos[],pos[]) oldlat=float(pos[]) oldlon=float(pos[]) else: logger.debug("Skipping %s/%s, close to prev"%(pos[],pos[])) list_data.append(d) logger.info(%(len(list_data))) return list_data
Looks up lat/lon info with goole given a list of positions as parsed by parsePositionFile. Returns google results in form of dicionary
381,965
def do_check_freshness(self, hosts, services, timeperiods, macromodulations, checkmodulations, checks, when): now = when cls = self.__class__ if not self.in_checking and self.freshness_threshold and not self.freshness_expired: if os.getenv(, None): logger.info("--ALC-- -> checking freshness for: %s", self.get_full_name()) if not self.last_state_update: self.last_state_update = int(now) if self.last_state_update < now - \ (self.freshness_threshold + cls.additional_freshness_latency): timeperiod = timeperiods[self.check_period] if timeperiod is None or timeperiod.is_time_valid(now): chk = self.launch_check(now, hosts, services, timeperiods, macromodulations, checkmodulations, checks) if not chk: logger.warning("No raised freshness check for: %s", self) return None chk.freshness_expiry_check = True chk.check_time = time.time() chk.output = "Freshness period expired: %s" % ( datetime.utcfromtimestamp(int(chk.check_time)).strftime( "%Y-%m-%d %H:%M:%S %Z")) if self.my_type == : if self.freshness_state == : chk.exit_status = 0 elif self.freshness_state == : chk.exit_status = 2 elif self.freshness_state in [, ]: chk.exit_status = 4 else: chk.exit_status = 3 else: if self.freshness_state == : chk.exit_status = 0 elif self.freshness_state == : chk.exit_status = 1 elif self.freshness_state == : chk.exit_status = 2 elif self.freshness_state == : chk.exit_status = 3 elif self.freshness_state == : chk.exit_status = 4 else: chk.exit_status = 3 return chk else: logger.debug("Ignored freshness check for %s, because " "we are not in the check period.", self.get_full_name()) return None
Check freshness and schedule a check now if necessary. This function is called by the scheduler if Alignak is configured to check the freshness. It is called for hosts that have the freshness check enabled if they are only passively checked. It is called for services that have the freshness check enabled if they are only passively checked and if their depending host is not in a freshness expired state (freshness_expiry = True). A log is raised when the freshess expiry is detected and the item is set as freshness_expiry. :param hosts: hosts objects, used to launch checks :type hosts: alignak.objects.host.Hosts :param services: services objects, used launch checks :type services: alignak.objects.service.Services :param timeperiods: Timeperiods objects, used to get check_period :type timeperiods: alignak.objects.timeperiod.Timeperiods :param macromodulations: Macro modulations objects, used in commands (notif, check) :type macromodulations: alignak.objects.macromodulation.Macromodulations :param checkmodulations: Checkmodulations objects, used to change check command if necessary :type checkmodulations: alignak.objects.checkmodulation.Checkmodulations :param checks: checks dict, used to get checks_in_progress for the object :type checks: dict :return: A check or None :rtype: None | object
381,966
def add_path_part(url, regex=PATH_PART):
    formatter = string.Formatter()
    url_var_template = "(?P<{var_name}>{regex})"
    for part in formatter.parse(url):
        string_part, var_name, _, _ = part
        if string_part:
            yield string_part
        if var_name:
            yield url_var_template.format(var_name=var_name, regex=regex)
replace the variables in a url template with regex named groups :param url: string of a url template :param regex: regex of the named group :returns: regex
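A hedged usage sketch (assumes the generator above is in scope; the group regex is passed explicitly because the PATH_PART default is not shown in this record):

import re

# Joining the generator's pieces yields a full pattern with named groups.
pattern = "".join(add_path_part("/users/{user_id}/posts/{post_id}", regex=r"[^/]+"))
# pattern == "/users/(?P<user_id>[^/]+)/posts/(?P<post_id>[^/]+)"
match = re.match(pattern, "/users/42/posts/7")
print(match.groupdict())  # {'user_id': '42', 'post_id': '7'}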
381,967
def p_generate_block(self, p):
    p[0] = Block(p[2], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
generate_block : BEGIN generate_items END
381,968
def add_axes_and_nodes(self):
    for i, (group, nodelist) in enumerate(self.nodes.items()):
        theta = self.group_theta(group)
        if self.has_edge_within_group(group):
            theta = theta - self.minor_angle
            self.plot_nodes(nodelist, theta, group)
            theta = theta + 2 * self.minor_angle
            self.plot_nodes(nodelist, theta, group)
        else:
            self.plot_nodes(nodelist, theta, group)
Adds the axes (i.e. 2 or 3 axes, not to be confused with matplotlib axes) and the nodes that belong to each axis.
381,969
def setOrientation( self, orientation ): super(XToolBar, self).setOrientation(orientation) self.refreshButton()
Sets the orientation for this toolbar to the given value, and updates the contents margins and collapse button based on the value. :param orientation | <Qt.Orientation>
381,970
def change_state_id(self, state_id=None): if state_id is None: state_id = state_id_generator(used_state_ids=[self.state_id]) if not self.is_root_state and not self.is_root_state_of_library: used_ids = list(self.parent.states.keys()) + [self.parent.state_id, self.state_id] if state_id in used_ids: state_id = state_id_generator(used_state_ids=used_ids) self._state_id = state_id
Changes the id of the state to a new id. If no state_id is passed as a parameter, a new state id is generated. :param str state_id: The new state id of the state :return:
381,971
def create_bracket_parameter_id(brackets_id, brackets_curr_decay, increased_id=-1): if increased_id == -1: increased_id = str(create_parameter_id()) params_id = .join([str(brackets_id), str(brackets_curr_decay), increased_id]) return params_id
Create a full id for a specific bracket's hyperparameter configuration Parameters ---------- brackets_id: int brackets id brackets_curr_decay: brackets curr decay increased_id: int increased id Returns ------- str params id
381,972
def remove_group(self, process_id, wit_ref_name, page_id, section_id, group_id): route_values = {} if process_id is not None: route_values[] = self._serialize.url(, process_id, ) if wit_ref_name is not None: route_values[] = self._serialize.url(, wit_ref_name, ) if page_id is not None: route_values[] = self._serialize.url(, page_id, ) if section_id is not None: route_values[] = self._serialize.url(, section_id, ) if group_id is not None: route_values[] = self._serialize.url(, group_id, ) self._send(http_method=, location_id=, version=, route_values=route_values)
RemoveGroup. [Preview API] Removes a group from the work item form. :param str process_id: The ID of the process :param str wit_ref_name: The reference name of the work item type :param str page_id: The ID of the page the group is in :param str section_id: The ID of the section the group is in :param str group_id: The ID of the group
381,973
def from_ordinal(cls, ordinal): if ordinal == 0: return ZeroDate if ordinal >= 736695: year = 2018 month = 1 day = int(ordinal - 736694) elif ordinal >= 719163: year = 1970 month = 1 day = int(ordinal - 719162) else: year = 1 month = 1 day = int(ordinal) if day < 1 or day > 3652059: raise ValueError("Ordinal out of range (1..3652059)") if year < MIN_YEAR or year > MAX_YEAR: raise ValueError("Year out of range (%d..%d)" % (MIN_YEAR, MAX_YEAR)) days_in_year = DAYS_IN_YEAR[year] while day > days_in_year: day -= days_in_year year += 1 days_in_year = DAYS_IN_YEAR[year] days_in_month = DAYS_IN_MONTH[(year, month)] while day > days_in_month: day -= days_in_month month += 1 days_in_month = DAYS_IN_MONTH[(year, month)] year, month, day = _normalize_day(year, month, day) return cls.__new(ordinal, year, month, day)
Return the :class:`.Date` that corresponds to the proleptic Gregorian ordinal, where ``0001-01-01`` has ordinal 1 and ``9999-12-31`` has ordinal 3,652,059. Values outside of this range trigger a :exc:`ValueError`. The corresponding instance method for the reverse date-to-ordinal transformation is :meth:`.to_ordinal`.
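The ordinal anchors hard-coded above (1, 719163, 736695 and the 3652059 upper bound) follow the proleptic Gregorian ordinal also used by the standard library, so they can be sanity-checked independently of this class:

from datetime import date

# Stdlib cross-check of the fast-path anchors used in from_ordinal.
assert date.fromordinal(1) == date(1, 1, 1)
assert date.fromordinal(719163) == date(1970, 1, 1)
assert date.fromordinal(736695) == date(2018, 1, 1)
assert date.fromordinal(3652059) == date(9999, 12, 31)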
381,974
def get_default_property_values(self, classname): schema_element = self.get_element_by_class_name(classname) result = { property_name: property_descriptor.default for property_name, property_descriptor in six.iteritems(schema_element.properties) } if schema_element.is_edge: result.pop(EDGE_SOURCE_PROPERTY_NAME, None) result.pop(EDGE_DESTINATION_PROPERTY_NAME, None) return result
Return a dict with default values for all properties declared on this class.
381,975
def at_css(self, css, timeout = DEFAULT_AT_TIMEOUT, **kw): return self.wait_for_safe(lambda: super(WaitMixin, self).at_css(css), timeout = timeout, **kw)
Returns the first node matching the given CSSv3 expression or ``None`` if a timeout occurs.
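A hypothetical call site with a dryscrape-style session object that mixes in WaitMixin (the session variable and the selector are placeholders):

node = session.at_css("input#login", timeout=5)
if node is None:
    raise RuntimeError("element did not appear within 5 seconds")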
381,976
def plot(graph, show_x_axis=True, head=None, tail=None, label_length=4, padding=0, height=2, show_min_max=True, show_data_range=True, show_title=True): def __plot(graph): def get_padding_str(label, value): padding_str = if len(label) < label_length: diff = label_length - len(label) padding_str = * diff padding_str2 = if len(str(value)) < m: diff = m - len(str(value)) padding_str2 = * diff return % (padding_str,padding_str2) out = zip(*graph.strings) out.reverse() if graph.title and show_title: print graph.title lines = [sep.join(a) for a in out] if show_min_max: lines[0] = lines[0] + " -- Max: %s" % str(max(graph.data)) lines[-1] = lines[-1] + " -- Min %s" % str(min(graph.data)) print .join(lines) if graph.labels and show_x_axis: print (u % x_sep.join([ % (label[:label_length], str(v), get_padding_str(label, v)) for label, v in zip(graph.labels, graph.data)])) if show_data_range and graph.labels: print % (graph.first_x, graph.last_x) graph.clean_range() if head: graph.head = head if tail: graph.tail = tail if height: graph.height = height if label_length < 1: label_length = 4 max_label_length = max(map(len, graph.labels or [])) if max_label_length < label_length: label_length = max_label_length sep = if padding >= 1: sep = * padding m = max(map(len, map(str, graph.data))) x_sep = if show_x_axis and graph.labels: sep = * (label_length + 1 + 2 + m + 2) __plot(graph)
show_x_axis: Display X axis head: Show first [head:] elements tail: Show last [-tail:] elements padding: Padding size between columns (default 0) height: Override graph height label_length: Force X axis label string size, may truncate label show_min_max: Display Min and Max values on the left of the graph show_title: Display graph title (if any) show_data_range: Display X axis data range
381,977
def multi_future( children: Union[List[_Yieldable], Dict[Any, _Yieldable]], quiet_exceptions: "Union[Type[Exception], Tuple[Type[Exception], ...]]" = (), ) -> "Union[Future[List], Future[Dict]]": if isinstance(children, dict): keys = list(children.keys()) children_seq = children.values() else: keys = None children_seq = children children_futs = list(map(convert_yielded, children_seq)) assert all(is_future(i) or isinstance(i, _NullFuture) for i in children_futs) unfinished_children = set(children_futs) future = _create_future() if not children_futs: future_set_result_unless_cancelled(future, {} if keys is not None else []) def callback(fut: Future) -> None: unfinished_children.remove(fut) if not unfinished_children: result_list = [] for f in children_futs: try: result_list.append(f.result()) except Exception as e: if future.done(): if not isinstance(e, quiet_exceptions): app_log.error( "Multiple exceptions in yield list", exc_info=True ) else: future_set_exc_info(future, sys.exc_info()) if not future.done(): if keys is not None: future_set_result_unless_cancelled( future, dict(zip(keys, result_list)) ) else: future_set_result_unless_cancelled(future, result_list) listening = set() for f in children_futs: if f not in listening: listening.add(f) future_add_done_callback(f, callback) return future
Wait for multiple asynchronous futures in parallel. Since Tornado 6.0, this function is exactly the same as `multi`. .. versionadded:: 4.0 .. versionchanged:: 4.2 If multiple ``Futures`` fail, any exceptions after the first (which is raised) will be logged. Added the ``quiet_exceptions`` argument to suppress this logging for selected exception types. .. deprecated:: 4.3 Use `multi` instead.
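A minimal usage sketch with Tornado (fetch_a and fetch_b are placeholder coroutines): a list of awaitables yields a list of results in order, a dict yields a dict keyed like the input.

from tornado import gen, ioloop

async def fetch_a():
    return "a"

async def fetch_b():
    return "b"

async def main():
    print(await gen.multi_future([fetch_a(), fetch_b()]))            # ['a', 'b']
    print(await gen.multi_future({"a": fetch_a(), "b": fetch_b()}))  # {'a': 'a', 'b': 'b'}

ioloop.IOLoop.current().run_sync(main)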
381,978
def _ComputeHash( key, seed = 0x0 ): def fmix( h ): h ^= h >> 16 h = ( h * 0x85ebca6b ) & 0xFFFFFFFF h ^= h >> 13 h = ( h * 0xc2b2ae35 ) & 0xFFFFFFFF h ^= h >> 16 return h length = len( key ) nblocks = int( length / 4 ) h1 = seed c1 = 0xcc9e2d51 c2 = 0x1b873593 for block_start in xrange( 0, nblocks * 4, 4 ): k1 = key[ block_start + 3 ] << 24 | \ key[ block_start + 2 ] << 16 | \ key[ block_start + 1 ] << 8 | \ key[ block_start + 0 ] k1 = c1 * k1 & 0xFFFFFFFF k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF k1 = ( c2 * k1 ) & 0xFFFFFFFF h1 ^= k1 h1 = ( h1 << 13 | h1 >> 19 ) & 0xFFFFFFFF h1 = ( h1 * 5 + 0xe6546b64 ) & 0xFFFFFFFF tail_index = nblocks * 4 k1 = 0 tail_size = length & 3 if tail_size >= 3: k1 ^= key[ tail_index + 2 ] << 16 if tail_size >= 2: k1 ^= key[ tail_index + 1 ] << 8 if tail_size >= 1: k1 ^= key[ tail_index + 0 ] if tail_size != 0: k1 = ( k1 * c1 ) & 0xFFFFFFFF k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF k1 = ( k1 * c2 ) & 0xFFFFFFFF h1 ^= k1 return fmix( h1 ^ length )
Computes the 32-bit hash of the key passed using the MurmurHash3 (x86, 32-bit) algorithm with the given seed value.
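The routine expects an indexable sequence of byte values (for example a bytearray) and returns an unsigned 32-bit integer; note the body uses xrange, so it is Python 2 code as written (replace xrange with range under Python 3). A small usage sketch:

key = bytearray(b"hello world")
h = _ComputeHash(key)            # unsigned 32-bit MurmurHash3 (x86) digest
assert 0 <= h <= 0xFFFFFFFF
# A different seed yields an independent hash over the same key.
h_alt = _ComputeHash(key, seed=0x9747b28c)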
381,979
def interfaces():
    results = []
    if not sys.platform.startswith("win"):
        net_if_addrs = psutil.net_if_addrs()
        for interface in sorted(net_if_addrs.keys()):
            ip_address = ""
            mac_address = ""
            netmask = ""
            interface_type = "ethernet"
            for addr in net_if_addrs[interface]:
                if addr.family == socket.AF_INET:
                    ip_address = addr.address
                    netmask = addr.netmask
                if addr.family == psutil.AF_LINK:
                    mac_address = addr.address
            if interface.startswith("tap"):
                interface_type = "tap"
            results.append({"id": interface, "name": interface, "ip_address": ip_address, "netmask": netmask, "mac_address": mac_address, "type": interface_type})
    else:
        try:
            service_installed = True
            if not _check_windows_service("npf") and not _check_windows_service("npcap"):
                service_installed = False
            else:
                results = get_windows_interfaces()
        except ImportError:
            message = "pywin32 module is not installed, please install it on the server to get the available interface names"
            raise aiohttp.web.HTTPInternalServerError(text=message)
        except Exception as e:
            log.error("uncaught exception {type}".format(type=type(e)), exc_info=1)
            raise aiohttp.web.HTTPInternalServerError(text="uncaught exception: {}".format(e))
        if service_installed is False:
            raise aiohttp.web.HTTPInternalServerError(text="The Winpcap or Npcap is not installed or running")
    for result in results:
        result["special"] = False
        for special_interface in ("lo", "vmnet", "vboxnet", "docker", "lxcbr", "virbr", "ovs-system", "veth", "fw", "p2p", "bridge", "vmware", "virtualbox", "gns3"):
            if result["name"].lower().startswith(special_interface):
                result["special"] = True
        # Tuple (not a bare string), so endswith() tests the full "-nic" suffix
        # instead of iterating over single characters.
        for special_interface in ("-nic",):
            if result["name"].lower().endswith(special_interface):
                result["special"] = True
    return results
Gets the network interfaces on this server. :returns: list of network interfaces
381,980
def get(self, default=None): if not self.__cancelled and self.__state == Job.SUCCESS: return self.__result else: return default
Get the result of the Job, or return *default* if the job is not finished or errored. This function will never explicitly raise an exception. Note that the *default* value is also returned if the job was cancelled. # Arguments default (any): The value to return when the result can not be obtained.
381,981
def SecurityCheck(self, func, request, *args, **kwargs): try: auth_header = request.headers.get("Authorization", "") if not auth_header.startswith(self.BEARER_PREFIX): raise ValueError("JWT token is missing.") token = auth_header[len(self.BEARER_PREFIX):] auth_domain = config.CONFIG["AdminUI.firebase_auth_domain"] project_id = auth_domain.split(".")[0] idinfo = id_token.verify_firebase_token( token, request, audience=project_id) if idinfo["iss"] != self.SECURE_TOKEN_PREFIX + project_id: raise ValueError("Wrong issuer.") request.user = idinfo["email"] except ValueError as e: if request.path != "/": return self.AuthError("JWT token validation failed: %s" % e) return func(request, *args, **kwargs)
Check if access should be allowed for the request.
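A standalone sketch of the same verification using google-auth (project_id and token are placeholders; the original passes the incoming HTTP request object where this sketch builds a fresh transport Request):

from google.oauth2 import id_token
from google.auth.transport import requests as google_requests

def verify_firebase_jwt(token, project_id):
    # Checks signature, expiry and audience against the Firebase project.
    idinfo = id_token.verify_firebase_token(
        token, google_requests.Request(), audience=project_id)
    if idinfo["iss"] != "https://securetoken.google.com/" + project_id:
        raise ValueError("Wrong issuer.")
    return idinfo["email"]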
381,982
def unpack(data): length = struct.unpack(, data[0:HEADER_SIZE]) return length[0], data[HEADER_SIZE:]
return length, content
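The struct format string and HEADER_SIZE constant are elided in the snippet above; a self-contained sketch of the same length-prefixed framing, assuming a 4-byte big-endian unsigned header, looks like this:

import struct

HEADER_SIZE = 4  # assumption: 4-byte big-endian unsigned length prefix

def pack(payload):
    return struct.pack("!I", len(payload)) + payload

def unpack(data):
    (length,) = struct.unpack("!I", data[:HEADER_SIZE])
    return length, data[HEADER_SIZE:]

length, content = unpack(pack(b"hello"))
assert length == 5 and content == b"hello"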
381,983
def _get_address_override(endpoint_type=PUBLIC): override_key = ADDRESS_MAP[endpoint_type][] addr_override = config(override_key) if not addr_override: return None else: return addr_override.format(service_name=service_name())
Returns any address overrides that the user has defined based on the endpoint type. Note: this function allows for the service name to be inserted into the address if the user specifies {service_name}.somehost.org. :param endpoint_type: the type of endpoint to retrieve the override value for. :returns: any endpoint address or hostname that the user has overridden or None if an override is not present.
381,984
def get_siblings_treepos(self, treepos): parent_pos = self.get_parent_treepos(treepos) siblings_treepos = [] if parent_pos is not None: for child_treepos in self.get_children_treepos(parent_pos): if child_treepos != treepos: siblings_treepos.append(child_treepos) return siblings_treepos
Given a treeposition, return the treepositions of its siblings.
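A standalone illustration of the same idea with nltk treepositions (not the class above): a node's siblings are simply the parent's other child positions.

from nltk import Tree

t = Tree.fromstring("(S (NP I) (VP (V saw) (NP him)))")
target = (1, 0)                                    # the (V saw) node
parent = target[:-1]                               # its parent, the VP at (1,)
children = [parent + (i,) for i in range(len(t[parent]))]
siblings = [pos for pos in children if pos != target]
print(siblings)                                    # [(1, 1)] -> the (NP him) node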
381,985
def sents(self, fileids=None) -> Generator[str, str, None]: for para in self.paras(fileids): sentences = self._sent_tokenizer.tokenize(para) for sentence in sentences: yield sentence
:param fileids: fileids of the corpus documents to read; forwarded to paras() :return: A generator of sentences
381,986
def any_hook(*hook_patterns): current_hook = hookenv.hook_name() i_pat = re.compile(r) hook_patterns = _expand_replacements(i_pat, hookenv.role_and_interface_to_relations, hook_patterns) c_pat = re.compile(r) hook_patterns = _expand_replacements(c_pat, lambda v: v.split(), hook_patterns) return current_hook in hook_patterns
Assert that the currently executing hook matches one of the given patterns. Each pattern will match one or more hooks, and can use the following special syntax: * ``db-relation-{joined,changed}`` can be used to match multiple hooks (in this case, ``db-relation-joined`` and ``db-relation-changed``). * ``{provides:mysql}-relation-joined`` can be used to match a relation hook by the role and interface instead of the relation name. The role must be one of ``provides``, ``requires``, or ``peer``. * The previous two can be combined, of course: ``{provides:mysql}-relation-{joined,changed}``
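A usage sketch inside a charm hook script (configure_relation is a placeholder), reusing the pattern forms listed in the docstring:

# Only run the block when the currently executing hook matches one of the
# expanded patterns.
if any_hook("db-relation-{joined,changed}",
            "{provides:http}-relation-joined"):
    configure_relation()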
381,987
def enqueue(trg_queue, item_f, *args, **kwargs):
    return venqueue(trg_queue, item_f, args, **kwargs)
Enqueue the contents of a file, or file-like object, file-descriptor or the contents of a file at an address (e.g. '/my/file') into a queue with arbitrary arguments; enqueue is to venqueue what printf is to vprintf
381,988
def load_data(self, path): path = pathlib.Path(path).resolve() meta = {} with path.open() as fd: while True: line = fd.readline().strip() if line.startswith(" line = line.strip(" var, val = line.split(":") if val.strip().replace(".", "").isdigit(): val = float(val) else: val = val.strip().lower() meta[var.strip()] = val elif line and not line.startswith(" break assert meta["column 1"] in dfn.scalar_feature_names assert meta["column 2"] in dfn.scalar_feature_names assert meta["column 3"] == "emodulus" assert meta["method"] in VALID_METHODS with path.open("rb") as isfd: isodata = np.loadtxt(isfd) emoduli = np.unique(isodata[:, 2]) isoel = [] for emod in emoduli: where = isodata[:, 2] == emod isoel.append(isodata[where]) self.add(isoel=isoel, col1=meta["column 1"], col2=meta["column 2"], channel_width=meta["channel width [um]"], flow_rate=meta["flow rate [ul/s]"], viscosity=meta["viscosity [mPa*s]"], method=meta["method"])
Load isoelastics from a text file The text file is loaded with `numpy.loadtxt` and must have three columns, representing the two data columns and the elastic modulus with units defined in `definitions.py`. The file header must have a section defining meta data of the content like so: # [...] # # - column 1: area_um # - column 2: deform # - column 3: emodulus # - channel width [um]: 20 # - flow rate [ul/s]: 0.04 # - viscosity [mPa*s]: 15 # - method: analytical # # [...] Parameters ---------- path: str Path to a isoelastics text file
381,989
def filter_seq(seq): if seq.res: return None n = nt.Factors(seq.factors) guide, s, t = aq.canonical_form(n) seq.guide = guide cls = aq.get_class(guide=guide) num_larges = seq.factors.count() upper_bound_tau = cls - num_larges - len(t) if cls < 2 or upper_bound_tau < 2: return None v = nt.Factors({p: a for p, a in guide.items() if p != 2 and a > 0}) if int(v) == 1 and cls > 3: return None if not aq.is_driver(guide=guide): return None return n, guide
Examines unreserved sequences to see if they are prone to mutation. This currently ignores solely-power-of-2 guides with b > 3
381,990
def dbg_repr(self, max_display=10): s = repr(self) + "\n" if len(self.chosen_statements) > max_display: s += "%d SimRuns in program slice, displaying %d.\n" % (len(self.chosen_statements), max_display) else: s += "%d SimRuns in program slice.\n" % len(self.chosen_statements) if max_display is None: run_addrs = sorted(self.chosen_statements.keys()) else: run_addrs = sorted(self.chosen_statements.keys())[ : max_display] for run_addr in run_addrs: s += self.dbg_repr_run(run_addr) + "\n" return s
Debugging output of this slice. :param max_display: The maximum number of SimRun slices to show. :return: A string representation.
381,991
def search_shell(self): with self._lock: if self._shell is not None: return reference = self._context.get_service_reference(SERVICE_SHELL) if reference is not None: self.set_shell(reference)
Looks for a shell service
381,992
def origin(self): if not self.is_absolute(): raise ValueError("URL should be absolute") if not self._val.scheme: raise ValueError("URL should have scheme") v = self._val netloc = self._make_netloc(None, None, v.hostname, v.port, encode=False) val = v._replace(netloc=netloc, path="", query="", fragment="") return URL(val, encoded=True)
Return a URL with scheme, host and port parts only. User, password, path, query and fragment are removed.
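This appears to be yarl's URL.origin(); a quick usage sketch:

from yarl import URL

url = URL("http://user:pass@example.com:8080/path?q=1#frag")
print(url.origin())        # http://example.com:8080
# Relative URLs have no origin:
# URL("/relative/path").origin() raises ValueError("URL should be absolute")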
381,993
def mpraw_as_np(shape, dtype): sz = int(np.product(shape)) csz = sz * np.dtype(dtype).itemsize raw = mp.RawArray(, csz) return np.frombuffer(raw, dtype=dtype, count=sz).reshape(shape)
Construct a numpy array of the specified shape and dtype for which the underlying storage is a multiprocessing RawArray in shared memory. Parameters ---------- shape : tuple Shape of numpy array dtype : data-type Data type of array Returns ------- arr : ndarray Numpy array
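The RawArray type code is elided above; a self-contained sketch of the same pattern, assuming a raw byte buffer sized to hold the array, shows that the numpy view and the shared buffer alias the same memory:

import multiprocessing as mp
import numpy as np

shape, dtype = (2, 3), np.float64
count = int(np.prod(shape))
raw = mp.RawArray("c", count * np.dtype(dtype).itemsize)   # assumed type code: raw bytes
arr = np.frombuffer(raw, dtype=dtype, count=count).reshape(shape)
arr[:] = 1.0   # writes land in the shared RawArray, so workers handed `raw` see them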
381,994
def tablib_export_action(modeladmin, request, queryset, file_type="xls"): dataset = SimpleDataset(queryset, headers=None) filename = .format( smart_str(modeladmin.model._meta.verbose_name_plural), file_type) response_kwargs = { : get_content_type(file_type) } response = HttpResponse(getattr(dataset, file_type), **response_kwargs) response[] = .format( filename) return response
Allow the user to download the current filtered list of items :param file_type: One of the formats supported by tablib (e.g. "xls", "csv", "html", etc.)
381,995
def execute_prepared(self, prepared_statement, multi_row_parameters): self._check_closed() parameters = prepared_statement.prepare_parameters(multi_row_parameters) while parameters: request = RequestMessage.new( self.connection, RequestSegment( message_types.EXECUTE, (StatementId(prepared_statement.statement_id), Parameters(parameters)) ) ) reply = self.connection.send_request(request) parts = reply.segments[0].parts function_code = reply.segments[0].function_code if function_code == function_codes.SELECT: self._handle_select(parts, prepared_statement.result_metadata_part) elif function_code in function_codes.DML: self._handle_upsert(parts, request.segments[0].parts[1].unwritten_lobs) elif function_code == function_codes.DDL: pass elif function_code in (function_codes.DBPROCEDURECALL, function_codes.DBPROCEDURECALLWITHRESULT): self._handle_dbproc_call(parts, prepared_statement._params_metadata) else: raise InterfaceError("Invalid or unsupported function code received: %d" % function_code)
:param prepared_statement: A PreparedStatement instance :param multi_row_parameters: A list/tuple containing list/tuples of parameters (for multiple rows)
381,996
def reset_network(message): for command in settings.RESTART_NETWORK: try: subprocess.check_call(command) except: pass print(message)
Resets the user's network so that the changes take effect
381,997
def conn_has_method(conn, method_name): if method_name in dir(conn): return True log.error(%s\, method_name) return False
Find if the provided connection object has a specific method
381,998
def with_trailing_args(self, *arguments): new_command = copy.deepcopy(self) new_command._trailing_args = [str(arg) for arg in arguments] return new_command
Return new Command object that will be run with specified trailing arguments.
381,999
def _link_package_versions(self, link, search): platform = get_platform() version = None if link.egg_fragment: egg_info = link.egg_fragment ext = link.ext else: egg_info, ext = link.splitext() if not ext: self._log_skipped_link(link, ) return if ext not in SUPPORTED_EXTENSIONS: self._log_skipped_link( link, % ext) return if "binary" not in search.formats and ext == wheel_ext: self._log_skipped_link( link, % search.supplied) return if "macosx10" in link.path and ext == : self._log_skipped_link(link, ) return if ext == wheel_ext: try: wheel = Wheel(link.filename) except InvalidWheelFilename: self._log_skipped_link(link, ) return if (pkg_resources.safe_name(wheel.name).lower() != search.canonical): self._log_skipped_link( link, % search.supplied) return if not wheel.supported(): self._log_skipped_link( link, ) return comes_from = getattr(link, "comes_from", None) if ( ( not platform.startswith() and not platform.startswith() and not platform == ) and comes_from is not None and urllib_parse.urlparse( comes_from.url ).netloc.endswith(PyPI.netloc)): if not wheel.supported(tags=supported_tags_noarch): self._log_skipped_link( link, "it is a pypi-hosted binary " "Wheel on an unsupported platform", ) return version = wheel.version if "source" not in search.formats and ext != wheel_ext: self._log_skipped_link( link, % search.supplied) return if not version: version = egg_info_matches(egg_info, search.supplied, link) if version is None: self._log_skipped_link( link, % search.supplied) return if (link.internal is not None and not link.internal and not normalize_name(search.supplied).lower() in self.allow_external and not self.allow_all_external): self._log_skipped_link(link, ) self.need_warn_external = True return if (link.verifiable is not None and not link.verifiable and not (normalize_name(search.supplied).lower() in self.allow_unverified)): self._log_skipped_link( link, ) self.need_warn_unverified = True return match = self._py_version_re.search(version) if match: version = version[:match.start()] py_version = match.group(1) if py_version != sys.version[:3]: self._log_skipped_link( link, ) return logger.debug(, link, version) return InstallationCandidate(search.supplied, version, link)
Return an InstallationCandidate or None