Sample rows from a code/docstring dataset (389k rows). Columns: code (string, 26 to 79.6k characters) and docstring (string, 1 to 46.9k characters); the leading unnamed int64 column is the row index.
376,900
def is_same_address(left: AnyAddress, right: AnyAddress) -> bool: if not is_address(left) or not is_address(right): raise ValueError("Both values must be valid addresses") else: return to_normalized_address(left) == to_normalized_address(right)
Checks whether both addresses are the same.
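A minimal usage sketch of the comparison above; the checksummed and lowercased spellings are one address, so the call returns True, while a non-address argument raises ValueError:

assert is_same_address(
    "0xd3CdA913deB6f67967B99D67aCDFa1712C293601",
    "0xd3cda913deb6f67967b99d67acdfa1712c293601",
)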
376,901
def customize_ruleset(self, custom_ruleset_file=None): custom_file = custom_ruleset_file or os.environ.get( "BOKCHOY_A11Y_CUSTOM_RULES_FILE" ) if not custom_file: return with open(custom_file, "r") as additional_rules: custom_rules = additional_rules.read() if "var customRules" not in custom_rules: raise A11yAuditConfigError( "Custom rules file must include \"var customRules\"" ) self.custom_rules = custom_rules
Updates the ruleset to include a set of custom rules. These rules will be _added_ to the existing ruleset or replace the existing rule with the same ID. Args: custom_ruleset_file (optional): The filepath to the custom rules. Defaults to `None`. If `custom_ruleset_file` isn't passed, the environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE` will be checked. If a filepath isn't specified by either of these methods, the ruleset will not be updated. Raises: `IOError` if the specified file does not exist. Examples: To include the rules defined in `axe-core-custom-rules.js`:: page.a11y_audit.config.customize_ruleset( "axe-core-custom-rules.js" ) Alternatively, use the environment variable `BOKCHOY_A11Y_CUSTOM_RULES_FILE` to specify the path to the file containing the custom rules. Documentation for how to write rules: https://github.com/dequelabs/axe-core/blob/master/doc/developer-guide.md An example of a custom rules file can be found at https://github.com/edx/bok-choy/tree/master/tests/a11y_custom_rules.js
376,902
def drag(duration: int, amp: complex, sigma: float, beta: float, name: str = None) -> SamplePulse: center = duration/2 zeroed_width = duration + 2 return _sampled_drag_pulse(duration, amp, center, sigma, beta, zeroed_width=zeroed_width, rescale_amp=True, name=name)
r"""Generates Y-only correction DRAG `SamplePulse` for standard nonlinear oscillator (SNO) [1]. Centered at `duration/2` and zeroed at `t=-1` to prevent large initial discontinuity. Applies `left` sampling strategy to generate discrete pulse from continuous function. [1] Gambetta, J. M., Motzoi, F., Merkel, S. T. & Wilhelm, F. K. Analytic control methods for high-fidelity unitary operations in a weakly nonlinear oscillator. Phys. Rev. A 83, 012308 (2011). Args: duration: Duration of pulse. Must be greater than zero. amp: Pulse amplitude at `center`. sigma: Width (standard deviation) of pulse. beta: Y correction amplitude. For the SNO this is $\beta=-\frac{\lambda_1^2}{4\Delta_2}$. Where $\lambds_1$ is the relative coupling strength between the first excited and second excited states and $\Delta_2$ is the detuning between the resepective excited states. name: Name of pulse.
376,903
def hash(self): renderer_str = "{}|{}|{}|{}".format( self.renderer.__class__.__name__, self.renderer.colormap, self.renderer.fill_value, self.renderer.background_color ) if isinstance(self.renderer, StretchedRenderer): renderer_str = "{}|{}|{}".format(renderer_str, self.renderer.method, self.renderer.colorspace) elif isinstance(self.renderer, UniqueValuesRenderer): renderer_str = "{}|{}".format(renderer_str, self.renderer.labels) return hash("{}/{}/{}".format(self.variable.pk, renderer_str, self.time_index))
Returns a hash of this render configuration from the variable, renderer, and time_index parameters. Used for caching the full-extent, native projection render so that subsequent requests can be served by a warp operation only.
376,904
def _sphinx_build(self, kind): if kind not in ('html', 'latex'): raise ValueError('kind must be html or latex, not {}'.format(kind)) cmd = ['sphinx-build', '-b', kind] if self.num_jobs: cmd += ['-j', str(self.num_jobs)] if self.warnings_are_errors: cmd += ['-W', '--keep-going'] if self.verbosity: cmd.append('-{}'.format('v' * self.verbosity)) cmd += ['-d', os.path.join(BUILD_PATH, 'doctrees'), SOURCE_PATH, os.path.join(BUILD_PATH, kind)] return subprocess.call(cmd)
Call sphinx to build documentation. Attribute `num_jobs` from the class is used. Parameters ---------- kind : {'html', 'latex'} Examples -------- >>> DocBuilder(num_jobs=4)._sphinx_build('html')
376,905
def register_metric_descriptor(self, oc_md): metric_type = self.get_metric_type(oc_md) with self._md_lock: if metric_type in self._md_cache: return self._md_cache[metric_type] descriptor = self.get_metric_descriptor(oc_md) project_name = self.client.project_path(self.options.project_id) sd_md = self.client.create_metric_descriptor(project_name, descriptor) with self._md_lock: self._md_cache[metric_type] = sd_md return sd_md
Register a metric descriptor with stackdriver.
376,906
def on_clipboard_mode_change(self, clipboard_mode): if not isinstance(clipboard_mode, ClipboardMode): raise TypeError("clipboard_mode can only be an instance of type ClipboardMode") self._call("onClipboardModeChange", in_p=[clipboard_mode])
Notification when the shared clipboard mode changes. in clipboard_mode of type :class:`ClipboardMode` The new shared clipboard mode.
376,907
def migrate(src_path, src_passphrase, src_backend, dst_path, dst_passphrase, dst_backend): src_storage = STORAGE_MAPPING[src_backend](**_parse_path_string(src_path)) dst_storage = STORAGE_MAPPING[dst_backend](**_parse_path_string(dst_path)) src_stash = Stash(src_storage, src_passphrase) dst_stash = Stash(dst_storage, dst_passphrase) keys = src_stash.export() dst_stash.load(src_passphrase, keys=keys)
Migrate all keys in a source stash to a destination stash. The migration process will decrypt all keys using the source stash's passphrase and then encrypt them based on the destination stash's passphrase. Re-encryption will take place only if the passphrases differ.
376,908
def gcd(*numbers): n = numbers[0] for i in numbers: n = pygcd(n, i) return n
Returns the greatest common divisor for a sequence of numbers. Args: \*numbers: Sequence of numbers. Returns: (int) Greatest common divisor of numbers.
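A worked example of the pairwise fold: n starts at numbers[0], then gcd(12, 12) = 12, gcd(12, 18) = 6, gcd(6, 24) = 6.

assert gcd(12, 18, 24) == 6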
376,909
def line_is_interesting(self, line): if line.startswith(): return None if line.startswith(): return None if line.startswith(): return None if in line: return False if line == : return None if self._last_line_was_printable else False return True
Return True, False, or None. True means always output, False means never output, None means output only if there are interesting lines.
376,910
def _BuildOobLink(self, param, mode): code = self.rpc_helper.GetOobCode(param) if code: parsed = list(parse.urlparse(self.widget_url)) query = dict(parse.parse_qsl(parsed[4])) query.update({'mode': mode, 'oobCode': code}) try: parsed[4] = parse.urlencode(query) except AttributeError: parsed[4] = urllib.urlencode(query) return code, parse.urlunparse(parsed) raise errors.GitkitClientError()
Builds out-of-band URL. Gitkit API GetOobCode() is called and the returned code is combined with the Gitkit widget URL to build the out-of-band URL. Args: param: dict of request. mode: string, Gitkit widget mode to handle the oob action after user clicks the oob url in the email. Raises: GitkitClientError: if oob code is not returned. Returns: A string of oob url.
376,911
def SplitPatch(data): patches = [] filename = None diff = [] for line in data.splitlines(True): new_filename = None if line.startswith('Index:'): unused, new_filename = line.split(':', 1) new_filename = new_filename.strip() elif line.startswith('Property changes on:'): unused, temp_filename = line.split(':', 1) temp_filename = to_slash(temp_filename.strip()) if temp_filename != filename: new_filename = temp_filename if new_filename: if filename and diff: patches.append((filename, ''.join(diff))) filename = new_filename diff = [line] continue if diff is not None: diff.append(line) if filename and diff: patches.append((filename, ''.join(diff))) return patches
Splits a patch into separate pieces for each file. Args: data: A string containing the output of svn diff. Returns: A list of 2-tuple (filename, text) where text is the svn diff output pertaining to filename.
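A sketch of the expected input/output shape, assuming svn-style "Index:" file headers:

data = (
    "Index: foo.py\n"
    "--- foo.py\n"
    "+++ foo.py\n"
    "+print('hi')\n"
)
# yields a single (filename, text) pair: [('foo.py', <the four lines above>)]
patches = SplitPatch(data)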
376,912
def cfht_megacam_tap_query(ra_deg=180.0, dec_deg=0.0, width=1, height=1, date=None): radius = min(90, max(width, height) / 2.0) query = ("SELECT " "COORD1(CENTROID(Plane.position_bounds)) AS RAJ2000," "COORD2(CENTROID(Plane.position_bounds)) AS DEJ2000," "target_name " "FROM " "caom2.Observation as o " "JOIN caom2.Plane as Plane on o.obsID=Plane.obsID " "WHERE o.collection = 'CFHT' " "AND o.instrument_name = 'MegaPrime' " "AND INTERSECTS( CIRCLE('ICRS', %f, %f, %f), Plane.position_bounds ) = 1") query = query % (ra_deg, dec_deg, radius) if date is not None: mjd = Time(date, scale='utc').mjd query += " AND Plane.time_bounds_lower <= {} AND {} <= Plane.time_bounds_upper ".format(mjd+0.5, mjd-0.5) data = {"QUERY": query, "REQUEST": "doQuery", "LANG": "ADQL", "FORMAT": "votable"} url = "http://www.cadc.hia.nrc.gc.ca/tap/sync" warnings.simplefilter('ignore') ff = StringIO(requests.get(url, params=data).content) ff.seek(0) table = votable.parse(ff).get_first_table().to_table() assert isinstance(table, Table) return table
Do a query of the CADC Megacam table. Get all observations inside the box (right now it turns width/height into a radius, should not do this). @rtype : Table @param ra_deg: center of search region, in degrees @param dec_deg: center of search region in degrees @param width: width of search region in degrees @param height: height of search region in degrees @param date: ISO format date string. Query will be +/- 0.5 days from date given.
376,913
def getmoduleinfo(path): filename = os.path.basename(path) suffixes = map(lambda (suffix, mode, mtype): (-len(suffix), suffix, mode, mtype), imp.get_suffixes()) suffixes.sort() for neglen, suffix, mode, mtype in suffixes: if filename[neglen:] == suffix: return filename[:neglen], suffix, mode, mtype
Get the module name, suffix, mode, and module type for a given file.
376,914
def _compute(self): newstate = self._implicit_solver() adjustment = {} tendencies = {} for name, var in self.state.items(): adjustment[name] = newstate[name] - var tendencies[name] = adjustment[name] / self.timestep self.adjustment = adjustment self._update_diagnostics(newstate) return tendencies
Computes the state variable tendencies in time for implicit processes. To calculate the new state the :func:`_implicit_solver()` method is called for daughter classes. This however returns the new state of the variables, not just the tendencies. Therefore, the adjustment is calculated which is the difference between the new and the old state and stored in the object's attribute adjustment. Calculating the new model states through solving the matrix problem already includes the multiplication with the timestep. The derived adjustment is divided by the timestep to calculate the implicit subprocess tendencies, which can be handled by the :func:`~climlab.process.time_dependent_process.TimeDependentProcess.compute` method of the parent :class:`~climlab.process.time_dependent_process.TimeDependentProcess` class. :ivar dict adjustment: holding all state variables' adjustments of the implicit process which are the differences between the new states (which have been solved through matrix inversion) and the old states.
376,915
def from_argparse(cls, args): settings = vars(args) settings[] = settings.pop() settings[] = settings.pop() return cls(**settings)
Generate the Settings from parsed arguments.
376,916
def initialize_simulation(components: List, input_config: Mapping=None, plugin_config: Mapping=None) -> InteractiveContext: config = build_simulation_configuration() config.update(input_config) plugin_manager = PluginManager(plugin_config) return InteractiveContext(config, components, plugin_manager)
Construct a simulation from a list of components, component configuration, and a plugin configuration. The simulation context returned by this method still needs to be setup by calling its setup method. It is mostly useful for testing and debugging. Parameters ---------- components A list of initialized simulation components. Corresponds to the components block of a model specification. input_config A nested dictionary with any additional simulation configuration information needed. Corresponds to the configuration block of a model specification. plugin_config A dictionary containing a description of any simulation plugins to include in the simulation. If you're using this argument, you're either deep in the process of simulation development or the maintainers have done something wrong. Corresponds to the plugins block of a model specification. Returns ------- An initialized (but not set up) simulation context.
376,917
def ref_frequency(self, context): num_chans = self._manager.spectral_window_table.getcol(MS.NUM_CHAN) ref_freqs = self._manager.spectral_window_table.getcol(MS.REF_FREQUENCY) data = np.hstack((np.repeat(rf, bs) for bs, rf in zip(num_chans, ref_freqs))) return data.reshape(context.shape).astype(context.dtype)
Reference frequency data source
376,918
def get(self, block_alias, context): contents = [] dynamic_block_contents = self.get_contents_dynamic(block_alias, context) if dynamic_block_contents: contents.append(dynamic_block_contents) static_block_contents = self.get_contents_static(block_alias, context) if static_block_contents: contents.append(static_block_contents) if not contents: return return choice(contents)
Main method returning block contents (static or dynamic).
376,919
def get_solution(self, parameters=None): if not self.has_solution(): raise IllegalState() return DisplayText(self.my_osid_object._my_map[])
stub
376,920
def select(self): s = self.browser.selected_indexes(self.browser.get_depth()-1) if not s: return i = s[0].internalPointer() if i: tfi = i.internal_data() self.selected = tfi self.accept()
Store the selected taskfileinfo in self.selected and accept the dialog :returns: None :rtype: None :raises: None
376,921
def circle(rad=0.5): _ctx = _state["ctx"] _ctx.arc(0, 0, rad, 0, 2 * math.pi) _ctx.set_line_width(0) _ctx.stroke_preserve() _ctx.fill()
Draw a circle
376,922
def pivot(self, index, column, value): assert_is_type(index, str) assert_is_type(column, str) assert_is_type(value, str) col_names = self.names if index not in col_names: raise H2OValueError("Index not in H2OFrame") if column not in col_names: raise H2OValueError("Column not in H2OFrame") if value not in col_names: raise H2OValueError("Value column not in H2OFrame") if self.type(column) not in ["enum","time","int"]: raise H2OValueError("'column' argument is not type enum, time or int") if self.type(index) not in ["enum","time","int"]: raise H2OValueError("'index' argument is not type enum, time or int") return H2OFrame._expr(expr=ExprNode("pivot",self,index,column,value))
Pivot the frame designated by the three columns: index, column, and value. Index and column should be of type enum, int, or time. For cases of multiple indexes for a column label, the aggregation method is to pick the first occurrence in the data frame :param index: Index is a column that will be the row label :param column: The labels for the columns in the pivoted Frame :param value: The column of values for the given index and column label :returns: an H2OFrame with the pivoted result
376,923
def plot_fit(self, **kwargs): import matplotlib.pyplot as plt import seaborn as sns figsize = kwargs.get('figsize',(10,7)) if self.latent_variables.estimated is False: raise Exception("No latent variables estimated!") else: plt.figure(figsize=figsize) date_index = self.index[max(self.p, self.q):] t_params = self.transform_z() sigma2, Y, ___ = self._model(self.latent_variables.get_z_values()) plt.plot(date_index, np.abs(Y-t_params[-1]), label=self.data_name + ' Absolute Demeaned Values') plt.plot(date_index, np.power(sigma2,0.5), label='GARCH(' + str(self.p) + ',' + str(self.q) + ') Conditional Volatility', c='black') plt.title(self.data_name + " Volatility Plot") plt.legend(loc=2) plt.show()
Plots the fit of the model Returns ---------- None (plots data and the fit)
376,924
def _update_estimate_and_sampler(self, ell, ell_hat, weight, extra_info, **kwargs): stratum_idx = extra_info['stratum_idx'] self._BB_TP.update(ell*ell_hat, stratum_idx) self._BB_PP.update(ell_hat, stratum_idx) self._BB_P.update(ell, stratum_idx) self._update_cov_model(strata_to_update = [stratum_idx]) self._update_estimates()
Update the BB models and the estimates
376,925
def _overlapping(files): segments = set() for path in files: seg = file_segment(path) for s in segments: if seg.intersects(s): return True segments.add(seg) return False
Quick method to see if a file list contains overlapping files
376,926
def env_set(context): env_set = context['env'].get('set', None) exists = False if env_set: logger.debug("started") for k, v in env_set.items(): logger.debug(f"setting ${k} to context[{v}]") os.environ[k] = context.get_formatted_string(v) logger.info(f"set {len(env_set)} $ENVs from context.") exists = True logger.debug("done") return exists
Set $ENVs to specified strings from the pypyr context. Args: context: is dictionary-like. context is mandatory. context['env']['set'] must exist. It's a dictionary. Values are strings to write to $ENV. Keys are the names of the $ENV values to which to write. For example, say input context is: key1: value1 key2: value2 key3: value3 env: set: MYVAR1: {key1} MYVAR2: before_{key3}_after MYVAR3: arbtexthere This will result in the following $ENVs: $MYVAR1 = value1 $MYVAR2 = before_value3_after $MYVAR3 = arbtexthere Note that the $ENVs are not persisted system-wide, they only exist for pypyr sub-processes, and as such for the following steps during this pypyr pipeline execution. If you set an $ENV here, don't expect to see it in your system environment variables after the pipeline finishes running.
376,927
def render_honeypot_field(field_name=None): if not field_name: field_name = settings.HONEYPOT_FIELD_NAME value = getattr(settings, 'HONEYPOT_VALUE', '') if callable(value): value = value() return {'fieldname': field_name, 'value': value}
Renders honeypot field named field_name (defaults to HONEYPOT_FIELD_NAME).
376,928
def validate(self, value): if not self._compiled_regex.match(value): raise ValidationError('value {} does not match regex {}'.format(value, self._regex))
Validate string by regex :param value: str :return:
376,929
def meta_features_path(self, path): return os.path.join( path, app.config[], str(self.id) ) +
Returns path for meta-features Args: path (str): Absolute/local path of xcessiv folder
376,930
def _parse_CHANLIMIT(value): pairs = map(string_int_pair, value.split(',')) return dict( (target, number) for target_keys, number in pairs for target in target_keys )
>>> res = FeatureSet._parse_CHANLIMIT('ibe:250,xyz:100') >>> len(res) 6 >>> res['x'] 100 >>> res['i'] == res['b'] == res['e'] == 250 True
376,931
def WriteSignedBinary(binary_urn, binary_content, private_key, public_key, chunk_size = 1024, token = None): if _ShouldUseLegacyDatastore(): collects.GRRSignedBlob.NewFromContent( binary_content, binary_urn, chunk_size=chunk_size, token=token, private_key=private_key, public_key=public_key) if data_store.RelationalDBEnabled(): blob_references = rdf_objects.BlobReferences() for chunk_offset in range(0, len(binary_content), chunk_size): chunk = binary_content[chunk_offset:chunk_offset + chunk_size] blob_rdf = rdf_crypto.SignedBlob() blob_rdf.Sign(chunk, private_key, verify_key=public_key) blob_id = data_store.BLOBS.WriteBlobWithUnknownHash( blob_rdf.SerializeToString()) blob_references.items.Append( rdf_objects.BlobReference( offset=chunk_offset, size=len(chunk), blob_id=blob_id)) data_store.REL_DB.WriteSignedBinaryReferences( _SignedBinaryIDFromURN(binary_urn), blob_references)
Signs a binary and saves it to the datastore. If a signed binary with the given URN already exists, its contents will get overwritten. Args: binary_urn: URN that should serve as a unique identifier for the binary. binary_content: Contents of the binary, as raw bytes. private_key: Key that should be used for signing the binary contents. public_key: Key that should be used to verify the signature generated using the private key. chunk_size: Size, in bytes, of the individual blobs that the binary contents will be split to before saving to the datastore. token: ACL token to use with the legacy (non-relational) datastore.
376,932
def from_str(cls, emotestr): emoteid, occstr = emotestr.split(':') occurences = [] for occ in occstr.split(','): start, end = occ.split('-') occurences.append((int(start), int(end))) return cls(int(emoteid), occurences)
Create an emote from the emote tag key :param emotestr: the tag key, e.g. ``'123:0-4'`` :type emotestr: :class:`str` :returns: an emote :rtype: :class:`Emote` :raises: None
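Usage under the tag format shown in the docstring; multiple occurrence ranges are comma-separated:

emote = Emote.from_str('25:0-4,12-16')
# constructed as cls(25, [(0, 4), (12, 16)])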
376,933
def maybe_center_plot(result): begin = re.search(, result) if begin: result = ( + result[begin.end():] + ) return result
Embeds a possible tikz image inside a center environment. Searches for matplotlib2tikz's last comment line to detect tikz images. Args: result: The code execution result Returns: The input result if no tikzpicture was found, otherwise a centered version.
376,934
def reverse( self, query, radius=None, exactly_one=True, maxresults=None, pageinformation=None, language=None, mode='retrieveAddresses', timeout=DEFAULT_SENTINEL ): point = self._coerce_point_to_string(query) params = { 'app_id': self.app_id, 'app_code': self.app_code, 'mode': mode, 'prox': point, } if radius is not None: params['prox'] = '%s,%s' % (params['prox'], float(radius)) if pageinformation: params['pageinformation'] = pageinformation if maxresults: params['maxresults'] = maxresults if exactly_one: params['maxresults'] = 1 if language: params['language'] = language url = "%s?%s" % (self.reverse_api, urlencode(params)) logger.debug("%s.reverse: %s", self.__class__.__name__, url) return self._parse_json( self._call_geocoder(url, timeout=timeout), exactly_one )
Return an address by location point. This implementation supports only a subset of all available parameters. A list of all parameters of the pure REST API is available here: https://developer.here.com/documentation/geocoder/topics/resource-reverse-geocode.html :param query: The coordinates for which you wish to obtain the closest human-readable addresses. :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``"%(latitude)s, %(longitude)s"``. :param float radius: Proximity radius in meters. :param bool exactly_one: Return one result or a list of results, if available. :param int maxresults: Defines the maximum number of items in the response structure. If not provided and there are multiple results the HERE API will return 10 results by default. This will be reset to one if ``exactly_one`` is True. :param int pageinformation: A key which identifies the page to be returned when the response is separated into multiple pages. Only useful when ``maxresults`` is also provided. :param str language: Affects the language of the response, must be a RFC 4647 language code, e.g. 'en-US'. :param str mode: Affects the type of returned response items, must be one of: 'retrieveAddresses' (default), 'retrieveAreas', 'retrieveLandmarks', 'retrieveAll', or 'trackPosition'. See online documentation for more information. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if ``exactly_one=False``.
376,935
def load(self, text, fieldnames=None): lines = text.split('\n') fieldnames = load_line(lines[0]) values = load_line(lines[1]) self.__dict__ = dict(zip(fieldnames, values))
Item from TSV representation.
376,936
def _redirect_with_params(url_name, *args, **kwargs): url = urlresolvers.reverse(url_name, args=args) params = parse.urlencode(kwargs, True) return "{0}?{1}".format(url, params)
Helper method to create a redirect response with URL params. This builds a redirect string that converts kwargs into a query string. Args: url_name: The name of the url to redirect to. kwargs: the query string param and their values to build. Returns: A properly formatted redirect string.
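A sketch with a hypothetical URL pattern named 'login':

# produces something like '/login?next=%2Fdashboard'
url = _redirect_with_params('login', next='/dashboard')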
376,937
def json_to_pages(json, user, preferred_lang=None): from .models import Page if not preferred_lang: preferred_lang = settings.PAGE_DEFAULT_LANGUAGE d = simplejson.loads(json) try: errors = validate_pages_json_data(d, preferred_lang) except KeyError as e: errors = [_() % (e.args[0],)] pages_created = [] if not errors: for p in d[]: pages_created.append( Page.objects.create_and_update_from_json_data(p, user)) for p, results in zip(d[], pages_created): page, created, messages = results rtcs = p[] if rtcs: messages.extend(page.update_redirect_to_from_json(rtcs)) return errors, pages_created
Attempt to create/update pages from the JSON string json. user is the user that will be used when creating a page if a page's original author can't be found. preferred_lang is the language code of the slugs to include in error messages (defaults to settings.PAGE_DEFAULT_LANGUAGE). Returns (errors, pages_created) where errors is a list of strings and pages_created is a list of: (page object, created bool, messages list of strings) tuples. If any errors are detected, the error list will contain information for the user and no pages will be created/updated.
376,938
def parse_arg(f, kwd, offset=0): vnames = describe(f) return tuple([kwd[k] for k in vnames[offset:]])
convert dictionary of keyword argument and value to positional argument equivalent to:: vnames = describe(f) return tuple([kwd[k] for k in vnames[offset:]])
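A worked example, given that describe(f) returns the function's parameter names in order:

def f(ctx, x, y):
    pass

kwd = {'ctx': None, 'x': 1, 'y': 2}
# offset=1 skips 'ctx', leaving ('x', 'y') -> (kwd['x'], kwd['y'])
assert parse_arg(f, kwd, offset=1) == (1, 2)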
376,939
def loadtxt2(fname, dtype=None, delimiter=' ', newline='\n', comment_character='#', skiplines=0): dtypert = [None, None, None] def preparedtype(dtype): dtypert[0] = dtype flatten = flatten_dtype(dtype) dtypert[1] = flatten dtypert[2] = numpy.dtype([(, (numpy.int8, flatten.itemsize))]) buf = numpy.empty((), dtype=dtypert[1]) converters = [_default_conv[flatten[name].char] for name in flatten.names] return buf, converters, flatten.names def fileiter(fh): converters = [] buf = None if dtype is not None: buf, converters, names = preparedtype(dtype) yield None for lineno, line in enumerate(fh): if lineno < skiplines: continue if line[0] in comment_character: if buf is None and line[1] == : ddtype = pickle.loads(base64.b64decode(line[2:])) buf, converters, names = preparedtype(ddtype) yield None continue for word, c, name in zip(line.split(), converters, names): buf[name] = c(word) buf2 = buf.copy().view(dtype=dtypert[2]) yield buf2 if isinstance(fname, basestring): fh = file(fname, 'r') cleanup = lambda : fh.close() else: fh = iter(fname) cleanup = lambda : None try: i = fileiter(fh) i.next() return numpy.fromiter(i, dtype=dtypert[2]).view(dtype=dtypert[0]) finally: cleanup()
Known issues: delimiter and newline are not respected; string quotation with spaces is broken.
376,940
def setup_icons(self, ): plus_icon = get_icon(, asicon=True) self.addnew_tb.setIcon(plus_icon)
Set all icons on buttons :returns: None :rtype: None :raises: None
376,941
def get_status(task, prefix, expnum, version, ccd, return_message=False): key = get_process_tag(prefix+task, ccd, version) status = get_tag(expnum, key) logger.debug('%s: %s' % (key, status)) if return_message: return status else: return status == SUCCESS
Report back the status of the given program by looking up the associated VOSpace annotation. @param task: name of the process or task that will be checked. @param prefix: prefix of the file that was processed (often fk or None) @param expnum: which exposure number (or base filename) @param version: which version of that exposure (p, s, o) @param ccd: which CCD within the exposure. @param return_message: return what the tag said, or just True/False for success/failure. @return: the status of the processing based on the annotation value.
376,942
def is_callable_type(tp): if NEW_TYPING: return (tp is Callable or isinstance(tp, _GenericAlias) and tp.__origin__ is collections.abc.Callable or isinstance(tp, type) and issubclass(tp, Generic) and issubclass(tp, collections.abc.Callable)) return type(tp) is CallableMeta
Test if the type is a generic callable type, including subclasses excluding non-generic types and callables. Examples:: is_callable_type(int) == False is_callable_type(type) == False is_callable_type(Callable) == True is_callable_type(Callable[..., int]) == True is_callable_type(Callable[[int, int], Iterable[str]]) == True class MyClass(Callable[[int], int]): ... is_callable_type(MyClass) == True For more general tests use callable(), for more precise test (excluding subclasses) use:: get_origin(tp) is collections.abc.Callable # Callable prior to Python 3.7
376,943
def transform_audio(self, y): n_frames = self.n_frames(get_duration(y=y, sr=self.sr)) tgram = tempogram(y=y, sr=self.sr, hop_length=self.hop_length, win_length=self.win_length).astype(np.float32) tgram = fix_length(tgram, n_frames) return {'tempogram': tgram.T[self.idx]}
Compute the tempogram Parameters ---------- y : np.ndarray Audio buffer Returns ------- data : dict data['tempogram'] : np.ndarray, shape=(n_frames, win_length) The tempogram
376,944
def body(self): body = self.get_parameters_by_location(['body']) return self.root.schemas.get(body[0].type) if body else None
Return body request parameter :return: Body parameter :rtype: Parameter or None
376,945
def _hook_unmapped(self, uc, access, address, size, value, data): try: self.sync_unicorn_to_manticore() logger.warning(f"Encountered an operation on unmapped memory at {hex(address)}") m = self._cpu.memory.map_containing(address) self.copy_memory(m.start, m.end - m.start) except MemoryException as e: logger.error("Failed to map memory {}-{}, ({}): {}".format(hex(address), hex(address + size), access, e)) self._to_raise = e self._should_try_again = False return False self._should_try_again = True return False
We hit an unmapped region; map it into unicorn.
376,946
def at_time(self, time, asof=False, axis=None): if axis is None: axis = self._stat_axis_number axis = self._get_axis_number(axis) index = self._get_axis(axis) try: indexer = index.indexer_at_time(time, asof=asof) except AttributeError: raise TypeError('Index must be DatetimeIndex') return self._take(indexer, axis=axis)
Select values at particular time of day (e.g. 9:30AM). Parameters ---------- time : datetime.time or str axis : {0 or 'index', 1 or 'columns'}, default 0 .. versionadded:: 0.24.0 Returns ------- Series or DataFrame Raises ------ TypeError If the index is not a :class:`DatetimeIndex` See Also -------- between_time : Select values between particular times of the day. first : Select initial periods of time series based on a date offset. last : Select final periods of time series based on a date offset. DatetimeIndex.indexer_at_time : Get just the index locations for values at particular time of the day. Examples -------- >>> i = pd.date_range('2018-04-09', periods=4, freq='12H') >>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i) >>> ts A 2018-04-09 00:00:00 1 2018-04-09 12:00:00 2 2018-04-10 00:00:00 3 2018-04-10 12:00:00 4 >>> ts.at_time('12:00') A 2018-04-09 12:00:00 2 2018-04-10 12:00:00 4
376,947
def _clean_flags(args, caller): flags = '' if args is None: return flags allowed = (, , , , , , , , , , , ) for flag in args: if flag in allowed: flags += flag else: raise CommandExecutionError( 'Invalid flag passed to {0}'.format(caller) ) return flags
Sanitize flags passed into df
376,948
def _gl_initialize(self): if 'es' in gl.current_backend.__name__: pass else: GL_VERTEX_PROGRAM_POINT_SIZE = 34370 GL_POINT_SPRITE = 34913 gl.glEnable(GL_VERTEX_PROGRAM_POINT_SIZE) gl.glEnable(GL_POINT_SPRITE) if self.capabilities['gl_version'] is None: self.capabilities['gl_version'] = gl.glGetParameter(gl.GL_VERSION) self.capabilities['max_texture_size'] = \ gl.glGetParameter(gl.GL_MAX_TEXTURE_SIZE) this_version = self.capabilities['gl_version'].split(' ')[0] this_version = LooseVersion(this_version)
Deal with compatibility; desktop does not have sprites enabled by default. ES has.
376,949
def xd(self): if self._xd is None: path_to_lsm_files = path.join(self.lsm_input_folder_path, self.lsm_search_card) self._xd = pa.open_mfdataset(path_to_lsm_files, lat_var=self.lsm_lat_var, lon_var=self.lsm_lon_var, time_var=self.lsm_time_var, lat_dim=self.lsm_lat_dim, lon_dim=self.lsm_lon_dim, time_dim=self.lsm_time_dim, loader=self.pangaea_loader) self.lsm_time_dim = self.lsm_time_var = 'time' return self._xd
get xarray dataset file handle to LSM files
376,950
def flip(self, reactions): for reaction in reactions: if reaction in self._flipped: self._flipped.remove(reaction) else: self._flipped.add(reaction)
Flip the specified reactions.
376,951
def exec(self, container: Container, command: str, context: Optional[str] = None, stdout: bool = True, stderr: bool = False, time_limit: Optional[int] = None ) -> ExecResponse: payload = { 'command': command, 'context': context, 'stdout': stdout, 'stderr': stderr, 'time_limit': time_limit } path = "containers/{}/exec".format(container.uid) r = self.__api.post(path, json=payload) if r.status_code == 200: return ExecResponse.from_dict(r.json()) if r.status_code == 404: raise KeyError("no container found with given UID: {}".format(container.uid)) self.__api.handle_erroneous_response(r)
Executes a given command inside a provided container. Parameters: container: the container to which the command should be issued. command: the command that should be executed. context: the working directory that should be used to perform the execution. If no context is provided, then the command will be executed at the root of the container. stdout: specifies whether or not output to the stdout should be included in the execution summary. stderr: specifies whether or not output to the stderr should be included in the execution summary. time_limit: an optional time limit that is applied to the execution. If the command fails to execute within the time limit, the command will be aborted and treated as a failure. Returns: a summary of the outcome of the execution. Raises: KeyError: if the container no longer exists on the server.
376,952
def start(): setupdir = dirname(dirname(__file__)) curdir = os.getcwd() if len(sys.argv) > 1: configfile = sys.argv[1] elif exists(join(setupdir, "setup.py")): configfile = join(setupdir, "dev.cfg") elif exists(join(curdir, "prod.cfg")): configfile = join(curdir, "prod.cfg") else: try: configfile = pkg_resources.resource_filename( pkg_resources.Requirement.parse("tgpisa"), "config/default.cfg") except pkg_resources.DistributionNotFound: raise ConfigurationError("Could not find default configuration.") turbogears.update_config(configfile=configfile, modulename="tgpisa.config") from tgpisa.controllers import Root turbogears.start_server(Root())
Start the CherryPy application server.
376,953
def open(self): self._connection = \ amqp.Connection(host='%s:%s' % (self.hostname, self.port), userid=self.username, password=self.password, virtual_host=self.virtual_host, insist=False) self.channel = self._connection.channel()
Open a connection to the AMQP compliant broker.
376,954
def bin_b64_type(arg): try: arg = base64.standard_b64decode(arg) except (binascii.Error, TypeError): raise argparse.ArgumentTypeError("{0} is invalid base64 data".format(repr(arg))) return arg
An argparse type representing binary data encoded in base64.
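A minimal argparse sketch; 'aGVsbG8=' is base64 for b'hello', and malformed input raises ArgumentTypeError:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('payload', type=bin_b64_type)
args = parser.parse_args(['aGVsbG8='])
assert args.payload == b'hello'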
376,955
def export(name: str, value: Any): stack = get_root_resource() if stack is not None: stack.output(name, value)
Exports a named stack output. :param str name: The name to assign to this output. :param Any value: The value of this output.
376,956
def _raise_for_status(response): http_error_msg = '' if 400 <= response.status_code < 500: http_error_msg = '%s Client Error: %s for url: %s' % ( response.status_code, response.reason, response.url) elif 500 <= response.status_code < 600: http_error_msg = '%s Server Error: %s for url: %s' % ( response.status_code, response.reason, response.url) if http_error_msg: spacetrack_error_msg = None try: json = response.json() if isinstance(json, Mapping): spacetrack_error_msg = json['error'] except (ValueError, KeyError): pass if not spacetrack_error_msg: spacetrack_error_msg = response.text if spacetrack_error_msg: http_error_msg += '\nSpace-Track status message: ' + spacetrack_error_msg raise requests.HTTPError(http_error_msg, response=response)
Raises stored :class:`HTTPError`, if one occurred. This is the :meth:`requests.models.Response.raise_for_status` method, modified to add the response from Space-Track, if given.
376,957
def load_terms(self, terms): assert t.parent is not None try: dd = terms.declare_dict self.decl_terms.update(dd[]) self.decl_sections.update(dd[]) self.super_terms.update(terms.super_terms()) kf = lambda e: e[1] self.derived_terms ={ k:set( e[0] for e in g) for k, g in groupby(sorted(self.super_terms.items(), key=kf), kf)} except AttributeError as e: pass try: self.errors = terms.errors_as_dict() except AttributeError: self.errors = {} return self
Create a builder from a sequence of terms, usually a TermInterpreter
376,958
def buildURL(self, action, **query): base = urlparse.urljoin(self.server.base_url, action) return appendArgs(base, query)
Build a URL relative to the server base_url, with the given query parameters added.
376,959
def print_tb(tb, limit=None, file=None): if file is None: file = sys.stderr if limit is None: if hasattr(sys, 'tracebacklimit'): limit = sys.tracebacklimit n = 0 while tb is not None and (limit is None or n < limit): f = tb.tb_frame lineno = tb.tb_lineno co = f.f_code filename = co.co_filename name = co.co_name _print(file, '  File "%s", line %d, in %s' % (filename, lineno, name)) linecache.checkcache(filename) line = linecache.getline(filename, lineno, f.f_globals) if line: _print(file, '    ' + line.strip()) tb = tb.tb_next n = n+1
Print up to 'limit' stack trace entries from the traceback 'tb'. If 'limit' is omitted or None, all entries are printed. If 'file' is omitted or None, the output goes to sys.stderr; otherwise 'file' should be an open file or file-like object with a write() method.
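Typical usage against the current exception; with limit=1 only the first frame is written to sys.stderr:

import sys
try:
    1 / 0
except ZeroDivisionError:
    print_tb(sys.exc_info()[2], limit=1)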
376,960
def to_bluesky( traffic: Traffic, filename: Union[str, Path], minimum_time: Optional[timelike] = None, ) -> None: if minimum_time is not None: minimum_time = to_datetime(minimum_time) traffic = traffic.query(f"timestamp >= '{minimum_time}'") if isinstance(filename, str): filename = Path(filename) if not filename.parent.exists(): filename.parent.mkdir(parents=True) altitude = ( "baro_altitude" if "baro_altitude" in traffic.data.columns else "altitude" ) if "mdl" not in traffic.data.columns: traffic = aircraft.merge(traffic) if "cas" not in traffic.data.columns: traffic = Traffic( traffic.data.assign( cas=vtas2cas(traffic.data.ground_speed, traffic.data[altitude]) ) ) with filename.open("w") as fh: t_delta = traffic.data.timestamp - traffic.start_time data = ( traffic.assign_id() .data.groupby("flight_id") .filter(lambda x: x.shape[0] > 3) .assign(timedelta=t_delta.apply(fmt_timedelta)) .sort_values(by="timestamp") ) for column in data.columns: data[column] = data[column].astype(np.str) is_created: List[str] = [] is_deleted: List[str] = [] start_time = cast(pd.Timestamp, traffic.start_time).time() fh.write(f"00:00:00> TIME {start_time}\n") buff = data.groupby("flight_id").timestamp.max() dd = pd.DataFrame( columns=["timestamp"], data=buff.values, index=buff.index.values ) map_icao24_last_point = {} for i, v in dd.iterrows(): map_icao24_last_point[i] = v[0] for _, v in data.iterrows(): if v.flight_id not in is_created: is_created.append(v.flight_id) fh.write( f"{v.timedelta}> CRE {v.callsign} {v.mdl} " f"{v.latitude} {v.longitude} {v.track} " f"{v[altitude]} {v.cas}\n" ) elif v.timestamp == map_icao24_last_point[v.flight_id]: if v.flight_id not in is_deleted: is_deleted.append(v.flight_id) fh.write(f"{v.timedelta}> DEL {v.callsign}\n") elif v.flight_id not in is_deleted: fh.write( f"{v.timedelta}> MOVE {v.callsign} " f"{v.latitude} {v.longitude} {v[altitude]} " f"{v.track} {v.cas} {v.vertical_rate}\n" ) logging.info(f"Scenario file {filename} written")
Generates a Bluesky scenario file.
376,961
def uniquelines(q): setoflines = set() for facets in q: for line in itertools.combinations(facets, 2): setoflines.add(tuple(sorted(line))) return setoflines
Given all the facets, convert it into a set of unique lines. Specifically used for converting convex hull facets into line pairs of coordinates. Args: q: A 2-dim sequence, where each row represents a facet. E.g., [[1,2,3],[3,6,7],...] Returns: setoflines: A set of tuple of lines. E.g., ((1,2), (1,3), (2,3), ....)
376,962
def gdal_rasterize(src, dst, options): out = gdal.Rasterize(dst, src, options=gdal.RasterizeOptions(**options)) out = None
a simple wrapper for gdal.Rasterize Parameters ---------- src: str or :osgeo:class:`ogr.DataSource` the input data set dst: str the output data set options: dict additional parameters passed to gdal.Rasterize; see :osgeo:func:`gdal.RasterizeOptions` Returns -------
376,963
def strerror(errno): from pypy.module._codecs.locale import str_decode_locale_surrogateescape return str_decode_locale_surrogateescape(os.strerror(errno))
Translate an error code to a message string.
376,964
def open_file(filename): if sys.platform.startswith("darwin"): subprocess.call(("open", filename)) elif sys.platform == "cygwin": subprocess.call(("cygstart", filename)) elif os.name == "nt": os.system("start %s" % filename) elif os.name == "posix": subprocess.call(("xdg-open", filename))
Multi-platform way to make the OS open a file with its default application
376,965
def add_user(self, attrs): username = attrs[self.key] if username in self.users: raise UserAlreadyExists(username, self.backend_name) self.users[username] = attrs self.users[username]['groups'] = set([])
Add a user to the backend :param attrs: attributes of the user :type attrs: dict ({<attr>: <value>}) .. warning:: raise UserAlreadyExists if user already exists
376,966
def __get_metrics(self): esfilters_closed = None esfilters_opened = None if self.esfilters: esfilters_closed = self.esfilters.copy() esfilters_opened = self.esfilters.copy() closed = self.closed_class(self.es_url, self.es_index, start=self.start, end=self.end, esfilters=esfilters_closed, interval=self.interval) opened = self.opened_class(self.es_url, self.es_index, start=self.start, end=self.end, esfilters=esfilters_opened, interval=self.interval) return (closed, opened)
Each metric must have its own filters copy to modify it freely
376,967
def _expect_empty(self): item = self._lexer.get_token() if item: line_no, token = item raise ParseError(u"Unexpected token {0} on line {1}" .format(common.from_utf8(token.strip()), line_no))
Checks if the token stream is empty. * Raises a ``ParseError`` exception if a token is found.
376,968
def create_from_request_pdu(pdu): _, starting_address, quantity = struct.unpack('>BHH', pdu) instance = ReadHoldingRegisters() instance.starting_address = starting_address instance.quantity = quantity return instance
Create instance from request PDU. :param pdu: A request PDU. :return: Instance of this class.
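A sketch of building a matching request PDU, assuming the method is exposed as a staticmethod on ReadHoldingRegisters; Modbus PDUs are big-endian and function code 3 is Read Holding Registers:

import struct

pdu = struct.pack('>BHH', 3, 100, 2)  # function 3, address 100, quantity 2
req = ReadHoldingRegisters.create_from_request_pdu(pdu)
assert (req.starting_address, req.quantity) == (100, 2)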
376,969
def _parse(self, line): try: result = line.split(':', maxsplit=4) filename, line_num_txt, column_txt, message_type, text = result except ValueError: return try: self.line_num = int(line_num_txt.strip()) self.column = int(column_txt.strip()) except ValueError: return self.filename = filename self.message_type = message_type.strip() self.text = text.strip() self.valid = True
Parse the output line
376,970
def configure(self, options, conf): self.conf = conf self.enabled = options.debugErrors or options.debugFailures self.enabled_for_errors = options.debugErrors self.enabled_for_failures = options.debugFailures
Configure which kinds of exceptions trigger plugin.
376,971
def get_host_from_service_info(service_info): host = None port = None if (service_info and service_info.port and (service_info.server or service_info.address)): if service_info.address: host = socket.inet_ntoa(service_info.address) else: host = service_info.server.lower() port = service_info.port return (host, port)
Get hostname or IP from service_info.
376,972
def is_symlink(self): try: return S_ISLNK(self.lstat().st_mode) except OSError as e: if e.errno != ENOENT: raise return False
Whether this path is a symbolic link.
376,973
def combine(objs): from .orbit import Orbit if isinstance(objs, PhaseSpacePosition) or isinstance(objs, Orbit): raise ValueError("You must pass a non-empty iterable to combine.") elif not isiterable(objs) or len(objs) < 1: raise ValueError("You must pass a non-empty iterable to combine.") elif len(objs) == 1: return objs[0] if objs[0].__class__ not in [PhaseSpacePosition, Orbit]: raise TypeError("Objects must be either PhaseSpacePosition or Orbit " "instances.") for obj in objs: if obj.__class__ != objs[0].__class__: raise TypeError("All objects must have the same type.") if obj.ndim != objs[0].ndim: raise ValueError("All objects must have the same ndim.") if obj.frame != objs[0].frame: raise ValueError("All objects must have the same frame.") if hasattr(obj, 'potential') and obj.potential != objs[0].potential: raise ValueError("All objects must have the same potential.") if (hasattr(obj, 't') and obj.t is not None and objs[0].t is not None and not quantity_allclose(obj.t, objs[0].t, atol=1E-13*objs[0].t.unit)): raise ValueError("All orbits must have the same time array.") if 'cartesian' not in obj.pos.get_name(): raise NotImplementedError("Currently, combine only works for " "Cartesian-represented objects.") if objs[0].__class__ == PhaseSpacePosition: pos = [] vel = [] for i, obj in enumerate(objs): if i == 0: pos_unit = obj.pos.xyz.unit vel_unit = obj.vel.d_xyz.unit pos.append(atleast_2d(obj.pos.xyz.to(pos_unit).value, insert_axis=1)) vel.append(atleast_2d(obj.vel.d_xyz.to(vel_unit).value, insert_axis=1)) pos = np.concatenate(pos, axis=1) * pos_unit vel = np.concatenate(vel, axis=1) * vel_unit return PhaseSpacePosition(pos=pos, vel=vel, frame=objs[0].frame) elif objs[0].__class__ == Orbit: pos = [] vel = [] for i, obj in enumerate(objs): if i == 0: pos_unit = obj.pos.xyz.unit vel_unit = obj.vel.d_xyz.unit p = obj.pos.xyz.to(pos_unit).value v = obj.vel.d_xyz.to(vel_unit).value if p.ndim < 3: p = p.reshape(p.shape + (1,)) v = v.reshape(v.shape + (1,)) pos.append(p) vel.append(v) pos = np.concatenate(pos, axis=2) * pos_unit vel = np.concatenate(vel, axis=2) * vel_unit return Orbit(pos=pos, vel=vel, t=objs[0].t, frame=objs[0].frame, potential=objs[0].potential) else: raise RuntimeError("should never get here...")
Combine the specified `~gala.dynamics.PhaseSpacePosition` or `~gala.dynamics.Orbit` objects. Parameters ---------- objs : iterable An iterable of either `~gala.dynamics.PhaseSpacePosition` or `~gala.dynamics.Orbit` objects.
376,974
def pyhttp(self, value): if value.startswith("pyuri_"): return value parts = self.parse_uri(value) return "pyuri_%s_%s" % (base64.b64encode(bytes(parts[0], "utf-8")).decode(), parts[1])
Converts a URI without a namespace into a Python-accessible name.
376,975
def role_definitions(self): api_version = self._get_api_version() if api_version == '2015-07-01': from .v2015_07_01.operations import RoleDefinitionsOperations as OperationClass elif api_version == '2018-01-01-preview': from .v2018_01_01_preview.operations import RoleDefinitionsOperations as OperationClass else: raise NotImplementedError("APIVersion {} is not available".format(api_version)) return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
Instance depends on the API version: * 2015-07-01: :class:`RoleDefinitionsOperations<azure.mgmt.authorization.v2015_07_01.operations.RoleDefinitionsOperations>` * 2018-01-01-preview: :class:`RoleDefinitionsOperations<azure.mgmt.authorization.v2018_01_01_preview.operations.RoleDefinitionsOperations>`
376,976
def stream_json_lines(file): if isinstance(file, string_type): file = open(file, 'r') for line in file: line = line.strip() if line: if isinstance(line, bytes): line = line.decode() yield json.loads(line)
Load a JSON stream and return a generator, yielding one object at a time.
376,977
def read(cls, proto): tm = super(TMShimMixin, cls).read(proto) tm.infActiveState = {"t": None} return tm
Intercepts TemporalMemory deserialization request in order to initialize `self.infActiveState` @param proto (DynamicStructBuilder) Proto object @return (TemporalMemory) TemporalMemory shim instance
376,978
def format_private_ip_address(result): from collections import OrderedDict order_dict = OrderedDict() if result.ip_address is not None: order_dict[] = result.ip_address if result.subnet_resource_id is not None: order_dict[] = result.subnet_resource_id return order_dict
Formats the PrivateIPAddress object removing arguments that are empty
376,979
def embed(self, url, **kwargs): try: provider = self.provider_for_url(url) except OEmbedMissingEndpoint: raise else: try: stored_match = StoredOEmbed.objects.filter( match=url, maxwidth=kwargs.get('maxwidth', None), maxheight=kwargs.get('maxheight', None), date_expires__gte=datetime.datetime.now())[0] return OEmbedResource.create_json(stored_match.response_json) except IndexError: params = dict([(k, v) for k, v in kwargs.items() if v]) resource = provider.request_resource(url, **params) try: cache_age = int(resource.cache_age) if cache_age < MIN_OEMBED_TTL: cache_age = MIN_OEMBED_TTL except: cache_age = DEFAULT_OEMBED_TTL date_expires = datetime.datetime.now() + datetime.timedelta(seconds=cache_age) stored_oembed, created = StoredOEmbed.objects.get_or_create( match=url, maxwidth=kwargs.get('maxwidth', None), maxheight=kwargs.get('maxheight', None)) stored_oembed.response_json = resource.json stored_oembed.resource_type = resource.type stored_oembed.date_expires = date_expires if resource.content_object: stored_oembed.content_object = resource.content_object stored_oembed.save() return resource
The heart of the matter
376,980
def deploy( src, requirements=None, local_package=None, config_file='config.yaml', profile_name=None, preserve_vpc=False ): path_to_config_file = os.path.join(src, config_file) cfg = read_cfg(path_to_config_file, profile_name) path_to_zip_file = build( src, config_file=config_file, requirements=requirements, local_package=local_package, ) existing_config = get_function_config(cfg) if existing_config: update_function(cfg, path_to_zip_file, existing_config, preserve_vpc=preserve_vpc) else: create_function(cfg, path_to_zip_file)
Deploys a new function to AWS Lambda. :param str src: The path to your Lambda ready project (folder must contain a valid config.yaml and handler module (e.g.: service.py)). :param str local_package: The path to a local package which should be included in the deploy as well (and/or is not available on PyPi)
376,981
def generate(env): global PSAction if PSAction is None: PSAction = SCons.Action.Action('$PSCOM', '$PSCOMSTR') global DVIPSAction if DVIPSAction is None: DVIPSAction = SCons.Action.Action(DviPsFunction, strfunction = DviPsStrFunction) global PSBuilder if PSBuilder is None: PSBuilder = SCons.Builder.Builder(action = PSAction, prefix = '$PSPREFIX', suffix = '$PSSUFFIX', src_suffix = '.dvi', src_builder = 'DVI', single_source=True) env['BUILDERS']['PostScript'] = PSBuilder env['DVIPS'] = 'dvips' env['DVIPSFLAGS'] = SCons.Util.CLVar('')
Add Builders and construction variables for dvips to an Environment.
376,982
def _submit_result(self): if self._cur_res_id and self._cur_values: self._addRawResult(self._cur_res_id, self._cur_values) self._reset()
Adding current values as a Raw Result and resetting everything. Notice that we are not calculating the final result of the assay. We just set NP and GP values and in Bika, the AS will have a Calculation to generate the final result based on NP and GP values.
376,983
def get_tags(self, rev=None): rev_num = self._get_rev_num(rev) return ( set(self._read_tags_for_rev(rev_num)) if not rev_num.endswith('+') else set([]) )
Get the tags for the given revision specifier (or the current revision if not specified).
376,984
def simplify(self): if self.iscanonical: return self expr = self.cancel() if not isinstance(expr, self.__class__): return expr.simplify() if expr.args[0] in (self.TRUE, self.FALSE,): return expr.args[0].dual expr = self.__class__(expr.args[0].simplify()) expr.iscanonical = True return expr
Return a simplified expr in canonical form. This means double negations are canceled out and all contained boolean objects are in their canonical form.
376,985
def scoreatpercentile(inlist, percent): if percent > 1: print("\nDividing percent>1 by 100 in lscoreatpercentile().\n") percent = percent / 100.0 targetcf = percent * len(inlist) h, lrl, binsize, extras = histogram(inlist) cumhist = cumsum(copy.deepcopy(h)) for i in range(len(cumhist)): if cumhist[i] >= targetcf: break score = binsize * ((targetcf - cumhist[i - 1]) / float(h[i])) + (lrl + binsize * i) return score
Returns the score at a given percentile relative to the distribution given by inlist. Usage: lscoreatpercentile(inlist,percent)
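Both call styles are equivalent, since percent > 1 is rescaled to a fraction before the histogram lookup:

data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
assert scoreatpercentile(data, 50) == scoreatpercentile(data, 0.5)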
376,986
def rollback_savepoint(self, savepoint): if not self._platform.is_savepoints_supported(): raise DBALConnectionError.savepoints_not_supported() self.ensure_connected() self._platform.rollback_savepoint(savepoint)
Rolls back to the given savepoint. :param savepoint: the name of the savepoint to rollback to :raise: pydbal.exception.DBALConnectionError
376,987
def patch(self, endpoint, data): response = self.get_response(method='PATCH', endpoint=endpoint, json=data, headers={'Content-Type': 'application/json'}) if response.status_code == 200: return self.decode(response=response) return response
Method to update an item The headers must include an If-Match containing the object _etag. headers = {'If-Match': contact_etag} The data dictionary contain the fields that must be modified. If the patching fails because the _etag object do not match with the provided one, a BackendException is raised with code = 412. If inception is True, this method makes e new get request on the endpoint to refresh the _etag and then a new patch is called. If an HTTP 412 error occurs, a BackendException is raised. This exception is: - code: 412 - message: response content - response: backend response All other HTTP error raises a BackendException. If some _issues are provided by the backend, this exception is: - code: HTTP error code - message: response content - response: JSON encoded backend response (including '_issues' dictionary ...) If no _issues are provided and an _error is signaled by the backend, this exception is: - code: backend error code - message: backend error message - response: JSON encoded backend response :param endpoint: endpoint (API URL) :type endpoint: str :param data: properties of item to update :type data: dict :param headers: headers (example: Content-Type). 'If-Match' required :type headers: dict :param inception: if True tries to get the last _etag :type inception: bool :return: dictionary containing patch response from the backend :rtype: dict
376,988
def read_all(filename): array = javabridge.static_call( "Lweka/core/SerializationHelper;", "readAll", "(Ljava/lang/String;)[Ljava/lang/Object;", filename) if array is None: return None else: return javabridge.get_env().get_object_array_elements(array)
Reads the serialized objects from disk. Caller must wrap objects in appropriate Python wrapper classes. :param filename: the file with the serialized objects :type filename: str :return: the list of JB_OBjects :rtype: list
376,989
def calc_synch_eta(b, ne, delta, sinth, nu, E0=1.): s = nu / calc_nu_b(b) return (b * ne * 8.6e-24 * (delta - 1) * sinth * (0.175 * s / (E0**2 * sinth))**(0.5 * (1 - delta)))
Calculate the relativistic synchrotron emission coefficient η_ν. This is Dulk (1985) equation 40, which is an approximation assuming a power-law electron population. Arguments are: b Magnetic field strength in Gauss ne The density of electrons per cubic centimeter with energies greater than E0. delta The power-law index defining the energy distribution of the electron population, with ``n(E) ~ E^(-delta)``. The equation is valid for ``2 <~ delta <~ 5``. sinth The sine of the angle between the line of sight and the magnetic field direction. It's not specified for what range of values the expressions work well. nu The frequency at which to calculate η, in Hz. It's not specified for what range of values the expressions work well. E0 The minimum energy of electrons to consider, in MeV. Defaults to 1 so that these functions can be called identically to the gyrosynchrotron functions. The return value is the emission coefficient (AKA "emissivity"), in units of ``erg s^-1 Hz^-1 cm^-3 sr^-1``. No complaints are raised if you attempt to use the equation outside of its range of validity.
376,990
def get_opt_repairs_add_remove_edges_greedy(instance,nm, edges): sem = [sign_cons_prg, elem_path_prg, fwd_prop_prg, bwd_prop_prg] inst = instance.to_file() f_edges = TermSet(edges).to_file() prg = [ inst, f_edges, remove_edges_prg, min_repairs_prg, show_rep_prg, ] + sem + scenfit coptions = str(nm)+ solver = GringoClasp(clasp_options=coptions) models = solver.run(prg, collapseTerms=True, collapseAtoms=False) os.unlink(f_edges) os.unlink(inst) return models
Only applies with the elementary-path consistency notion.
376,991
def list(self, resource, url_prefix, auth, session, send_opts): req = self.get_request( resource, 'GET', 'application/json', url_prefix, auth, proj_list_req=True) prep = session.prepare_request(req) resp = session.send(prep, **send_opts) if resp.status_code == 200: return self._get_resource_list(resp.json()) err = ('List failed on {}, got HTTP response: ({}) - {}'.format( resource.name, resp.status_code, resp.text)) raise HTTPError(err, request = req, response = resp)
List all resources of the same type as the given resource. Args: resource (intern.resource.boss.BossResource): List resources of the same type as this.. url_prefix (string): Protocol + host such as https://api.theboss.io auth (string): Token to send in the request header. session (requests.Session): HTTP session to use for request. send_opts (dictionary): Additional arguments to pass to session.send(). Returns: (list): List of resources. Each resource is a dictionary. Raises: requests.HTTPError on failure.
376,992
def _extract_alphabet(self, grammar): alphabet = set([]) for terminal in grammar.Terminals: alphabet |= set([x for x in terminal]) self.alphabet = list(alphabet)
Extract an alphabet from the given grammar.
376,993
def findAllCfgTasksUnderDir(aDir): retval = {} for f in irafutils.rglob(aDir, '*.cfg'): retval[f] = getEmbeddedKeyVal(f, TASK_NAME_KEY, '') return retval
Finds all installed tasks by examining any .cfg files found on disk at and under the given directory, as an installation might be. This returns a dict of { file name : task name }
376,994
def add_node_collection(self, node, collection): assert node in self.node2pending if not self.collection_is_completed: self.node2collection[node] = list(collection) self.node2pending[node] = [] if len(self.node2collection) >= self.numnodes: self.collection_is_completed = True elif self._removed2pending: for deadnode in self._removed2pending: if deadnode.gateway.spec == node.gateway.spec: dead_collection = self.node2collection[deadnode] if collection != dead_collection: msg = report_collection_diff( dead_collection, collection, deadnode.gateway.id, node.gateway.id, ) self.log(msg) return pending = self._removed2pending.pop(deadnode) self.node2pending[node] = pending break
Add the collected test items from a node Collection is complete once all nodes have submitted their collection. In this case its pending list is set to an empty list. When the collection is already completed this submission is from a node which was restarted to replace a dead node. In this case we already assign the pending items here. In either case ``.schedule()`` will instruct the node to start running the required tests.
376,995
def new(cls, gen: Generator, sign_key: SignKey) -> 'VerKey': logger = logging.getLogger(__name__) logger.debug("VerKey::new: >>>") c_instance = c_void_p() do_call(cls.new_handler, gen.c_instance, sign_key.c_instance, byref(c_instance)) res = cls(c_instance) logger.debug("VerKey::new: <<< res: %r", res) return res
Creates and returns BLS ver key that corresponds to the given generator and sign key. :param: gen - Generator :param: sign_key - Sign Key :return: BLS verification key
376,996
def transform(self, data): if not self._get("fitted"): raise RuntimeError("`transform` called before `fit` or `fit_transform`.") data = data.copy() output_column_prefix = self._get("output_column_prefix") if output_column_prefix is None: prefix = "" else: prefix = output_column_prefix + '.' transform_function = self._get("transform_function") feature_columns = self._get("features") feature_columns = _internal_utils.select_feature_subset(data, feature_columns) for f in feature_columns: data[prefix + f] = transform_function(data[f]) return data
Transforms the data.
376,997
def encrypt(self, key): if (self.encrypted): return self.iv = Random.new().read(AES.block_size) aes = AES.new(key, AES.MODE_CFB, self.iv) self.f_key = aes.encrypt(self.f_key) self.alpha_key = aes.encrypt(self.alpha_key) self.encrypted = True self.hmac = self.get_hmac(key)
This method encrypts and signs the state to make it unreadable by the server, since it contains information that would allow faking proof of storage. :param key: the key to encrypt and sign with
376,998
def add_tokens_for_single(self, ignore=False): args = self.single.args name = self.single.python_name self.reset_indentation(self.indent_type * self.single.indent) self.result.extend(self.tokens.make_single(name, args)) if ignore: self.single.skipped = True self.result.extend(self.tokens.test_skip) self.groups.finish_signature()
Add the tokens for the single signature
376,999
def bot_config(player_config_path: Path, team: Team) -> PlayerConfig: bot_config = PlayerConfig() bot_config.bot = True bot_config.rlbot_controlled = True bot_config.team = team.value bot_config.config_path = str(player_config_path.absolute()) config_bundle = get_bot_config_bundle(bot_config.config_path) bot_config.name = config_bundle.name bot_config.loadout_config = load_bot_appearance(config_bundle.get_looks_config(), bot_config.team) return bot_config
A function to cover the common case of creating a config for a bot.