Columns: Unnamed: 0 (int64, values 0 to 389k); code (string, lengths 26 to 79.6k); docstring (string, lengths 1 to 46.9k)
28,700
def easybake(css_in, html_in=sys.stdin, html_out=sys.stdout, last_step=None, coverage_file=None, use_repeatable_ids=False): html_doc = etree.parse(html_in) oven = Oven(css_in, use_repeatable_ids) oven.bake(html_doc, last_step) print(etree.tostring(html_doc, method="xml").decode(), file=html_out) if coverage_file: print(.format(css_in.name), file=coverage_file) print(oven.get_coverage_report(), file=coverage_file) print(, file=coverage_file)
Process the given HTML file stream with the css stream.
28,701
def FailoverInstance(r, instance, iallocator=None, ignore_consistency=False,
                     target_node=None):
    body = {
        "ignore_consistency": ignore_consistency,
    }
    if iallocator is not None:
        body["iallocator"] = iallocator
    if target_node is not None:
        body["target_node"] = target_node
    return r.request("put", "/2/instances/%s/failover" % instance,
                     content=body)
Does a failover of an instance. @type instance: string @param instance: Instance name @type iallocator: string @param iallocator: Iallocator for deciding the target node for shared-storage instances @type ignore_consistency: bool @param ignore_consistency: Whether to ignore disk consistency @type target_node: string @param target_node: Target node for shared-storage instances @rtype: string @return: job id
28,702
def render(self):
    size_format = self.size_primitive.fmt
    if self.value is None:
        return size_format, [-1]
    value = self.render_value(self.value)
    size = len(value)
    fmt = "%s%ds" % (size_format, size)
    return fmt, [size, value]
Returns the ``struct`` format and list of the size and value. The format is derived from the size primitive and the length of the resulting encoded value (e.g. the format for a string of 'foo' ends up as 'h3s'). .. note:: The value is expected to be string-able (wrapped in ``str()``) and is then encoded as UTF-8.
28,703
def get_parameter(self, name):
    default_value = "$%!)(INVALID)(!%$"
    value = self.lib.tdGetDeviceParameter(self.id, name, default_value)
    if value == default_value:
        raise AttributeError(name)
    return value
Get a parameter.
28,704
def show_progress(name, **kwargs):
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    logger.addHandler(ProgressBarHandler(**kwargs))
Sets up a :class:`ProgressBarHandler` to handle progress logs for a given module. Parameters ---------- name : string The module name of the progress logger to use. For example, :class:`skl_groups.divergences.KNNDivergenceEstimator` uses ``'skl_groups.divergences.knn.progress'``. * : anything Other keyword arguments are passed to the :class:`ProgressBarHandler`.
28,705
def skull_strip(dset,suffix=,prefix=None,unifize=True): return available_method()(dset,suffix,prefix,unifize)
attempts to cleanly remove skull from ``dset``
28,706
def POPFQ(cpu):
    mask = 0x00000001 | 0x00000004 | 0x00000010 | 0x00000040 | 0x00000080 | 0x00000400 | 0x00000800
    cpu.EFLAGS = (cpu.EFLAGS & ~mask) | cpu.pop(64) & mask
Pops stack into EFLAGS register. :param cpu: current CPU.
28,707
def get_rectangle(self):
    rec = [self.pos[0], self.pos[1]] * 2
    for age in self.nodes:
        for node in age:
            for i in range(2):
                if rec[0+i] > node.pos[i]:
                    rec[0+i] = node.pos[i]
                elif rec[2+i] < node.pos[i]:
                    rec[2+i] = node.pos[i]
    return tuple(rec)
Gets the coordinates of the rectangle in which the tree can be put. Returns: tuple: (x1, y1, x2, y2)
28,708
def __handle_events(self):
    events = pygame.event.get()
    for event in events:
        if event.type == pygame.QUIT:
            self.exit()
This is the place to put all event handling.
28,709
def values_clear(self, range):
    url = SPREADSHEET_VALUES_CLEAR_URL % (self.id, quote(range))
    r = self.client.request('post', url)
    return r.json()
Lower-level method that directly calls `spreadsheets.values.clear <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/clear>`_. :param str range: The `A1 notation <https://developers.google.com/sheets/api/guides/concepts#a1_notation>`_ of the values to clear. :returns: `Response body <https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/clear#response-body>`_. :rtype: dict .. versionadded:: 3.0
28,710
def issubclass(cls, ifaces): ifaces = _ensure_ifaces_tuple(ifaces) for iface in ifaces: return all(( _check_for_definition( iface, cls, , _is_attribute, ), _check_for_definition( iface, cls, , _is_property, ), _check_for_definition( iface, cls, , _is_method, ), _check_for_definition( iface, cls, , _is_classmethod, ), ))
Check if the given class is an implementation of the given iface.
28,711
def get(self, resource):
    return self.service.get(
        resource, self.url_prefix, self.auth, self.session,
        self.session_send_opts)
Get attributes of the data model object named by the given resource. Args: resource (intern.resource.boss.BossResource): resource.name as well as any parents must be identified to succeed. Returns: (intern.resource.boss.BossResource): Returns resource of type requested on success. Raises: requests.HTTPError on failure.
28,712
def extract_bbox(layers):
    if not hasattr(layers, '__iter__'):
        layers = [layers]
    bboxes = [
        layer.bbox for layer in layers
        if layer.is_visible() and not layer.bbox == (0, 0, 0, 0)
    ]
    if len(bboxes) == 0:
        return (0, 0, 0, 0)
    lefts, tops, rights, bottoms = zip(*bboxes)
    return (min(lefts), min(tops), max(rights), max(bottoms))
Returns a bounding box for ``layers`` or (0, 0, 0, 0) if the layers have no bounding box.
28,713
def populate_values(self):
    obj = self._get_base_state()
    self.base_state = json.dumps(obj)
Add values from the underlying dash layout configuration
28,714
def qteTextChanged(self): self.clearHighlighting() SCI = self.qteWidget self.compileMatchList() if len(self.matchList) == 0: return style = bytearray(self.styleOrig) cur = SCI.positionFromLineIndex(*self.cursorPosOrig) self.selMatchIdx = 0 for start, stop in self.matchList: if start < cur: self.selMatchIdx += 1 style[start:stop] = bytes(b) * (stop - start) if self.selMatchIdx == len(self.matchList): self.selMatchIdx = 0 start, stop = self.matchList[self.selMatchIdx] style[start:stop] = bytes(b) * (stop - start) line, col = SCI.lineIndexFromPosition(start) SCI.setCursorPosition(line, col) self.selMatchIdx += 1 self.qteWidget.SCISetStylingEx(0, 0, style)
Search for sub-string matches. This method is triggered by Qt whenever the text changes, ie. whenever the user has altered the input. Extract the new input, find all matches, and highlight them accordingly.
28,715
def dim_dc(self, pars): r self._set_parameters(pars) nom1a = - self.m * np.log(self.w * self.tau) * self.otc *\ np.sin(self.ang) nom1b = - self.m * self.otc * (np.pi / 2.0) * np.cos(self.ang) term1 = (nom1a + nom1b) / self.denom nom2 = (self.m * self.otc * np.sin(self.ang)) *\ (2 * np.log(self.w * self.tau) * self.otc * np.cos(self.ang) - 2 * self.otc * (np.pi / 2.0) * np.sin(self.ang) + 2 * np.log(self.w * self.tau) * self.otc2) term2 = nom2 / self.denom ** 2 result = term1 + term2 result *= self.rho0 return result
r""" :math:`\frac{\partial \hat{\rho''}(\omega)}{\partial c} = \rho_0 \frac{-m sin(\frac{c \pi}{2}) ln(\omega \tau)(\omega \tau)^c - m (\omega \tau)^c \frac{\pi}{2} cos(\frac{\pi}{2}}{1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}} + \rho_0 \frac{\left[-m (\omega \tau)^c cos(\frac{c \pi}{2}) \right] \cdot \left[ -2 ln(\omega \tau) (\omega \tau)^c cos(\frac{c \pi}{2}) + 2 (\omega \tau)^c \frac{\pi}{2} cos(\frac{c \pi}{2}) \right] + \left[2 ln(\omega \tau) (\omega \tau)^{2 c}\right]}{\left[1 + 2 (\omega \tau)^c cos(\frac{c \pi}{2}) + (\omega \tau)^{2 c}\right]^2}`
28,716
def check_status(self): black_list = (self.S_LOCKED, self.S_ERROR) if self.returncode != 0: msg = "job.sh return code: %s\nPerhaps the job was not submitted properly?" % self.returncode return self.set_status(self.S_QCRITICAL, msg=msg) if self.mpiabort_file.exists: return self.set_status(self.S_ABICRITICAL, msg="Found ABINIT abort file") err_msg = None if self.stderr_file.getsize() != 0: err_msg = self.stderr_file.read() qerr_info = None if self.qerr_file.getsize() != 0: qerr_info = self.qerr_file.read() qout_info = None if self.qout_file.getsize(): qout_info = self.qout_file.read() if self.output_file.exists: try: report = self.get_event_report() except Exception as exc: msg = "%s exception while parsing event_report:\n%s" % (self, exc) return self.set_status(self.S_ABICRITICAL, msg=msg) if report is None: return self.set_status(self.S_ERROR, msg="got None report!") if report.run_completed: self.datetimes.start = report.start_datetime self.datetimes.end = report.end_datetime not_ok = report.filter_types(self.CRITICAL_EVENTS) if not_ok: return self.set_status(self.S_UNCONVERGED, msg=) else: return self.set_status(self.S_OK, msg="status set to ok based on abiout") if report.errors: logger.debug() for error in report.errors: logger.debug(str(error)) try: self.abi_errors.append(error) except AttributeError: self.abi_errors = [error] logger.debug("%s: Found Errors or Bugs in ABINIT main output!" % self) msg = "\n".join(map(repr, report.errors)) return self.set_status(self.S_ABICRITICAL, msg=msg) if self.stderr_file.exists and not err_msg: if self.qerr_file.exists and not qerr_info: return self.set_status(self.S_RUN, msg=) if not self.output_file.exists: logger.debug("output_file does not exists") if not self.stderr_file.exists and not self.qerr_file.exists: return self.status if False and (qerr_info or qout_info): from pymatgen.io.abinit.scheduler_error_parsers import get_parser scheduler_parser = get_parser(self.manager.qadapter.QTYPE, err_file=self.qerr_file.path, out_file=self.qout_file.path, run_err_file=self.stderr_file.path) if scheduler_parser is None: return self.set_status(self.S_QCRITICAL, msg="Cannot find scheduler_parser for qtype %s" % self.manager.qadapter.QTYPE) scheduler_parser.parse() if scheduler_parser.errors: self.queue_errors = scheduler_parser.errors msg = "scheduler errors found:\n%s" % str(scheduler_parser.errors) return self.set_status(self.S_QCRITICAL, msg=msg) elif lennone(qerr_info) > 0: self.history.info( % str(qerr_info)) if err_msg: msg = % str(err_msg) self.history.warning(msg)
This function checks the status of the task by inspecting the output and the error files produced by the application and by the queue manager.
28,717
def calc_area_extent(self, key): xyres = {500: 22272, 1000: 11136, 2000: 5568} chkres = xyres[key.resolution] measured = self.nc[.format(key.name)] variable = self.nc[ .format(key.name)] self.startline = int(measured[][...]) self.endline = int(measured[][...]) self.startcol = int(measured[][...]) self.endcol = int(measured[][...]) self.nlines, self.ncols = variable[:].shape logger.debug(.format(key.name, chkres)) logger.debug(.format(self.nlines, self.ncols)) logger.debug(.format(self.startline, self.endline)) logger.debug(.format(self.startcol, self.endcol)) max_y = 5432229.9317116784 min_y = -5429229.5285458621 full_y = max_y + abs(min_y) res_y = full_y / chkres startl = min_y + res_y * self.startline - 0.5 * (res_y) endl = min_y + res_y * self.endline + 0.5 * (res_y) logger.debug(.format(startl, endl)) chk_extent = (-5432229.9317116784, endl, 5429229.5285458621, startl) return(chk_extent)
Calculate area extent for a dataset.
28,718
def path(path_name=None, override=None, *, root=None, name=None, ext=None, inject=None, relpath=None, reduce=False): path_name, identity, root = _initialize(path_name, override, root, inject) new_name = _process_name(path_name, identity, name, ext) new_directory = _process_directory(path_name, identity, root, inject) full_path = os.path.normpath(os.path.join(new_directory, new_name)) if APPEND_SEP_TO_DIRS and not new_name and full_path[-1] != os.sep: full_path += os.sep final_path = _format_path(full_path, root, relpath, reduce) return final_path
Path manipulation black magic
28,719
def usergroup_update(usrgrpid, **kwargs):
    conn_args = _login(**kwargs)
    ret = {}
    try:
        if conn_args:
            method = 'usergroup.update'
            params = {"usrgrpid": usrgrpid}
            params = _params_extend(params, **kwargs)
            ret = _query(method, params, conn_args['url'], conn_args['auth'])
            return ret['result']['usrgrpids']
        else:
            raise KeyError
    except KeyError:
        return ret
.. versionadded:: 2016.3.0 Update existing user group .. note:: This function accepts all standard user group properties: keyword argument names differ depending on your zabbix version, see here__. .. __: https://www.zabbix.com/documentation/2.4/manual/api/reference/usergroup/object#user_group :param usrgrpid: ID of the user group to update. :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) :return: IDs of the updated user group, False on failure. CLI Example: .. code-block:: bash salt '*' zabbix.usergroup_update 8 name=guestsRenamed
28,720
def update(self, d):
    for (k, v) in d.items():
        if k not in self or self[k] != v:
            self[k] = v
Works like regular update, but only actually updates when the new value and the old value differ. This is necessary to prevent certain infinite loops. :arg d: a dictionary
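A small self-contained sketch of why the changed-only update matters (the ObservableDict name is invented for illustration):
import collections

class ObservableDict(dict):
    # toy dict whose update only writes keys whose values actually change
    def update(self, d):
        for (k, v) in d.items():
            if k not in self or self[k] != v:
                self[k] = v

settings = ObservableDict(theme="dark")
settings.update({"theme": "dark"})   # no-op: value unchanged, nothing is rewritten
settings.update({"theme": "light"})  # only a genuinely new value triggers a write
print(settings)                      # {'theme': 'light'}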
28,721
def get_critical_original_kink_ratio(self):
    ratios = []
    if self.c1_original == self.c2_original:
        return [0, 1]
    reaction_kink = [k[3] for k in self.get_kinks()]
    for rxt in reaction_kink:
        ratios.append(abs(self._get_original_composition_ratio(rxt)))
    return ratios
Returns a list of molar mixing ratio for each kink between ORIGINAL (instead of processed) reactant compositions. This is the same list as mixing ratio obtained from get_kinks method if self.norm = False. Returns: A list of floats representing molar mixing ratios between the original reactant compositions for each kink.
28,722
def helices(self):
    hel_molecules = list(itertools.chain(
        *[p.helices._molecules for p in self._molecules
          if hasattr(p, 'helices')]))
    hel_assembly = Assembly(molecules=hel_molecules, assembly_id=self.id)
    return hel_assembly
Generates new `Assembly` containing just Ξ±-helices. Notes ----- Metadata is not currently preserved from the parent object. Returns ------- hel_assembly : ampal.Protein `Assembly` containing only the Ξ±-helices of the original `Assembly`.
28,723
def make_sentence(self, init_state=None, **kwargs):
    tries = kwargs.get('tries', DEFAULT_TRIES)
    mor = kwargs.get('max_overlap_ratio', DEFAULT_MAX_OVERLAP_RATIO)
    mot = kwargs.get('max_overlap_total', DEFAULT_MAX_OVERLAP_TOTAL)
    test_output = kwargs.get('test_output', True)
    max_words = kwargs.get('max_words', None)
    if init_state is not None:
        prefix = list(init_state)
        for word in prefix:
            if word == BEGIN:
                prefix = prefix[1:]
            else:
                break
    else:
        prefix = []
    for _ in range(tries):
        words = prefix + self.chain.walk(init_state)
        if max_words is not None and len(words) > max_words:
            continue
        if test_output and hasattr(self, "rejoined_text"):
            if self.test_sentence_output(words, mor, mot):
                return self.word_join(words)
        else:
            return self.word_join(words)
    return None
Attempts `tries` (default: 10) times to generate a valid sentence, based on the model and `test_sentence_output`. Passes `max_overlap_ratio` and `max_overlap_total` to `test_sentence_output`. If successful, returns the sentence as a string. If not, returns None. If `init_state` (a tuple of `self.chain.state_size` words) is not specified, this method chooses a sentence-start at random, in accordance with the model. If `test_output` is set as False then the `test_sentence_output` check will be skipped. If `max_words` is specified, the word count for the sentence will be evaluated against the provided limit.
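A brief, hedged usage sketch (this assumes a markovify-style Text model exposing make_sentence; the corpus string is made up):
import markovify

corpus = "The cat sat on the mat. The dog sat on the log. The cat chased the dog."
model = markovify.Text(corpus, state_size=1)
sentence = model.make_sentence(tries=100, max_words=10)
print(sentence)  # may be None if no valid sentence was found within 100 tries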
28,724
def vol_per_rev_LS(id_number): tubing_data_path = os.path.join(os.path.dirname(__file__), "data", "LS_tubing.txt") df = pd.read_csv(tubing_data_path, delimiter=) idx = df["Number"] == id_number return df[idx][].values[0] * u.mL/u.turn
Look up the volume per revolution output by a Masterflex L/S pump through L/S tubing of the given ID number. :param id_number: Identification number of the L/S tubing. Valid numbers are 13-18, 24, 35, and 36. :type id_number: int :return: Volume per revolution output by a Masterflex L/S pump through the L/S tubing :rtype: float :Examples: >>> from aguaclara.research.peristaltic_pump import vol_per_rev_LS >>> from aguaclara.core.units import unit_registry as u >>> vol_per_rev_LS(13) <Quantity(0.06, 'milliliter / turn')> >>> vol_per_rev_LS(18) <Quantity(3.8, 'milliliter / turn')>
28,725
def fit(self, dataset):
    dataset = dataset.map(_convert_to_vector)
    jmodel = callMLlibFunc("fitStandardScaler", self.withMean, self.withStd, dataset)
    return StandardScalerModel(jmodel)
Computes the mean and variance and stores as a model to be used for later scaling. :param dataset: The data used to compute the mean and variance to build the transformation model. :return: a StandardScalerModel
28,726
def get_signature_challenge(self): devices = [DeviceRegistration.wrap(device) for device in self.__get_u2f_devices()] if devices == []: return { : , : } challenge = start_authenticate(devices) challenge[] = session[] = challenge.json return challenge
Returns new signature challenge
28,727
def fit(self, X):
    self.constant_value = self._get_constant_value(X)
    if self.constant_value is None:
        self.model = scipy.stats.gaussian_kde(X)
    else:
        self._replace_constant_methods()
    self.fitted = True
Fit Kernel density estimation to a list of values. Args: X: 1-d `np.ndarray` or `pd.Series` or `list` datapoints to be estimated from. This function will fit a gaussian_kde model to a list of datapoints and store it as a class attribute.
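For context, a minimal sketch of what the non-constant branch does under the hood (plain scipy, no wrapper class assumed):
import numpy as np
import scipy.stats

X = np.array([1.2, 0.9, 1.5, 2.1, 1.8, 0.7])
kde = scipy.stats.gaussian_kde(X)   # fit the kernel density estimate
print(kde.evaluate([1.0, 2.0]))     # density estimates at new points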
28,728
def activate(self, asset):
    activate_url = asset['_links']['activate']
    return self._get(activate_url, body_type=models.Body).get_body()
Request activation of the specified asset representation. Asset representations are obtained from :py:meth:`get_assets`. :param dict asset: An asset representation from the API. :returns: :py:class:`planet.api.models.Body` with no response content :raises planet.api.exceptions.APIException: On API error.
28,729
def to_tokens(self, indices):
    to_reduce = False
    if not isinstance(indices, (list, tuple)):
        indices = [indices]
        to_reduce = True
    max_idx = len(self._idx_to_token) - 1
    tokens = []
    for idx in indices:
        if not isinstance(idx, int) or idx > max_idx:
            raise ValueError('Token index {} in the provided indices is invalid.'.format(idx))
        else:
            tokens.append(self._idx_to_token[idx])
    return tokens[0] if to_reduce else tokens
Converts token indices to tokens according to the vocabulary. Parameters ---------- indices : int or list of ints A source token index or token indices to be converted. Returns ------- str or list of strs A token or a list of tokens according to the vocabulary.
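A self-contained toy illustrating the index-to-token semantics described above (the TinyVocab class is invented for illustration):
class TinyVocab:
    # toy stand-in exposing the same idx -> token mapping
    def __init__(self, tokens):
        self._idx_to_token = list(tokens)

    def to_tokens(self, indices):
        if not isinstance(indices, (list, tuple)):
            return self._idx_to_token[indices]
        return [self._idx_to_token[i] for i in indices]

vocab = TinyVocab(['<unk>', 'hello', 'world'])
print(vocab.to_tokens(1))        # 'hello'
print(vocab.to_tokens([1, 2]))   # ['hello', 'world']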
28,730
def requestTimingInfo(self): try: return tuple(item.split()[1] for item in self.http_response.header.get().split()) except AttributeError: return None, None
Returns the time needed to process the request by the frontend server in microseconds and the epoch timestamp of the request in microseconds. :rtype: tuple containing processing time and timestamp
28,731
def __is_function_action(self, action_function):
    is_function_action = True
    if not hasattr(action_function, '__call__'):
        return False
    try:
        for end_string, context in action_function():
            if not isinstance(end_string, basestring):
                self.log_error("Action function must return end of filename as a string as first argument")
            if not isinstance(context, dict):
                self.log_error("Action function must return context as a dict as second argument")
            break
    except Exception:
        is_function_action = False
    return is_function_action
Detect if given function is really an action function. Args: action_function: Function to test. Note: We don't care if the variable refers to a function but rather if it is callable or not.
28,732
def _convert_to_image_color(self, color):
    rgb = self._convert_color_to_rrggbb(color)
    return self._convert_rrggbb_to_image_color(rgb)
:return: a color that can be used by the image
28,733
def hgsub_report(self):
    if self.relpath == '.':
        return
    yield "%s = [%s]%s" % (
        self.fpath.lstrip(),
        self.label,
        self.remote_url)
Yields: str: .hgsubs line for this repository
28,734
def shutdown(self, delete=False):
    disks = self.get_disks()
    self.domain.destroy()
    if delete:
        for disk in disks:
            disk.wipe()
            disk.delete()
Shutdown this VM :param delete: Should we delete after shutting the VM down? :type delete: bool
28,735
def create(self, image, geometry, options):
    image = self.cropbox(image, geometry, options)
    image = self.orientation(image, geometry, options)
    image = self.colorspace(image, geometry, options)
    image = self.remove_border(image, options)
    image = self.scale(image, geometry, options)
    image = self.crop(image, geometry, options)
    image = self.rounded(image, geometry, options)
    image = self.blur(image, geometry, options)
    image = self.padding(image, geometry, options)
    return image
Processing conductor, returns the thumbnail as an image engine instance
28,736
def _construct_retry(method_config, retry_codes, retry_params, retry_names):
    if method_config is None:
        return None
    codes = None
    if retry_codes and 'retry_codes_name' in method_config:
        codes_name = method_config['retry_codes_name']
        if codes_name in retry_codes and retry_codes[codes_name]:
            codes = [retry_names[name] for name in retry_codes[codes_name]]
        else:
            codes = []
    backoff_settings = None
    if retry_params and 'retry_params_name' in method_config:
        params_name = method_config['retry_params_name']
        if params_name and params_name in retry_params:
            backoff_settings = gax.BackoffSettings(**retry_params[params_name])
    return gax.RetryOptions(
        backoff_settings=backoff_settings,
        retry_codes=codes,
    )
Helper for ``construct_settings()``. Args: method_config (dict): A dictionary representing a single ``methods`` entry of the standard API client config file. (See ``construct_settings()`` for information on this yaml.) retry_codes (dict): A dictionary parsed from the ``retry_codes`` entry of the standard API client config file. (See ``construct_settings()`` for information on this yaml.) retry_params (dict): A dictionary parsed from the ``retry_params`` entry of the standard API client config file. (See ``construct_settings()`` for information on this yaml.) retry_names (dict): A dictionary mapping the string names used in the standard API client config file to API response status codes. Returns: Optional[RetryOptions]: The retry options, if applicable.
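For orientation, a hedged sketch of the kind of client-config fragments this helper consumes (the field names follow the standard API client config layout described above; the concrete values are invented):
method_config = {"retry_codes_name": "idempotent", "retry_params_name": "default"}
retry_codes = {"idempotent": ["DEADLINE_EXCEEDED", "UNAVAILABLE"]}
retry_params = {"default": {
    "initial_retry_delay_millis": 100,
    "retry_delay_multiplier": 1.3,
    "max_retry_delay_millis": 60000,
    "initial_rpc_timeout_millis": 20000,
    "rpc_timeout_multiplier": 1.0,
    "max_rpc_timeout_millis": 20000,
    "total_timeout_millis": 600000,
}}
retry_names = {"DEADLINE_EXCEEDED": 4, "UNAVAILABLE": 14}  # name -> status code
# options = _construct_retry(method_config, retry_codes, retry_params, retry_names)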
28,737
def valid_file(cls, filename):
    file_ex = os.path.splitext(filename)[1].replace('.', '', 1)
    return file_ex in SUPPORTED_FILES and is_excel_file(filename)
Check if the provided file is a valid file for this plugin. :arg filename: the path to the file to check.
28,738
def call(self):
    is_mutable = False
    if self.token.nature == Nature.MUT:
        is_mutable = True
        self._process(Nature.MUT)
    identifier = Identifier(name=self.token.value)
    self._process(Nature.ID)
    if self.token.nature == Nature.LPAREN:
        return FunctionCall(identifier=identifier, parameters=self.parameters())
    else:
        return Variable(identifier=identifier, is_mutable=is_mutable)
call: ['mut'] ID ['(' parameters ')']
28,739
def prepare_framework(estimator, s3_operations): if estimator.code_location is not None: bucket, key = fw_utils.parse_s3_url(estimator.code_location) key = os.path.join(key, estimator._current_job_name, , ) else: bucket = estimator.sagemaker_session._default_bucket key = os.path.join(estimator._current_job_name, , ) script = os.path.basename(estimator.entry_point) if estimator.source_dir and estimator.source_dir.lower().startswith(): code_dir = estimator.source_dir estimator.uploaded_code = fw_utils.UploadedCode(s3_prefix=code_dir, script_name=script) else: code_dir = .format(bucket, key) estimator.uploaded_code = fw_utils.UploadedCode(s3_prefix=code_dir, script_name=script) s3_operations[] = [{ : estimator.source_dir or script, : bucket, : key, : True }] estimator._hyperparameters[sagemaker.model.DIR_PARAM_NAME] = code_dir estimator._hyperparameters[sagemaker.model.SCRIPT_PARAM_NAME] = script estimator._hyperparameters[sagemaker.model.CLOUDWATCH_METRICS_PARAM_NAME] = \ estimator.enable_cloudwatch_metrics estimator._hyperparameters[sagemaker.model.CONTAINER_LOG_LEVEL_PARAM_NAME] = estimator.container_log_level estimator._hyperparameters[sagemaker.model.JOB_NAME_PARAM_NAME] = estimator._current_job_name estimator._hyperparameters[sagemaker.model.SAGEMAKER_REGION_PARAM_NAME] = \ estimator.sagemaker_session.boto_region_name
Prepare S3 operations (specify where to upload `source_dir`) and environment variables related to framework. Args: estimator (sagemaker.estimator.Estimator): The framework estimator to get information from and update. s3_operations (dict): The dict to specify s3 operations (upload `source_dir`).
28,740
def translated(structure, values, lang_spec): indentation = endline = object_code = "" stack = [] push = lambda x: stack.append(x) pop = lambda : stack.pop() last = lambda : stack[-1] if len(stack) > 0 else def indented_code(s, level, end): return lang_spec[INDENTATION]*level + s + end level = 0 CONDITIONS = [LEXEM_TYPE_PREDICAT, LEXEM_TYPE_CONDITION] ACTION = LEXEM_TYPE_ACTION DOWNLEVEL = LEXEM_TYPE_DOWNLEVEL for lexem_type in structure: if lexem_type is ACTION: if last() in CONDITIONS: value, values = values[0:len(stack)], values[len(stack):] object_code += (indented_code(lang_spec[BEG_CONDITION] + lang_spec[LOGICAL_AND].join(value) + lang_spec[END_CONDITION], level, lang_spec[END_LINE] )) if len(lang_spec[BEG_BLOCK]) > 0: object_code += indented_code( lang_spec[BEG_BLOCK], level, lang_spec[END_LINE] ) stack = [] level += 1 object_code += indented_code( lang_spec[BEG_ACTION] + values[0], level, lang_spec[END_ACTION]+lang_spec[END_LINE] ) values = values[1:] elif lexem_type in CONDITIONS: push(lexem_type) elif lexem_type is DOWNLEVEL: if last() not in CONDITIONS: level -= 1 if level >= 0: object_code += indented_code( lang_spec[END_BLOCK], level, lang_spec[END_LINE] ) else: level = 0 while level > 0: level -= 1 if level >= 0: object_code += indented_code( lang_spec[END_BLOCK], level, lang_spec[END_LINE] ) else: level = 0 return object_code
Return code associated to given structure and values, translate with given language specification.
28,741
def simulate_moment_steps( self, circuit: circuits.Circuit, param_resolver: = None, qubit_order: ops.QubitOrderOrList = ops.QubitOrder.DEFAULT, initial_state: Any = None ) -> Iterator: return self._simulator_iterator( circuit, study.ParamResolver(param_resolver), qubit_order, initial_state)
Returns an iterator of StepResults for each moment simulated. If the circuit being simulated is empty, a single step result should be returned with the state being set to the initial state. Args: circuit: The Circuit to simulate. param_resolver: A ParamResolver for determining values of Symbols. qubit_order: Determines the canonical ordering of the qubits. This is often used in specifying the initial state, i.e. the ordering of the computational basis states. initial_state: The initial state for the simulation. The form of this state depends on the simulation implementation. See documentation of the implementing class for details. Returns: Iterator that steps through the simulation, simulating each moment and returning a StepResult for each moment.
28,742
def _getMethodsVoc(self):
    methods = api.search({
        "portal_type": "Method",
        "is_active": True
    }, "bika_setup_catalog")
    items = map(lambda m: (api.get_uid(m), api.get_title(m)), methods)
    items.sort(lambda x, y: cmp(x[1], y[1]))
    items.insert(0, ("", _("Not specified")))
    return DisplayList(list(items))
Return the registered methods as DisplayList
28,743
def create(self, locator):
    for registration in self._registrations:
        this_locator = registration.locator
        if this_locator == locator:
            try:
                return registration.factory(locator)
            except Exception as ex:
                if isinstance(ex, CreateException):
                    raise ex
                raise CreateException(
                    None,
                    "Failed to create object for " + str(locator)
                ).with_cause(ex)
Creates a component identified by given locator. :param locator: a locator to identify component to be created. :return: the created component.
28,744
def transform(self, Z):
    mapper = self.broadcast(super(SparkDictVectorizer, self).transform, Z.context)
    dtype = sp.spmatrix if self.sparse else np.ndarray
    return Z.transform(mapper, column='X', dtype=dtype)
Transform ArrayRDD's (or DictRDD's 'X' column's) feature->value dicts to array or sparse matrix. Named features not encountered during fit or fit_transform will be silently ignored. Parameters ---------- Z : ArrayRDD or DictRDD with column 'X' containing Mapping or iterable over Mappings, length = n_samples Dict(s) or Mapping(s) from feature names (arbitrary Python objects) to feature values (strings or convertible to dtype). Returns ------- Z : transformed, containing {array, sparse matrix} Feature vectors; always 2-d.
28,745
def custom_pygments_guess_lexer_for_filename(_fn, _text, **options): fn = basename(_fn) primary = {} matching_lexers = set() for lexer in _iter_lexerclasses(): for filename in lexer.filenames: if _fn_matches(fn, filename): matching_lexers.add(lexer) primary[lexer] = True for filename in lexer.alias_filenames: if _fn_matches(fn, filename): matching_lexers.add(lexer) primary[lexer] = False if not matching_lexers: raise ClassNotFound( % fn) if len(matching_lexers) == 1: return matching_lexers.pop()(**options) result = [] for lexer in matching_lexers: rv = lexer.analyse_text(_text) if rv == 1.0: return lexer(**options) result.append(customize_lexer_priority(_fn, rv, lexer)) matlab = list(filter(lambda x: x[2].name.lower() == , result)) if len(matlab) > 0: objc = list(filter(lambda x: x[2].name.lower() == , result)) if objc and objc[0][0] == matlab[0][0]: raise SkipHeartbeat() def type_sort(t): return (t[0], primary[t[2]], t[1], t[2].__name__) result.sort(key=type_sort) return result[-1][2](**options)
Overwrite pygments.lexers.guess_lexer_for_filename to customize the priority of different lexers based on popularity of languages.
28,746
def load_training_rasters(response_raster, explanatory_rasters, selected=None): with rasterio.open(response_raster) as src: response_data = src.read().flatten() if selected is None: train_y = response_data else: train_y = response_data[selected] selected_data = [] for rast in explanatory_rasters: with rasterio.open(rast) as src: explanatory_data = src.read().flatten() assert explanatory_data.size == response_data.size if selected is None: selected_data.append(explanatory_data) else: selected_data.append(explanatory_data[selected]) train_xs = np.asarray(selected_data).T return train_xs, train_y
Parameters ---------- response_raster : Path to GDAL raster containing responses explanatory_rasters : List of Paths to GDAL rasters containing explanatory variables Returns ------- train_xs : Array of explanatory variables train_ys : 1xN array of known responses
28,747
def websocket( self, uri, host=None, strict_slashes=None, subprotocols=None, name=None ): self.enable_websocket() if not uri.startswith("/"): uri = "/" + uri if strict_slashes is None: strict_slashes = self.strict_slashes def response(handler): async def websocket_handler(request, *args, **kwargs): request.app = self if not getattr(handler, "__blueprintname__", False): request.endpoint = handler.__name__ else: request.endpoint = ( getattr(handler, "__blueprintname__", "") + handler.__name__ ) try: protocol = request.transport.get_protocol() except AttributeError: protocol = request.transport._protocol ws = await protocol.websocket_handshake(request, subprotocols) fut = ensure_future(handler(request, ws, *args, **kwargs)) self.websocket_tasks.add(fut) try: await fut except (CancelledError, ConnectionClosed): pass finally: self.websocket_tasks.remove(fut) await ws.close() self.router.add( uri=uri, handler=websocket_handler, methods=frozenset({"GET"}), host=host, strict_slashes=strict_slashes, name=name, ) return handler return response
Decorate a function to be registered as a websocket route :param uri: path of the URL :param subprotocols: optional list of str with supported subprotocols :param host: :return: decorated function
28,748
def _syspath_modname_to_modpath(modname, sys_path=None, exclude=None): def _isvalid(modpath, base): subdir = dirname(modpath) while subdir and subdir != base: if not exists(join(subdir, )): return False subdir = dirname(subdir) return True _fname_we = modname.replace(, os.path.sep) candidate_fnames = [ _fname_we + , ] candidate_fnames += [_fname_we + ext for ext in _platform_pylib_exts()] if sys_path is None: sys_path = sys.path candidate_dpaths = [ if p == else p for p in sys_path] if exclude: def normalize(p): if sys.platform.startswith(): return realpath(p).lower() else: return realpath(p) real_exclude = {normalize(p) for p in exclude} candidate_dpaths = [p for p in candidate_dpaths if normalize(p) not in real_exclude] for dpath in candidate_dpaths: modpath = join(dpath, _fname_we) if exists(modpath): if isfile(join(modpath, )): if _isvalid(modpath, dpath): return modpath for fname in candidate_fnames: modpath = join(dpath, fname) if isfile(modpath): if _isvalid(modpath, dpath): return modpath
syspath version of modname_to_modpath Args: modname (str): name of module to find sys_path (List[PathLike], default=None): if specified overrides `sys.path` exclude (List[PathLike], default=None): list of directory paths. if specified prevents these directories from being searched. Notes: This is much slower than the pkgutil mechanisms. CommandLine: python -m xdoctest.static_analysis _syspath_modname_to_modpath Example: >>> print(_syspath_modname_to_modpath('xdoctest.static_analysis')) ...static_analysis.py >>> print(_syspath_modname_to_modpath('xdoctest')) ...xdoctest >>> print(_syspath_modname_to_modpath('_ctypes')) ..._ctypes... >>> assert _syspath_modname_to_modpath('xdoctest', sys_path=[]) is None >>> assert _syspath_modname_to_modpath('xdoctest.static_analysis', sys_path=[]) is None >>> assert _syspath_modname_to_modpath('_ctypes', sys_path=[]) is None >>> assert _syspath_modname_to_modpath('this', sys_path=[]) is None Example: >>> # test what happens when the module is not visible in the path >>> modname = 'xdoctest.static_analysis' >>> modpath = _syspath_modname_to_modpath(modname) >>> exclude = [split_modpath(modpath)[0]] >>> found = _syspath_modname_to_modpath(modname, exclude=exclude) >>> # this only works if installed in dev mode, pypi fails >>> assert found is None, 'should not have found {}'.format(found)
28,749
def guess_segments_lines(segments, lines, nearline_tolerance=5.0):
    ys = segments[:, 1]
    closeness = numpy.abs(numpy.subtract.outer(ys, lines))
    line_of_y = numpy.argmin(closeness, axis=1)
    distance = numpy.min(closeness, axis=1)
    bad = distance > numpy.mean(distance) + nearline_tolerance * numpy.std(distance)
    line_of_y[bad] = -1
    return line_of_y
given segments, outputs an array of line numbers, or -1 if the segment doesn't belong to any line
28,750
def EncloseAnsiText(text):
    return sgr_re.sub(lambda x: ANSI_START + x.group(1) + ANSI_END, text)
Enclose ANSI/SGR escape sequences with ANSI_START and ANSI_END.
28,751
def add_remote(self, path, name, remote_url, use_sudo=False, user=None, fetch=True):
    if path is None:
        raise ValueError("Path to the working copy is needed to add a remote")
    if fetch:
        cmd = 'git remote add -f %s %s' % (name, remote_url)
    else:
        cmd = 'git remote add %s %s' % (name, remote_url)
    with cd(path):
        if use_sudo and user is None:
            run_as_root(cmd)
        elif use_sudo:
            sudo(cmd, user=user)
        else:
            run(cmd)
Add a remote Git repository into a directory. :param path: Path of the working copy directory. This directory must exist and be a Git working copy with a default remote to fetch from. :type path: str :param use_sudo: If ``True`` execute ``git`` with :func:`fabric.operations.sudo`, else with :func:`fabric.operations.run`. :type use_sudo: bool :param user: If ``use_sudo is True``, run :func:`fabric.operations.sudo` with the given user. If ``use_sudo is False`` this parameter has no effect. :type user: str :param name: name for the remote repository :type name: str :param remote_url: URL of the remote repository :type remote_url: str :param fetch: If ``True`` execute ``git remote add -f`` :type fetch: bool
28,752
def monitor_running_process(context: RunContext):
    while True:
        capture_output_from_running_process(context)
        if context.process_finished():
            context.return_code = context.command.returncode
            break
        if context.process_timed_out():
            context.return_code = -1
            raise ProcessTimeoutError(
                exe_name=context.exe_short_name,
                timeout=context.timeout,
            )
Runs an infinite loop that waits for the process to either exit on its own or time out. Captures all output from the running process :param context: run context :type context: RunContext
28,753
def assert_called(_mock_self):
    self = _mock_self
    if self.call_count == 0:
        msg = ("Expected '%s' to have been called." %
               (self._mock_name or 'mock'))
        raise AssertionError(msg)
assert that the mock was called at least once
28,754
def _index(self):
    if self.__index is None:
        try:
            with open(self._get_path()) as f:
                data = json.load(f)
        except (IOError, ValueError):
            self.__index = {}
        else:
            if data.get('version', 0) != self.version:
                self.clear_cache()
                self.__index = {}
            else:
                self.__index = data['index']
    return self.__index
Keeps a list of file paths that have been pickled in this directory. The index is stored in a json file in the same directory as the pickled objects.
28,755
def bandwidth(self, subid, params=None):
    params = update_params(params, {'SUBID': subid})
    return self.request('/v1/server/bandwidth', params, 'GET')
/v1/server/bandwidth GET - account Get the bandwidth used by a virtual machine Link: https://www.vultr.com/api/#server_bandwidth
28,756
def list_metrics(self, project, page_size=None, page_token=None): extra_params = {} if page_size is not None: extra_params["pageSize"] = page_size path = "/projects/%s/metrics" % (project,) return page_iterator.HTTPIterator( client=self._client, api_request=self._client._connection.api_request, path=path, item_to_value=_item_to_metric, items_key="metrics", page_token=page_token, extra_params=extra_params, )
List metrics for the project associated with this client. See https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/list :type project: str :param project: ID of the project whose metrics are to be listed. :type page_size: int :param page_size: maximum number of metrics to return, If not passed, defaults to a value set by the API. :type page_token: str :param page_token: opaque marker for the next "page" of metrics. If not passed, the API will return the first page of metrics. :rtype: :class:`~google.api_core.page_iterator.Iterator` :returns: Iterator of :class:`~google.cloud.logging.metric.Metric` accessible to the current API.
28,757
def handle_heartbeat_response_22(msg):
    if not msg.gateway.is_sensor(msg.node_id):
        return None
    msg.gateway.sensors[msg.node_id].heartbeat = msg.payload
    msg.gateway.alert(msg)
    return None
Process an internal heartbeat response message.
28,758
def persistent_menu(menu):
    if len(menu) > 3:
        raise Invalid()
    if any(len(item['call_to_actions']) > 5
           for item in menu if item['type'] == 'nested'):
        raise Invalid()
    for item in menu:
        if len(item['title']) > 30:
            raise Invalid()
        if item['type'] == 'postback' and len(item['payload']) > 1000:
            raise Invalid()
more: https://developers.facebook.com/docs/messenger-platform/thread-settings/persistent-menu :param menu: :return:
28,759
def calc_measurement_error(self, tangents):
    if len(tangents) < 2:
        return 0.0
    avg_tan = float(sum(tangents) / len(tangents))
    numerator = 0.0
    for i in tangents:
        numerator += (i - avg_tan) * (i - avg_tan)
    return math.sqrt(numerator / len(tangents) / (len(tangents) - 1))
Formula for the measurement error: sqrt( sum_{i=1..n} (k_i - <k>)**2 / (n*(n-1)) )
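A small self-contained check of the formula (the standard error of the mean of the slopes; the numbers are arbitrary):
import math

tangents = [1.0, 1.2, 0.9, 1.1]
n = len(tangents)
avg = sum(tangents) / n
numerator = sum((k - avg) ** 2 for k in tangents)
error = math.sqrt(numerator / (n * (n - 1)))
print(error)  # ~0.065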
28,760
def MultiReadClientSnapshot(self, client_ids, cursor=None): int_ids = [db_utils.ClientIDToInt(cid) for cid in client_ids] query = ( "SELECT h.client_id, h.client_snapshot, UNIX_TIMESTAMP(h.timestamp)," " s.startup_info " "FROM clients as c FORCE INDEX (PRIMARY), " "client_snapshot_history as h FORCE INDEX (PRIMARY), " "client_startup_history as s FORCE INDEX (PRIMARY) " "WHERE h.client_id = c.client_id " "AND s.client_id = c.client_id " "AND h.timestamp = c.last_snapshot_timestamp " "AND s.timestamp = c.last_startup_timestamp " "AND c.client_id IN ({})").format(", ".join(["%s"] * len(client_ids))) ret = {cid: None for cid in client_ids} cursor.execute(query, int_ids) while True: row = cursor.fetchone() if not row: break cid, snapshot, timestamp, startup_info = row client_obj = mysql_utils.StringToRDFProto(rdf_objects.ClientSnapshot, snapshot) client_obj.startup_info = mysql_utils.StringToRDFProto( rdf_client.StartupInfo, startup_info) client_obj.timestamp = mysql_utils.TimestampToRDFDatetime(timestamp) ret[db_utils.IntToClientID(cid)] = client_obj return ret
Reads the latest client snapshots for a list of clients.
28,761
def main(args):
    args = [unicode(u, 'utf-8') for u in args[1:]]
    schema = __sample_schema() if "--schema" in args else None
    if schema:
        args.pop(args.index("--schema"))
    query = u' '.join(args)
    print "Lucene input:", query
    parser = __sample_parser(schema=schema)
    parsed = parser.parse(query)
    print "Parsed representation:", repr(parsed)
    print "Lucene form:", unicode(parsed)
    cloudsearch_query = ''.join(walk_clause(parsed))
    print "Cloudsearch form:", cloudsearch_query
For command line experimentation. Sample output: $ python l2cs.py 'foo:bar AND baz:bork' Lucene input: foo:bar AND baz:bork Parsed representation: And([Term(u'foo', u'bar'), Term(u'baz', u'bork')]) Lucene form: (foo:bar AND baz:bork) Cloudsearch form: (and (field foo 'bar') (field baz 'bork'))
28,762
def select_source(self, source): status = self.status() if status[]: if status[] != source: if source in self.SOURCES: self._send(self.CMD_SOURCE + self.SOURCES[source], read_reply=True)
Select a source from the list of sources.
28,763
def run_eidos(endpoint, *args): call_class = % (eidos_package, endpoint) cmd = [, , , eip, call_class] + list(args) logger.info( % (.join(cmd))) subprocess.call(cmd)
Run a given endpoint of Eidos through the command line. Parameters ---------- endpoint : str The class within the Eidos package to run, for instance 'apps.ExtractFromDirectory' will run 'org.clulab.wm.eidos.apps.ExtractFromDirectory'. *args Any further arguments to be passed as inputs to the class being run.
28,764
def cum_returns(returns, starting_value=0, out=None): if len(returns) < 1: return returns.copy() nanmask = np.isnan(returns) if np.any(nanmask): returns = returns.copy() returns[nanmask] = 0 allocated_output = out is None if allocated_output: out = np.empty_like(returns) np.add(returns, 1, out=out) out.cumprod(axis=0, out=out) if starting_value == 0: np.subtract(out, 1, out=out) else: np.multiply(out, starting_value, out=out) if allocated_output: if returns.ndim == 1 and isinstance(returns, pd.Series): out = pd.Series(out, index=returns.index) elif isinstance(returns, pd.DataFrame): out = pd.DataFrame( out, index=returns.index, columns=returns.columns, ) return out
Compute cumulative returns from simple returns. Parameters ---------- returns : pd.Series, np.ndarray, or pd.DataFrame Returns of the strategy as a percentage, noncumulative. - Time series with decimal returns. - Example:: 2015-07-16 -0.012143 2015-07-17 0.045350 2015-07-20 0.030957 2015-07-21 0.004902 - Also accepts two dimensional data. In this case, each column is cumulated. starting_value : float, optional The starting returns. out : array-like, optional Array to use as output buffer. If not passed, a new array will be created. Returns ------- cumulative_returns : array-like Series of cumulative returns.
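A hedged, self-contained illustration of the cumulative-return arithmetic the function implements (plain numpy, no library assumed):
import numpy as np

returns = np.array([-0.012143, 0.045350, 0.030957, 0.004902])
cumulative = np.cumprod(1 + returns) - 1      # starting_value == 0 branch
print(cumulative[-1])                          # total return over the period
print(np.cumprod(1 + returns) * 1000)          # same series scaled to a 1000 start value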
28,765
def copy(source, dest, name=None, shallow=False, without_attrs=False, log=None, if_exists=, dry_run=False, **create_kws): _check_dest_is_group(dest) with _LogWriter(log) as log: n_copied, n_skipped, n_bytes_copied = _copy( log, source, dest, name=name, root=True, shallow=shallow, without_attrs=without_attrs, if_exists=if_exists, dry_run=dry_run, **create_kws ) _log_copy_summary(log, dry_run, n_copied, n_skipped, n_bytes_copied) return n_copied, n_skipped, n_bytes_copied
Copy the `source` array or group into the `dest` group. Parameters ---------- source : group or array/dataset A zarr group or array, or an h5py group or dataset. dest : group A zarr or h5py group. name : str, optional Name to copy the object to. shallow : bool, optional If True, only copy immediate children of `source`. without_attrs : bool, optional Do not copy user attributes. log : callable, file path or file-like object, optional If provided, will be used to log progress information. if_exists : {'raise', 'replace', 'skip', 'skip_initialized'}, optional How to handle arrays that already exist in the destination group. If 'raise' then a CopyError is raised on the first array already present in the destination group. If 'replace' then any array will be replaced in the destination. If 'skip' then any existing arrays will not be copied. If 'skip_initialized' then any existing arrays with all chunks initialized will not be copied (not available when copying to h5py). dry_run : bool, optional If True, don't actually copy anything, just log what would have happened. **create_kws Passed through to the create_dataset method when copying an array/dataset. Returns ------- n_copied : int Number of items copied. n_skipped : int Number of items skipped. n_bytes_copied : int Number of bytes of data that were actually copied. Examples -------- Here's an example of copying a group named 'foo' from an HDF5 file to a Zarr group:: >>> import h5py >>> import zarr >>> import numpy as np >>> source = h5py.File('data/example.h5', mode='w') >>> foo = source.create_group('foo') >>> baz = foo.create_dataset('bar/baz', data=np.arange(100), chunks=(50,)) >>> spam = source.create_dataset('spam', data=np.arange(100, 200), chunks=(30,)) >>> zarr.tree(source) / β”œβ”€β”€ foo β”‚ └── bar β”‚ └── baz (100,) int64 └── spam (100,) int64 >>> dest = zarr.group() >>> from sys import stdout >>> zarr.copy(source['foo'], dest, log=stdout) copy /foo copy /foo/bar copy /foo/bar/baz (100,) int64 all done: 3 copied, 0 skipped, 800 bytes copied (3, 0, 800) >>> dest.tree() # N.B., no spam / └── foo └── bar └── baz (100,) int64 >>> source.close() The ``if_exists`` parameter provides options for how to handle pre-existing data in the destination. Here are some examples of these options, also using ``dry_run=True`` to find out what would happen without actually copying anything:: >>> source = zarr.group() >>> dest = zarr.group() >>> baz = source.create_dataset('foo/bar/baz', data=np.arange(100)) >>> spam = source.create_dataset('foo/spam', data=np.arange(1000)) >>> existing_spam = dest.create_dataset('foo/spam', data=np.arange(1000)) >>> from sys import stdout >>> try: ... zarr.copy(source['foo'], dest, log=stdout, dry_run=True) ... except zarr.CopyError as e: ... print(e) ... copy /foo copy /foo/bar copy /foo/bar/baz (100,) int64 an object 'spam' already exists in destination '/foo' >>> zarr.copy(source['foo'], dest, log=stdout, if_exists='replace', dry_run=True) copy /foo copy /foo/bar copy /foo/bar/baz (100,) int64 copy /foo/spam (1000,) int64 dry run: 4 copied, 0 skipped (4, 0, 0) >>> zarr.copy(source['foo'], dest, log=stdout, if_exists='skip', dry_run=True) copy /foo copy /foo/bar copy /foo/bar/baz (100,) int64 skip /foo/spam (1000,) int64 dry run: 3 copied, 1 skipped (3, 1, 0) Notes ----- Please note that this is an experimental feature. The behaviour of this function is still evolving and the default behaviour and/or parameters may change in future versions.
28,766
def draw_spectra(md, ds): coeffs_all, covs, scatters, red_chisqs, pivots, label_vector = model.model nstars = len(dataset.test_SNR) cannon_flux = np.zeros(dataset.test_flux.shape) cannon_ivar = np.zeros(dataset.test_ivar.shape) for i in range(nstars): x = label_vector[:,i,:] spec_fit = np.einsum(, x, coeffs_all) cannon_flux[i,:] = spec_fit bad = dataset.test_ivar[i,:] == SMALL**2 cannon_ivar[i,:][~bad] = 1. / scatters[~bad] ** 2 return cannon_flux, cannon_ivar
Generate best-fit spectra for all the test objects Parameters ---------- md: model The Cannon spectral model ds: Dataset Dataset object Returns ------- best_fluxes: ndarray The best-fit test fluxes best_ivars: The best-fit test inverse variances
28,767
def all_with(self, x):
    _args = []
    for arg in self.all:
        if is_collection(x):
            for _x in x:
                if _x in arg:
                    _args.append(arg)
                    break
        else:
            if x in arg:
                _args.append(arg)
    return Args(_args, no_argv=True)
Returns all arguments containing given string (or list thereof)
28,768
def get_default_config(self): config = super(ArchiveHandler, self).get_default_config() config.update({ : , : , : 7, : 1, : None, : False, }) return config
Return the default config for the handler
28,769
def remove_direct_link_triples(train, valid, test):
    pairs = set()
    merged = valid + test
    for t in merged:
        pairs.add((t.head, t.tail))
    filtered = filterfalse(lambda t: (t.head, t.tail) in pairs or (t.tail, t.head) in pairs, train)
    return list(filtered)
Remove direct links in the training sets.
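A self-contained toy run of the same filtering idea (the Triple namedtuple and the tiny splits are invented):
from collections import namedtuple
from itertools import filterfalse

Triple = namedtuple('Triple', 'head relation tail')
train = [Triple('a', 'r1', 'b'), Triple('b', 'r2', 'c')]
valid = [Triple('a', 'r3', 'b')]
test = []
pairs = {(t.head, t.tail) for t in valid + test}
filtered = filterfalse(lambda t: (t.head, t.tail) in pairs or (t.tail, t.head) in pairs, train)
print(list(filtered))  # only ('b', 'r2', 'c') survives; ('a', 'r1', 'b') leaks into valid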
28,770
def load_dataframe(fobj, compression=): try: from pandas import read_fwf except ImportError: raise ImportError(PANDAS_MESSAGE) names, colspecs = zip( (, (2, 14)), (, (41, 46)), (, (51, 63)), (, (64, 76)), (, (79, 86)), (, (87, 95)), (, (96, 104)), ) df = read_fwf(fobj, colspecs, names=names, compression=compression) df = df.assign( ra_hours = df[] / 15.0, epoch_year = 1991.25, ) return df.set_index()
Given an open file for `hip_main.dat.gz`, return a parsed dataframe. If your copy of ``hip_main.dat`` has already been unzipped, pass the optional argument ``compression=None``.
28,771
def display_multi(annotations, fig_kw=None, meta=True, **kwargs): if fig_kw is None: fig_kw = dict() fig_kw.setdefault(, True) fig_kw.setdefault(, True) display_annotations = [] for ann in annotations: for namespace in VIZ_MAPPING: if can_convert(ann, namespace): display_annotations.append(ann) break if not len(display_annotations): raise ParameterError() fig, axs = plt.subplots(nrows=len(display_annotations), ncols=1, **fig_kw) if len(display_annotations) == 1: axs = [axs] for ann, ax in zip(display_annotations, axs): kwargs[] = ax display(ann, meta=meta, **kwargs) return fig, axs
Display multiple annotations with shared axes Parameters ---------- annotations : jams.AnnotationArray A collection of annotations to display fig_kw : dict Keyword arguments to `plt.figure` meta : bool If `True`, display annotation metadata for each annotation kwargs Additional keyword arguments to the `mir_eval.display` routines Returns ------- fig The created figure axs List of subplot axes corresponding to each displayed annotation
28,772
def get_instance(self, payload):
    return ApplicationInstance(self._version, payload, account_sid=self._solution['account_sid'], )
Build an instance of ApplicationInstance :param dict payload: Payload response from the API :returns: twilio.rest.api.v2010.account.application.ApplicationInstance :rtype: twilio.rest.api.v2010.account.application.ApplicationInstance
28,773
def remove_pid_file(process_name): pid_filename = get_pid_filename(process_name) try: os.remove(pid_filename) print(.format(pid_filename), file=sys.stdout) except Exception as e: print(.format(pid_filename, e), file=sys.stderr)
removes pid file
28,774
def from_path(cls, path): urlparts = urlparse.urlsplit(path) site = if (urlparts.scheme == or urlparts.scheme == ): if os.path.isfile(urlparts.path): path = os.path.abspath(urlparts.path) path = urlparse.urljoin(, urllib.pathname2url(path)) site = fil = File(os.path.basename(path)) fil.PFN(path, site) return fil
Takes a path and returns a File object with the path as the PFN.
28,775
def build_command(chunks): if not chunks: raise ValueError( "No command parts: {} ({})".format(chunks, type(chunks))) if isinstance(chunks, str): return chunks parsed_pieces = [] for cmd_part in chunks: if cmd_part is None: continue try: parsed_pieces.append(cmd_part.strip(" ")) except AttributeError: option, argument = cmd_part if argument is None or argument == "": continue option, argument = option.strip(" "), str(argument).strip(" ") parsed_pieces.append("{} {}".format(option, argument)) return " ".join(parsed_pieces)
Create a command from various parts. The parts provided may include a base, flags, option-bound arguments, and positional arguments. Each element must be either a string or a two-tuple. Raw strings are interpreted as either the command base, a pre-joined pair (or multiple pairs) of option and argument, a series of positional arguments, or a combination of those elements. The only modification they undergo is trimming of any space characters from each end. :param Iterable[str | (str, str | NoneType)] chunks: the collection of the command components to interpret, modify, and join to create a single meaningful command :return str: the single meaningful command built from the given components :raise ValueError: if no command parts are provided
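A quick usage sketch (assuming the build_command above is importable; the command pieces are illustrative):
chunks = [
    "bowtie2",                     # command base
    ("-p", 4),                     # option with argument
    ("--un", None),                # dropped: argument is None
    "-x genome_index reads.fq",    # pre-joined pieces pass through
]
# build_command(chunks) -> "bowtie2 -p 4 -x genome_index reads.fq"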
28,776
def from_linearized(first, second, intersections): s, t, success = segment_intersection( first.start_node, first.end_node, second.start_node, second.end_node ) bad_parameters = False if success: if not ( _helpers.in_interval(s, 0.0, 1.0) and _helpers.in_interval(t, 0.0, 1.0) ): bad_parameters = True else: if first.error == 0.0 and second.error == 0.0: raise ValueError(_UNHANDLED_LINES) bad_parameters = True s = 0.5 t = 0.5 if bad_parameters: if not convex_hull_collide(first.curve.nodes, second.curve.nodes): return orig_s = (1 - s) * first.curve.start + s * first.curve.end orig_t = (1 - t) * second.curve.start + t * second.curve.end refined_s, refined_t = _intersection_helpers.full_newton( orig_s, first.curve.original_nodes, orig_t, second.curve.original_nodes ) refined_s, success = _helpers.wiggle_interval(refined_s) if not success: return refined_t, success = _helpers.wiggle_interval(refined_t) if not success: return add_intersection(refined_s, refined_t, intersections)
Determine curve-curve intersection from pair of linearizations. .. note:: This assumes that at least one of ``first`` and ``second`` is not a line. The line-line case should be handled "early" by :func:`check_lines`. .. note:: This assumes the caller has verified that the bounding boxes for ``first`` and ``second`` actually intersect. If there is an intersection along the segments, adds that intersection to ``intersections``. Otherwise, returns without doing anything. Args: first (Linearization): First curve being intersected. second (Linearization): Second curve being intersected. intersections (list): A list of existing intersections. Raises: ValueError: If ``first`` and ``second`` both have linearization error of ``0.0`` (i.e. they are both lines). This is because this function expects the caller to have used :func:`check_lines` already.
28,777
def get_indirect_url_lock_list(self, url, principal=None): url = normalize_lock_root(url) lockList = [] u = url while u: ll = self.storage.get_lock_list( u, include_root=True, include_children=False, token_only=False ) for l in ll: if u != url and l["depth"] != "infinity": continue if principal is None or principal == l["principal"]: lockList.append(l) u = util.get_uri_parent(u) return lockList
Return a list of valid lockDicts, that protect <path> directly or indirectly. If a principal is given, only locks owned by this principal are returned. Side effect: expired locks for this path and all parents are purged.
28,778
def generate_np(self, x, **kwargs):
    x_adv = []
    if 'image_target' in kwargs and kwargs['image_target'] is not None:
        image_target = np.copy(kwargs['image_target'])
    else:
        image_target = None
    if 'y_target' in kwargs and kwargs['y_target'] is not None:
        y_target = np.copy(kwargs['y_target'])
    else:
        y_target = None
    for i, x_single in enumerate(x):
        img = np.expand_dims(x_single, axis=0)
        if image_target is not None:
            single_img_target = np.expand_dims(image_target[i], axis=0)
            kwargs['image_target'] = single_img_target
        if y_target is not None:
            single_y_target = np.expand_dims(y_target[i], axis=0)
            kwargs['y_target'] = single_y_target
        adv_img = super(BoundaryAttackPlusPlus, self).generate_np(img, **kwargs)
        x_adv.append(adv_img)
    return np.concatenate(x_adv, axis=0)
Generate adversarial images in a for loop. :param y: An array of shape (n, nb_classes) for true labels. :param y_target: An array of shape (n, nb_classes) for target labels. Required for targeted attack. :param image_target: An array of shape (n, **image shape) for initial target images. Required for targeted attack. See parse_params for other kwargs.
28,779
def dec_ptr(self, ptr):
    result = ptr - self.reading_len[self.ws_type]
    if result < self.data_start:
        result = 0x10000 - self.reading_len[self.ws_type]
    return result
Get previous circular buffer data pointer.
28,780
def bash(filename):
    sys.stdout.flush()
    subprocess.call("bash {}".format(filename), shell=True)
Runs a bash script in the local directory
28,781
def get_group(self, t, i):
    try:
        value = []
        if t in _DIGIT and t != '0':
            value.append(t)
            t = next(i)
            if t in _DIGIT:
                value.append(t)
            else:
                i.rewind(1)
    except StopIteration:
        pass
    return ''.join(value) if value else None
Get group number.
28,782
def _walk_tree(self, data, scheme, ancestors=None, property_name=None, prefix=None):
    if property_name is None:
        property_name = 'root'
    if err:
        raise err
    else:
        self._create_attr(property_name, data, ancestors)
    self.__validate_unrecognized_values(scheme, data, ancestors, prefix)
    self.__populate_scheme_references(scheme, property_name)
    self.__validate_config_properties(scheme, data, ancestors, prefix)
This function takes configuration data and a validation scheme, then walks the
configuration tree validating the configuration data against the scheme provided.
Will raise an error on failure, otherwise returns None.

Usage::

    >>> self._walk_tree(
    >>>     OrderedDict([('root', config_data)]),
    >>>     registries,
    >>>     REGISTRIES_SCHEME
    >>> )

:param ancestors: A :OrderedDict:, The first element of the dict must be 'root'.
:param data: The data that needs to be validated against the scheme.
:param scheme: A :dict:, The scheme defining the validations.
:param property_name: A :string:, This is the name of the data getting validated.
:param prefix:
:rtype: :None: will raise an error if a validation fails.
28,783
def _add_arguments(param, parser, used_char_args, add_nos):
    if param.kind is param.VAR_POSITIONAL:
        arg_spec['nargs'] = '*'
    if description is not None:
        arg_spec['help'] = description
    flags = []
    name = param.name
    if is_option:
        for letter in name[0], name[0].swapcase():
            if letter not in used_char_args:
                used_char_args.add(letter)
                flags.append('-{}'.format(letter))
                break
        if len(name) > 1 or not flags:
            flags.append('--{}'.format(name))
        arg_spec['dest'] = name
    else:
        flags.append(name)
    parser.add_argument(*flags, **arg_spec)
    if add_nos and arg_type is bool:
        parser.add_argument(
            '--no-{}'.format(name),
            action='store_const',
            dest=name,
            const=default if default is not _empty else False)
Add the argument(s) to an ArgumentParser (using add_argument) for a given parameter. used_char_args is the set of -short options currently already in use, and is updated (if necessary) by this function. If add_nos is True, this will also add an inverse switch for all boolean options. For instance, for the boolean parameter "verbose", this will create --verbose and --no-verbose.
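A plain-argparse illustration (not autocommand's internals) of the inverse boolean switch described above: --verbose sets the flag and --no-verbose clears it again via a shared dest.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='store_const', dest='verbose',
                    const=True, default=False)
parser.add_argument('--no-verbose', action='store_const', dest='verbose',
                    const=False)
print(parser.parse_args(['--verbose']))      # Namespace(verbose=True)
print(parser.parse_args(['--no-verbose']))   # Namespace(verbose=False)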
28,784
def _build_default_options(self):
    return [
        OptionDefault('model', None, inherit=True),
        OptionDefault('abstract', False, inherit=False),
        OptionDefault('strategy', enums.CREATE_STRATEGY, inherit=True),
        OptionDefault('inline_args', (), inherit=True),
        OptionDefault('exclude', (), inherit=True),
        OptionDefault('rename', {}, inherit=True),
    ]
Provide the default value for all allowed fields. Custom FactoryOptions classes should override this method to update() its return value.
28,785
def display(contents, domain=DEFAULT_DOMAIN, force_gist=False): url = make_url(contents, domain, force_gist) webbrowser.open(url) return url
Open a web browser pointing to geojson.io with the specified content. If the content is large, an anonymous gist will be created on github and the URL will instruct geojson.io to download the gist data and then display. If the content is small, this step is not needed as the data can be included in the URL Parameters ---------- content - (see make_geojson) domain - string, default http://geojson.io force_gist - bool, default False Create an anonymous gist on Github regardless of the size of the contents
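A hypothetical usage sketch, assuming this function is importable from the geojsonio module; the GeoJSON feature below is a made-up example point, and the display calls are commented out because they open a browser tab.

import json

feature = {
    "type": "Feature",
    "geometry": {"type": "Point", "coordinates": [-122.4194, 37.7749]},
    "properties": {"name": "example"},
}
contents = json.dumps(feature)
# url = display(contents)                    # small payload: data embedded in the URL
# url = display(contents, force_gist=True)   # always route through an anonymous gist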
28,786
def posterior(self, x, s=1.): pr0 = 1. / self.sigma**2 prd = x.size / s**2 varp = 1. / (pr0 + prd) mu = varp * (pr0 * self.mu + prd * x.mean()) return TruncNormal(mu=mu, sigma=np.sqrt(varp), a=self.a, b=self.b)
Model is X_1,...,X_n ~ N(theta, s^2), theta~self, s fixed
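A self-contained check of the conjugate-normal update used above (the truncation to [a, b] is applied on top of these moments): the posterior precision is the sum of prior and data precisions, and the posterior mean is precision-weighted.

import numpy as np

mu, sigma, s = 0.0, 2.0, 1.0
x = np.array([0.8, 1.2, 1.0, 0.9])
pr0 = 1.0 / sigma ** 2             # prior precision
prd = x.size / s ** 2              # data precision
varp = 1.0 / (pr0 + prd)           # posterior variance
mu_post = varp * (pr0 * mu + prd * x.mean())
print(mu_post, np.sqrt(varp))      # ~0.918, ~0.485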
28,787
def stat_file(self, id=None, path="/"): if id: return self.request(id, params={"path": path}, method="get").json() else: return self.request(params={"path": path}, method="get").json()
Stat a file in an allocation directory. https://www.nomadproject.io/docs/http/client-fs-stat.html arguments: - id - path returns: dict raises: - nomad.api.exceptions.BaseNomadException - nomad.api.exceptions.URLNotFoundNomadException
28,788
def close(self): self._close_proc() super(PipeReader, self).close() if self._tempfile: try: os.unlink(self._tempfile) except OSError: pass self._tempfile = None
Close open resources.
28,789
def from_dict(d: Dict[str, Any]) -> 'CoverageInstructions':
    name_type = d['type']
    cls = _NAME_TO_INSTRUCTIONS[name_type]
    return cls.from_dict(d)
Loads a set of coverage instructions from a given dictionary. Raises: BadCoverageInstructions: if the given coverage instructions are illegal.
28,790
def iter_list_market_profit_and_loss( self, market_ids, chunk_size, **kwargs): return itertools.chain(*( self.list_market_profit_and_loss(market_chunk, **kwargs) for market_chunk in utils.get_chunks(market_ids, chunk_size) ))
Split call to `list_market_profit_and_loss` into separate requests. :param list market_ids: List of market IDs :param int chunk_size: Number of records per chunk :param dict kwargs: Arguments passed to `list_market_profit_and_loss`
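A sketch of the chunk-and-chain pattern used above, with a minimal stand-in for the library's utils.get_chunks helper and for the per-chunk API call.

import itertools

def get_chunks(items, chunk_size):             # assumed behaviour of utils.get_chunks
    for i in range(0, len(items), chunk_size):
        yield items[i:i + chunk_size]

def fetch(chunk):                              # stand-in for list_market_profit_and_loss
    return [{'marketId': m} for m in chunk]

market_ids = ['1.1', '1.2', '1.3', '1.4', '1.5']
results = list(itertools.chain(*(fetch(c) for c in get_chunks(market_ids, 2))))
print(len(results))                            # 5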
28,791
def branch(self): branch = [self] for c in self.children: branch += c.branch() return branch
Get a flattened representation of the branch. @return: A flat list of nodes. @rtype: [L{Element},..]
28,792
def calculate_derivative_P(self, P, T, zs, ws, method, order=1):
    f = lambda P: self.calculate(T, P, zs, ws, method)
    return derivative(f, P, dx=1e-2, n=order, order=1+order*2)
Method to calculate a derivative of a mixture property with respect to
pressure at constant temperature and composition of a given order using a
specified method. Uses SciPy's derivative function, with a delta of 0.01 Pa
and a number of points equal to 2*order + 1.

This method can be overwritten by subclasses who may prefer to add
analytical methods for some or all methods as this is much faster.

If the calculation does not succeed, returns the actual error encountered.

Parameters
----------
P : float
    Pressure at which to calculate the derivative, [Pa]
T : float
    Temperature at which to calculate the derivative, [K]
zs : list[float]
    Mole fractions of all species in the mixture, [-]
ws : list[float]
    Weight fractions of all species in the mixture, [-]
method : str
    Method for which to find the derivative
order : int
    Order of the derivative, >= 1

Returns
-------
d_prop_d_P_at_T : float
    Calculated derivative property at constant temperature, [`units/Pa^order`]
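For order=1, the call above amounts to a three-point central difference with a 0.01 Pa step; a standalone sketch with a toy property function in place of the mixture-property method:

def prop(P):                        # toy stand-in for a pressure-dependent property
    return 1.0e-3 * P + 2.0e-9 * P ** 2

def d_dP(f, P, dx=1e-2):
    return (f(P + dx) - f(P - dx)) / (2.0 * dx)   # central difference

print(d_dP(prop, 101325.0))         # ~1.405e-3, i.e. 1.0e-3 + 4.0e-9 * 101325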
28,793
def create(model_config, reinforcer, optimizer, storage, total_frames, batches_per_epoch, callbacks=None, scheduler=None, openai_logging=False): from vel.openai.baselines import logger logger.configure(dir=model_config.openai_dir()) return RlTrainCommand( model_config=model_config, reinforcer=reinforcer, optimizer_factory=optimizer, scheduler_factory=scheduler, storage=storage, callbacks=callbacks, total_frames=int(float(total_frames)), batches_per_epoch=int(batches_per_epoch), openai_logging=openai_logging )
Vel factory function
28,794
def _check_custom_url_parameters(self):
    for param in self.custom_url_params.keys():
        if param is not CustomUrlParam.TRANSPARENT:
            raise ValueError('Parameter {} is not a valid custom url parameter'.format(param))
Checks if custom url parameters are valid parameters. Throws ValueError if the provided parameter is not a valid parameter.
28,795
def __neuron_evolution(self, index): value = 0.0 for index_neighbor in range(self.__num_osc): value += self.__weights[index][index_neighbor] * (1.0 - 2.0 * (self.__output[index_neighbor] ** 2)) return value / self.__weights_summary[index]
! @brief Calculates state of the neuron with specified index. @param[in] index (uint): Index of neuron in the network. @return (double) New output of the specified neuron.
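A vectorized restatement of the update with made-up numbers, assuming __weights_summary holds the per-row sum of the weights: each neuron's next value is a weighted average of 1 - 2*y_j^2 over its neighbours.

import numpy as np

weights = np.array([[0.0, 1.0, 0.5],
                    [1.0, 0.0, 1.0],
                    [0.5, 1.0, 0.0]])
output = np.array([0.2, -0.4, 0.7])
weights_sum = weights.sum(axis=1)                       # assumed meaning of __weights_summary
new_value = weights @ (1.0 - 2.0 * output ** 2) / weights_sum
print(new_value)                                        # next state for all three neurons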
28,796
def __generate_file(self, template_filename, context, generated_filename, force=False):
    if force or (not os.path.isfile(generated_filename) or os.stat(template_filename).st_mtime - os.stat(generated_filename).st_mtime > 1):
        self.log_info('Generating code from template %s' % template_filename)
        code_generated = self.__jinja2_environment.get_template(template_filename).render(context)
        with open(generated_filename, 'wb') as f:
            self.log_info('Writing generated file %s' % generated_filename)
            f.write(code_generated.encode())
Generate **one** (source code) file from a template. The file is **only** generated if needed, i.e. if ``force`` is set to ``True`` or if generated file is older than the template file. The generated file is written in the same directory as the template file. Args: template_filename (str): **Absolute** filename of a template file to translate. context (dict): Dictionary with ``(key, val)`` replacements. generated_filename (str): **Absolute** filename of the generated file filename. force (bool): If set to ``True``, file is generated no matter what.
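A minimal standalone sketch of the render step with the Jinja2 API; the template string and class name are made up. The method above instead resolves its template through self.__jinja2_environment.get_template(template_filename) and writes the rendered text in binary mode next to the template file.

from jinja2 import Environment

env = Environment()
code = env.from_string('class {{ class_name }} {};').render({'class_name': 'Solver'})
print(code)   # class Solver {};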
28,797
def unstructure_attrs_astuple(self, obj): attrs = obj.__class__.__attrs_attrs__ return tuple(self.unstructure(getattr(obj, a.name)) for a in attrs)
Our version of `attrs.astuple`, so we can call back to us.
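A small attrs example (the Point class is hypothetical) showing the same tuple conversion done by hand via __attrs_attrs__, which lists attributes in declaration order.

import attr

@attr.s
class Point(object):
    x = attr.ib()
    y = attr.ib()

p = Point(1, 2)
print(tuple(getattr(p, a.name) for a in Point.__attrs_attrs__))   # (1, 2)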
28,798
def _format_background(background): if os.path.isfile(background): with open(background, "r") as i_file: background = i_file.read().splitlines() else: background = background.splitlines() final_background = "" for line in background: if line == "": final_background += r"\\" + "\n\n" continue final_background += latex.wrap_lines(latex.sanitize_tex(line)) return final_background
Formats the background section :param background: the background content or file. :type background: str or file :returns: the background content. :rtype: str
28,799
def make_describe_attrs(self): lst = [] if self.all_groups: lst.append((NEWLINE, )) lst.append((INDENT, )) for group in self.all_groups: if group.name: lst.extend(self.tokens.make_describe_attr(group.kls_name)) return lst
Create tokens for setting is_noy_spec on describes