Dataset schema: Unnamed: 0 (int64, values 0 to 389k), code (string, lengths 26 to 79.6k), docstring (string, lengths 1 to 46.9k)
21,300
def template_statemgr_yaml(cl_args, zookeepers):
    # NOTE: the format strings in the list comprehension were lost in extraction;
    # "%s" and the default ZooKeeper port 2181 are reconstructed assumptions.
    statemgr_config_file_template = "%s/standalone/templates/statemgr.template.yaml" \
        % cl_args["config_path"]
    statemgr_config_file_actual = "%s/standalone/statemgr.yaml" % cl_args["config_path"]
    template_file(statemgr_config_file_template, statemgr_config_file_actual,
                  {"<zookeeper_host:zookeeper_port>": ",".join(
                      ["%s" % zk if ":" in zk else "%s:2181" % zk for zk in zookeepers])})
Template statemgr.yaml
21,301
def get_file_size(fileobj): currpos = fileobj.tell() fileobj.seek(0, 2) total_size = fileobj.tell() fileobj.seek(currpos) return total_size
Returns the size of a file-like object.
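A minimal check of the helper above with an in-memory file object:

import io

buf = io.BytesIO(b"hello world")
buf.seek(3)                      # pretend a consumer is partway through the stream
assert get_file_size(buf) == 11  # total size in bytes
assert buf.tell() == 3           # the read position is restored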
21,302
from contextlib import contextmanager
from multiprocessing import Pool

@contextmanager  # restores the decorator implied by the docstring; without it `yield` gives a plain generator
def pool_context(*args, **kwargs):
    pool = Pool(*args, **kwargs)
    try:
        yield pool
    finally:
        pool.terminate()
Context manager for multiprocessing.Pool class (for compatibility with Python 2.7.x)
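A short usage sketch, assuming the decorator fix above and a picklable worker function:

def square(x):
    return x * x

if __name__ == "__main__":
    with pool_context(processes=4) as pool:
        print(pool.map(square, range(10)))  # [0, 1, 4, 9, 16, 25, 36, 49, 64, 81]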
21,303
def dist_hamming(src, tar, diff_lens=True): return Hamming().dist(src, tar, diff_lens)
Return the normalized Hamming distance between two strings. This is a wrapper for :py:meth:`Hamming.dist`. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison diff_lens : bool If True (default), this returns the Hamming distance for those characters that have a matching character in both strings plus the difference in the strings' lengths. This is equivalent to extending the shorter string with obligatorily non-matching characters. If False, an exception is raised in the case of strings of unequal lengths. Returns ------- float The normalized Hamming distance Examples -------- >>> round(dist_hamming('cat', 'hat'), 12) 0.333333333333 >>> dist_hamming('Niall', 'Neil') 0.6 >>> dist_hamming('aluminum', 'Catalan') 1.0 >>> dist_hamming('ATCG', 'TAGC') 1.0
21,304
def set_target(self, target: EventDispatcherBase) -> None: if self._target is not None: raise PermissionError("The target property already has a valid value.") if not isinstance(target, EventDispatcherBase): raise TypeError("Invalid target type: {}".format(target)) self._target = target
This method should be called by the event dispatcher that dispatches this event to set its target property. Args: target (EventDispatcherBase): The event dispatcher that will dispatch this event. Raises: PermissionError: If the target property of the event has already been set. TypeError: If `target` is not an `EventDispatcherBase` instance.
21,305
def modify_conf(cfgfile, service_name, outfn):
    # NOTE: the string literals below were lost in extraction; the dict keys
    # ('option', 'value', 'is_list', 'section'), messages, separators and
    # open() modes are reconstructed assumptions.
    if not cfgfile or not outfn:
        print('ERROR: Both a config file and an output file are required.')
        sys.exit(0)
    options = service_options[service_name]
    with open(cfgfile, 'r') as cf:
        lines = cf.readlines()
    for opt in options:
        op = opt.get('option')
        res = [line for line in lines if line.startswith(op)]
        if len(res) > 1:
            print('ERROR: more than one line starts with the option: %s' % res)
            sys.exit(0)
        if res:
            (op, sep, val) = (res[0].strip().replace(' ', '').
                              partition('='))
            new_val = None
            if opt.get('is_list'):
                # list-valued option: append the value if it is not present
                if not any(opt.get('value') == value for value in val.split(',')):
                    new_val = ','.join((val, opt.get('value')))
            else:
                if val != opt.get('value'):
                    new_val = opt.get('value')
            if new_val:
                opt_idx = lines.index(res[0])
                lines.pop(opt_idx)
                lines.insert(opt_idx, '='.join((opt.get('option'), new_val + '\n')))
        else:
            try:
                sec_idx = lines.index('[' + opt.get('section') + ']\n')
                lines.insert(sec_idx + 1, '='.join(
                    (opt.get('option'), opt.get('value') + '\n')))
            except ValueError:
                print('ERROR: cannot find section %s' % opt.get('section'))
                sys.exit(0)
    with open(outfn, 'w') as fwp:
        all_lines = ''
        for line in lines:
            all_lines += line
        fwp.write(all_lines)
Modify config file neutron and keystone to include enabler options.
21,306
def get_sample_times(self): if self._epoch is None: return Array(range(len(self))) * self._delta_t else: return Array(range(len(self))) * self._delta_t + float(self._epoch)
Return an Array containing the sample times.
21,307
def run(self, data_dir=None):
    # NOTE: dict keys and string literals below were lost in extraction;
    # 'dirs', 'results_dir', 'experiments_dir', 'scheduler.info', 'last_run',
    # 'frequency' and 'python_exps' are reconstructed assumptions.
    if data_dir:
        centinel_home = data_dir
        self.config['dirs']['results_dir'] = os.path.join(centinel_home, 'results')
    logging.info('Starting to run the scheduled experiments.')
    if not os.path.exists(self.config['dirs']['results_dir']):
        logging.warn("Creating results directory in "
                     "%s" % (self.config['dirs']['results_dir']))
        os.makedirs(self.config['dirs']['results_dir'])
    logging.debug("Results directory: %s" % (self.config['dirs']['results_dir']))
    sched_filename = os.path.join(self.config['dirs']['experiments_dir'],
                                  'scheduler.info')
    logging.debug("Loading scheduler file.")
    sched_info = {}
    if os.path.exists(sched_filename):
        with open(sched_filename, 'r') as file_p:
            try:
                sched_info = json.load(file_p)
            except Exception as exp:
                logging.error("Failed to load the "
                              "scheduler: %s" % str(exp))
                return
        logging.debug("Scheduler file loaded.")
    logging.debug("Processing the experiment schedule.")
    for name in sched_info:
        run_next = sched_info[name]['last_run']
        run_next += sched_info[name]['frequency']
        if run_next > time.time():
            run_next_str = datetime.fromtimestamp(long(run_next))
            logging.debug("Skipping %s, it will "
                          "be run on or after %s." % (name, run_next_str))
            continue
        if 'python_exps' not in sched_info[name]:
            self.run_exp(name=name)
        else:
            exps = sched_info[name]['python_exps'].items()
            for python_exp, exp_config in exps:
                logging.debug("Running %s." % python_exp)
                self.run_exp(name=python_exp, exp_config=exp_config,
                             schedule_name=name)
                logging.debug("Finished running %s." % python_exp)
        sched_info[name]['last_run'] = time.time()
    logging.debug("Updating timeout values in scheduler.")
    with open(sched_filename, 'w') as file_p:
        json.dump(sched_info, file_p, indent=2, separators=(',', ': '))
    self.consolidate_results()
    logging.info("Finished running experiments. "
                 "Look in %s for results." % (self.config['dirs']['results_dir']))
Note: this function will check the experiments directory for a special file, scheduler.info, that details how often each experiment should be run and the last time the experiment was run. If the time since the experiment was run is shorter than the scheduled interval in seconds, then the experiment will not be run. :param data_dir: :return:
21,308
def from_path(path: str, encoding: str = 'utf-8', **kwargs) -> BELGraph:
    # NOTE: the encoding default and the log format string were lost in
    # extraction; 'utf-8' and the message below are reconstructed assumptions.
    log.info('Loading BEL graph from %s', path)
    graph = BELGraph(path=path)
    with codecs.open(os.path.expanduser(path), encoding=encoding) as lines:
        parse_lines(graph=graph, lines=lines, **kwargs)
    return graph
Load a BEL graph from a file resource. This function is a thin wrapper around :func:`from_lines`. :param path: A file path :param encoding: the encoding to use when reading this file. Is passed to :code:`codecs.open`. See the python `docs <https://docs.python.org/3/library/codecs.html#standard-encodings>`_ for a list of standard encodings. For example, files starting with a UTF-8 BOM should use :code:`utf_8_sig`. The remaining keyword arguments are passed to :func:`pybel.io.line_utils.parse_lines`.
21,309
def compute_bleu(reference_corpus, translation_corpus, max_order=4, smooth=False): matches_by_order = [0] * max_order possible_matches_by_order = [0] * max_order reference_length = 0 translation_length = 0 for (references, translation) in zip(reference_corpus, translation_corpus): reference_length += min(len(r) for r in references) translation_length += len(translation) merged_ref_ngram_counts = collections.Counter() for reference in references: merged_ref_ngram_counts |= _get_ngrams(reference, max_order) translation_ngram_counts = _get_ngrams(translation, max_order) overlap = translation_ngram_counts & merged_ref_ngram_counts for ngram in overlap: matches_by_order[len(ngram)-1] += overlap[ngram] for order in range(1, max_order+1): possible_matches = len(translation) - order + 1 if possible_matches > 0: possible_matches_by_order[order-1] += possible_matches precisions = [0] * max_order for i in range(0, max_order): if smooth: precisions[i] = ((matches_by_order[i] + 1.) / (possible_matches_by_order[i] + 1.)) else: if possible_matches_by_order[i] > 0: precisions[i] = (float(matches_by_order[i]) / possible_matches_by_order[i]) else: precisions[i] = 0.0 if min(precisions) > 0: p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions) geo_mean = math.exp(p_log_sum) else: geo_mean = 0 ratio = float(translation_length) / reference_length if ratio > 1.0: bp = 1. else: bp = math.exp(1 - 1. / ratio) bleu = geo_mean * bp return (bleu, precisions, bp, ratio, translation_length, reference_length)
Computes BLEU score of translated segments against one or more references. Args: reference_corpus: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. translation_corpus: list of translations to score. Each translation should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. smooth: Whether or not to apply Lin et al. 2004 smoothing. Returns: 6-tuple of (BLEU score, n-gram precisions, brevity penalty, length ratio, translation length, reference length), matching the function's return statement.
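A toy run, assuming the module-level imports (collections, math) and the _get_ngrams helper are in scope; smoothing avoids a zero score when no 4-gram matches:

references = [[["the", "cat", "is", "on", "the", "mat"]]]  # one list of references per translation
translations = [["the", "cat", "sat", "on", "the", "mat"]]
bleu, precisions, bp, ratio, trans_len, ref_len = compute_bleu(
    references, translations, smooth=True)
print(round(bleu, 4), bp)  # bp == 1.0 because the lengths match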
21,310
def batch_means(x, f=lambda y: y, theta=.5, q=.95, burn=0):
    try:
        import scipy
        from scipy import stats
    except ImportError:
        # the original error message was lost in extraction; reconstructed
        raise ImportError("SciPy is required for batch_means.")
    x = x[burn:]
    n = len(x)
    b = np.int(n ** theta)
    a = n / b
    t_quant = stats.t.isf(1 - q, a - 1)
    Y = np.array([np.mean(f(x[i * b:(i + 1) * b])) for i in xrange(a)])
    sig = b / (a - 1.) * sum((Y - np.mean(f(x))) ** 2)
    return t_quant * sig / np.sqrt(n)
TODO: Use Bayesian CI. Returns the half-width of the frequentist confidence interval (q'th quantile) of the Monte Carlo estimate of E[f(x)]. :Parameters: x : sequence Sampled series. Must be a one-dimensional array. f : function The MCSE of E[f(x)] will be computed. theta : float between 0 and 1 The batch length will be set to len(x) ** theta. q : float between 0 and 1 The desired quantile. :Example: >>> batch_means(x, f=lambda x: x**2, theta=.5, q=.95) :Reference: Flegal, James M. and Haran, Murali and Jones, Galin L. (2007). Markov chain Monte Carlo: Can we trust the third significant figure? <Publication> :Note: Requires SciPy
21,311
def execute_api_request(self):
    # NOTE: the exception messages were lost in extraction and are
    # reconstructed assumptions.
    if not self.auth.check_auth():
        raise Exception("Authentication failed")
    if self.auth.is_authentified():
        id_cookie = {BboxConstant.COOKIE_BBOX_ID: self.auth.get_cookie_id()}
        if self.parameters is None:
            resp = self.call_method(self.api_url.get_url(), cookies=id_cookie)
        else:
            resp = self.call_method(self.api_url.get_url(),
                                    data=self.parameters, cookies=id_cookie)
    else:
        if self.parameters is None:
            resp = self.call_method(self.api_url.get_url())
        else:
            resp = self.call_method(self.api_url.get_url(), data=self.parameters)
    if resp.status_code != 200:
        raise Exception("API request returned HTTP {0} for url {1}".format(
            resp.status_code, self.api_url.get_url()))
    return resp
Execute the request and return json data as a dict :return: data dict
21,312
def reset_can(self, channel=Channel.CHANNEL_CH0, flags=ResetFlags.RESET_ALL): UcanResetCanEx(self._handle, channel, flags)
Resets a CAN channel of a device (hardware reset, empty buffer, and so on). :param int channel: CAN channel, to be reset (:data:`Channel.CHANNEL_CH0` or :data:`Channel.CHANNEL_CH1`). :param int flags: Flags defines what should be reset (see enum :class:`ResetFlags`).
21,313
def GET_getitemvalues(self) -> None: for item in state.getitems: for name, value in item.yield_name2value(state.idx1, state.idx2): self._outputs[name] = value
Get the values of all |Variable| objects observed by the current |GetItem| objects. For |GetItem| objects observing time series, |HydPyServer.GET_getitemvalues| returns only the values within the current simulation period.
21,314
def get_extrapolated_conductivity(temps, diffusivities, new_temp, structure, species): return get_extrapolated_diffusivity(temps, diffusivities, new_temp) \ * get_conversion_factor(structure, species, new_temp)
Returns extrapolated mS/cm conductivity. Args: temps ([float]): A sequence of temperatures. units: K diffusivities ([float]): A sequence of diffusivities (e.g., from DiffusionAnalyzer.diffusivity). units: cm^2/s new_temp (float): desired temperature. units: K structure (structure): Structure used for the diffusivity calculation species (string/Specie): conducting species Returns: (float) Conductivity at extrapolated temp in mS/cm.
21,315
def update_meta_data_for_state_view(graphical_editor_view, state_v, affects_children=False, publish=True):
    from gaphas.item import NW
    # NOTE: the meta-data keys and the signal name were lost in extraction;
    # 'size', 'rel_pos' and 'meta_data_changed' are reconstructed assumptions
    # (the "size" literal in the emit call survives in the source).
    update_meta_data_for_port(graphical_editor_view, state_v, None)
    if affects_children:
        update_meta_data_for_name_view(graphical_editor_view, state_v.name_view, publish=False)
        for transition_v in state_v.get_transitions():
            update_meta_data_for_transition_waypoints(graphical_editor_view, transition_v, None, publish=False)
        for child_state_v in state_v.child_state_views():
            update_meta_data_for_state_view(graphical_editor_view, child_state_v, True, publish=False)
    rel_pos = calc_rel_pos_to_parent(graphical_editor_view.editor.canvas, state_v, state_v.handles()[NW])
    state_v.model.set_meta_data_editor('size', (state_v.width, state_v.height))
    state_v.model.set_meta_data_editor('rel_pos', rel_pos)
    if publish:
        graphical_editor_view.emit('meta_data_changed', state_v.model, "size", affects_children)
This method updates the meta data of a state view :param graphical_editor_view: Graphical Editor view the change occurred in :param state_v: The state view which has been changed/moved :param affects_children: Whether the children of the state view have been resized or not :param publish: Whether to publish the changes of the meta data
21,316
def read_pandas(self, format='table', **kwargs):
    # NOTE: the format default, the open() mode and the error message were
    # lost in extraction; 'table', 'rb' and the PKError text are reconstructed
    # assumptions.
    import pandas
    reader = getattr(pandas, 'read_' + format, None)
    if not callable(reader):
        raise PKError('unable to read in format "%s": no pandas function read_%s',
                      format, format)
    with self.open('rb') as f:
        return reader(f, **kwargs)
Read using :mod:`pandas`. The function ``pandas.read_FORMAT`` is called where ``FORMAT`` is set from the argument *format*. *kwargs* are passed to this function. Supported formats likely include ``clipboard``, ``csv``, ``excel``, ``fwf``, ``gbq``, ``html``, ``json``, ``msgpack``, ``pickle``, ``sql``, ``sql_query``, ``sql_table``, ``stata``, ``table``. Note that ``hdf`` is not supported because it requires a non-keyword argument; see :meth:`Path.read_hdf`.
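A hypothetical call (the CSV path and the pwkit-style Path object are illustrative; pandas must be installed):

p = Path('measurements.csv')   # any object with an open() method works
df = p.read_pandas(format='csv', index_col=0)
print(df.head())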
21,317
def patch_wheel(in_wheel, patch_fname, out_wheel=None):
    in_wheel = abspath(in_wheel)
    patch_fname = abspath(patch_fname)
    if out_wheel is None:
        out_wheel = in_wheel
    else:
        out_wheel = abspath(out_wheel)
    if not exists(patch_fname):
        raise ValueError("patch file {0} does not exist".format(patch_fname))
    with InWheel(in_wheel, out_wheel):
        # the docstring states the patch is applied with ``patch -p1``
        with open(patch_fname, 'rb') as fobj:
            patch_proc = Popen(['patch', '-p1'],
                               stdin=fobj, stdout=PIPE, stderr=PIPE)
            stdout, stderr = patch_proc.communicate()
            if patch_proc.returncode != 0:
                raise RuntimeError("Patch failed with stdout:\n" + stdout.decode())
Apply ``-p1`` style patch in `patch_fname` to contents of `in_wheel` If `out_wheel` is None (the default), overwrite the wheel `in_wheel` in-place. Parameters ---------- in_wheel : str Filename of wheel to process patch_fname : str Filename of patch file. Will be applied with ``patch -p1 < patch_fname`` out_wheel : None or str Filename of patched wheel to write. If None, overwrite `in_wheel`
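A hypothetical invocation (wheel and patch file names are illustrative):

patch_wheel('dist/mypkg-1.0-py3-none-any.whl', 'fixes/docs.patch',
            out_wheel='dist/mypkg-1.0.post1-py3-none-any.whl')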
21,318
def show_system_info_output_show_system_info_stack_mac(self, **kwargs):
    config = ET.Element("config")
    show_system_info = ET.Element("show_system_info")
    config = show_system_info
    output = ET.SubElement(show_system_info, "output")
    show_system_info = ET.SubElement(output, "show-system-info")
    stack_mac = ET.SubElement(show_system_info, "stack-mac")
    # the kwargs keys were lost in extraction; they follow the element names,
    # as usual in this auto-generated code
    stack_mac.text = kwargs.pop('stack_mac')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
21,319
def time_stops(self):
    # NOTE: the string literals were lost in extraction; 'standard' and the
    # unit names are reconstructed from the timedelta keywords and replace()
    # calls that survive in the source.
    if not self.supports_time:
        return []
    if self.service.calendar == 'standard':
        units = self.service.time_interval_units
        interval = self.service.time_interval
        steps = [self.time_start]
        if units in ('years', 'decades', 'centuries'):
            if units == 'years':
                years = interval
            elif units == 'decades':
                years = 10 * interval
            else:
                years = 100 * interval
            next_value = lambda x: x.replace(year=x.year + years)
        elif units == 'months':
            def _fn(x):
                year = x.year + (x.month + interval - 1) // 12
                month = (x.month + interval) % 12 or 12
                day = min(x.day, calendar.monthrange(year, month)[1])
                return x.replace(year=year, month=month, day=day)
            next_value = _fn
        else:
            if units == 'milliseconds':
                delta = timedelta(milliseconds=interval)
            elif units == 'seconds':
                delta = timedelta(seconds=interval)
            elif units == 'minutes':
                delta = timedelta(minutes=interval)
            elif units == 'hours':
                delta = timedelta(hours=interval)
            elif units == 'days':
                delta = timedelta(days=interval)
            elif units == 'weeks':
                delta = timedelta(weeks=interval)
            else:
                raise ValidationError(
                    "Service has an invalid time_interval_units: {}".format(
                        self.service.time_interval_units))
            next_value = lambda x: x + delta
        while steps[-1] < self.time_end:
            value = next_value(steps[-1])
            if value > self.time_end:
                break
            steps.append(value)
        return steps
    else:
        raise NotImplementedError
Valid time steps for this service as a list of datetime objects.
21,320
def gene_id_of_associated_transcript(effect): return apply_to_transcript_if_exists( effect=effect, fn=lambda t: t.gene_id, default=None)
Ensembl gene ID of transcript associated with effect, returns None if effect does not have transcript.
21,321
def create_trace( turn_activity: Activity, name: str, value: object = None, value_type: str = None, label: str = None, ) -> Activity: from_property = ( ChannelAccount( id=turn_activity.recipient.id, name=turn_activity.recipient.name ) if turn_activity.recipient is not None else ChannelAccount() ) if value_type is None and value is not None: value_type = type(value).__name__ reply = Activity( type=ActivityTypes.trace, timestamp=datetime.utcnow(), from_property=from_property, recipient=ChannelAccount( id=turn_activity.from_property.id, name=turn_activity.from_property.name ), reply_to_id=turn_activity.id, service_url=turn_activity.service_url, channel_id=turn_activity.channel_id, conversation=ConversationAccount( is_group=turn_activity.conversation.is_group, id=turn_activity.conversation.id, name=turn_activity.conversation.name, ), name=name, label=label, value_type=value_type, value=value, ) return reply
Creates a trace activity based on this activity. :param turn_activity: :type turn_activity: Activity :param name: The value to assign to the trace activity's <see cref="Activity.name"/> property. :type name: str :param value: The value to assign to the trace activity's <see cref="Activity.value"/> property., defaults to None :param value: object, optional :param value_type: The value to assign to the trace activity's <see cref="Activity.value_type"/> property, defaults to None :param value_type: str, optional :param label: The value to assign to the trace activity's <see cref="Activity.label"/> property, defaults to None :param label: str, optional :return: The created trace activity. :rtype: Activity
21,322
def or_(self, first_qe, *qes):
    res = first_qe
    for qe in qes:
        res = (res | qe)
    self.filter(res)
    return self
Add a $or expression to the query, joining the query expressions given. The ``| operator`` on query expressions does the same thing. **Examples**: ``query.or_(SomeDocClass.age == 18, SomeDocClass.age == 17)`` becomes ``{'$or' : [{ 'age' : 18 }, { 'age' : 17 }]}`` :param query_expressions: Instances of :class:`ommongo.query_expression.QueryExpression`
21,323
def claim_pep_node(self, node_namespace, *, register_feature=True, notify=False): if node_namespace in self._pep_node_claims: raise RuntimeError( "claiming already claimed node" ) registered_node = RegisteredPEPNode( self, node_namespace, register_feature=register_feature, notify=notify, ) finalizer = weakref.finalize( registered_node, weakref.WeakMethod(registered_node._unregister) ) finalizer.atexit = False self._pep_node_claims[node_namespace] = registered_node return registered_node
Claim node `node_namespace`. :param node_namespace: the pubsub node whose events shall be handled. :param register_feature: Whether to publish the `node_namespace` as feature. :param notify: Whether to register the ``+notify`` feature to receive notification without explicit subscription. :raises RuntimeError: if a handler for `node_namespace` is already set. :returns: a :class:`~aioxmpp.pep.service.RegisteredPEPNode` instance representing the claim. .. seealso:: :class:`aioxmpp.pep.register_pep_node` a descriptor which can be used with :class:`~aioxmpp.service.Service` subclasses to claim a PEP node automatically. This registers `node_namespace` as feature for service discovery unless ``register_feature=False`` is passed. .. note:: For `notify` to work, it is required that :class:`aioxmpp.EntityCapsService` is loaded and that presence is re-sent soon after :meth:`~aioxmpp.EntityCapsService.on_ver_changed` fires. See the documentation of the class and the signal for details.
21,324
def enable(profile='allprofiles'):
    # NOTE: string literals below were lost in extraction; the default
    # profile, netsh arguments, __salt__ key and return-dict keys are
    # reconstructed assumptions consistent with the docstring.
    cmd = ['netsh', 'advfirewall', 'set', profile, 'state', 'on']
    ret = __salt__['cmd.run_all'](cmd, python_shell=False, ignore_retcode=True)
    if ret['retcode'] != 0:
        raise CommandExecutionError(ret['stdout'])
    return True
.. versionadded:: 2015.5.0 Enable firewall profile Args: profile (Optional[str]): The name of the profile to enable. Default is ``allprofiles``. Valid options are: - allprofiles - domainprofile - privateprofile - publicprofile Returns: bool: True if successful Raises: CommandExecutionError: If the command fails CLI Example: .. code-block:: bash salt '*' firewall.enable
21,325
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_message(self, **kwargs):
    config = ET.Element("config")
    logical_chassis_fwdl_status = ET.Element("logical_chassis_fwdl_status")
    config = logical_chassis_fwdl_status
    output = ET.SubElement(logical_chassis_fwdl_status, "output")
    cluster_fwdl_entries = ET.SubElement(output, "cluster-fwdl-entries")
    fwdl_entries = ET.SubElement(cluster_fwdl_entries, "fwdl-entries")
    message = ET.SubElement(fwdl_entries, "message")
    # the kwargs keys were lost in extraction; they follow the element names,
    # as usual in this auto-generated code
    message.text = kwargs.pop('message')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
21,326
def issue_funds(ctx, amount='uint256', rtgs_hash='bytes32', returns=STATUS):
    "In the IOU fungible the supply is set by Issuer, who issue funds."
    # NOTE: the stripped parameter defaults above are reconstructed
    # assumptions; in this contract DSL the defaults appear to name the ABI
    # types of the arguments.
    ctx.accounts[ctx.msg_sender] += amount
    ctx.issued_amounts[ctx.msg_sender] += amount
    ctx.Issuance(ctx.msg_sender, rtgs_hash, amount)
    return OK
In the IOU fungible the supply is set by Issuer, who issue funds.
21,327
def select_point(action, action_space, select_point_act, screen): select = spatial(action, action_space).unit_selection_point screen.assign_to(select.selection_screen_coord) select.type = select_point_act
Select a unit at a point.
21,328
def patch(self, url, data=None, **kwargs):
    return self.oauth_request(url, 'patch', data=data, **kwargs)
Shorthand for self.oauth_request(url, 'patch') :param str url: url to send patch oauth request to :param dict data: patch data to update the service :param kwargs: extra params to send to request api :return: Response of the request :rtype: requests.Response
21,329
def dump_registers(cls, registers, arch=None):
    # NOTE: several literals were garbled in extraction; 'Eax'/'Rax', the
    # 'unknown' fallback, the error message and the efl_dump/EFlags lines are
    # reconstructed (the fused fragment "Donefl_dumpEFlags" in the source
    # preserves traces of them).
    if registers is None:
        return ''
    if arch is None:
        if 'Eax' in registers:
            arch = win32.ARCH_I386
        elif 'Rax' in registers:
            arch = win32.ARCH_AMD64
        else:
            arch = 'unknown'
    if arch not in cls.reg_template:
        msg = "Don't know how to dump the registers for architecture: %s"
        raise NotImplementedError(msg % arch)
    registers = registers.copy()
    registers['efl_dump'] = cls.dump_flags(registers['EFlags'])
    return cls.reg_template[arch] % registers
Dump the x86/x64 processor register values. The output mimics that of the WinDBG debugger. @type registers: dict( str S{->} int ) @param registers: Dictionary mapping register names to their values. @type arch: str @param arch: Architecture of the machine whose registers were dumped. Defaults to the current architecture. Currently only the following architectures are supported: - L{win32.ARCH_I386} - L{win32.ARCH_AMD64} @rtype: str @return: Text suitable for logging.
21,330
def read_midc_raw_data_from_nrel(site, start, end):
    # NOTE: the literals were lost in extraction; the parameter names, date
    # format and endpoint URL below follow the MIDC raw data API referenced in
    # the docstring and are reconstructed assumptions.
    args = {'site': site,
            'begin': start.strftime('%Y%m%d'),
            'end': end.strftime('%Y%m%d')}
    endpoint = 'https://midcdmz.nrel.gov/apps/data_api.pl?'
    url = endpoint + '&'.join(['{}={}'.format(k, v) for k, v in args.items()])
    return read_midc(url, raw_data=True)
Request and read MIDC data directly from the raw data api. Parameters ---------- site: string The MIDC station id. start: datetime Start date for requested data. end: datetime End date for requested data. Returns ------- data: Dataframe with DatetimeIndex localized to the station location. Notes ----- Requests spanning an instrumentation change will yield an error. See the MIDC raw data api page `here <https://midcdmz.nrel.gov/apps/data_api_doc.pl?_idtextlist>`_ for more details and considerations.
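A hypothetical request (the station id 'BMS' is illustrative, and the call hits the live NREL API):

import datetime

data = read_midc_raw_data_from_nrel('BMS',
                                    datetime.datetime(2018, 2, 1),
                                    datetime.datetime(2018, 2, 2))
print(data.head())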
21,331
def is_github_repo_owner_the_official_one(context, repo_owner):
    # NOTE: the config key and error message were lost in extraction and are
    # reconstructed assumptions.
    official_repo_owner = context.config['official_github_repos_owner']
    if not official_repo_owner:
        raise ConfigError(
            'This worker does not define an official repo owner. Got: {}'.format(
                official_repo_owner)
        )
    return official_repo_owner == repo_owner
Given a repo_owner, check if it matches the one configured to be the official one. Args: context (scriptworker.context.Context): the scriptworker context. repo_owner (str): the repo_owner to verify Raises: scriptworker.exceptions.ConfigError: when no official owner was defined Returns: bool: True when ``repo_owner`` matches the one configured to be the official one
21,332
def Scale(self, factor): new = self.Copy() new.xs = [x * factor for x in self.xs] return new
Multiplies the xs by a factor. factor: what to multiply by
21,333
def login(self, email=None, password=None, user=None):
    # NOTE: the dict keys, endpoint and header names were lost in extraction;
    # 'login', 'email', 'password', '/session', 'private_token' and
    # 'PRIVATE-TOKEN' are reconstructed assumptions.
    if user is not None:
        data = {'login': user, 'password': password}
    elif email is not None:
        data = {'email': email, 'password': password}
    else:
        raise ValueError('Neither user nor email provided for login')
    self.headers = {'connection': 'close'}
    response = self.post('/session', **data)
    self.token = response['private_token']
    self.headers = {'PRIVATE-TOKEN': self.token,
                    'connection': 'close'}
    return response
Logs the user in and setups the header with the private token :param email: Gitlab user Email :param user: Gitlab username :param password: Gitlab user password :return: True if login successful :raise: HttpError :raise: ValueError
21,334
def get_copyright_metadata(self):
    # NOTE: the metadata and map keys were lost in extraction; 'copyright'
    # and 'existing_string_values' are reconstructed assumptions.
    metadata = dict(self._mdata['copyright'])
    metadata.update({'existing_string_values': self._my_map['copyright']})
    return Metadata(**metadata)
Gets the metadata for the copyright. return: (osid.Metadata) - metadata for the copyright *compliance: mandatory -- This method must be implemented.*
21,335
def download_file_by_name(url, target_folder, file_name, mkdir=False):
    # NOTE: the request headers were lost in extraction; this browser-like
    # header set is a reconstructed assumption.
    __hdr__ = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko)',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
        'Accept-Encoding': 'none',
        'Accept-Language': 'en-US,en;q=0.8',
        'Connection': 'keep-alive'
    }
    # create the target directory if needed
    if not os.path.isdir(target_folder):
        if mkdir:
            preparedir(target_folder)
        else:
            created = preparedir(target_folder, False)
            if not created:
                raise ValueError("Failed to find %s." % target_folder)
    file_path = os.path.join(target_folder, file_name)
    if (sys.version_info < (3, 0)):
        _download_py2(url, file_path, __hdr__)
    else:
        _download_py3(url, file_path, __hdr__)
Download a file to a directory. Args: url: A string to a valid URL. target_folder: Target folder for download (e.g. c:/ladybug) file_name: File name (e.g. testPts.zip). mkdir: Set to True to create the directory if doesn't exist (Default: False)
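A hypothetical call (URL and folder are placeholders):

download_file_by_name('https://example.com/files/testPts.zip',
                      'c:/ladybug', 'testPts.zip', mkdir=True)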
21,336
def validate_properties_exist(self, classname, property_names):
    schema_element = self.get_element_by_class_name(classname)
    requested_properties = set(property_names)
    available_properties = set(schema_element.properties.keys())
    non_existent_properties = requested_properties - available_properties
    if non_existent_properties:
        # the message text was lost in extraction; reconstructed assumption
        raise InvalidPropertyError(
            u'Class "{}" does not have definitions for properties {} '
            u'(requested: {})'.format(classname, non_existent_properties,
                                      property_names))
Validate that the specified property names are indeed defined on the given class.
21,337
def _set_platform_specific_keyboard_shortcuts(self): self.action_new_phrase.setShortcuts(QKeySequence.New) self.action_save.setShortcuts(QKeySequence.Save) self.action_close_window.setShortcuts(QKeySequence.Close) self.action_quit.setShortcuts(QKeySequence.Quit) self.action_undo.setShortcuts(QKeySequence.Undo) self.action_redo.setShortcuts(QKeySequence.Redo) self.action_cut_item.setShortcuts(QKeySequence.Cut) self.action_copy_item.setShortcuts(QKeySequence.Copy) self.action_paste_item.setShortcuts(QKeySequence.Paste) self.action_delete_item.setShortcuts(QKeySequence.Delete) self.action_configure_autokey.setShortcuts(QKeySequence.Preferences)
QtDesigner does not support QKeySequence::StandardKey enum based default keyboard shortcuts. This means that all default key combinations ("Save", "Quit", etc) have to be defined in code.
21,338
def hgetall(self, name): with self.pipe as pipe: f = Future() res = pipe.hgetall(self.redis_key(name)) def cb(): data = {} m_decode = self.memberparse.decode v_decode = self._value_decode for k, v in res.result.items(): k = m_decode(k) v = v_decode(k, v) data[k] = v f.set(data) pipe.on_execute(cb) return f
Returns all the fields and values in the Hash. :param name: str the name of the redis key :return: Future()
21,339
def _manual_lookup(self, facebook_id, facebook_id_string):
    # NOTE: the profile URL format string was lost in extraction and is a
    # reconstructed assumption.
    resp = self._session.get(
        'https://www.facebook.com/%s' % facebook_id,
        allow_redirects=True,
        timeout=10
    )
    m = _MANUAL_NAME_MATCHER.search(resp.text)
    if m:
        name = m.group(1)
    else:
        name = facebook_id_string
    self._cached_profiles[facebook_id] = name
    return name
People who we have not communicated with in a long time will not appear in the look-ahead cache that Facebook keeps. We must manually resolve them. :param facebook_id: Profile ID of the user to lookup. :return:
21,340
def _cleanup(self): self._expkg = None self._extmp = None self._flag_e = True self._ifile.close()
Cleanup after extraction & analysis.
21,341
def caldata(self, time):
    # NOTE: the dict keys were lost in extraction; 'close' (holiday dates)
    # and 'open' (make-up trading dates) are reconstructed assumptions.
    if time.date() in self.__ocdate['close']:
        return False
    elif time.date() in self.__ocdate['open']:
        return True
    else:
        if time.weekday() <= 4:
            return True
        else:
            return False
Market open or not. :param datetime time: the date to check :rtype: bool :returns: True if the market is open, False if it is closed
21,342
def collect(self): def append_frames(frame_a, frame_b): return frame_a.append(frame_b) return self._custom_rdd_reduce(append_frames)
Collect the elements in an PRDD and concatenate the partition.
21,343
def OnMouseUp(self, event):
    # NOTE: the control flow was garbled in extraction; the contradictory
    # `if not self.IsDrag` guard inside the elif branch has been dropped so
    # the drop logic can actually execute.
    if not self.IsInControl:
        self.IsDrag = False
    elif self.IsDrag:
        self.hitIndex = self.HitTest(event.GetPosition())
        self.dropIndex = self.hitIndex[0]
        # the drop index must differ from the start index and be valid
        if not (self.dropIndex == self.startIndex or self.dropIndex == -1):
            dropList = []
            thisItem = self.GetItem(self.startIndex)
            for x in xrange(self.GetColumnCount()):
                dropList.append(self.GetItem(self.startIndex, x).GetText())
            thisItem.SetId(self.dropIndex)
            self.DeleteItem(self.startIndex)
            self.InsertItem(thisItem)
            for x in range(self.GetColumnCount()):
                self.SetStringItem(self.dropIndex, x, dropList[x])
        self.IsDrag = False
    event.Skip()
Generate a dropIndex. Process: check self.IsInControl, check self.IsDrag, HitTest, compare HitTest value The mouse can end up in 5 different places: Outside the Control On itself Above its starting point and on another item Below its starting point and on another item Below its starting point and not on another item
21,344
def do_shell(self, args): if _debug: ConsoleCmd._debug("do_shell %r", args) os.system(args)
Pass command to a system shell when line begins with '!'
21,345
def fanpower_watts(ddtt):
    # NOTE: the 'autosize' literals were lost in extraction and are
    # reconstructed assumptions.
    from eppy.bunch_subclass import BadEPFieldError
    try:
        fan_tot_eff = ddtt.Fan_Total_Efficiency
    except BadEPFieldError as e:
        fan_tot_eff = ddtt.Fan_Efficiency
    pascal = float(ddtt.Pressure_Rise)
    if str(ddtt.Maximum_Flow_Rate).lower() == 'autosize':
        return 'autosize'
    else:
        m3s = float(ddtt.Maximum_Flow_Rate)
    return fan_watts(fan_tot_eff, pascal, m3s)
return fan power in watts given the fan IDF object
21,346
def addattachments(message, template_path):
    # NOTE: several literals and the path-normalization lines were lost in
    # extraction; the 'attachment' header name, the join/abspath steps and
    # the Content-Disposition header are reconstructed assumptions.
    if 'attachment' not in message:
        return message, 0
    message = make_message_multipart(message)
    attachment_filepaths = message.get_all('attachment', failobj=[])
    template_parent_dir = os.path.dirname(template_path)
    for attachment_filepath in attachment_filepaths:
        attachment_filepath = os.path.expanduser(attachment_filepath.strip())
        if not attachment_filepath:
            continue
        if not os.path.isabs(attachment_filepath):
            # relative paths are relative to the template's parent directory
            attachment_filepath = os.path.join(template_parent_dir,
                                               attachment_filepath)
        normalized_path = os.path.abspath(attachment_filepath)
        if not os.path.exists(normalized_path):
            print("Error: can't find attachment " + normalized_path)
            sys.exit(1)
        filename = os.path.basename(normalized_path)
        with open(normalized_path, "rb") as attachment:
            part = email.mime.application.MIMEApplication(attachment.read(),
                                                          Name=filename)
        part.add_header('Content-Disposition',
                        'attachment; filename="{}"'.format(filename))
        message.attach(part)
        print(">>> attached {}".format(normalized_path))
    del message['attachment']
    return message, len(attachment_filepaths)
Add the attachments from the message from the commandline options.
21,347
def protein_subsequences_around_mutations(effects, padding_around_mutation): protein_subsequences = {} protein_subsequence_start_offsets = {} for effect in effects: protein_sequence = effect.mutant_protein_sequence if protein_sequence: mutation_start = effect.aa_mutation_start_offset mutation_end = effect.aa_mutation_end_offset seq_start_offset = max( 0, mutation_start - padding_around_mutation) first_stop_codon_index = protein_sequence.find("*") if first_stop_codon_index < 0: first_stop_codon_index = len(protein_sequence) seq_end_offset = min( first_stop_codon_index, mutation_end + padding_around_mutation) subsequence = protein_sequence[seq_start_offset:seq_end_offset] protein_subsequences[effect] = subsequence protein_subsequence_start_offsets[effect] = seq_start_offset return protein_subsequences, protein_subsequence_start_offsets
From each effect get a mutant protein sequence and pull out a subsequence around the mutation (based on the given padding). Returns a dictionary of subsequences and a dictionary of subsequence start offsets.
21,348
def enterEvent(self, event): if self.__checkable: not self.__checked and self.setPixmap(self.__hover_pixmap) else: self.setPixmap(self.__hover_pixmap)
Reimplements the :meth:`QLabel.enterEvent` method. :param event: QEvent. :type event: QEvent
21,349
def flags(self, index): column = index.column() if index.isValid(): if column in [C.COL_START, C.COL_END]: return Qt.ItemFlags(Qt.ItemIsEnabled) else: return Qt.ItemFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable) else: return Qt.ItemFlags(Qt.ItemIsEnabled)
Override Qt method
21,350
def resolve_file_path_list(pathlist, workdir, prefix='', randomize=False):
    # NOTE: the open() modes, the prefix default and the '.txt' suffix were
    # lost in extraction and are reconstructed assumptions.
    files = []
    with open(pathlist, 'r') as f:
        files = [line.strip() for line in f]
    newfiles = []
    for f in files:
        f = os.path.expandvars(f)
        if os.path.isfile(f):
            newfiles += [f]
        else:
            newfiles += [os.path.join(workdir, f)]
    if randomize:
        _, tmppath = tempfile.mkstemp(prefix=prefix, dir=workdir)
    else:
        tmppath = os.path.join(workdir, prefix)
    tmppath += '.txt'
    with open(tmppath, 'w') as tmpfile:
        tmpfile.write("\n".join(newfiles))
    return tmppath
Resolve the path of each file name in the file ``pathlist`` and write the updated paths to a new file.
21,351
def fire_metric(metric_name, metric_value): metric_value = float(metric_value) metric = {metric_name: metric_value} metric_client.fire_metrics(**metric) return "Fired metric <{}> with value <{}>".format(metric_name, metric_value)
Fires a metric using the MetricsApiClient
21,352
def EXP_gas(self, base, exponent): EXP_SUPPLEMENTAL_GAS = 10 def nbytes(e): result = 0 for i in range(32): result = Operators.ITEBV(512, Operators.EXTRACT(e, i * 8, 8) != 0, i + 1, result) return result return EXP_SUPPLEMENTAL_GAS * nbytes(exponent)
Calculate extra gas fee
21,353
def _parse_alt_title(html_chunk): title = html_chunk.find("img", fn=has_param("alt")) if not title: raise UserWarning("Can't find alternative title source!") return title[0].params["alt"].strip()
Parse title from alternative location if not found where it should be. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: str: Book's title.
21,354
def nice_number(number, thousands_separator=',', max_ndigits_after_dot=None):
    # NOTE: the separator default, split character and format string were
    # lost in extraction; ',', '.' and '%s.%s' are reconstructed from the
    # surrounding logic.
    if isinstance(number, float):
        if max_ndigits_after_dot is not None:
            number = round(number, max_ndigits_after_dot)
        int_part, frac_part = str(number).split('.')
        return '%s.%s' % (nice_number(int(int_part), thousands_separator),
                          frac_part)
    else:
        chars_in = list(str(number))
        number = len(chars_in)
        chars_out = []
        for i in range(0, number):
            if i % 3 == 0 and i != 0:
                chars_out.append(thousands_separator)
            chars_out.append(chars_in[number - i - 1])
        chars_out.reverse()
        return ''.join(chars_out)
Return nicely printed number NUMBER in language LN. Return nicely printed number NUMBER in language LN using given THOUSANDS_SEPARATOR character. If max_ndigits_after_dot is specified and the number is float, the number is rounded by taking in consideration up to max_ndigits_after_dot digit after the dot. This version does not pay attention to locale. See tmpl_nice_number_via_locale().
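Two quick checks of the behavior (with the reconstructed ',' default):

print(nice_number(1234567))              # 1,234,567
print(nice_number(1234567.891, ',', 2))  # 1,234,567.89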
21,355
def data_properties(data, mask=None, background=None): from ..segmentation import SourceProperties segment_image = np.ones(data.shape, dtype=np.int) return SourceProperties(data, segment_image, label=1, mask=mask, background=background)
Calculate the morphological properties (and centroid) of a 2D array (e.g. an image cutout of an object) using image moments. Parameters ---------- data : array_like or `~astropy.units.Quantity` The 2D array of the image. mask : array_like (bool), optional A boolean mask, with the same shape as ``data``, where a `True` value indicates the corresponding element of ``data`` is masked. Masked data are excluded from all calculations. background : float, array_like, or `~astropy.units.Quantity`, optional The background level that was previously present in the input ``data``. ``background`` may either be a scalar value or a 2D image with the same shape as the input ``data``. Inputting the ``background`` merely allows for its properties to be measured within each source segment. The input ``background`` does *not* get subtracted from the input ``data``, which should already be background-subtracted. Returns ------- result : `~photutils.segmentation.SourceProperties` instance A `~photutils.segmentation.SourceProperties` object.
21,356
def set_video_crop(self, x1, y1, x2, y2):
    crop = "%s %s %s %s" % (str(x1), str(y1), str(x2), str(y2))
    # the object path argument is ignored by omxplayer; '/not/used' is a
    # reconstructed assumption for the stripped literal
    self._player_interface.SetVideoCropPos(ObjectPath('/not/used'), String(crop))
Args: x1 (int): Top left x coordinate (px) y1 (int): Top left y coordinate (px) x2 (int): Bottom right x coordinate (px) y2 (int): Bottom right y coordinate (px)
21,357
def gaussian_gradient_magnitude(image, sigma=5, voxelspacing=None, mask=slice(None)):
    return _extract_feature(_extract_gaussian_gradient_magnitude, image, mask,
                            sigma=sigma, voxelspacing=voxelspacing)
r""" Computes the gradient magnitude (edge-detection) of the supplied image using gaussian derivates and returns the intensity values. Optionally a binary mask can be supplied to select the voxels for which the feature should be extracted. Parameters ---------- image : array_like or list/tuple of array_like A single image or a list/tuple of images (for multi-spectral case). sigma : number or sequence of numbers Standard deviation for Gaussian kernel. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. Note that the voxel spacing of the image is taken into account, the given values are treated as mm. voxelspacing : sequence of floats The side-length of each voxel. mask : array_like A binary mask for the image. Returns ------- gaussian_gradient_magnitude : ndarray The gaussian gradient magnitude of the supplied image.
21,358
def param_map_rc_encode(self, target_system, target_component, param_id, param_index, parameter_rc_channel_index, param_value0, scale, param_value_min, param_value_max): return MAVLink_param_map_rc_message(target_system, target_component, param_id, param_index, parameter_rc_channel_index, param_value0, scale, param_value_min, param_value_max)
Bind a RC channel to a parameter. The parameter should change accoding to the RC channel value. target_system : System ID (uint8_t) target_component : Component ID (uint8_t) param_id : Onboard parameter id, terminated by NULL if the length is less than 16 human-readable chars and WITHOUT null termination (NULL) byte if the length is exactly 16 chars - applications have to provide 16+1 bytes storage if the ID is stored as string (char) param_index : Parameter index. Send -1 to use the param ID field as identifier (else the param id will be ignored), send -2 to disable any existing map for this rc_channel_index. (int16_t) parameter_rc_channel_index : Index of parameter RC channel. Not equal to the RC channel id. Typically correpsonds to a potentiometer-knob on the RC. (uint8_t) param_value0 : Initial parameter value (float) scale : Scale, maps the RC range [-1, 1] to a parameter value (float) param_value_min : Minimum param value. The protocol does not define if this overwrites an onboard minimum value. (Depends on implementation) (float) param_value_max : Maximum param value. The protocol does not define if this overwrites an onboard maximum value. (Depends on implementation) (float)
21,359
def transmute_sites( self, old_site_label, new_site_label, n_sites_to_change ): selected_sites = self.select_sites( old_site_label ) for site in random.sample( selected_sites, n_sites_to_change ): site.label = new_site_label self.site_labels = set( [ site.label for site in self.sites ] )
Selects a random subset of sites with a specific label and gives them a different label. Args: old_site_label (String or List(String)): Site label(s) of the sites to be modified. new_site_label (String): Site label to be applied to the modified sites. n_sites_to_change (Int): Number of sites to modify. Returns: None
21,360
def predict(self, x, distributed=True):
    if distributed:  # the source referenced an undefined `is_distributed`; fixed to the parameter
        if isinstance(x, np.ndarray):
            features = to_sample_rdd(x, np.zeros([x.shape[0]]))
        elif isinstance(x, RDD):
            features = x
        else:
            raise TypeError("Unsupported prediction data type: %s" % type(x))
        return self.predict_distributed(features)
    else:
        if isinstance(x, np.ndarray):
            return self.predict_local(x)
        else:
            raise TypeError("Unsupported prediction data type: %s" % type(x))
Use a model to do prediction. # Arguments x: Input data. A Numpy array or RDD of Sample. distributed: Boolean. Whether to do prediction in distributed mode or local mode. Default is True. In local mode, x must be a Numpy array.
21,361
def make_graph(pkg):
    # NOTE: the ignore list and the version dict key were lost in extraction;
    # the package names below and 'version' are reconstructed assumptions.
    ignore = ['argparse', 'pip', 'setuptools', 'wheel']
    pkg_deps = recursive_dependencies(pkg_resources.Requirement.parse(pkg))
    dependencies = {key: {} for key in pkg_deps if key not in ignore}
    installed_packages = pkg_resources.working_set
    versions = {package.key: package.version for package in installed_packages}
    for package in dependencies:
        try:
            dependencies[package]['version'] = versions[package]
        except KeyError:
            warnings.warn("{} is not installed so we cannot compute "
                          "resources for its dependencies.".format(package),
                          PackageNotInstalledWarning)
            dependencies[package]['version'] = None
    for package in dependencies:
        package_data = research_package(package, dependencies[package]['version'])
        dependencies[package].update(package_data)
    return OrderedDict(
        [(package, dependencies[package]) for package in sorted(dependencies.keys())]
    )
Returns a dictionary of information about pkg & its recursive deps. Given a string, which can be parsed as a requirement specifier, return a dictionary where each key is the name of pkg or one of its recursive dependencies, and each value is a dictionary returned by research_package. (No, it's not really a graph.)
21,362
def start(self, ccallbacks=None): self.__manage_g = gevent.spawn(self.__manage_connections, ccallbacks) self.__ready_ev.wait()
Establish and maintain connections.
21,363
def add_resolved_requirements(self, reqs, platforms=None):
    distributions = self._resolve_distributions_by_platform(reqs, platforms=platforms)
    locations = set()
    for platform, dists in distributions.items():
        for dist in dists:
            if dist.location not in locations:
                # the log format string was lost in extraction; reconstructed
                self._log.debug('  Dumping distribution: .../{}'.format(
                    os.path.basename(dist.location)))
                self.add_distribution(dist)
                locations.add(dist.location)
Multi-platform dependency resolution for PEX files. :param builder: Dump the requirements into this builder. :param interpreter: The :class:`PythonInterpreter` to resolve requirements for. :param reqs: A list of :class:`PythonRequirement` to resolve. :param log: Use this logger. :param platforms: A list of :class:`Platform`s to resolve requirements for. Defaults to the platforms specified by PythonSetup.
21,364
def get_code(self, *args, **kwargs): callback = self._commands[args[0]] source = _inspect.getsourcelines(callback)[0] return "\n" + "".join(source)
get the python source code from callback
21,365
def _check_table(self):
    # NOTE: the warning messages were lost in extraction and are
    # reconstructed assumptions.
    cursor = self._db.execute("PRAGMA table_info(%s)" % self.table)
    lines = cursor.fetchall()
    if not lines:
        # table does not exist yet
        return True
    types = {}
    keys = []
    for line in lines:
        keys.append(line[1])
        types[line[1]] = line[2]
    if self._keys != keys:
        self.log.warn('Table %r has incorrect column names' % self.table)
        return False
    for key in self._keys:
        if types[key] != self._types[key]:
            self.log.warn('Column %r has type %r, expected %r'
                          % (key, types[key], self._types[key]))
            return False
    return True
Ensure that an incorrect table doesn't exist If a bad (old) table does exist, return False
21,366
def db_type(self, connection): conn_module = type(connection).__module__ if "mysql" in conn_module: return "bigint AUTO_INCREMENT" elif "postgres" in conn_module: return "bigserial" return super(BigAutoField, self).db_type(connection)
The type of the field to insert into the database.
21,367
def build(self): pkg_security([self.name]) self.error_uns() if self.FAULT: print("") self.msg.template(78) print("| Package {0} {1} {2} {3}".format(self.prgnam, self.red, self.FAULT, self.endc)) self.msg.template(78) else: sources = [] if not os.path.exists(self.meta.build_path): os.makedirs(self.meta.build_path) if not os.path.exists(self._SOURCES): os.makedirs(self._SOURCES) os.chdir(self.meta.build_path) Download(self.meta.build_path, self.sbo_dwn.split(), repo="sbo").start() Download(self._SOURCES, self.source_dwn, repo="sbo").start() script = self.sbo_dwn.split("/")[-1] for src in self.source_dwn: sources.append(src.split("/")[-1]) BuildPackage(script, sources, self.meta.build_path, auto=False).build() slack_package(self.prgnam)
Only build and create Slackware package
21,368
def detect_voice(self, prob_detect_voice=0.5):
    assert self.frame_rate in (48000, 32000, 16000, 8000), "Try resampling to one of the allowed frame rates."
    assert self.sample_width == 2, "Try resampling to 16 bit."
    assert self.channels == 1, "Try resampling to one channel."

    class model_class:
        def __init__(self, aggressiveness):
            self.v = webrtcvad.Vad(int(aggressiveness))

        def predict(self, vector):
            if self.v.is_speech(vector.raw_data, vector.frame_rate):
                return 1
            else:
                return 0

    model = model_class(aggressiveness=2)
    pyesno = 0.3
    pnoyes = 0.2
    p_realyes_outputyes = 0.4
    # ... (the remainder of the filtering/aggregation logic is missing from
    # the source; it builds `ret`, a list of ('v'/'u', AudioSegment) tuples,
    # and ends with:)
    #     ret.append(t)
    # return ret
Returns self as a list of tuples: [('v', voiced segment), ('u', unvoiced segment), (etc.)] The overall order of the AudioSegment is preserved. :param prob_detect_voice: The raw probability that any random 20ms window of the audio file contains voice. :returns: The described list.
21,369
def redis_key(cls, key): keyspace = cls.keyspace tpl = cls.keyspace_template key = "%s" % key if keyspace is None else tpl % (keyspace, key) return cls.keyparse.encode(key)
Get the key we pass to redis. If no namespace is declared, it will use the class name. :param key: str the name of the redis key :return: str
21,370
def previous(self, day_of_week=None): if day_of_week is None: day_of_week = self.day_of_week if day_of_week < SUNDAY or day_of_week > SATURDAY: raise ValueError("Invalid day of week") dt = self.subtract(days=1) while dt.day_of_week != day_of_week: dt = dt.subtract(days=1) return dt
Modify to the previous occurrence of a given day of the week. If no day_of_week is provided, modify to the previous occurrence of the current day of the week. Use the supplied consts to indicate the desired day_of_week, ex. pendulum.MONDAY. :param day_of_week: The previous day of week to reset to. :type day_of_week: int or None :rtype: Date
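For instance (a sketch assuming pendulum 2.x, where pendulum.date() builds a Date):

import pendulum

d = pendulum.date(2019, 5, 15)       # a Wednesday
print(d.previous(pendulum.MONDAY))   # 2019-05-13
print(d.previous())                  # 2019-05-08, the previous Wednesday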
21,371
def cropped(self, t0, t1): if abs(self.delta*(t1 - t0)) <= 180: new_large_arc = 0 else: new_large_arc = 1 return Arc(self.point(t0), radius=self.radius, rotation=self.rotation, large_arc=new_large_arc, sweep=self.sweep, end=self.point(t1), autoscale_radius=self.autoscale_radius)
returns a cropped copy of this segment which starts at self.point(t0) and ends at self.point(t1).
21,372
def add_name(self, tax_id, tax_name, source_name=None, source_id=None,
             name_class='synonym', is_primary=False, is_classified=None,
             execute=True, **ignored):
    # NOTE: string literals were lost in extraction; the name_class default
    # comes from the docstring, while the log message and the 'id' and
    # 'is_primary' keys are reconstructed assumptions.
    assert isinstance(is_primary, bool)
    assert is_classified in {None, True, False}
    if ignored:
        log.info('ignoring unknown arguments: {}'.format(str(ignored)))
    source_id = self.get_source(source_id, source_name)['id']
    statements = []
    if is_primary:
        statements.append(self.names.update(
            whereclause=self.names.c.tax_id == tax_id,
            values={'is_primary': False}))
    statements.append(self.names.insert().values(
        tax_id=tax_id,
        tax_name=tax_name,
        source_id=source_id,
        is_primary=is_primary,
        name_class=name_class,
        is_classified=is_classified))
    if execute:
        self.execute(statements)
    else:
        return statements
Add a record to the names table corresponding to ``tax_id``. Arguments are as follows: - tax_id (string, required) - tax_name (string, required) *one* of the following are required: - source_id (int or string coercable to int) - source_name (string) ``source_id`` or ``source_name`` must identify an existing record in table "source". The following are optional: - name_class (string, default 'synonym') - is_primary (bool, see below) - is_classified (bool or None, default None) ``is_primary`` is optional and defaults to True if only one name is provided; otherwise is_primary must be True for exactly one name (and is optional in others).
21,373
def play(self, sgffile): "Play a game" global verbose if verbose >= 1: print "Setting boardsize and komi for black\n" self.blackplayer.boardsize(self.size) self.blackplayer.komi(self.komi) if verbose >= 1: print "Setting boardsize and komi for white\n" self.whiteplayer.boardsize(self.size) self.whiteplayer.komi(self.komi) self.handicap_stones = [] if self.endgamefile == "": if self.handicap < 2: self.first_to_play = "B" else: self.handicap_stones = self.blackplayer.handicap(self.handicap, self.handicap_type) for stone in self.handicap_stones: self.whiteplayer.black(stone) self.first_to_play = "W" else: self.blackplayer.loadsgf(self.endgamefile, self.endgame_start) self.blackplayer.set_random_seed("0") self.whiteplayer.loadsgf(self.endgamefile, self.endgame_start) self.whiteplayer.set_random_seed("0") if self.blackplayer.is_known_command("list_stones"): self.get_position_from_engine(self.blackplayer) elif self.whiteplayer.is_known_command("list_stones"): self.get_position_from_engine(self.whiteplayer) to_play = self.first_to_play self.moves = [] passes = 0 won_by_resignation = "" while passes < 2: if to_play == "B": move = self.blackplayer.genmove("black") if move[:5] == "ERROR": sys.exit(1) if move[:6] == "resign": if verbose >= 1: print "Black resigns" won_by_resignation = "W+Resign" break else: self.moves.append(move) if string.lower(move[:4]) == "pass": passes = passes + 1 if verbose >= 1: print "Black passes" else: passes = 0 self.whiteplayer.black(move) if verbose >= 1: print "Black plays " + move to_play = "W" else: move = self.whiteplayer.genmove("white") if move[:5] == "ERROR": sys.exit(1) if move[:6] == "resign": if verbose >= 1: print "White resigns" won_by_resignation = "B+Resign" break else: self.moves.append(move) if string.lower(move[:4]) == "pass": passes = passes + 1 if verbose >= 1: print "White passes" else: passes = 0 self.blackplayer.white(move) if verbose >= 1: print "White plays " + move to_play = "B" if verbose >= 2: print self.whiteplayer.showboard() + "\n" if won_by_resignation == "": self.resultw = self.whiteplayer.final_score() self.resultb = self.blackplayer.final_score() else: self.resultw = won_by_resignation; self.resultb = won_by_resignation; if sgffile != "": self.writesgf(sgffile)
Play a game
21,374
def apply_range_set(self, hist: Hist) -> None: axis = self.axis(hist) assert not isinstance(self.min_val, float) assert not isinstance(self.max_val, float) min_val = self.min_val(axis) max_val = self.max_val(axis) self.axis(hist).SetRange(min_val, max_val)
Apply the associated range set to the axis of a given hist. Note: The min and max values should be bins, not user ranges! For more, see the binning explanation in ``apply_func_to_find_bin(...)``. Args: hist: Histogram to which the axis range restriction should be applied. Returns: None. The range is set on the axis.
21,375
def _get_support_sound_mode(self): if self._receiver_type == AVR_X_2016.type: return self._get_support_sound_mode_avr_2016() else: return self._get_support_sound_mode_avr()
Get if sound mode is supported from device. Method executes the method for the current receiver type.
21,376
def delete_lambda_deprecated(awsclient, function_name, s3_event_sources=[],
                             time_event_sources=[], delete_logs=False):
    unwire_deprecated(awsclient, function_name,
                      s3_event_sources=s3_event_sources,
                      time_event_sources=time_event_sources,
                      alias_name=ALIAS_NAME)
    client_lambda = awsclient.get_client('lambda')
    response = client_lambda.delete_function(FunctionName=function_name)
    if delete_logs:
        # '/aws/lambda/<name>' is the standard CloudWatch log group for a Lambda
        log_group_name = '/aws/lambda/%s' % function_name
        delete_log_group(awsclient, log_group_name)
    log.info(json2table(response))
    return 0
Deprecated: please use delete_lambda! :param awsclient: :param function_name: :param s3_event_sources: :param time_event_sources: :param delete_logs: :return: exit_code
21,377
def gblocks(self, new_path=None, seq_type='nucl' or 'prot'):
    # NOTE: several literals were lost in extraction; the seq_type default,
    # the temporary sequence names, the '-p=n' option and the '-gb' output
    # suffix are reconstructed assumptions.
    if new_path is None:
        final = self.__class__(new_temp_path())
    else:
        final = self.__class__(new_path)
    # Gblocks complains about long names, so rename sequences to short placeholders
    orig_name_to_temp = {seq.description: 'name' + str(i)
                         for i, seq in enumerate(self)}
    temp_name_to_orig = {v: k for k, v in orig_name_to_temp.items()}
    temp_fasta = self.rename_sequences(orig_name_to_temp)
    if seq_type == 'nucl':
        t_option = "-t=d"
    if seq_type == 'prot':
        t_option = "-t=p"
    result = sh.gblocks91(temp_fasta.path, t_option, '-p=n',
                          "-b4=3", "-b3=20", "-b5=a", _ok_code=[0, 1])
    created_file = temp_fasta.path + '-gb'
    assert os.path.exists(created_file)
    if "Execution terminated" in result.stdout:
        raise Exception("gblocks crashed again.")
    temp_fasta.rename_sequences(temp_name_to_orig, final)
    return final
Apply the gblocks filtering algorithm to the alignment. See http://molevol.cmima.csic.es/castresana/Gblocks/Gblocks_documentation.html Need to rename all sequences, because it will complain with long names.
21,378
def next(self): while True: if not self._resp: self._start() if self._stop: raise StopIteration skip, data = self._process_data(next_(self._lines)) if not skip: break return data
Handles the iteration by pulling the next line out of the stream, attempting to convert the response to JSON if necessary. :returns: Data representing what was seen in the feed
21,379
def getobjectsize(self, window_name, object_name=None): if not object_name: handle, name, app = self._get_window_handle(window_name) else: handle = self._get_object_handle(window_name, object_name) return self._getobjectsize(handle)
Get object size @param window_name: Window name to look for, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to look for, either full name, LDTP's name convention, or a Unix glob. Or menu heirarchy @type object_name: string @return: x, y, width, height on success. @rtype: list
21,380
def revert(self, revision_id): if self.model is None: raise MissingModelError() revision = self.revisions[revision_id] with db.session.begin_nested(): before_record_revert.send( current_app._get_current_object(), record=self ) self.model.json = dict(revision) db.session.merge(self.model) after_record_revert.send( current_app._get_current_object(), record=self ) return self.__class__(self.model.json, model=self.model)
Revert the record to a specific revision. #. Send a signal :data:`invenio_records.signals.before_record_revert` with the current record as parameter. #. Revert the record to the revision id passed as parameter. #. Send a signal :data:`invenio_records.signals.after_record_revert` with the reverted record as parameter. :param revision_id: Specify the record revision id :returns: The :class:`Record` instance corresponding to the revision id
21,381
def potential_from_grid(self, grid): potential_grid = quad_grid(self.potential_func, 0.0, 1.0, grid, args=(self.axis_ratio, self.kappa_s, self.scale_radius), epsrel=1.49e-5)[0] return potential_grid
Calculate the potential at a given set of arc-second gridded coordinates. Parameters ---------- grid : grids.RegularGrid The grid of (y,x) arc-second coordinates the deflection angles are computed on.
21,382
def add(self, labels, value): if type(value) not in (float, int): raise TypeError("Summary only works with digits (int, float)") with mutex: try: e = self.get_value(labels) except KeyError: e = quantile.Estimator(*self.__class__.DEFAULT_INVARIANTS) self.set_value(labels, e) e.observe(float(value))
Add adds a single observation to the summary.
21,383
def solution_to_array(solution, events, slots): array = np.zeros((len(events), len(slots)), dtype=np.int8) for item in solution: array[item[0], item[1]] = 1 return array
Convert a schedule from solution to array form Parameters ---------- solution : list or tuple of tuples of event index and slot index for each scheduled item events : list or tuple of :py:class:`resources.Event` instances slots : list or tuple of :py:class:`resources.Slot` instances Returns ------- np.array An E by S array (X) where E is the number of events and S the number of slots. Xij is 1 if event i is scheduled in slot j and zero otherwise Example ------- For For 3 events, 7 slots and the solution:: [(0, 1), (1, 4), (2, 5)] The resulting array would be:: [[0, 1, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 1, 0]]
21,384
def amplification_type(self, channels=None):
    if channels is None:
        channels = self._channels
    channels = self._name_to_index(channels)
    if hasattr(channels, '__iter__') \
            and not isinstance(channels, six.string_types):
        return [self._amplification_type[ch] for ch in channels]
    else:
        return self._amplification_type[channels]
Get the amplification type used for the specified channel(s).

Each channel uses one of two amplification types: linear or
logarithmic. This function returns, for each channel, a tuple of two
numbers, in which the first number indicates the number of decades
covered by the logarithmic amplifier, and the second indicates the
linear value corresponding to the channel value zero. If the first
value is zero, the amplifier used is linear.

The amplification type for channel "n" is extracted from the required
$PnE parameter.

Parameters
----------
channels : int, str, list of int, list of str
    Channel(s) for which to get the amplification type. If None,
    return a list with the amplification type of all channels, in the
    order of ``FCSData.channels``.

Return
------
tuple, or list of tuples
    The amplification type of the specified channel(s). This is
    reported as a tuple, in which the first element indicates how many
    decades the logarithmic amplifier covers, and the second indicates
    the linear value that corresponds to a channel value of zero. If
    the first element is zero, the amplification type is linear.
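A runnable illustration of how the returned tuples are read; the values are made up for the example:

# Each tuple is (log decades covered, linear value at channel zero).
amp_types = [(4.0, 1.0), (0.0, 0.0)]
for i, (decades, zero_val) in enumerate(amp_types):
    kind = 'linear' if decades == 0 else 'logarithmic over {} decades'.format(decades)
    print('channel {}: {}'.format(i, kind))
# channel 0: logarithmic over 4.0 decades
# channel 1: linear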
21,385
def get_stats_item(self, item):
    if isinstance(self.stats, dict):
        try:
            return self._json_dumps({item: self.stats[item]})
        except KeyError as e:
            logger.error("Cannot get item {} ({})".format(item, e))
            return None
    elif isinstance(self.stats, list):
        try:
            return self._json_dumps({item: list(map(itemgetter(item), self.stats))})
        except (KeyError, ValueError) as e:
            logger.error("Cannot get item {} ({})".format(item, e))
            return None
    else:
        return None
Return the stats object for a specific item in JSON format.

Stats should be a list of dict (processlist, network...)
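Illustrative behavior for the two stats shapes; the item names and values below are made up:

# dict-shaped stats:  self.stats = {'rx': 10, 'tx': 4}
#   get_stats_item('rx')   -> '{"rx": 10}'
# list-of-dict stats: self.stats = [{'name': 'eth0'}, {'name': 'lo'}]
#   get_stats_item('name') -> '{"name": ["eth0", "lo"]}'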
21,386
def print_table(self, stream=sys.stdout, filter_function=None):
    print(self.to_table(filter_function=filter_function), file=stream)
A pretty ASCII printer for the periodic table, based on some
filter_function.

Args:
    stream: file-like object
    filter_function:
        A filtering function that takes a Pseudo as input and returns a
        boolean. For example, setting filter_function = lambda p: p.Z_val > 2
        will print a periodic table containing only pseudos with Z_val > 2.
21,387
def bfs(self, root=None, display=None):
    if root is None:
        root = self.root
    if display is None:
        # 'display' reconstructed from context (the attribute key literal
        # was lost in extraction).
        display = self.attr['display']
    self.traverse(root, display, Queue())
API: bfs(self, root = None, display = None)

Description:
Searches tree starting from node named root using breadth-first
strategy if root argument is provided. Starts search from root node
of the tree otherwise.

Pre:
Node indicated by root argument should exist.

Input:
root: Starting node name.
display: Display argument.
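A hypothetical call sequence, assuming a tree built with this class; the builder methods and node names are illustrative, not from the source:

# tree.add_root('a'); tree.add_child('a', 'b'); tree.add_child('a', 'c')
# tree.bfs()           # breadth-first traversal from the tree's root
# tree.bfs(root='b')   # starts the traversal at node 'b'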
21,388
def _mb_model(self, beta, mini_batch):
    Y = np.array(self.data[self.max_lag:])
    sample = np.random.choice(len(Y), mini_batch, replace=False)
    Y = Y[sample]
    X = self.X[:, sample]
    z = np.array([self.latent_variables.z_list[k].prior.transform(beta[k])
                  for k in range(beta.shape[0])])
    return neural_network_tanh_mb(Y, X, z, self.units, self.layers,
                                  self.ar + len(self.X_names)), Y
Creates the structure of the model (model matrices etc) for
mini batch model

Parameters
----------
beta : np.ndarray
    Contains untransformed starting values for the latent variables

mini_batch : int
    Mini batch size for the data sampling

Returns
----------
mu : np.ndarray
    Contains the predicted values (location) for the time series

Y : np.ndarray
    Contains the length-adjusted time series (accounting for lags)
21,389
def iter_node(node, name='', unknown=None,
              # Shadowed builtins are pre-bound as default arguments:
              list=list, getattr=getattr, isinstance=isinstance,
              enumerate=enumerate, missing=NonExistent):
    # '_fields' reconstructed from the docstring (the string literal was
    # lost in extraction).
    fields = getattr(node, '_fields', None)
    if fields is not None:
        for name in fields:
            value = getattr(node, name, missing)
            if value is not missing:
                yield value, name
        if unknown is not None:
            unknown.update(set(vars(node)) - set(fields))
    elif isinstance(node, list):
        for value in node:
            yield value, name
Iterates over an object:

- If the object has a _fields attribute, it gets attributes in the
  order of this and returns name, value pairs.

- Otherwise, if the object is a list instance, it returns name, value
  pairs for each item in the list, where the name is passed into this
  function (defaults to blank).

- Can update an unknown set with information about attributes that do
  not exist in fields.
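A runnable sketch, assuming iter_node (and its NonExistent sentinel) is importable from the module above:

import ast

node = ast.parse('x = 1').body[0]  # an ast.Assign node; _fields includes 'targets' and 'value'
unknown = set()
for value, name in iter_node(node, unknown=unknown):
    print(name, '->', type(value).__name__)  # e.g. targets -> list, value -> Constant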
21,390
def name(self):
    name = self._platform_impl.get_process_name()
    # 'posix' reconstructed (literal lost in extraction): on POSIX the
    # kernel may truncate the process name, so it is extended from cmdline.
    if os.name == 'posix':
        try:
            cmdline = self.cmdline
        except AccessDenied:
            pass
        else:
            if cmdline:
                extended_name = os.path.basename(cmdline[0])
                if extended_name.startswith(name):
                    name = extended_name
    self._platform_impl._process_name = name
    return name
The process name.
21,391
def create_api_handler(self):
    try:
        # 'username'/'password' keys reconstructed from the keyword
        # arguments (string literals lost in extraction).
        self.github = github3.login(username=config.data['username'],
                                    password=config.data['password'])
    except KeyError as e:
        raise config.NotConfigured(e)
    logger.info("ratelimit remaining: {}".format(self.github.ratelimit_remaining))
    if hasattr(self.github, 'set_user_agent'):
        # The exact user-agent format string was lost in extraction; this
        # shape is an assumption.
        self.github.set_user_agent('{0} ({1})'.format(self.org_name, self.org_homepage))
    try:
        self.org = self.github.organization(self.org_name)
    except github3.GitHubError:
        logger.error("Possibly the github ratelimit has been exceeded")
        logger.info("ratelimit: " + str(self.github.ratelimit_remaining))
Creates an api handler and sets it on self
21,392
def _find_free_location(self, free_locations, required_sectors=1, preferred=None):
    if preferred and all(free_locations[preferred:preferred + required_sectors]):
        return preferred
    i = 2
    while i < len(free_locations):
        if all(free_locations[i:i + required_sectors]):
            break
        i += 1
    return i
Given a list of booleans, find a list of <required_sectors>
consecutive True values. If no such list is found, return
len(free_locations). Assumes the first two values are always False.
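A standalone re-implementation of the scan loop, to show the search on concrete data (not the class method itself):

free = [False, False, True, False, True, True, False]
required = 2
i = 2
while i < len(free):
    if all(free[i:i + required]):
        break
    i += 1
print(i)  # 4: sectors 4 and 5 form the first run of two free sectors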
21,393
def on_path(self, new):
    self.name = basename(new)
    self.graph = self.editor_input.load()
Handle the file path changing.
21,394
def rpc_call(self, request, method=None, **payload):
    if not method or self.separator not in method:
        raise AssertionError("Wrong method name: {0}".format(method))
    resource_name, method = method.split(self.separator, 1)
    if resource_name not in self.api.resources:
        raise AssertionError("Unknown method " + method)
    # Several string literals below were lost in extraction; the 'data',
    # 'callback', 'headers', and 'params' keys and related values are
    # plausible reconstructions, not confirmed source text.
    data = QueryDict('', mutable=True)
    data.update(payload.get('data', dict()))
    data['callback'] = (payload.get('callback')
                        or request.GET.get('callback')
                        or request.GET.get('jsonp')
                        or 'callback')
    for h, v in payload.get('headers', dict()).iteritems():
        request.META["HTTP_%s" % h.upper().replace('-', '_')] = v
    request.POST = request.PUT = request.GET = data
    delattr(request, '_request')  # attribute name lost in extraction; assumed
    request.method = method.upper()
    request.META['CONTENT_TYPE'] = 'application/x-www-form-urlencoded'  # assumed
    params = payload.pop('params', dict())
    response = self.api.call(resource_name, request, **params)
    if not isinstance(response, SerializedHttpResponse):
        return response
    if response['Content-Type'] in self._meta.emitters_dict:  # key assumed
        return HttpResponse(response.content, status=response.status_code)
    if response.status_code == 200:
        return response.response
    raise AssertionError(response.response)
Call REST API with RPC force.

:return object: a result
21,395
def merge_dicts(*dict_list):
    all_dicts = []
    for ag in dict_list:
        if isinstance(ag, dict):
            all_dicts.append(ag)
    try:
        qitem = {k: v for d in all_dicts for k, v in d.items()}
        return qitem
    except Exception:
        return {}
Extract all of the dictionaries from this list, then merge them together
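A quick runnable check of the merge order: later dicts win on key collisions, and non-dict arguments are skipped.

print(merge_dicts({'a': 1, 'b': 2}, None, {'b': 3}))  # {'a': 1, 'b': 3}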
21,396
def copy_files(self):
    # Exact filenames were lost in extraction; the docstring names the
    # LICENSE and CONTRIBUTING files, so these names are assumed.
    files = [u'LICENSE', u'CONTRIBUTING.rst']
    this_dir = dirname(abspath(__file__))
    for _file in files:
        # Path format strings reconstructed (literals lost in extraction).
        sh.cp(
            '{0}/{1}'.format(this_dir, _file),
            '{0}/'.format(self.book.local_path)
        )
    if self.book.meta.rdf_path:
        sh.cp(
            self.book.meta.rdf_path,
            '{0}/'.format(self.book.local_path)
        )
    # The subject marker literal was lost in extraction; 'GITenberg' is an
    # assumption based on the project context.
    if 'GITenberg' not in self.book.meta.subjects:
        if not self.book.meta.subjects:
            self.book.meta.metadata['subject'] = []
        self.book.meta.metadata['subject'].append('GITenberg')
    self.save_meta()
Copy the LICENSE and CONTRIBUTING files to each folder repo
Generate covers if needed.
Dump the metadata.
21,397
def decode_nibbles(value):
    nibbles_with_flag = bytes_to_nibbles(value)
    flag = nibbles_with_flag[0]
    needs_terminator = flag in {HP_FLAG_2, HP_FLAG_2 + 1}
    is_odd_length = flag in {HP_FLAG_0 + 1, HP_FLAG_2 + 1}
    if is_odd_length:
        raw_nibbles = nibbles_with_flag[1:]
    else:
        raw_nibbles = nibbles_with_flag[2:]
    if needs_terminator:
        nibbles = add_nibbles_terminator(raw_nibbles)
    else:
        nibbles = raw_nibbles
    return nibbles
The inverse of the Hex Prefix function
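A worked example, assuming the usual Hex Prefix flag values HP_FLAG_0 = 0 and HP_FLAG_2 = 2:

# b'\x11\x23\x45' unpacks to nibbles (1, 1, 2, 3, 4, 5).
# The flag nibble 1 means odd length with no terminator, so the first
# nibble is dropped and the decoded payload is the rest:
# decode_nibbles(b'\x11\x23\x45') -> (1, 2, 3, 4, 5)
# A flag nibble of 2 or 3 would instead append the terminator nibble.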
21,398
def notify_created(room, event, user):
    # The template name literal was lost in extraction and is not
    # recoverable from context; the gap is left unfilled.
    tpl = get_plugin_template_module(, chatroom=room, event=event, user=user)
    _send(event, tpl)
Notifies about the creation of a chatroom.

:param room: the chatroom
:param event: the event
:param user: the user performing the action
21,399
def validate_deprecation_semver(version_string, version_description):
    # The exception message format strings were lost in extraction; the
    # wording below is a plausible reconstruction, not confirmed source text.
    if version_string is None:
        raise MissingSemanticVersionError(
            'The {} must be provided.'.format(version_description))
    if not isinstance(version_string, six.string_types):
        raise BadSemanticVersionError(
            'The {} must be a version string.'.format(version_description))
    try:
        v = Version(version_string)
        # '.' reconstructed: base_version is split on dots to require a
        # three-component version.
        if len(v.base_version.split('.')) != 3:
            raise BadSemanticVersionError(
                'The {} is not a valid version: {}'.format(
                    version_description, version_string))
        if not v.is_prerelease:
            raise NonDevSemanticVersionError(
                'The {} must be a dev version: {}'.format(
                    version_description, version_string))
        return v
    except InvalidVersion as e:
        raise BadSemanticVersionError(
            'The {} is not a valid version: {} ({})'.format(
                version_description, version_string, e))
Validates that version_string is a valid semver.

If so, returns that semver. Raises an error otherwise.

:param str version_string: A pantsbuild.pants version which affects some deprecated entity.
:param str version_description: A string used in exception messages to describe what the
                                `version_string` represents.
:rtype: `packaging.version.Version`
:raises DeprecationApplicationError: if the version_string parameter is invalid.
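Expected behavior, assuming the function and its exception types are importable:

print(validate_deprecation_semver('1.2.0.dev0', 'removal version'))  # 1.2.0.dev0
# validate_deprecation_semver('1.2.0', 'removal version')  # raises NonDevSemanticVersionError
# validate_deprecation_semver('1.2', 'removal version')    # raises BadSemanticVersionError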