Dataset preview columns: "Unnamed: 0" (int64, values 0 to ~389k), "code" (string, lengths 26 to ~79.6k), "docstring" (string, lengths 1 to ~46.9k). The rows below alternate row index, code, and docstring.
386,300
def get_nested_streams(dmap): return list({s for dmap in get_nested_dmaps(dmap) for s in dmap.streams})
Recurses supplied DynamicMap to find all streams Args: dmap: DynamicMap to recurse to look for streams Returns: List of streams that were found
386,301
def create_folder(query, default_name=None, default_path=None): from gi.repository import Gtk from os.path import expanduser, dirname, join, exists, isdir from rafcon.core.storage.storage import STATEMACHINE_FILE from rafcon.gui.singleton import main_window_controller from rafcon.gui.runtime_config import global_runtime_config last_path = global_runtime_config.get_config_value(, "") if last_path and isdir(last_path) and not exists(join(last_path, STATEMACHINE_FILE)): pass elif last_path: last_path = dirname(last_path) else: last_path = expanduser() dialog = Gtk.FileChooserDialog(query, None, Gtk.FileChooserAction.CREATE_FOLDER, (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL, Gtk.STOCK_SAVE, Gtk.ResponseType.OK)) dialog.set_default_response(Gtk.ResponseType.OK) if main_window_controller: dialog.set_transient_for(main_window_controller.view.get_top_widget()) dialog.set_current_folder(last_path) if default_name: dialog.set_current_name(default_name) dialog.set_show_hidden(False) add_library_root_path_to_shortcut_folders_of_dialog(dialog) response = dialog.run() if response != Gtk.ResponseType.OK: dialog.destroy() if default_path and default_name: default = os.path.join(default_path, default_name) if os.path.isdir(default): return default return None path = dialog.get_filename() dialog.destroy() if os.path.isdir(path): global_runtime_config.set_config_value(, path) return path return None
Shows a user dialog for folder creation A dialog is opened with the prompt `query`. The current path is set to the last path that was opened/created. The roots of all libraries is added to the list of shortcut folders. :param str query: Prompt asking the user for a specific folder :param str default_name: Default name of the folder to be created :param str default_path: Path in which the folder is created if the user doesn't specify a path :return: Path created by the user or `default_path`\`default_name` if no path was specified or None if none of the paths is valid :rtype: str
386,302
def get_line_flux(line_wave, wave, flux, **kwargs): return np.interp(line_wave, wave, flux, **kwargs)
Interpolated flux at a given wavelength (calls np.interp).
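A quick usage sketch, assuming the function above is in scope; np.interp does plain linear interpolation between the sampled points:
import numpy as np

wave = np.array([4000.0, 5000.0, 6000.0])   # wavelengths
flux = np.array([1.0, 2.0, 4.0])            # flux sampled at those wavelengths
print(get_line_flux(5500.0, wave, flux))    # -> 3.0, halfway between 2.0 and 4.0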
386,303
def get_certificate(): if os.path.exists(CERT_PATH): log(.format(CERT_PATH)) with open(CERT_PATH, ) as cert: full_cert = cert.read() begin_marker = "-----BEGIN CERTIFICATE-----" end_marker = "-----END CERTIFICATE-----" begin_index = full_cert.find(begin_marker) end_index = full_cert.rfind(end_marker) if end_index == -1 or begin_index == -1: raise RuntimeError("Certificate does not contain valid begin" " and end markers.") full_cert = full_cert[begin_index:(end_index + len(end_marker))] return full_cert else: log(, level=WARNING) return None
Read openvswitch certificate from disk
386,304
def _is_empty(self):
    block_items = list(self.iter_block_items())
    if len(block_items) > 1:
        return False
    p = block_items[0]
    if len(p.r_lst) == 0:
        return True
    return False
True if this cell contains only a single empty ``<w:p>`` element.
386,305
def fwdl_status_output_fwdl_state(self, **kwargs):
    config = ET.Element("config")
    fwdl_status = ET.Element("fwdl_status")
    config = fwdl_status
    output = ET.SubElement(fwdl_status, "output")
    fwdl_state = ET.SubElement(output, "fwdl-state")
    fwdl_state.text = kwargs.pop('fwdl_state')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
386,306
def dump(file_name, predictions=None, algo=None, verbose=0):
    dump_obj = {'predictions': predictions, 'algo': algo}
    pickle.dump(dump_obj, open(file_name, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)
    if verbose:
        print('The dump has been saved as file', file_name)
A basic wrapper around Pickle to serialize a list of prediction and/or an algorithm on drive. What is dumped is a dictionary with keys ``'predictions'`` and ``'algo'``. Args: file_name(str): The name (with full path) specifying where to dump the predictions. predictions(list of :obj:`Prediction\ <surprise.prediction_algorithms.predictions.Prediction>`): The predictions to dump. algo(:class:`Algorithm\ <surprise.prediction_algorithms.algo_base.AlgoBase>`, optional): The algorithm to dump. verbose(int): Level of verbosity. If ``1``, then a message indicates that the dumping went successfully. Default is ``0``.
386,307
def _processing_controller_status(self): LOG.info() while True: LOG.info(, len(self._queue)) time.sleep(self._report_interval) if active_count() != 5: LOG.critical( , active_count(), 5)
Report on the status of the Processing Block queue(s).
386,308
def start(name, call=None):
    if call != 'action':
        # Error message reconstructed from the usual salt-cloud action check.
        raise SaltCloudSystemExit(
            'The start action must be called with -a or --action.'
        )
    log.info('Starting node %s', name)
    instanceId = _get_node(name)['InstanceId']
    params = {'Action': 'StartInstance', 'InstanceId': instanceId}
    result = query(params)
    return result
Start a node CLI Examples: .. code-block:: bash salt-cloud -a start myinstance
386,309
def state(self):
    if self._proto.HasField('state'):
        return yamcsManagement_pb2.ServiceState.Name(self._proto.state)
    return None
State of this service.
386,310
def w(msg, *args, **kwargs): return logging.log(WARN, msg, *args, **kwargs)
Log a message at warn level.
386,311
def copyfileobj(fsrc, fdst, length=16*1024):
    while 1:
        buf = fsrc.read(length)
        if not buf:
            break
        fdst.write(buf)
copy data from file-like object fsrc to file-like object fdst
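A minimal usage sketch with in-memory file objects, assuming the function above is in scope:
import io

src = io.BytesIO(b"hello world")
dst = io.BytesIO()
copyfileobj(src, dst)         # copies in 16 KiB chunks until EOF
print(dst.getvalue())         # -> b'hello world'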
386,312
def refresh_oauth_credential(self):
    if self.session.token_type == auth.SERVER_TOKEN_TYPE:
        return
    credential = self.session.oauth2credential
    if credential.is_stale():
        refresh_session = refresh_access_token(credential)
        self.session = refresh_session
Refresh session's OAuth 2.0 credentials if they are stale.
386,313
def E(self,*args,**kwargs): if not in kwargs or kwargs[] is None: try: pot= self._pot except AttributeError: raise AttributeError("Integrate orbit or specify pot=") if in kwargs and kwargs[] is None: kwargs.pop() else: pot= kwargs.pop() if isinstance(pot,Potential): thispot= RZToplanarPotential(pot) elif isinstance(pot,list): thispot= [] for p in pot: if isinstance(p,Potential): thispot.append(RZToplanarPotential(p)) else: thispot.append(p) else: thispot= pot if len(args) > 0: t= args[0] else: t= 0. thiso= self(*args,**kwargs) onet= (len(thiso.shape) == 1) if onet: return _evaluateplanarPotentials(thispot,thiso[0], t=t)\ +thiso[1]**2./2.\ +thiso[2]**2./2. else: return nu.array([_evaluateplanarPotentials(thispot,thiso[0,ii], t=t[ii])\ +thiso[1,ii]**2./2.\ +thiso[2,ii]**2./2. for ii in range(len(t))])
NAME: E PURPOSE: calculate the energy INPUT: t - (optional) time at which to get the radius pot= potential instance or list of such instances OUTPUT: energy HISTORY: 2010-09-15 - Written - Bovy (NYU) 2011-04-18 - Added t - Bovy (NYU)
386,314
def add_callback(self, name, func): if name == : events = [] def callback(_conn_string, _conn_id, _name, event): func(self.id, event, event.get(, 60)) elif name == : events = [, ] def callback(_conn_string, conn_id, _name, event): func(conn_id, event) elif name == : events = [] def callback(_conn_string, conn_id, _name, event): func(conn_id, event) elif name == : events = [] def callback(_conn_string, conn_id, _name, _event): func(self.id, conn_id) else: raise ArgumentError("Unknown callback type {}".format(name)) self._adapter.register_monitor([None], events, callback)
Add a callback when device events happen. Args: name (str): currently support 'on_scan' and 'on_disconnect' func (callable): the function that should be called
386,315
def get_account_tokens(self, address):
    cur = self.db.cursor()
    return namedb_get_account_tokens(cur, address)
Get the list of tokens that this address owns
386,316
def push(self, undoObj):
    if not isinstance(undoObj, QtmacsUndoCommand):
        # Argument-name and type-name literals reconstructed from the signature.
        raise QtmacsArgumentError('undoObj', 'QtmacsUndoCommand',
                                  inspect.stack()[0][3])
    self._wasUndo = False
    self._push(undoObj)
Add ``undoObj`` command to stack and run its ``commit`` method. |Args| * ``undoObj`` (**QtmacsUndoCommand**): the new command object. |Returns| * **None** |Raises| * **QtmacsArgumentError** if at least one argument has an invalid type.
386,317
async def fetch_lightpad(self, lpid):
    url = "https://production.plum.technology/v2/getLightpad"
    data = {"lpid": lpid}
    return await self.__post(url, data)
Lookup details for a given lightpad
386,318
def Serialize(self, writer):
    super(Header, self).Serialize(writer)
    writer.WriteByte(0)
Serialize full object. Args: writer (neo.IO.BinaryWriter):
386,319
def filter_slaves(selfie, slaves):
    return [(s['ip'], s['port']) for s in slaves
            if not s['is_odown'] and not s['is_sdown']
            and s['master-link-status'] == 'ok']
Remove slaves that are in an ODOWN or SDOWN state also remove slaves that do not have 'ok' master-link-status
386,320
def lookup_field_label(self, context, field, default=None):
    default = None
    for form_field in self.form:
        if form_field.name == field:
            default = form_field.label
            break
    return super(SmartFormMixin, self).lookup_field_label(context, field, default=default)
Figures out what the field label should be for the passed in field name. We overload this so as to use our form to see if there is label set there. If so then we'll pass that as the default instead of having our parent derive the field from the name.
386,321
def custom_background_code():
    while True:
        logger.info("Block %s / %s", str(Blockchain.Default().Height), str(Blockchain.Default().HeaderHeight))
        sleep(15)
Custom code run in a background thread. Prints the current block height. This function is run in a daemonized thread, which means it can be instantly killed at any moment, whenever the main thread quits. If you need more safety, don't use a daemonized thread and handle exiting this thread in another way (eg. with signals and events).
386,322
def compose(f: Callable[[Any], Monad], g: Callable[[Any], Monad]) -> Callable[[Any], Monad]:
    return lambda x: g(x).bind(f)
r"""Monadic compose function. Right-to-left Kleisli composition of two monadic functions. (<=<) :: Monad m => (b -> m c) -> (a -> m b) -> a -> m c f <=< g = \x -> g x >>= f
386,323
def get_critical_path_timings(self): setup_workunit = WorkUnitLabel.SETUP.lower() transitive_dependencies = dict() for goal_info in self._sorted_goal_infos: deps = transitive_dependencies.setdefault(goal_info.goal.name, set()) for dep in goal_info.goal_dependencies: deps.add(dep.name) deps.update(transitive_dependencies.get(dep.name)) deps.add(setup_workunit) raw_timings = dict() for entry in self.cumulative_timings.get_all(): raw_timings[entry["label"]] = entry["timing"] critical_path_timings = AggregatedTimings() def add_to_timings(goal, dep): tracking_label = get_label(goal) timing_label = get_label(dep) critical_path_timings.add_timing(tracking_label, raw_timings.get(timing_label, 0.0)) def get_label(dep): return "{}:{}".format(RunTracker.DEFAULT_ROOT_NAME, dep) add_to_timings(setup_workunit, setup_workunit) for goal, deps in transitive_dependencies.items(): add_to_timings(goal, goal) for dep in deps: add_to_timings(goal, dep) return critical_path_timings
Get the cumulative timings of each goal and all of the goals it (transitively) depended on.
386,324
def create_response_dic(self):
    dic = {}
    for scope in self.scopes:
        if scope in self._scopes_registered():
            # Handler prefix reconstructed: scope methods are looked up as 'scope_<name>'.
            dic.update(getattr(self, 'scope_' + scope)())
    dic = self._clean_dic(dic)
    return dic
Generate the dic that will be jsonify. Checking scopes given vs registered. Returns a dic.
386,325
def getTypeWidth(self, dtype: "HdlType", do_eval=False) -> Tuple[int, str, bool]:
    raise NotImplementedError(
        "Implement this method in your HdlType classes")
:return: tuple (current value of width, string of value (can be ID or int), Flag which specifies if width of signal is locked or can be changed by parameter)
386,326
def set_hostname(hostname=None, deploy=False): ** if not hostname: raise CommandExecutionError("Hostname option must not be none.") ret = {} query = {: , : , : localhost.localdomain\, : .format(hostname)} ret.update(__proxy__[](query)) if deploy is True: ret.update(commit()) return ret
Set the hostname of the Palo Alto proxy minion. A commit will be required before this is processed. CLI Example: Args: hostname (str): The hostname to set deploy (bool): If true then commit the full candidate configuration, if false only set pending change. .. code-block:: bash salt '*' panos.set_hostname newhostname salt '*' panos.set_hostname newhostname deploy=True
386,327
def _delete(self): collection = JSONClientValidated(, collection=, runtime=self._runtime) collection.delete_one({: ObjectId(self.get_id().get_identifier())})
Deletes this AssessmentSection from database. Will be called by AssessmentTaken._delete() for clean-up purposes.
386,328
def run_license_checker(config_path): whitelist_licenses = _get_whitelist_licenses(config_path) table = PrintTable(ROW_HEADERS) warnings = [] for pkg in _get_packages(): allowed = pkg.license in whitelist_licenses table.add_row((pkg.name, pkg.version, pkg.license, str(allowed))) if not allowed: warnings.append(pkg) print(table) print(.format(len(warnings)))
Generate table of installed packages and check for license warnings based off user defined restricted license values. :param config_path: str :return:
386,329
def create_unihan_table(columns, metadata):
    if TABLE_NAME not in metadata.tables:
        table = Table(TABLE_NAME, metadata)
        # Primary-key column names reconstructed from the UNIHAN schema.
        table.append_column(Column('char', String(12), primary_key=True))
        table.append_column(Column('ucn', String(12), primary_key=True))
        for column_name in columns:
            col = Column(column_name, String(256), nullable=True)
            table.append_column(col)
        return table
    else:
        return Table(TABLE_NAME, metadata)
Create table and return :class:`sqlalchemy.Table`. Parameters ---------- columns : list columns for table, e.g. ``['kDefinition', 'kCantonese']`` metadata : :class:`sqlalchemy.schema.MetaData` Instance of sqlalchemy metadata Returns ------- :class:`sqlalchemy.schema.Table` : Newly created table with columns and index.
386,330
def angleOfView2(x, y, b, x0=None, y0=None):
    if x0 is None:
        x0 = x[-1, -1]
    if y0 is None:
        y0 = y[-1, -1]
    return np.cos(np.arctan(np.sqrt((x - x0 / 2)**2 + (y - y0 / 2)**2) / b))
Corrected AngleOfView equation by Koentges (via mail from 14/02/2017) b --> distance between the camera and the module in m x0 --> viewable with in the module plane of the camera in m y0 --> viewable height in the module plane of the camera in m x,y --> pixel position [m] from top left
386,331
def verify(info, directory_path):
    base_path = os.path.join(directory_path, info['name'])
    if 'length' in info:
        if os.stat(base_path).st_size != info['length']:
            return False
        getfile = lambda: open(base_path, 'rb')
    else:
        assert 'files' in info
        for f in info['files']:
            p = os.path.join(base_path, *f['path'])
            if os.stat(p).st_size != f['length']:
                return False
        getfile = lambda: ConcatenatedFile(base_path, info['files'])
    with getfile() as f:
        return compare_checksum(info, f)
Return True if the checksum values in the torrent file match the computed checksum values of downloaded file(s) in the directory and if each file has the correct length as specified in the torrent file.
386,332
def run(self, queue): time.sleep(random.random()) obj = self.get_object() obj.fullname.hset( % tuple(obj.hmget(, ))) result = % (obj.pk.get(), obj.fullname.hget()) self.result.set(result) return result
Create the fullname, and store a message serving as result in the job
386,333
def to_dict(self, index=True, ordered=False):
    result = OrderedDict() if ordered else dict()
    if index:
        result.update({self._index_name: self._index})
    if ordered:
        data_dict = [(column, self._data[i]) for i, column in enumerate(self._columns)]
    else:
        data_dict = {column: self._data[i] for i, column in enumerate(self._columns)}
    result.update(data_dict)
    return result
Returns a dict where the keys are the column names and the values are lists of the values for that column. :param index: If True then include the index in the dict with the index_name as the key :param ordered: If True then return an OrderedDict() to preserve the order of the columns in the DataFrame :return: dict or OrderedDict()
386,334
def expected_counts_stationary(T, n, mu=None):
    if n <= 0:
        EC = np.zeros(T.shape)
        return EC
    else:
        if mu is None:
            mu = stationary_distribution(T)
        EC = n * mu[:, np.newaxis] * T
        return EC
r"""Expected transition counts for Markov chain in equilibrium. Since mu is stationary for T we have .. math:: E(C^{(N)})=N diag(mu)*T. Parameters ---------- T : (M, M) ndarray Transition matrix. n : int Number of steps for chain. mu : (M,) ndarray (optional) Stationary distribution for T. If mu is not specified it will be computed via diagonalization of T. Returns ------- EC : numpy array, shape=(n,n) Expected value for transition counts after a propagation of n steps.
386,335
def _set_preprovision(self, v, load=False): if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=preprovision.preprovision, is_container=, presence=False, yang_name="preprovision", rest_name="preprovision", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: u, u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=True) except (TypeError, ValueError): raise ValueError({ : , : "container", : , }) self.__preprovision = t if hasattr(self, ): self._set()
Setter method for preprovision, mapped from YANG variable /preprovision (container) If this variable is read-only (config: false) in the source YANG file, then _set_preprovision is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_preprovision() directly. YANG Description: Preprovision profile
386,336
def _find_single(self, match_class, **keywds): self._logger.debug() start_time = timeit.default_timer() norm_keywds = self.__normalize_args(**keywds) decl_matcher = self.__create_matcher(match_class, **norm_keywds) dtype = self.__findout_decl_type(match_class, **norm_keywds) recursive_ = self.__findout_recursive(**norm_keywds) decls = self.__findout_range(norm_keywds[], dtype, recursive_) found = matcher.get_single(decl_matcher, decls, False) self._logger.debug( , (timeit.default_timer() - start_time)) return found
implementation details
386,337
def _group_by_batches(samples, check_fn):
    batch_groups = collections.defaultdict(list)
    extras = []
    for data in [x[0] for x in samples]:
        if check_fn(data):
            batch_groups[multi.get_batch_for_key(data)].append(data)
        else:
            extras.append([data])
    return batch_groups, extras
Group calls by batches, processing families together during ensemble calling.
386,338
def execute_task(bufs):
    user_ns = locals()
    user_ns.update({'__builtins__': __builtins__})
    f, args, kwargs = unpack_apply_message(bufs, user_ns, copy=False)
    prefix = "parsl_"
    fname = prefix + "f"
    argname = prefix + "args"
    kwargname = prefix + "kwargs"
    resultname = prefix + "result"
    user_ns.update({fname: f,
                    argname: args,
                    kwargname: kwargs,
                    resultname: resultname})
    code = "{0} = {1}(*{2}, **{3})".format(resultname, fname, argname, kwargname)
    try:
        exec(code, user_ns, user_ns)
    except Exception as e:
        logger.warning("Caught exception; will raise it: {}".format(e), exc_info=True)
        raise e
    else:
        return user_ns.get(resultname)
Deserialize the buffer and execute the task. Returns the result or throws exception.
386,339
def backward_word_extend_selection(self, e):
    self.l_buffer.backward_word_extend_selection(self.argument_reset)
    self.finalize()
u"""Move back to the start of the current or previous word. Words are composed of letters and digits.
386,340
def leave_room(self, sid, room, namespace=None): return self.server.leave_room(sid, room, namespace=namespace or self.namespace)
Leave a room. The only difference with the :func:`socketio.Server.leave_room` method is that when the ``namespace`` argument is not given the namespace associated with the class is used.
386,341
def calculate_mean(samples, weights):
    assert len(samples) == len(weights), \
        "The number of samples (got %i) must equal the number of weights (got %i)." % (len(samples), len(weights))
    return _np.average(samples, axis=0, weights=weights)
r'''Calculate the mean of weighted samples (like the output of an importance-sampling run). :param samples: Matrix-like numpy array; the samples to be used. :param weights: Vector-like numpy array; the (unnormalized) importance weights.
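A quick usage sketch, assuming the function above is in scope and _np is numpy; the third sample counts twice as much as the others:
import numpy as np

samples = np.array([[0.0, 0.0],
                    [1.0, 2.0],
                    [2.0, 4.0]])
weights = np.array([1.0, 1.0, 2.0])
print(calculate_mean(samples, weights))  # -> [1.25 2.5]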
386,342
def negate(arg):
    op = arg.op()
    if hasattr(op, 'negate'):
        result = op.negate()
    else:
        result = ops.Negate(arg)
    return result.to_expr()
Negate a numeric expression Parameters ---------- arg : numeric value expression Returns ------- negated : type of caller
386,343
def __process_by_ccore(self):
    cure_data_pointer = wrapper.cure_algorithm(self.__pointer_data, self.__number_cluster,
                                               self.__number_represent_points, self.__compression)
    self.__clusters = wrapper.cure_get_clusters(cure_data_pointer)
    self.__representors = wrapper.cure_get_representors(cure_data_pointer)
    self.__means = wrapper.cure_get_means(cure_data_pointer)
    wrapper.cure_data_destroy(cure_data_pointer)
! @brief Performs cluster analysis using CCORE (C/C++ part of pyclustering library).
386,344
def binary_operation_math(self, rule, left, right, **kwargs):
    if isinstance(left, NumberRule) and isinstance(right, NumberRule):
        return self._calculate_operation_math(rule, left, right)
    return self._compile_operation_rule(
        rule, left, right, MathBinOpRule
    )
Implementation of :py:func:`pynspect.traversers.RuleTreeTraverser.binary_operation_math` interface.
386,345
def btc_script_classify(scriptpubkey, private_key_info=None): if scriptpubkey.startswith("76a914") and scriptpubkey.endswith("88ac") and len(scriptpubkey) == 50: return elif scriptpubkey.startswith("a914") and scriptpubkey.endswith("87") and len(scriptpubkey) == 46: if private_key_info: if btc_is_singlesig_segwit(private_key_info): return elif btc_is_multisig_segwit(private_key_info): return return elif scriptpubkey.startswith() and len(scriptpubkey) == 44: return elif scriptpubkey.startswith() and len(scriptpubkey) == 68: return script_tokens = btc_script_deserialize(scriptpubkey) if len(script_tokens) == 0: return None if script_tokens[0] == OPCODE_VALUES[]: return "nulldata" elif script_tokens[-1] == OPCODE_VALUES[]: return "multisig" elif len(script_tokens) == 2 and script_tokens[-1] == OPCODE_VALUES["OP_CHECKSIG"]: return "p2pk" return None
Classify a scriptpubkey, optionally also using the private key info that will generate the corresponding scriptsig/witness Return None if not known (nonstandard)
386,346
def handle(self,
           t_input: inference.TranslatorInput,
           t_output: inference.TranslatorOutput,
           t_walltime: float = 0.):
    self.stream.write("{:.3f}\t{}\t{}\n".format(t_output.score,
                                                C.TOKEN_SEPARATOR.join(t_input.tokens),
                                                t_output.translation))
    self.stream.flush()
:param t_input: Translator input. :param t_output: Translator output. :param t_walltime: Total walltime for translation.
386,347
def unapply_top_patch(self, force=False):
    self._check(force)
    patch = self.db.top_patch()
    self._unapply_patch(patch)
    self.db.save()
    self.unapplied(self.db.top_patch())
Unapply top patch
386,348
def wb_db020(self, value=None): if value is not None: try: value = float(value) except ValueError: raise ValueError( .format(value)) self._wb_db020 = value
Corresponds to IDD Field `wb_db020` mean coincident wet-bulb temperature to Dry-bulb temperature corresponding to 2.0% annual cumulative frequency of occurrence (warm conditions) Args: value (float): value for IDD Field `wb_db020` Unit: C if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
386,349
def remove_objects_not_in(self, objects_to_keep, verbosity): for class_ in objects_to_keep.keys(): current = class_.objects.all() current_ids = set([x.pk for x in current]) keep_ids = set([x.pk for x in objects_to_keep[class_]]) remove_these_ones = current_ids.difference(keep_ids) if remove_these_ones: for obj in current: if obj.pk in remove_these_ones: obj.delete() if verbosity >= 2: print("Deleted object: %s" % six.u(obj)) if verbosity > 0 and remove_these_ones: num_deleted = len(remove_these_ones) if num_deleted > 1: type_deleted = six.u(class_._meta.verbose_name_plural) else: type_deleted = six.u(class_._meta.verbose_name) print("Deleted %s %s" % (str(num_deleted), type_deleted))
Delete all the objects in the database that are not in objects_to_keep. - objects_to_keep: A map where the keys are classes, and the values are a set of the objects of that class we should keep.
386,350
def setParameter(self, parameterName, index, parameterValue):
    if hasattr(self, parameterName):
        setattr(self, parameterName, parameterValue)
    else:
        raise Exception("Unknown parameter: " + parameterName)
Set the value of a Spec parameter. Most parameters are handled automatically by PyRegion's parameter set mechanism. The ones that need special treatment are explicitly handled here.
386,351
def list_runtime(self, scope="", skip_policy_evaluation=True, start_time=None, end_time=None): host.domain = "example.com" and container.image != "alpine:latest" containers = { : scope, : skip_policy_evaluation } if start_time or end_time: containers[] = {} containers[][] = int(start_time * 100000) if start_time else 0 end_time = end_time if end_time else time.time() containers[][] = int(end_time * 1000000) url = self.url + data = json.dumps(containers) res = requests.post(url, headers=self.hdrs, data=data, verify=self.ssl_verify) if not self._checkResponse(res): return [False, self.lasterr] return [True, res.json()]
**Description** List runtime containers **Arguments** - scope: An AND-composed string of predicates that selects the scope in which the alert will be applied. (like: 'host.domain = "example.com" and container.image != "alpine:latest"') - skip_policy_evaluation: If true, no policy evaluations will be triggered for the images. - start_time: Start of the time range (integer of unix time). - end_time: End of the time range (integer of unix time). **Success Return Value** A JSON object representing the list of runtime containers.
386,352
def send_vdp_query_msg(self, mode, mgrid, typeid, typeid_ver, vsiid_frmt, vsiid, filter_frmt, gid, mac, vlan, oui_id, oui_data): if not self.is_ncb: LOG.error("EVB cannot be set on NB") return vdp_key_str = self.construct_vdp_dict(mode, mgrid, typeid, typeid_ver, vsiid_frmt, vsiid, filter_frmt, gid, mac, vlan, None, None) if len(vdp_key_str) == 0: LOG.error("NULL List") return reply = self.run_vdptool(["-t", "-i", self.port_name, "-R", "-V", mode, "-c", vdp_key_str[], "-c", vdp_key_str[], "-c", vdp_key_str[], "-c", vdp_key_str[], "-c", vdp_key_str[]]) return reply
Constructs and Sends the VDP Query Message. Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP Section for more detailed information :param mode: Associate or De-associate :param mgrid: MGR ID :param typeid: Type ID :param typeid_ver: Version of the Type ID :param vsiid_frmt: Format of the following VSI argument :param vsiid: VSI value :param filter_frmt: Filter Format :param gid: Group ID the vNIC belongs to :param mac: MAC Address of the vNIC :param vlan: VLAN of the vNIC :param oui_id: OUI Type :param oui_data: OUI Data :param sw_resp: Flag indicating if response is required from the daemon :return reply: Reply from vdptool
386,353
def decompressBWTPoolProcess(tup):
    (inputDir, outputDir, startIndex, endIndex) = tup
    if startIndex == endIndex:
        return True
    outputBwt[startIndex:endIndex] = msbwt.getBWTRange(startIndex, endIndex)
    return True
Individual process for decompression
386,354
def parse_gzip(file_path):
    newDecoder = MMTFDecoder()
    newDecoder.decode_data(_unpack(gzip.open(file_path, "rb")))
    return newDecoder
Return a decoded API to the data from a file path. File is gzip compressed. :param file_path: the input file path. Data is gzip compressed. :return an API to decoded data
386,355
def artist_create(self, name, other_names_comma=None, group_name=None, url_string=None, body=None): params = { : name, : other_names_comma, : group_name, : url_string, : body, } return self.get(, params, method=, auth=True)
Function to create an artist (Requires login) (UNTESTED). Parameters: name (str): other_names_comma (str): List of alternative names for this artist, comma delimited. group_name (str): The name of the group this artist belongs to. url_string (str): List of URLs associated with this artist, whitespace or newline delimited. body (str): DText that will be used to create a wiki entry at the same time.
386,356
def main(doc, timeout, size, debug, allow_codes, whitelist): t0 = time.time() links = [i[0] for i in LINK_RE.findall(doc.read())] request_urls = [] counts = {} for link in links: if is_static(link): STATICS.append(link) continue if link in counts: counts[link] += 1 continue else: counts[link] = 1 parsed = urlparse(link) if not parsed.scheme: link = .format(link) if whitelist: exists = [i for i in whitelist if i in parsed.netloc] if exists: WHITELISTED.append(link) continue request_urls.append(link) counts_keys = counts.keys() DUPES.extend([(i, counts[i]) for i in counts_keys if counts[i] > 1]) requests = (grequests.head(u, timeout=timeout, verify=False) for u in request_urls) responses = grequests.imap(requests, exception_handler=handle_exception, size=size) for res in responses: color = if is_error_code(res.status_code): if res.status_code not in allow_codes: ERRORS.append((res.status_code, res.url)) color = else: WHITELISTED.append(res.url) status = click.style(str(res.status_code), fg=color) click.echo(.format(status, res.url)) errors_len = len(ERRORS) exceptions_len = len(EXCEPTIONS) dupes_len = len(DUPES) white_len = len(WHITELISTED) if errors_len: click.echo() click.echo() for code, url in ERRORS: code = click.style(str(code), fg=) click.echo(.format(code, url)) if exceptions_len and debug: import ssl click.echo() click.echo(.format(ssl.OPENSSL_VERSION)) click.secho(, fg=) for url, exception in EXCEPTIONS: click.echo(.format(url)) click.secho(.format(exception), fg=, bold=True) if dupes_len and debug: click.echo() for url, count in DUPES: click.secho(.format(url, count), fg=, bold=True) if white_len and debug: click.echo() click.echo() for url in WHITELISTED: click.secho(.format(url), fg=) click.secho(.format(len(links)), fg=) click.secho(.format(errors_len), fg=) click.secho(.format(exceptions_len), fg=) click.secho(.format(dupes_len), fg=) click.secho(.format(white_len), fg=) click.secho(.format(len(STATICS)), fg=) if debug: click.echo(.format(time.time() - t0)) if errors_len: sys.exit(1)
Examples: simple call $ vl README.md Adding debug outputs $ vl README.md --debug Adding a custom timeout for each url. time on seconds. $ vl README.md -t 3 Adding a custom size param, to add throttle n requests per time $ vl README -s 1000 Skipping some error codes. This will allow 500 and 404 responses to be ignored $ vl README.md -a 500,404 Adding Whitelists $ vl README.md -w server1.com,server2.com
386,357
def at(host, command, seq, params): params_str = [] for p in params: if type(p) == int: params_str.append(.format(p)) elif type(p) == float: params_str.append(.format(f2i(p))) elif type(p) == str: params_str.append(.format(p)) msg = .format(command, seq, .join(params_str)) sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.sendto(msg.encode(), (host, ardrone.constant.COMMAND_PORT))
Parameters: command -- the command seq -- the sequence number params -- a list of elements which can be either int, float or string
386,358
def get_tree(self, process_name):
    for tree_name, tree in self.trees.items():
        if process_name in tree:
            return tree
return tree that is managing time-periods for given process
386,359
def precheck(context):
    if context.noprecheck:
        return True
    func_name = "precheck_" + context.key.replace("-", "_")
    if func_name in globals() and isfunction(globals()[func_name]):
        return globals()[func_name](context)
    else:
        return True
calls a function named "precheck_<key>" where <key> is context_key with '-' changed to '_' (e.g. "precheck_ami_id") Checking function should return True if OK, or raise RuntimeError w/ message if not Args: context: a populated EFVersionContext object Returns: True if the precheck passed, or if there was no precheck function for context.key Raises: RuntimeError if precheck failed, with explanatory message
386,360
def _runResponder(self, responder, request, command, identifier):
    d = defer.maybeDeferred(responder, **request)

    def _addIdentifier(response):
        response["_answer"] = identifier
        return response

    def _serializeFailure(failure):
        key = failure.trap(*command.allErrors)
        response = {
            "_error_code": command.allErrors[key],
            "_error_description": str(failure.value),
            "_error": identifier
        }
        return response

    d.addCallbacks(_addIdentifier, _serializeFailure)
    return d
Run the responser function. If it succeeds, add the _answer key. If it fails with an error known to the command, serialize the error.
386,361
def discover(service="ssdp:all", timeout=1, retries=2, ipAddress="239.255.255.250", port=1900): socket.setdefaulttimeout(timeout) messages = [] if isinstance(service, str): services = [service] elif isinstance(service, list): services = service for service in services: message = + \ ipAddress + + str(port) + message += "ST: " + service + "\r\n\r\n" messages.append(message) responses = {} for _ in range(retries): sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 2) for _ in range(2): for message in messages: sock.sendto(message.encode(), (ipAddress, port)) while True: try: data = sock.recv(1024) except socket.timeout: break else: response = DiscoveryResponse(data) responses[response.location] = response return list(responses.values())
Discovers UPnP devices in the local network. Try to discover all devices in the local network which do support UPnP. The discovery process can fail for various reasons and it is recommended to do at least two discoveries, which you can specify with the ``retries`` parameter. The default ``service`` parameter tries to address all devices also if you know which kind of service type you are looking for you should set it as some devices do not respond or respond differently otherwise. :param service: the service type or list of service types of devices you look for :type service: str or list[str] :param float timeout: the socket timeout for each try :param int retries: how often should be a discovery request send :param str ipAddress: the multicast ip address to use :param int port: the port to use :return: a list of DiscoveryResponse objects or empty if no device was found :rtype: list[DiscoveryResponse] Example: :: results = discover() for result in results: print("Host: " + result.locationHost + " Port: " + result.locationPort + " Device definitions: " + \\ result.location) .. seealso:: :class:`~simpletr64.DiscoveryResponse`, :meth:`~simpletr64.Discover.discoverParticularHost`
386,362
def gpgga_to_dms(gpgga):
    deg_min, dmin = gpgga.split('.')
    degrees = int(deg_min[:-2])
    minutes = float('%s.%s' % (deg_min[-2:], dmin))
    decimal = degrees + (minutes / 60)
    return decimal
Convert GPS coordinate in GPGGA format to degree/minute/second Reference: http://us.cactii.net/~bb/gps.py
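A short usage sketch, assuming the function above is in scope; in GPGGA, 4807.038 means 48 degrees and 7.038 minutes:
print(gpgga_to_dms('4807.038'))  # -> 48.1173 (= 48 + 7.038 / 60)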
386,363
def geodetic_distance(lons1, lats1, lons2, lats2, diameter=2*EARTH_RADIUS):
    lons1, lats1, lons2, lats2 = _prepare_coords(lons1, lats1, lons2, lats2)
    distance = numpy.arcsin(numpy.sqrt(
        numpy.sin((lats1 - lats2) / 2.0) ** 2.0
        + numpy.cos(lats1) * numpy.cos(lats2)
        * numpy.sin((lons1 - lons2) / 2.0) ** 2.0
    ))
    return diameter * distance
Calculate the geodetic distance between two points or two collections of points. Parameters are coordinates in decimal degrees. They could be scalar float numbers or numpy arrays, in which case they should "broadcast together". Implements http://williams.best.vwh.net/avform.htm#Dist :returns: Distance in km, floating point scalar or numpy array of such.
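A standalone numpy check of the same haversine formula, assuming EARTH_RADIUS is about 6371 km and that coordinates are converted to radians first (the role _prepare_coords plays above):
import numpy as np

EARTH_RADIUS = 6371.0  # km, assumed value
lon1, lat1 = np.radians([0.0, 0.0])
lon2, lat2 = np.radians([1.0, 0.0])
d = 2 * EARTH_RADIUS * np.arcsin(np.sqrt(
    np.sin((lat1 - lat2) / 2.0) ** 2.0 +
    np.cos(lat1) * np.cos(lat2) * np.sin((lon1 - lon2) / 2.0) ** 2.0
))
print(round(d, 1))  # -> 111.2 km, one degree of longitude at the equator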
386,364
def vlr_factory(raw_vlr):
    user_id = raw_vlr.header.user_id.rstrip(NULL_BYTE).decode()
    known_vlrs = BaseKnownVLR.__subclasses__()
    for known_vlr in known_vlrs:
        if (
            known_vlr.official_user_id() == user_id
            and raw_vlr.header.record_id in known_vlr.official_record_ids()
        ):
            return known_vlr.from_raw(raw_vlr)
    else:
        return VLR.from_raw(raw_vlr)
Given a raw_vlr tries to find its corresponding KnownVLR class that can parse its data. If no KnownVLR implementation is found, returns a VLR (record_data will still be bytes)
386,365
def doExperiment(numColumns, l2Overrides, objectDescriptions, noiseMu, noiseSigma, numInitialTraversals, noiseEverywhere): layer4sdr = lambda : np.array(sorted(random.sample(xrange(L4_CELL_COUNT), 40)), dtype="uint32") featureLocationSDRs = [defaultdict(layer4sdr) for _ in xrange(numColumns)] params = {"inputWidth": L4_CELL_COUNT, "lateralInputWidths": [4096]*(numColumns-1), "seed": random.randint(0, 1024)} params.update(l2Overrides) l2Columns = [ColumnPooler(**params) for _ in xrange(numColumns)] objectL2Representations = {} for objectName, featureLocations in objectDescriptions.iteritems(): for featureLocationName in featureLocations: for _ in xrange(4): allLateralInputs = [l2.getActiveCells() for l2 in l2Columns] for columnNumber, l2 in enumerate(l2Columns): feedforwardInput = featureLocationSDRs[columnNumber][featureLocationName] lateralInputs = [lateralInput for i, lateralInput in enumerate(allLateralInputs) if i != columnNumber] l2.compute(feedforwardInput, lateralInputs, learn=True) objectL2Representations[objectName] = [set(l2.getActiveCells()) for l2 in l2Columns] for l2 in l2Columns: l2.reset() results = [] for objectName, featureLocations in objectDescriptions.iteritems(): for l2 in l2Columns: l2.reset() sensorPositionsIterator = greedySensorPositions(numColumns, len(featureLocations)) numTouchesPerTraversal = len(featureLocations) / float(numColumns) numInitialTouches = int(math.ceil(numInitialTraversals * numTouchesPerTraversal)) if noiseEverywhere: numTestTouches = int(math.ceil(1 * numTouchesPerTraversal)) else: numTestTouches = len(featureLocations) for touch in xrange(numInitialTouches + numTestTouches): sensorPositions = next(sensorPositionsIterator) for _ in xrange(3): allLateralInputs = [l2.getActiveCells() for l2 in l2Columns] for columnNumber, l2 in enumerate(l2Columns): position = sensorPositions[columnNumber] featureLocationName = featureLocations[position] feedforwardInput = featureLocationSDRs[columnNumber][featureLocationName] if noiseEverywhere or columnNumber == 0: noiseLevel = random.gauss(noiseMu, noiseSigma) noiseLevel = max(0.0, min(1.0, noiseLevel)) feedforwardInput = noisy(feedforwardInput, noiseLevel, L4_CELL_COUNT) lateralInputs = [lateralInput for i, lateralInput in enumerate(allLateralInputs) if i != columnNumber] l2.compute(feedforwardInput, lateralInputs, learn=False) if touch >= numInitialTouches: if noiseEverywhere: for columnNumber, l2 in enumerate(l2Columns): activeCells = set(l2.getActiveCells()) correctCells = objectL2Representations[objectName][columnNumber] results.append((len(activeCells & correctCells), len(activeCells - correctCells))) else: activeCells = set(l2Columns[0].getActiveCells()) correctCells = objectL2Representations[objectName][0] results.append((len(activeCells & correctCells), len(activeCells - correctCells))) return results
Touch every point on an object 'numInitialTraversals' times, then evaluate whether it has inferred the object by touching every point once more and checking the number of correctly active and incorrectly active cells. @param numColumns (int) The number of sensors to use @param l2Overrides (dict) Parameters for the ColumnPooler @param objectDescriptions (dict) A mapping of object names to their feature-locations. See 'createRandomObjectDescriptions'. @param noiseMu (float) The average amount of noise in a feedforward input. The noise level for each column's input is determined once per touch. It is a gaussian distribution with mean 'noiseMu' and sigma 'noiseSigma'. @param noiseSigma (float) The sigma for the gaussian distribution of noise levels. If the noiseSigma is 0, then the noise level will always be 'noiseMu'. @param numInitialTraversals (int) The number of times to traverse the object before testing whether the object has been inferred. @param noiseEverywhere (bool) If true, add noise to every column's input, and record accuracy of every column. If false, add noise to one column's input, and only record accuracy of that column.
386,366
def print_tb(tb, limit=None, file=None):
    if file is None:
        file = sys.stderr
    if limit is None:
        if hasattr(sys, 'tracebacklimit'):
            limit = sys.tracebacklimit
    # The join separator and trailing literal were lost in extraction; an
    # empty-string join plus a newline is a reasonable reconstruction.
    file.write(''.join(format_tb(tb, limit)) + '\n')
Print up to 'limit' stack trace entries from the traceback 'tb'. If 'limit' is omitted or None, all entries are printed. If 'file' is omitted or None, the output goes to sys.stderr; otherwise 'file' should be an open file or file-like object with a write() method.
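A minimal usage sketch, assuming the function above and format_tb are in scope; it mirrors the standard traceback.print_tb interface:
import sys

try:
    1 / 0
except ZeroDivisionError:
    _, _, tb = sys.exc_info()
    print_tb(tb, file=sys.stdout)   # prints the single stack frame of the division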
386,367
def on_receive_transactions(self, proto, transactions): "receives rlp.decoded serialized" log.debug() log.debug(, count=len(transactions), remote_id=proto) def _add_txs(): for tx in transactions: self.add_transaction(tx, origin=proto) gevent.spawn(_add_txs)
receives rlp.decoded serialized
386,368
def _with_meta_to_py_ast(
    ctx: GeneratorContext, node: WithMeta, **kwargs
) -> GeneratedPyAST:
    assert node.op == NodeOp.WITH_META
    handle_expr = _WITH_META_EXPR_HANDLER.get(node.expr.op)
    assert (
        handle_expr is not None
    ), "No expression handler for with-meta child node type"
    return handle_expr(ctx, node.expr, meta_node=node.meta, **kwargs)
Generate a Python AST node for Python interop method calls.
386,369
def qteSplitApplet(self, applet: (QtmacsApplet, str)=None, splitHoriz: bool=True, windowObj: QtmacsWindow=None): if isinstance(applet, str): newAppObj = self.qteGetAppletHandle(applet) else: newAppObj = applet if windowObj is None: windowObj = self.qteActiveWindow() if windowObj is None: msg = self.qteLogger.error(msg, stack_info=True) return if splitHoriz: splitOrientation = QtCore.Qt.Horizontal else: splitOrientation = QtCore.Qt.Vertical if newAppObj is None: newAppObj = self.qteNextApplet(skipVisible=True, skipInvisible=False) else: if newAppObj.qteIsVisible(): return False if newAppObj is None: self.qteLogger.warning() return False if windowObj.qteAppletSplitter.count() == 0: windowObj.qteAppletSplitter.qteAddWidget(newAppObj) windowObj.qteAppletSplitter.setOrientation(splitOrientation) return True curApp = self.qteNextApplet(numSkip=0, windowObj=windowObj) split = self._qteFindAppletInSplitter( curApp, windowObj.qteAppletSplitter) if split is None: msg = msg = msg.format(curApp.qteAppletID()) self.qteLogger.error(msg, stack_info=True) return False if split is windowObj.qteAppletSplitter: if split.count() == 1: split.qteAddWidget(newAppObj) split.setOrientation(splitOrientation) return True curAppIdx = split.indexOf(curApp) newSplit = QtmacsSplitter(splitOrientation, windowObj) curApp.setParent(None) newSplit.qteAddWidget(curApp) newSplit.qteAddWidget(newAppObj) split.insertWidget(curAppIdx, newSplit) split.qteAdjustWidgetSizes() return True
Reveal ``applet`` by splitting the space occupied by the current applet. If ``applet`` is already visible then the method does nothing. Furthermore, this method does not change the focus, ie. the currently active applet will remain active. If ``applet`` is **None** then the next invisible applet will be shown. If ``windowObj`` is **None** then the currently active window will be used. The ``applet`` parameter can either be an instance of ``QtmacsApplet`` or a string denoting an applet ID. In the latter case the ``qteGetAppletHandle`` method is used to fetch the respective applet instance. |Args| * ``applet`` (**QtmacsApplet**, **str**): the applet to reveal. * ``splitHoriz`` (**bool**): whether to split horizontally or vertically. * ``windowObj`` (**QtmacsWindow**): the window in which to reveal ``applet``. |Returns| * **bool**: if **True**, ``applet`` was revealed. |Raises| * **QtmacsArgumentError** if at least one argument has an invalid type.
386,370
def row_table(cls, d, order=None, labels=None): header = list(d) x = PrettyTable(labels) if order is None: order = header for key in order: value = d[key] if type(value) == list: x.add_row([key, value[0]]) for element in value[1:]: x.add_row(["", element]) elif type(value) == dict: value_keys = list(value) first_key = value_keys[0] rest_keys = value_keys[1:] x.add_row( [key, "{0} : {1}".format(first_key, value[first_key])]) for element in rest_keys: x.add_row(["", "{0} : {1}".format(element, value[element])]) else: x.add_row([key, value]) x.align = "l" return x
prints a pretty table from data in the dict. :param d: A dict to be printed :param order: The order in which the columns are printed. The order is specified by the key names of the dict. :param labels: The array of labels for the column
386,371
def security(policy, app_secret):
    validate(policy)
    policy_enc = base64.urlsafe_b64encode(json.dumps(policy).encode())
    signature = hmac.new(app_secret.encode(), policy_enc, hashlib.sha256).hexdigest()
    return {'policy': policy_enc, 'signature': signature}
Creates a valid signature and policy based on provided app secret and parameters ```python from filestack import Client, security # a policy requires at least an expiry policy = {'expiry': 56589012, 'call': ['read', 'store', 'pick']} sec = security(policy, 'APP_SECRET') client = Client('API_KEY', security=sec) ```
386,372
def replace_nan( trainingset, replace_with = None ):
    training_data = np.array( [instance.features for instance in trainingset ] ).astype( np.float64 )

    def encoder( dataset ):
        for instance in dataset:
            instance.features = instance.features.astype( np.float64 )
            if np.sum(np.isnan( instance.features )):
                if replace_with == None:
                    instance.features[ np.isnan( instance.features ) ] = means[ np.isnan( instance.features ) ]
                else:
                    instance.features[ np.isnan( instance.features ) ] = replace_with
        return dataset

    # The original checked the undefined name `replace_nan_with`; using the
    # `replace_with` parameter keeps the function runnable.
    if replace_with == None:
        means = np.mean( np.nan_to_num(training_data), axis=0 )

    return encoder
Replace instanced of "not a number" with either the mean of the signal feature or a specific value assigned by `replace_nan_with`
386,373
def update_entity(self, table_name, entity, if_match='*', timeout=None):
    _validate_not_none('table_name', table_name)
    request = _update_entity(entity, if_match, self.require_encryption,
                             self.key_encryption_key, self.encryption_resolver_function)
    request.host_locations = self._get_host_locations()
    request.path = _get_entity_path(table_name, entity['PartitionKey'], entity['RowKey'])
    request.query['timeout'] = _int_to_str(timeout)
    return self._perform_request(request, _extract_etag)
Updates an existing entity in a table. Throws if the entity does not exist. The update_entity operation replaces the entire entity and can be used to remove properties. :param str table_name: The name of the table containing the entity to update. :param entity: The entity to update. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: dict or :class:`~azure.storage.table.models.Entity` :param str if_match: The client may specify the ETag for the entity on the request in order to compare to the ETag maintained by the service for the purpose of optimistic concurrency. The update operation will be performed only if the ETag sent by the client matches the value maintained by the server, indicating that the entity has not been modified since it was retrieved by the client. To force an unconditional update, set If-Match to the wildcard character (*). :param int timeout: The server timeout, expressed in seconds. :return: The etag of the entity. :rtype: str
386,374
def UpdateFlow(self, client_id, flow_id, flow_obj=db.Database.unchanged, flow_state=db.Database.unchanged, client_crash_info=db.Database.unchanged, pending_termination=db.Database.unchanged, processing_on=db.Database.unchanged, processing_since=db.Database.unchanged, processing_deadline=db.Database.unchanged, cursor=None): updates = [] args = [] if flow_obj != db.Database.unchanged: updates.append("flow=%s") args.append(flow_obj.SerializeToString()) updates.append("flow_state=%s") args.append(int(flow_obj.flow_state)) updates.append("user_cpu_time_used_micros=%s") args.append( db_utils.SecondsToMicros(flow_obj.cpu_time_used.user_cpu_time)) updates.append("system_cpu_time_used_micros=%s") args.append( db_utils.SecondsToMicros(flow_obj.cpu_time_used.system_cpu_time)) updates.append("network_bytes_sent=%s") args.append(flow_obj.network_bytes_sent) updates.append("num_replies_sent=%s") args.append(flow_obj.num_replies_sent) if flow_state != db.Database.unchanged: updates.append("flow_state=%s") args.append(int(flow_state)) if client_crash_info != db.Database.unchanged: updates.append("client_crash_info=%s") args.append(client_crash_info.SerializeToString()) if pending_termination != db.Database.unchanged: updates.append("pending_termination=%s") args.append(pending_termination.SerializeToString()) if processing_on != db.Database.unchanged: updates.append("processing_on=%s") args.append(processing_on) if processing_since != db.Database.unchanged: updates.append("processing_since=FROM_UNIXTIME(%s)") args.append(mysql_utils.RDFDatetimeToTimestamp(processing_since)) if processing_deadline != db.Database.unchanged: updates.append("processing_deadline=FROM_UNIXTIME(%s)") args.append(mysql_utils.RDFDatetimeToTimestamp(processing_deadline)) if not updates: return query = "UPDATE flows SET last_update=NOW(6), " query += ", ".join(updates) query += " WHERE client_id=%s AND flow_id=%s" args.append(db_utils.ClientIDToInt(client_id)) args.append(db_utils.FlowIDToInt(flow_id)) updated = cursor.execute(query, args) if updated == 0: raise db.UnknownFlowError(client_id, flow_id)
Updates flow objects in the database.
386,375
def get_mods(self, project_path, vars): mods = [var.name for var in self.vars if vars[var.name].lower() == ] mods = set(mods) for name in self.mods_list: mods.add(name) if in mods or in mods: mods.add() mods.add() return mods
Build the mod list to enable
386,376
def _legacy_symbol_table(build_file_aliases): table = { alias: _make_target_adaptor(TargetAdaptor, target_type) for alias, target_type in build_file_aliases.target_types.items() } for alias, factory in build_file_aliases.target_macro_factories.items(): if len(factory.target_types) == 1: table[alias] = _make_target_adaptor( TargetAdaptor, tuple(factory.target_types)[0], ) table[] = _make_target_adaptor(PythonTargetAdaptor, PythonLibrary) table[] = _make_target_adaptor(AppAdaptor, JvmApp) table[] = _make_target_adaptor(JvmBinaryAdaptor, JvmBinary) table[] = _make_target_adaptor(AppAdaptor, PythonApp) table[] = _make_target_adaptor(PythonTestsAdaptor, PythonTests) table[] = _make_target_adaptor(PythonBinaryAdaptor, PythonBinary) table[] = _make_target_adaptor(RemoteSourcesAdaptor, RemoteSources) table[] = _make_target_adaptor(PageAdaptor, Page) table[] = PantsPluginAdaptor table[] = PantsPluginAdaptor return SymbolTable(table)
Construct a SymbolTable for the given BuildFileAliases. :param build_file_aliases: BuildFileAliases to register. :type build_file_aliases: :class:`pants.build_graph.build_file_aliases.BuildFileAliases` :returns: A SymbolTable.
386,377
def search(self, initial_state: State, transition_function: TransitionFunction) -> Dict[int, List[State]]: finished_states: Dict[int, List[State]] = defaultdict(list) states = [initial_state] step_num = 0 while states: step_num += 1 next_states: Dict[int, List[State]] = defaultdict(list) grouped_state = states[0].combine_states(states) allowed_actions = [] for batch_index, action_history in zip(grouped_state.batch_indices, grouped_state.action_history): allowed_actions.append(self._allowed_transitions[batch_index][tuple(action_history)]) for next_state in transition_function.take_step(grouped_state, max_actions=self._per_node_beam_size, allowed_actions=allowed_actions): batch_index = next_state.batch_indices[0] if next_state.is_finished(): finished_states[batch_index].append(next_state) else: next_states[batch_index].append(next_state) states = [] for batch_index, batch_states in next_states.items(): if self._beam_size: batch_states = batch_states[:self._beam_size] states.extend(batch_states) best_states: Dict[int, List[State]] = {} for batch_index, batch_states in finished_states.items(): finished_to_sort = [(-state.score[0].item(), state) for state in batch_states] finished_to_sort.sort(key=lambda x: x[0]) best_states[batch_index] = [state[1] for state in finished_to_sort[:self._beam_size]] return best_states
Parameters ---------- initial_state : ``State`` The starting state of our search. This is assumed to be `batched`, and our beam search is batch-aware - we'll keep ``beam_size`` states around for each instance in the batch. transition_function : ``TransitionFunction`` The ``TransitionFunction`` object that defines and scores transitions from one state to the next. Returns ------- best_states : ``Dict[int, List[State]]`` This is a mapping from batch index to the top states for that instance.
386,378
def refresh(self, accept=MEDIA_TYPE_TAXII_V20):
    response = self.__raw = self._conn.get(self.url, headers={"Accept": accept})
    self._populate_fields(**response)
Updates Status information
386,379
def heating_stats(self): local_5 = [] local_10 = [] for i in range(0, 10): level = self.past_heating_level(i) if level == 0: _LOGGER.debug() return if i < 5: local_5.append(level) local_10.append(level) _LOGGER.debug(, self.side, local_10) try: fiveminavg = statistics.mean(local_5) tenminavg = statistics.mean(local_10) _LOGGER.debug(, self.side, fiveminavg) _LOGGER.debug(, self.side, tenminavg) fivestdev = statistics.stdev(local_5) tenstdev = statistics.stdev(local_10) _LOGGER.debug(, self.side, fivestdev) _LOGGER.debug(, self.side, tenstdev) fivevar = statistics.variance(local_5) tenvar = statistics.variance(local_10) _LOGGER.debug(, self.side, fivevar) _LOGGER.debug(, self.side, tenvar) except: _LOGGER.debug()
Calculate some heating data stats.
386,380
def nearest(items, pivot): return min(items, key=lambda x: abs(x - pivot))
Find nearest value in array, including datetimes Args ---- items: iterable List of values from which to find nearest value to `pivot` pivot: int or float Value to find nearest of in `items` Returns ------- nearest: int or float Value in items nearest to `pivot`
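A short usage sketch, assuming the function above is in scope; it works for plain numbers and for datetimes alike because both support subtraction and abs():
from datetime import datetime

print(nearest([1, 5, 9], 6))   # -> 5

times = [datetime(2020, 1, 1), datetime(2020, 6, 1), datetime(2021, 1, 1)]
print(nearest(times, datetime(2020, 5, 1)))   # -> datetime(2020, 6, 1, 0, 0)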
386,381
def render_response(self):
    name, value = self.name, self.value
    renderer = self.attribute_renderers.get('name', None)
    if renderer:
        name = renderer(name)
    renderer = self.attribute_renderers.get('value', None)
    if renderer:
        value = renderer(value)
    return '; '.join(
        ['{0}={1}'.format(name, value)] +
        [key if isinstance(val, bool) else '='.join((key, val))
         for key, val in self.attributes().items()]
    )
Render as a string formatted for HTTP response headers (detailed 'Set-Cookie: ' style).
386,382
def get_method(self, method_name, default=None):
    for method in self.methods:
        if method.name == method_name:
            return method
    return default
Returns the contained method of the specified name, or `default` if not found.
386,383
def set_focused(self, account, is_focused): endpoint = if is_focused: classification = else: classification = data = dict(ClassifyAs=classification, SenderEmailAddress=dict(Address=self.email)) r = requests.post(endpoint, headers=account._headers, data=json.dumps(data)) result = check_response(r) self.focused = is_focused return result
Emails from this contact will either always be put in the Focused inbox, or always put in Other, based on the value of is_focused. Args: account (OutlookAccount): The :class:`OutlookAccount <pyOutlook.core.main.OutlookAccount>` the override should be set for is_focused (bool): Whether this contact should be set to Focused, or Other. Returns: True if the request was successful
386,384
def _leave_event_hide(self):
    if (not self._hide_timer.isActive() and
            QtGui.qApp.topLevelAt(QtGui.QCursor.pos()) != self):
        self._hide_timer.start(300, self)
Hides the tooltip after some time has passed (assuming the cursor is not over the tooltip).
386,385
def p_base_type(self, p): name = p[1] if name == : name = p[0] = ast.PrimitiveType(name, p[2])
base_type : BOOL annotations | BYTE annotations | I8 annotations | I16 annotations | I32 annotations | I64 annotations | DOUBLE annotations | STRING annotations | BINARY annotations
386,386
def reset(self):
    with util.disable_constant(self):
        for k, p in self.params().items():
            if k != 'name':
                setattr(self, k, p.default)
Resets stream parameters to their defaults.
386,387
def connect(self, address='session'):
    # Default reconstructed from the docstring's special addresses; the
    # original DbusError message was lost in extraction.
    if isinstance(address, six.string_types):
        addresses = parse_dbus_address(address)
    else:
        addresses = [address]
    for addr in addresses:
        try:
            super(DbusClient, self).connect(addr)
        except pyuv.error.UVError:
            continue
        break
    else:
        raise DbusError()
    self.get_unique_name()
Connect to *address* and wait until the connection is established. The *address* argument must be a D-BUS server address, in the format described in the D-BUS specification. It may also be one of the special addresses ``'session'`` or ``'system'``, to connect to the D-BUS session and system bus, respectively.
386,388
def generate(self, trilegal_filename, ra=None, dec=None, n=2e4, ichrone=, MAfn=None, mags=None, maxrad=None, f_binary=0.4, **kwargs): n = int(n) bgpop = BGStarPopulation_TRILEGAL(trilegal_filename, ra=ra, dec=dec, mags=mags, maxrad=maxrad, **kwargs) mass = bgpop.stars[].values age = bgpop.stars[].values feh = bgpop.stars[].values ichrone = get_ichrone(ichrone) pct = 0.05 mass[mass < ichrone.minmass*(1+pct)] = ichrone.minmass*(1+pct) mass[mass > ichrone.maxmass*(1-pct)] = ichrone.maxmass*(1-pct) age[age < ichrone.minage*(1+pct)] = ichrone.minage*(1+pct) age[age > ichrone.maxage*(1-pct)] = ichrone.maxage*(1-pct) feh[feh < ichrone.minfeh+0.05] = ichrone.minfeh+0.05 feh[feh > ichrone.maxfeh-0.05] = ichrone.maxfeh-0.05 distance = bgpop.stars[].values pop = MultipleStarPopulation(mA=mass, age=age, feh=feh, f_triple=0, f_binary=1, distance=distance, ichrone=ichrone) all_stars = pop.stars.dropna(subset=[]) all_stars.reset_index(inplace=True) stars = pd.DataFrame() df_orbpop = pd.DataFrame() tot_prob = None; tot_dprob=None; prob_norm=None n_adapt = n while len(stars) < n: n_adapt = int(n_adapt) inds = np.random.randint(len(all_stars), size=n_adapt) s = all_stars.iloc[inds] u1A, u2A = ldcoeffs(s[], s[]) u1B, u2B = ldcoeffs(s[], s[]) inds, df, (prob,dprob) = calculate_eclipses(s[], s[], s[], s[], s[.format(self.band)], s[.format(self.band)], u11s=u1A, u21s=u2A, u12s=u1B, u22s=u2B, band=self.band, period=self.period, calc_mininc=True, return_indices=True, MAfn=MAfn) s = s.iloc[inds].copy() s.reset_index(inplace=True) for col in df.columns: s[col] = df[col] stars = pd.concat((stars, s)) logging.info(.format(len(stars),n)) if tot_prob is None: prob_norm = (1/dprob**2) tot_prob = prob tot_dprob = dprob else: prob_norm = (1/tot_dprob**2 + 1/dprob**2) tot_prob = (tot_prob/tot_dprob**2 + prob/dprob**2)/prob_norm tot_dprob = 1/np.sqrt(prob_norm) n_adapt = min(int(1.2*(n-len(stars)) * n_adapt//len(s)), 5e5) n_adapt = max(n_adapt, 100) n_adapt = int(n_adapt) stars = stars.iloc[:n] if in stars: stars.drop(, axis=1, inplace=True) stars = stars.reset_index() stars.drop(, axis=1, inplace=True) stars[] = stars[] stars[] = stars[] stars[] = stars[] stars[] = stars[] MultipleStarPopulation.__init__(self, stars=stars, f_triple=0, f_binary=f_binary, period_long=self.period) priorfactors = {:f_binary} self.density = bgpop.density self.trilegal_args = bgpop.trilegal_args self._maxrad = bgpop._maxrad EclipsePopulation.__init__(self, stars=stars, period=self.period, cadence=self.cadence, model=self.model, lhoodcachefile=self.lhoodcachefile, priorfactors=priorfactors, prob=tot_prob) self.stars[] = randpos_in_circle(len(self.stars), self._maxrad, return_rad=True)
Generate population.
386,389
def daterange(value, details=False):
    if not isinstance(value, db.DateRange):
        raise ValueError()  # error message literal elided in the source
    if details:
        return daterange_with_details(value)
    date_format = 'yyyy'  # literal elided in the source; a year-only pattern is assumed
    delta = value.end - value.start
    start, end = None, None
    start = format_date(value.start, date_format)
    if delta.days > 365:
        end = format_date(value.end, date_format)
    # The separator literal was elided in the source; a plain dash is assumed.
    return '{start}-{end}'.format(start=start, end=end) if end else start
Display a date range in the shortest possible manner.
386,390
def set_data(self, index, value):
    acces, field = self.get_item(index), self.header[index.column()]
    self.beginResetModel()
    self.set_data_hook(acces, field, value)
    self.endResetModel()
Uses the given data setter and emits the modelReset signal
386,391
def _run_command(self, arguments: List[str], input_data: Any = None,
                 output_encoding: str = "utf-8") -> str:
    process = subprocess.Popen(arguments, stdout=subprocess.PIPE,
                               stdin=subprocess.PIPE, stderr=subprocess.PIPE)

    if isinstance(input_data, List):
        # Stream each document to stdin as a separate JSON object
        for to_write in input_data:
            to_write_as_json = json.dumps(to_write)
            process.stdin.write(str.encode(to_write_as_json))
        input_data = None
    else:
        input_data = str.encode(json.dumps(input_data))

    timeout_in_seconds = self.timeout_queries_after.total_seconds() \
        if self.timeout_queries_after is not None else None
    out, error = process.communicate(input=input_data, timeout=timeout_in_seconds)

    if len(out) == 0 and len(error) > 0:
        raise RuntimeError(error)

    return out.decode(output_encoding).rstrip()
Run a command as a subprocess.

Ignores errors given over stderr if there is output on stdout (this is the case where baton has
been run correctly and has expressed the error in its JSON output, which can be handled more
appropriately upstream of this method).

:param arguments: the arguments to run
:param input_data: the input data to pass to the subprocess
:param output_encoding: optional specification of the output encoding to expect
:return: the process' standard out
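As a standalone sketch of the same pattern (not the class method itself), the snippet below feeds a JSON document to a subprocess over stdin and reads its stdout; `python3 -c` stands in for the real baton binary, which is assumed not to be available.

import json
import subprocess

# Echo the JSON document back in upper case; a stand-in for a baton-style query binary.
proc = subprocess.Popen(
    ["python3", "-c", "import sys; print(sys.stdin.read().upper())"],
    stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate(input=json.dumps({"collection": "/example"}).encode(), timeout=10)
if not out and err:
    raise RuntimeError(err)
print(out.decode("utf-8").rstrip())   # {"COLLECTION": "/EXAMPLE"}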
386,392
def prepare_response(self, request, cached):
    if "*" in cached.get("vary", {}):
        return

    for header, value in cached.get("vary", {}).items():
        if request.headers.get(header, None) != value:
            return

    body_raw = cached["response"].pop("body")
    try:
        body = io.BytesIO(body_raw)
    except TypeError:
        body = io.BytesIO(body_raw.encode())

    return HTTPResponse(
        body=body,
        preload_content=False,
        **cached["response"]
    )
Verify our vary headers match and construct a real urllib3 HTTPResponse object.
386,393
def line_cap_type(self):
    # The bytes key literal was elided in the source; b'strokeStyleLineCapType'
    # is an assumption based on the property name.
    key = self._data.get(b'strokeStyleLineCapType').enum
    return self.STROKE_STYLE_LINE_CAP_TYPES.get(key, str(key))
Cap type, one of `butt`, `round`, `square`.
386,394
def nodes(self):
    # Both string literals were elided in the source; the relation and
    # collection names below are assumptions based on the return type.
    resource = sub_collection(
        self.get_relation('vss_container_node'), VSSContainerNode)
    resource._load_from_engine(self, 'nodes')
    return resource
Return the nodes for this VSS Container :rtype: SubElementCollection(VSSContainerNode)
386,395
def repl_command(fxn):
    @functools.wraps(fxn)
    def wrapper(self, arglist):
        args = []
        kwargs = {}
        if arglist:
            for arg in shlex.split(arglist):
                if "=" in arg:
                    split = arg.split("=", 1)
                    kwargs[split[0]] = split[1]
                else:
                    args.append(arg)
        return fxn(self, *args, **kwargs)
    return wrapper
Decorator for cmd methods

Parses arguments from the arg string and passes them to the method as *args and **kwargs.
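A minimal usage sketch with the standard-library `cmd` module, assuming the decorator above is in scope; the `MyRepl` class and its `do_put` command are hypothetical.

import cmd

class MyRepl(cmd.Cmd):
    @repl_command
    def do_put(self, key, value, overwrite="false"):
        print(key, value, overwrite)

# Typing "put color blue overwrite=true" at the prompt calls
# do_put("color", "blue", overwrite="true") and prints "color blue true".
MyRepl().onecmd("put color blue overwrite=true")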
386,396
def verify(self, otp, for_time=None, valid_window=0):
    if for_time is None:
        for_time = datetime.datetime.now()

    if valid_window:
        for i in range(-valid_window, valid_window + 1):
            if utils.strings_equal(str(otp), str(self.at(for_time, i))):
                return True
        return False

    return utils.strings_equal(str(otp), str(self.at(for_time)))
Verifies the OTP passed in against the current time OTP

@param [String/Integer] otp the OTP to check against
@param [Integer] valid_window extends the validity to this many counter ticks before and after the current one
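A usage sketch assuming a pyotp-style `TOTP` class exposes this method; the base32 secret below is a made-up example.

import pyotp

totp = pyotp.TOTP("JBSWY3DPEHPK3PXP")       # example secret
code = totp.now()                           # current one-time password
print(totp.verify(code))                    # True
print(totp.verify(code, valid_window=1))    # True; also accepts the adjacent time steps
print(totp.verify("000000"))                # almost certainly False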
386,397
def expectation_importance_sampler_logspace(
        log_f, log_p, sampling_dist_q, z=None, n=None, seed=None,
        name='expectation_importance_sampler_logspace'):
    # The default for `name` was elided in the source; reusing the function's
    # own name is an assumption, following the usual TF op-naming convention.
    q = sampling_dist_q
    with tf.name_scope(name):
        z = _get_samples(q, z, n, seed)
        log_values = log_f(z) + log_p(z) - q.log_prob(z)
        return _logspace_mean(log_values)
r"""Importance sampling with a positive function, in log-space.

With \\(p(z) := exp^{log_p(z)}\\), and \\(f(z) = exp{log_f(z)}\\), this `Op` returns

\\(Log[ n^{-1} sum_{i=1}^n [ f(z_i) p(z_i) / q(z_i) ] ], z_i ~ q,\\)
\\(\approx Log[ E_q[ f(Z) p(Z) / q(Z) ] ]\\)
\\(= Log[E_p[f(Z)]]\\)

This integral is done in log-space with max-subtraction to better handle the often extreme values
that `f(z) p(z) / q(z)` can take on.

In contrast to `expectation_importance_sampler`, this `Op` returns values in log-space.

User supplies either `Tensor` of samples `z`, or number of samples to draw `n`.

Args:
  log_f: Callable mapping samples from `sampling_dist_q` to `Tensors` with shape broadcastable to
    `q.batch_shape`. For example, `log_f` works "just like" `sampling_dist_q.log_prob`.
  log_p: Callable mapping samples from `sampling_dist_q` to `Tensors` with shape broadcastable to
    `q.batch_shape`. For example, `log_p` works "just like" `q.log_prob`.
  sampling_dist_q: The sampling distribution. `tfp.distributions.Distribution`. `float64` `dtype`
    recommended. `log_p` and `q` should be supported on the same set.
  z: `Tensor` of samples from `q`, produced by `q.sample` for some `n`.
  n: Integer `Tensor`. Number of samples to generate if `z` is not provided.
  seed: Python integer to seed the random number generator.
  name: A name to give this `Op`.

Returns:
  Logarithm of the importance sampling estimate. `Tensor` with `shape` equal to batch shape of `q`,
  and `dtype` = `q.dtype`.
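A minimal usage sketch, assuming TensorFlow and TensorFlow Probability are installed and the function above is in scope. With `p = Normal(0, 1)` and `f(z) = z**2`, the true value of `Log[E_p[f(Z)]]` is `log(1) = 0`, so the estimate should be close to 0.

import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
p = tfd.Normal(loc=0., scale=1.)     # target distribution
q = tfd.Normal(loc=0., scale=2.)     # broader proposal distribution

log_estimate = expectation_importance_sampler_logspace(
    log_f=lambda z: tf.math.log(tf.square(z)),   # log f(z) = log z^2
    log_p=p.log_prob,
    sampling_dist_q=q,
    n=int(1e5),
    seed=42)
# log_estimate should be near 0.0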
386,398
def write_elements(fd, mtp, data, is_name=False): fmt = etypes[mtp][] if isinstance(data, Sequence): if fmt == or is_name: if isinstance(data, bytes): if is_name and len(data) > 31: raise ValueError( .format(data)) fmt = .format(len(data)) data = (data,) else: fmt = .join(.format(len(s)) for s in data) else: l = len(data) if l == 0: fmt = if l > 1: fmt = .format(l, fmt) else: data = (data,) num_bytes = struct.calcsize(fmt) if num_bytes <= 4: if num_bytes < 4: fmt += .format(4 - num_bytes) fd.write(struct.pack( + fmt, etypes[mtp][], *chain([num_bytes], data))) return fd.write(struct.pack(, etypes[mtp][], num_bytes)) mod8 = num_bytes % 8 if mod8: fmt += .format(8 - mod8) fd.write(struct.pack(fmt, *data))
Write data element tag and data. The tag contains the array type and the number of bytes the array data will occupy when written to file. If data occupies 4 bytes or less, it is written immediately as a Small Data Element (SDE).
386,399
def graft_neuron(root_section):
    assert isinstance(root_section, Section)
    return Neuron(soma=Soma(root_section.points[:1]),
                  neurites=[Neurite(root_section)])
Returns a neuron starting at root_section