381,400
def _make_pkh_address(pubkey_hash, witness=False, cashaddr=True):
    addr_bytes = bytearray()
    if riemann.network.CASHADDR_P2PKH is not None and cashaddr:
        addr_bytes.extend(riemann.network.CASHADDR_P2PKH)
        addr_bytes.extend(pubkey_hash)
        return riemann.network.CASHADDR_ENCODER.encode(addr_bytes)
    if witness:
        addr_bytes.extend(riemann.network.P2WPKH_PREFIX)
        addr_bytes.extend(pubkey_hash)
        return riemann.network.SEGWIT_ENCODER.encode(addr_bytes)
    else:
        addr_bytes.extend(riemann.network.P2PKH_PREFIX)
        addr_bytes.extend(pubkey_hash)
        return riemann.network.LEGACY_ENCODER.encode(addr_bytes)
bytes, bool -> str
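A minimal usage sketch, assuming riemann is configured for Bitcoin mainnet; the network-selection call and the hash160 value are assumptions, not part of the original snippet:

# Hypothetical usage; network setup and hash value are illustrative.
import riemann
riemann.select_network('bitcoin_main')
pubkey_hash = bytes.fromhex('76a04053bda0a88bda5177b86a15c3b29f559873')
legacy_addr = _make_pkh_address(pubkey_hash)                # base58 P2PKH
segwit_addr = _make_pkh_address(pubkey_hash, witness=True)  # bech32 P2WPKH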
381,401
def get_report(self, value):
    if self.multiselect:
        value = value or []
        children = []
        for child in value:
            children.append(self.cast_to_report(child))
        return children
    return self.cast_to_report(value)
Return provided field Python value formatted for use in report filter
381,402
def request(self, persist_id=None):
    node = new_ele("cancel-commit")
    if persist_id is not None:
        sub_ele(node, "persist-id").text = persist_id
    return self._request(node)
Cancel an ongoing confirmed commit. Depends on the `:candidate` and `:confirmed-commit` capabilities. *persist-id* value must be equal to the value given in the <persist> parameter to the previous <commit> operation.
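A hedged usage sketch with ncclient; the host, credentials and persist token below are placeholders:

# Hypothetical usage; connection details are placeholders.
from ncclient import manager

with manager.connect(host='10.0.0.1', username='admin', password='secret',
                     hostkey_verify=False) as m:
    m.commit(confirmed=True, persist='token-1')   # confirmed commit
    m.cancel_commit(persist_id='token-1')         # cancel it via this operation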
381,403
def plot_spectra_overlapped(ss, title=None, setup=_default_setup):
    plt.figure()
    draw_spectra_overlapped(ss, title, setup)
    plt.show()
Plots one or more spectra in the same plot. Args: ss: list of Spectrum objects title=None: window title setup: PlotSpectrumSetup object
381,404
def attr_case_name(self, name):
    lower_name = name.lower()
    for i in self.attrs():
        if lower_name == i.lower():
            return i
    for key in self.keys_nD():
        for i in self[key].children.attrs():
            if lower_name == i.lower():
                return i
    return name
Returns preserved case name for case insensitive value of name. Checks first within standard attributes. If not found there, checks attributes for higher order data structures. If not found, returns supplied name as it is available for use. Intended to be used to help ensure that the same case is applied to all repetitions of a given variable name. Parameters ---------- name : str name of variable to get stored case form Returns ------- str name in proper case
381,405
def mmapFile(self, addr, size, perms, filename, offset=0):
    # Assert messages are restored from the docstring; the _publish event
    # names and the debug format string were stripped in extraction and are
    # reconstructed here (they may differ from the original wording).
    assert addr is None or isinstance(addr, int), 'Address shall be concrete'
    assert size > 0
    self.cpu._publish('will_map_memory', addr, size, perms, filename, offset)
    if addr is not None:
        assert addr < self.memory_size, 'Address too big'
        addr = self._floor(addr)
    size = self._ceil(size)
    addr = self._search(size, addr)
    for i in range(self._page(addr), self._page(addr + size)):
        assert i not in self._page2map, 'Map already used'
    m = FileMap(addr, size, perms, filename, offset)
    self._add(m)
    logger.debug('New file-memory map @%x size:%x', addr, size)
    self.cpu._publish('did_map_memory', addr, size, perms, filename, offset, addr)
    return addr
Creates a new file mapping in the memory address space. :param addr: the starting address (took as hint). If C{addr} is C{0} the first big enough chunk of memory will be selected as starting address. :param size: the contents of a file mapping are initialized using C{size} bytes starting at offset C{offset} in the file C{filename}. :param perms: the access permissions to this memory. :param filename: the pathname to the file to map. :param offset: the contents of a file mapping are initialized using C{size} bytes starting at offset C{offset} in the file C{filename}. :return: the starting address where the file was mapped. :rtype: int :raises error: - 'Address shall be concrete' if C{addr} is not an integer number. - 'Address too big' if C{addr} goes beyond the limit of the memory. - 'Map already used' if the piece of memory starting in C{addr} and with length C{size} isn't free.
381,406
def jacobian(sess, x, grads, target, X, nb_features, nb_classes, feed=None):
    warnings.warn(
        "This function is dead code and will be removed on or after 2019-07-18")
    feed_dict = {x: X}
    if feed is not None:
        feed_dict.update(feed)
    jacobian_val = np.zeros((nb_classes, nb_features), dtype=np_dtype)
    for class_ind, grad in enumerate(grads):
        run_grad = sess.run(grad, feed_dict)
        jacobian_val[class_ind] = np.reshape(run_grad, (1, nb_features))
    other_classes = utils.other_classes(nb_classes, target)
    grad_others = np.sum(jacobian_val[other_classes, :], axis=0)
    return jacobian_val[target], grad_others
TensorFlow implementation of the forward derivative / Jacobian :param x: the input placeholder :param grads: the list of TF gradients returned by jacobian_graph() :param target: the target misclassification class :param X: numpy array with sample input :param nb_features: the number of features in the input :return: matrix of forward derivatives flattened into vectors
381,407
def set_key(self, key):
    key_len = len(key)
    if key_len not in [16, 24, 32]:
        raise KeyError("key must be 16, 24 or 32 bytes")
    if key_len % 4:
        raise KeyError("key not a multiple of 4")
    if key_len > 32:
        raise KeyError("key_len > 32")
    self.context = TWI()
    key_word32 = [0] * 32
    i = 0
    while key:
        key_word32[i] = struct.unpack("<L", key[0:4])[0]
        key = key[4:]
        i += 1
    set_key(self.context, key_word32, key_len)
Init.
381,408
def _edges_classify_intersection9():
    edges1 = (
        bezier.Curve.from_nodes(
            np.asfortranarray([[32.0, 30.0], [20.0, 25.0]])),
        bezier.Curve.from_nodes(
            np.asfortranarray([[30.0, 25.0, 20.0], [25.0, 20.0, 20.0]])),
        bezier.Curve.from_nodes(
            np.asfortranarray([[20.0, 25.0, 30.0], [20.0, 20.0, 15.0]])),
        bezier.Curve.from_nodes(
            np.asfortranarray([[30.0, 32.0], [15.0, 20.0]])),
    )
    edges2 = (
        bezier.Curve.from_nodes(
            np.asfortranarray([[8.0, 10.0], [20.0, 15.0]])),
        bezier.Curve.from_nodes(
            np.asfortranarray([[10.0, 15.0, 20.0], [15.0, 20.0, 20.0]])),
        bezier.Curve.from_nodes(
            np.asfortranarray([[20.0, 15.0, 10.0], [20.0, 20.0, 25.0]])),
        bezier.Curve.from_nodes(
            np.asfortranarray([[10.0, 8.0], [25.0, 20.0]])),
    )
    return edges1, edges2
The edges for the curved polygon intersection used below. Helper for :func:`classify_intersection9`.
381,409
def interpolate(self, factor, minLayer, maxLayer, round=True,
                suppressError=True):
    factor = normalizers.normalizeInterpolationFactor(factor)
    if not isinstance(minLayer, BaseLayer):
        raise TypeError(("Interpolation to an instance of %r can not be "
                         "performed from an instance of %r.")
                        % (self.__class__.__name__,
                           minLayer.__class__.__name__))
    if not isinstance(maxLayer, BaseLayer):
        raise TypeError(("Interpolation to an instance of %r can not be "
                         "performed from an instance of %r.")
                        % (self.__class__.__name__,
                           maxLayer.__class__.__name__))
    round = normalizers.normalizeBoolean(round)
    suppressError = normalizers.normalizeBoolean(suppressError)
    self._interpolate(factor, minLayer, maxLayer, round=round,
                      suppressError=suppressError)
Interpolate all possible data in the layer. :: >>> layer.interpolate(0.5, otherLayer1, otherLayer2) >>> layer.interpolate((0.5, 2.0), otherLayer1, otherLayer2, round=False) The interpolation occurs on a 0 to 1.0 range where **minLayer** is located at 0 and **maxLayer** is located at 1.0. **factor** is the interpolation value. It may be less than 0 and greater than 1.0. It may be a :ref:`type-int-float` or a tuple of two :ref:`type-int-float`. If it is a tuple, the first number indicates the x factor and the second number indicates the y factor. **round** indicates if the result should be rounded to integers. **suppressError** indicates if incompatible data should be ignored or if an error should be raised when such incompatibilities are found.
381,410
def true_num_reactions(model, custom_spont_id=None):
    true_num = 0
    for rxn in model.reactions:
        if len(rxn.genes) == 0:
            continue
        if len(rxn.genes) == 1 and is_spontaneous(list(rxn.genes)[0],
                                                  custom_id=custom_spont_id):
            continue
        else:
            true_num += 1
    return true_num
Return the number of reactions associated with a gene. Args: model (Model): custom_spont_id (str): Optional custom spontaneous ID if it does not match the regular expression ``[Ss](_|)0001`` Returns: int: Number of reactions associated with a gene
381,411
def mongodump(mongo_user, mongo_password, mongo_dump_directory_path,
              database=None, silent=False):
    if path.exists(mongo_dump_directory_path):
        rmtree(mongo_dump_directory_path)
    if silent:
        dump_command = ("mongodump --quiet -u %s -p %s -o %s"
                        % (mongo_user, mongo_password,
                           mongo_dump_directory_path))
    else:
        dump_command = ("mongodump -u %s -p %s -o %s"
                        % (mongo_user, mongo_password,
                           mongo_dump_directory_path))
    if database:
        dump_command += (" --db %s" % database)
    call(dump_command, silent=silent)
Runs mongodump using the provided credentials on the running mongod process. WARNING: This function will delete the contents of the provided directory before it runs.
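A hedged invocation sketch; credentials and paths are placeholders, and note the target directory is wiped first:

# Hypothetical call; dumps only the app_db database, quietly.
mongodump('backup_user', 's3cret', '/tmp/mongo_dump',
          database='app_db', silent=True)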
381,412
def sanitize_capabilities(caps):
    platform = caps["platform"]
    upper_platform = platform.upper()
    if upper_platform.startswith("WINDOWS 8"):
        caps["platform"] = "WIN8"
    elif upper_platform.startswith("OS X "):
        caps["platform"] = "MAC"
    elif upper_platform == "WINDOWS 10":
        del caps["platform"]
        caps["os"] = "Windows"
        caps["os_version"] = "10"
    if caps["browserName"].upper() == "MICROSOFTEDGE":
        caps["version"] = caps["version"].split(".", 1)[0] + ".0"
    caps["browser_version"] = caps["version"]
    del caps["version"]
    return caps
Sanitize the capabilities we pass to Selenic so that they can be consumed by Browserstack. :param caps: The capabilities passed to Selenic. This dictionary is modified. :returns: The sanitized capabilities.
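An illustrative before/after of the transformation (values are made up):

# Hypothetical input capabilities:
caps = {"platform": "Windows 10", "browserName": "MicrosoftEdge",
        "version": "18.17763"}
sanitize_capabilities(caps)
# caps is now:
# {"os": "Windows", "os_version": "10",
#  "browserName": "MicrosoftEdge", "browser_version": "18.0"}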
381,413
def choose_parent_view(self, request):
    # The stripped dict key is restored as 'model_admin', matching Wagtail's
    # modeladmin view conventions described in the docstring.
    kwargs = {'model_admin': self}
    view_class = self.choose_parent_view_class
    return view_class.as_view(**kwargs)(request)
Instantiates a class-based view to provide a view that allows a parent page to be chosen for a new object, where the assigned model extends Wagtail's Page model, and there is more than one potential parent for new instances. The view class used can be overridden by changing the 'choose_parent_view_class' attribute.
381,414
def get_or_create(cls, filter_key=None, with_status=False, **kwargs):
    # Stripped string literals ('name', 'create' and the error messages) are
    # restored from the docstring; the exact message wording is a best guess.
    was_created = False
    if 'name' not in kwargs:
        raise ElementNotFound('The name field is required to get or create '
                              'an element.')
    if filter_key:
        elements = cls.objects.filter(**filter_key)
        element = elements.first() if elements.exists() else None
    else:
        try:
            element = cls.get(kwargs.get('name'))
        except ElementNotFound:
            if not hasattr(cls, 'create'):
                raise CreateElementFailed(
                    'Read-only element of type %s cannot be created: %s'
                    % (cls.__name__, kwargs['name']))
            element = None
    if not element:
        params = {k: v() if callable(v) else v for k, v in kwargs.items()}
        try:
            element = cls.create(**params)
            was_created = True
        except TypeError:
            raise CreateElementFailed(
                'Failed to create element of type %s: %s'
                % (cls.__name__, kwargs['name']))
    if with_status:
        return element, was_created
    return element
Convenience method to retrieve an Element or create if it does not exist. If an element does not have a `create` classmethod, then it is considered read-only and the request will be redirected to :meth:`~get`. Any keyword arguments passed except the optional filter_key will be used in a create() call. If filter_key is provided, this should define an attribute and value to use for an exact match on the element. Valid attributes are ones required on the elements ``create`` method or can be viewed by the elements class docs. If no filter_key is provided, the name field will be used to find the element. :: >>> Network.get_or_create( filter_key={'ipv4_network': '123.123.123.0/24'}, name='mynetwork', ipv4_network='123.123.123.0/24') Network(name=mynetwork) The kwargs should be used to satisfy the elements ``create`` classmethod parameters to create in the event it cannot be found. :param dict filter_key: filter key represents the data attribute and value to use to find the element. If none is provided, the name field will be used. :param kwargs: keyword arguments mapping to the elements ``create`` method. :param bool with_status: if set to True, a tuple is returned with (Element, created), where the second tuple item indicates if the element has been created or not. :raises CreateElementFailed: could not create element with reason :raises ElementNotFound: if read-only element does not exist :return: element instance by type :rtype: Element
381,415
def process_service_check_result(self, service, return_code, plugin_output):
    # Codec names, log messages, log levels and the monitoring-log format
    # string were stripped in extraction; they are reconstructed here as
    # reasonable approximations of the Alignak originals.
    now = time.time()
    cls = service.__class__
    if not cls.accept_passive_checks or not service.passive_checks_enabled:
        return
    try:
        plugin_output = plugin_output.decode('utf8', 'ignore')
        logger.debug('%s, decoded passive check output: %s',
                     service.get_full_name(), plugin_output)
    except AttributeError:
        # Already a string
        pass
    except UnicodeError:
        pass
    if self.current_timestamp < service.last_chk:
        logger.debug('Passive check result for %s is too old (%s). '
                     'Ignoring output: %s', service.get_full_name(),
                     self.current_timestamp < service.last_chk, plugin_output)
        return
    chk = service.launch_check(now, self.hosts, self.services,
                               self.timeperiods,
                               self.daemon.macromodulations,
                               self.daemon.checkmodulations,
                               self.daemon.checks, force=True)
    chk.set_type_passive()
    self.send_an_element(chk)
    if self.my_conf.log_passive_checks:
        log_level = 'info'
        if return_code == 1:
            log_level = 'warning'
        if return_code == 2:
            log_level = 'error'
        self.send_an_element(make_monitoring_log(
            log_level, 'PASSIVE SERVICE CHECK: %s;%s;%d;%s;%s;%s' % (
                self.hosts[service.host].get_name(), service.get_name(),
                return_code, chk.output, chk.long_output, chk.perf_data)))
Process service check result Format of the line that triggers function call:: PROCESS_SERVICE_CHECK_RESULT;<host_name>;<service_description>;<return_code>;<plugin_output> :param service: service to process check to :type service: alignak.objects.service.Service :param return_code: exit code of plugin :type return_code: int :param plugin_output: plugin output :type plugin_output: str :return: None
381,416
def grouper(iterable, n, fillvalue=None):
    if isinstance(iterable, int):
        warnings.warn(
            "grouper expects iterable as first parameter",
            DeprecationWarning,
        )
        n, iterable = iterable, n
    args = [iter(iterable)] * n
    return zip_longest(fillvalue=fillvalue, *args)
Collect data into fixed-length chunks or blocks. >>> list(grouper('ABCDEFG', 3, 'x')) [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')]
381,417
def readline(self, raise_exception=False):
    # The b'\r\n' delimiter and the dead-connection branch were lost in
    # extraction; they are reconstructed here following the docstring.
    buf = self.buffer
    if self.socket:
        recv = self.socket.recv
    else:
        recv = lambda bufsize: b''
    while True:
        index = buf.find(b'\r\n')
        if index >= 0:
            break
        data = recv(4096)
        if not data:
            # Connection is dead: raise or return an empty string.
            if raise_exception:
                raise _ConnectionDeadError()
            return ''
        buf += data
    self.buffer = buf[index + 2:]
    return buf[:index]
Read a line and return it. If "raise_exception" is set, raise _ConnectionDeadError if the read fails, otherwise return an empty string.
381,418
def get_path_variables(**kwargs):
    # Keyword names were stripped in extraction; they are restored from the
    # docstring, and 'course_key' is inferred from the variable it feeds.
    enterprise_customer_uuid = kwargs.get('enterprise_uuid', '')
    course_run_id = kwargs.get('course_run_id', '')
    course_key = kwargs.get('course_key', '')
    program_uuid = kwargs.get('program_uuid', '')
    return enterprise_customer_uuid, course_run_id, course_key, program_uuid
Get the base variables for any view to route to. Currently gets: - `enterprise_uuid` - the UUID of the enterprise customer. - `course_run_id` - the ID of the course, if applicable. - `program_uuid` - the UUID of the program, if applicable.
381,419
def use_comparative_vault_view(self):
    self._catalog_view = COMPARATIVE
    if self._catalog_session is not None:
        self._catalog_session.use_comparative_catalog_view()
The returns from the lookup methods may omit or translate elements based on this session, such as authorization, and not result in an error. This view is used when greater interoperability is desired at the expense of precision. *compliance: mandatory -- This method must be implemented.*
381,420
def report_device_attributes(self, mode=0, **kwargs):
    if mode == 0 and not kwargs.get("private"):
        self.write_process_input(ctrl.CSI + "?6c")
Report terminal identity. .. versionadded:: 0.5.0 .. versionchanged:: 0.7.0 If ``private`` keyword argument is set, the method does nothing. This behaviour is consistent with VT220 manual.
381,421
def list_listeners(self, retrieve_all=True, **_params):
    # The stripped resource-name literal is restored as 'listeners',
    # following neutronclient's list() convention.
    return self.list('listeners', self.lbaas_listeners_path,
                     retrieve_all, **_params)
Fetches a list of all lbaas_listeners for a project.
381,422
def add_service(self, name, long_name, preregistered=False, notify=True):
    # The service-record keys and the notification topic were stripped; the
    # names used here ('state', 'heartbeat_threshold', 'new_service') are
    # reconstructions, not confirmed originals.
    if name in self.services:
        raise ArgumentError("Could not add service because the long_name "
                            "is taken", long_name=long_name)
    serv_state = states.ServiceState(name, long_name, preregistered)
    service = {
        'state': serv_state,
        'heartbeat_threshold': 600
    }
    self.services[name] = service
    if notify:
        return self._notify_update(name, 'new_service',
                                   self.service_info(name))
    return None
Add a service to the list of tracked services. Args: name (string): A unique short service name for the service long_name (string): A longer, user friendly name for the service preregistered (bool): Whether this service is an expected preregistered service. notify (bool): Send notifications about this service to all clients Returns: awaitable: If notify is True, an awaitable for the notifications. Otherwise None.
381,423
def libvlc_media_player_set_media(p_mi, p_md):
    # The stripped function-name literals are restored per the python-vlc
    # generated-binding pattern.
    f = _Cfunctions.get('libvlc_media_player_set_media', None) or \
        _Cfunction('libvlc_media_player_set_media', ((1,), (1,),), None,
                   None, MediaPlayer, Media)
    return f(p_mi, p_md)
Set the media that will be used by the media_player. If any, previous md will be released. @param p_mi: the Media Player. @param p_md: the Media. Afterwards the p_md can be safely destroyed.
381,424
def enable_thread_safety(self):
    # Parts of this body were lost in extraction: the error message, the
    # iteration that rebinds methods, and the per-method flag lookups. The
    # reconstruction below mirrors the surviving control flow; the flag
    # attribute names are assumptions.
    if self.threadsafe:
        return
    if self._running.isSet():
        raise RuntimeError('Cannot enable thread-safety after start()')
    for name in dir(self):
        meth = getattr(self, name)
        make_threadsafe = getattr(meth, 'make_threadsafe', False)
        make_threadsafe_blocking = getattr(meth, 'make_threadsafe_blocking',
                                           False)
        if make_threadsafe:
            assert not make_threadsafe_blocking
            meth = self._make_threadsafe(meth)
            setattr(self, name, meth)
        elif make_threadsafe_blocking:
            meth = self._make_threadsafe_blocking(meth)
            setattr(self, name, meth)
    self._threadsafe = True
Enable thread-safety features. Must be called before start().
381,425
def init_tree(self, tree_alias, context):
    # The context key and the SiteTreeError message were stripped; the
    # message below approximates django-sitetree's original wording.
    request = context.get('request', None)
    if request is None:
        raise SiteTreeError(
            'Sitetree requires the "request" template context processor to '
            'be active; check that your view pushes `request` into the '
            'template context.')
    if id(request) != id(self.current_request):
        self.init(context)
    tree_alias = self.resolve_var(tree_alias)
    tree_alias, sitetree_items = self.get_sitetree(tree_alias)
    if not sitetree_items:
        return None, None
    return tree_alias, sitetree_items
Initializes sitetree in memory. Returns tuple with resolved tree alias and items on success. On fail returns (None, None). :param str|unicode tree_alias: :param Context context: :rtype: tuple
381,426
def views_show_many(self, ids=None, **kwargs):
    api_path = "/api/v2/views/show_many.json"
    api_query = {}
    if "query" in kwargs.keys():
        api_query.update(kwargs["query"])
        del kwargs["query"]
    if ids:
        api_query.update({
            "ids": ids,
        })
    return self.call(api_path, query=api_query, **kwargs)
https://developer.zendesk.com/rest_api/docs/core/views#list-views-by-id
381,427
def open_icmp_firewall(host):
    # The stripped file-mode literal is restored as 'wb'.
    with open(os.devnull, 'wb') as DEVNULL:
        return subprocess.Popen("ping -4 -w 1 -n 1 %s" % host,
                                shell=True,
                                stdout=DEVNULL,
                                stderr=DEVNULL).wait()
Temporarily open the ICMP firewall. Tricks Windows into allowing ICMP packets for a short period of time (~ 1 minute)
381,428
def load_and_parse(self):
    # The loop header over parsed archives and the config pop were lost in
    # extraction and are reconstructed here.
    archives = []
    to_return = {}
    for name, project in self.all_projects.items():
        archives = archives + self.parse_archives_from_project(project)
    for a in archives:
        # Archives carry a config that would make an invalid UnparsedNode,
        # so remove it and pass it along to parse_node as an argument.
        archive_config = a.pop('config')
        archive = UnparsedNode(**a)
        node_path = self.get_path(archive.resource_type,
                                  archive.package_name,
                                  archive.name)
        to_return[node_path] = self.parse_node(
            archive,
            node_path,
            self.all_projects.get(archive.package_name),
            archive_config=archive_config)
    return to_return
Load and parse archives in a list of projects. Returns a dict that maps unique ids onto ParsedNodes
381,429
def download(directory, filename):
    # The URL, file modes and format strings were stripped; the mirror URL
    # below is the commonly used CVDF MNIST mirror and is a reconstruction.
    filepath = os.path.join(directory, filename)
    if tf.gfile.Exists(filepath):
        return filepath
    if not tf.gfile.Exists(directory):
        tf.gfile.MakeDirs(directory)
    url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'
    _, zipped_filepath = tempfile.mkstemp(suffix='.gz')
    print('Downloading %s to %s' % (url, zipped_filepath))
    urllib.request.urlretrieve(url, zipped_filepath)
    with gzip.open(zipped_filepath, 'rb') as f_in, \
            tf.gfile.Open(filepath, 'wb') as f_out:
        shutil.copyfileobj(f_in, f_out)
    os.remove(zipped_filepath)
    return filepath
Download (and unzip) a file from the MNIST dataset if not already done.
381,430
def list_metrics(ctx):
    config = ctx.obj["CONFIG"]
    if not exists(config):
        handle_no_cache(ctx)
    from wily.commands.list_metrics import list_metrics
    list_metrics()
List the available metrics.
381,431
def plot(self, origin=(0, 0), ax=None, fill=False, **kwargs):
    aper = self.to_aperture()
    aper.plot(origin=origin, ax=ax, fill=fill, **kwargs)
Plot the `BoundingBox` on a matplotlib `~matplotlib.axes.Axes` instance. Parameters ---------- origin : array_like, optional The ``(x, y)`` position of the origin of the displayed image. ax : `matplotlib.axes.Axes` instance, optional If `None`, then the current `~matplotlib.axes.Axes` instance is used. fill : bool, optional Set whether to fill the aperture patch. The default is `False`. kwargs Any keyword arguments accepted by `matplotlib.patches.Patch`.
381,432
def return_or_raise(cls, response, expected_status_code):
    try:
        if response.status_code in expected_status_code:
            return response
    except TypeError:
        if response.status_code == expected_status_code:
            return response
    raise cls(response)
Check for ``expected_status_code``.
381,433
def create(self, start_date, end_date, include_subaccounts=values.unset,
           status_callback=values.unset, status_callback_method=values.unset):
    # The stripped parameter keys and HTTP verb are restored following
    # Twilio's generated-code conventions.
    data = values.of({
        'StartDate': serialize.iso8601_date(start_date),
        'EndDate': serialize.iso8601_date(end_date),
        'IncludeSubaccounts': include_subaccounts,
        'StatusCallback': status_callback,
        'StatusCallbackMethod': status_callback_method,
    })
    payload = self._version.create(
        'POST',
        self._uri,
        data=data,
    )
    return FeedbackSummaryInstance(self._version, payload,
                                   account_sid=self._solution['account_sid'])
Create a new FeedbackSummaryInstance :param date start_date: Only include feedback given on or after this date :param date end_date: Only include feedback given on or before this date :param bool include_subaccounts: `true` includes feedback from the specified account and its subaccounts :param unicode status_callback: The URL that we will request when the feedback summary is complete :param unicode status_callback_method: The HTTP method we use to make requests to the StatusCallback URL :returns: Newly created FeedbackSummaryInstance :rtype: twilio.rest.api.v2010.account.call.feedback_summary.FeedbackSummaryInstance
381,434
def iter_columns(condition):
    visited = set()
    for condition in iter_conditions(condition):
        if condition.operation in ("and", "or", "not"):
            continue
        column = proxied(condition.column)
        if column is None:
            continue
        if column not in visited:
            visited.add(column)
            yield column
        for value in condition.values:
            if isinstance(value, ComparisonMixin):
                if value not in visited:
                    visited.add(value)
                    yield value
Yield all columns in the condition or its inner conditions. Unwraps proxies when the condition's column (or any of its values) include paths.
381,435
def get_collection(self, name):
    from sregistry.database.models import Collection
    return Collection.query.filter(Collection.name == name).first()
get a collection, if it exists, otherwise return None.
381,436
def GetRootKey(self):
    regf_key = self._regf_file.get_root_key()
    if not regf_key:
        return None
    return REGFWinRegistryKey(regf_key, key_path=self._key_path_prefix)
Retrieves the root key. Returns: WinRegistryKey: Windows Registry root key or None if not available.
381,437
async def request(self, method, url=None, *, path='', retries=1,
                  connection_timeout=60, **kwargs):
    # Most of this body (socket checkout, timeout/header handling and the
    # actual request call) was lost in extraction, with docstring fragments
    # fused into the code. Only the surviving connection-cleanup and retry
    # skeleton is reconstructed below; the missing portion is marked with an
    # ellipsis and the `headers` name is inferred from the retry call.
    headers = kwargs.pop('headers', None)
    sock = None
    r = None
    retry = False
    try:
        ...  # checkout a socket, send the request, read the response into r
        try:
            if r.headers['connection'].lower() == 'close':
                sock._active = False
                await sock.close()
        except KeyError:
            pass
        await self.return_to_pool(sock)
    except ConnectionError as e:
        if retries > 0:
            retry = True
            retries -= 1
        else:
            raise e
    except Exception as e:
        if sock:
            await self._handle_exception(e, sock)
        raise
    except BaseException as e:
        if sock:
            await sock.close()
        raise e
    if retry:
        return (await self.request(method, url, path=path, retries=retries,
                                   headers=headers, **kwargs))
    return r
This is the template for all of the `http method` methods for the Session. Args: method (str): A http method, such as 'GET' or 'POST'. url (str): The url the request should be made to. path (str): An optional kw-arg for use in Session method calls, for specifying a particular path. Usually to be used in conjunction with the base_location/endpoint paradigm. kwargs: Any number of the following: data (dict or str): Info to be processed as a body-bound query. params (dict or str): Info to be processed as a url-bound query. headers (dict): User HTTP headers to be used in the request. encoding (str): The str representation of the codec to process the request under. json (dict): A dict to be formatted as json and sent in the request body. files (dict): A dict of `filename:filepath`s to be sent as multipart. cookies (dict): A dict of `name:value` cookies to be passed in request. callback (func): A callback function to be called on each bytechunk of the response body. timeout (int or float): A numeric representation of the longest time to wait on a complete response once a request has been sent. retries (int): The number of attempts to try against connection errors. max_redirects (int): The maximum number of redirects allowed. persist_cookies (True or None): Passing True instantiates a CookieTracker object to manage the return of cookies to the server under the relevant domains. auth (child of AuthBase): An object for handling auth construction. When you call something like Session.get() or asks.post(), you're really calling a partial method that has the 'method' argument pre-completed.
381,438
def get_pgid(path, follow_symlinks=True):
    # The error-message format string was stripped and is reconstructed; a
    # stray '*' artifact has been removed.
    if not os.path.exists(path):
        raise CommandExecutionError('Path not found: {0}'.format(path))
    # Under Windows, symlinks must be resolved manually (Vista and later).
    if follow_symlinks and sys.getwindowsversion().major >= 6:
        path = _resolve_symlink(path)
    group_name = salt.utils.win_dacl.get_primary_group(path)
    return salt.utils.win_dacl.get_sid_string(group_name)
Return the id of the primary group that owns a given file (Windows only) This function will return the rarely used primary group of a file. This generally has no bearing on permissions unless intentionally configured and is most commonly used to provide Unix compatibility (e.g. Services For Unix, NFS services). Ensure you know what you are doing before using this function. Args: path (str): The path to the file or directory follow_symlinks (bool): If the object specified by ``path`` is a symlink, get attributes of the linked file instead of the symlink itself. Default is True Returns: str: The gid of the primary group CLI Example: .. code-block:: bash salt '*' file.get_pgid c:\\temp\\test.txt
381,439
def execute(self, args):
    _run_atstart()
    hook_name = os.path.basename(args[0])
    if hook_name in self._hooks:
        try:
            self._hooks[hook_name]()
        except SystemExit as x:
            if x.code is None or x.code == 0:
                _run_atexit()
            raise
        _run_atexit()
    else:
        raise UnregisteredHookError(hook_name)
Execute a registered hook based on args[0]
381,440
def get_graph_data(self, graph, benchmark):
    # The stripped dict keys ('params', 'name') and the entry-name format are
    # restored from the docstring's description of parameterized benchmarks.
    if benchmark.get('params'):
        param_iter = enumerate(zip(itertools.product(*benchmark['params']),
                                   graph.get_steps()))
    else:
        param_iter = [(None, (None, graph.get_steps()))]
    for j, (param, steps) in param_iter:
        if param is None:
            entry_name = benchmark['name']
        else:
            entry_name = benchmark['name'] + '({0})'.format(', '.join(param))
        start_revision = self._get_start_revision(graph, benchmark, entry_name)
        threshold = self._get_threshold(graph, benchmark, entry_name)
        if start_revision is None:
            continue
        steps = [step for step in steps if step[1] >= start_revision]
        yield j, entry_name, steps, threshold
Iterator over graph data sets Yields ------ param_idx Flat index to parameter permutations for parameterized benchmarks. None if benchmark is not parameterized. entry_name Name for the data set. If benchmark is non-parameterized, this is the benchmark name. steps Steps to consider in regression detection. threshold User-specified threshold for regression detection.
381,441
def pop_all(self, priority=None):
    output = []
    with self.lock:
        if not priority:
            priority = self.highest_entry()
        if priority:
            output = list(self.queue[priority].queue)
            self.queue[priority].queue.clear()
    return output
NON-BLOCKING POP ALL IN QUEUE, IF ANY
381,442
def create_lti_session(self, user_id, roles, realname, email, course_id,
                       task_id, consumer_key, outcome_service_url,
                       outcome_result_id, tool_name, tool_desc, tool_url,
                       context_title, context_label):
    self._destroy_session()
    session_id = self._session.session_id
    self._session.lti = {
        "email": email,
        "username": user_id,
        "realname": realname,
        "roles": roles,
        "task": (course_id, task_id),
        "outcome_service_url": outcome_service_url,
        "outcome_result_id": outcome_result_id,
        "consumer_key": consumer_key,
        "context_title": context_title,
        "context_label": context_label,
        "tool_description": tool_desc,
        "tool_name": tool_name,
        "tool_url": tool_url
    }
    return session_id
Creates an LTI cookieless session. Returns the new session id
381,443
def _SetColour(self, value_list):
    if value_list is None:
        self._color = None
        return
    colors = []
    for color in value_list:
        if color in terminal.SGR:
            colors.append(color)
        elif color in terminal.FG_COLOR_WORDS:
            colors += terminal.FG_COLOR_WORDS[color]
        elif color in terminal.BG_COLOR_WORDS:
            colors += terminal.BG_COLOR_WORDS[color]
        else:
            raise ValueError("Invalid colour specification.")
    self._color = list(set(colors))
Sets row's colour attributes to a list of values in terminal.SGR.
381,444
def n_chunks(self):
    return self._data_source.n_chunks(self.chunksize, stride=self.stride,
                                      skip=self.skip)
rough estimate of how many chunks will be processed
381,445
def equivalence_transform(compound, from_positions, to_positions, add_bond=True):
    # The deprecation message text was stripped; it is reconstructed per
    # mBuild's move to force_overlap.
    warn('The `equivalence_transform` function is being phased out in favor '
         'of `force_overlap`.', DeprecationWarning)
    from mbuild.port import Port
    T = None
    if isinstance(from_positions, (list, tuple)) and \
            isinstance(to_positions, (list, tuple)):
        equivalence_pairs = zip(from_positions, to_positions)
    elif isinstance(from_positions, Port) and isinstance(to_positions, Port):
        equivalence_pairs, T = _choose_correct_port(from_positions,
                                                    to_positions)
        from_positions.used = True
        to_positions.used = True
    else:
        equivalence_pairs = [(from_positions, to_positions)]
    if not T:
        T = _create_equivalence_transform(equivalence_pairs)
    atom_positions = compound.xyz_with_ports
    atom_positions = T.apply_to(atom_positions)
    compound.xyz_with_ports = atom_positions
    if add_bond:
        if isinstance(from_positions, Port) and isinstance(to_positions, Port):
            if not from_positions.anchor or not to_positions.anchor:
                warn("Attempting to form bond from port that has no anchor")
            else:
                from_positions.anchor.parent.add_bond(
                    (from_positions.anchor, to_positions.anchor))
                to_positions.anchor.parent.add_bond(
                    (from_positions.anchor, to_positions.anchor))
Computes an affine transformation that maps the from_positions to the respective to_positions, and applies this transformation to the compound. Parameters ---------- compound : mb.Compound The Compound to be transformed. from_positions : np.ndarray, shape=(n, 3), dtype=float Original positions. to_positions : np.ndarray, shape=(n, 3), dtype=float New positions.
381,446
def _get_all_files(filename_regex, path, base_dir, excluded_paths=None,
                   excluded_filename_regex=None):
    # The backslash-replacement literals and log-message strings were
    # stripped; they are reconstructed here as reasonable approximations.
    def replace_backslashes(string):
        return string.replace('\\', '/')

    excluded_paths = _normalize_excluded_paths(base_dir, excluded_paths)
    if excluded_paths:
        logger.info('Excluding paths: %s', excluded_paths)
    logger.info('Looking for files matching %s under %s',
                filename_regex, os.path.join(base_dir, path))
    if excluded_filename_regex:
        logger.info('Excluding file names matching: %s',
                    excluded_filename_regex)
    path_expression = re.compile(replace_backslashes(path))
    target_files = []
    for root, _, files in os.walk(base_dir):
        if not root.startswith(tuple(excluded_paths)) \
                and path_expression.search(replace_backslashes(root)):
            for filename in files:
                filepath = os.path.join(root, filename)
                is_file, matched, excluded_filename, excluded_path = \
                    _set_match_parameters(
                        filename, filepath, filename_regex,
                        excluded_filename_regex, excluded_paths)
                if is_file and matched and not excluded_filename \
                        and not excluded_path:
                    logger.debug('%s is a match; adding it', filepath)
                    target_files.append(filepath)
    return target_files
Get all files for processing. This starts iterating from `base_dir` and checks for all files that look like `filename_regex` under `path` regex excluding all paths under the `excluded_paths` list, whether they are files or folders. `excluded_paths` are explicit paths, not regex. `excluded_filename_regex` are files to be excluded as well.
381,447
def make_function_value_private(self, value, value_type, function):
    value = self.remove_quotes(value)
    if function == "base64":
        try:
            import base64
            value = base64.b64decode(value).decode("utf-8")
        except TypeError as e:
            self.notify_user("base64(..) error %s" % str(e))
    if not self.current_module:
        self.notify_user("%s(..) used outside of module or section" % function)
        return None
    module = self.current_module[-1].split()[0]
    if module in CONFIG_FILE_SPECIAL_SECTIONS + I3S_MODULE_NAMES:
        self.notify_user(
            "%s(..) cannot be used outside of py3status module "
            "configuration" % function
        )
        return None
    value = self.value_convert(value, value_type)
    module_name = self.current_module[-1]
    return PrivateHide(value, module_name)
Wraps converted value so that it is hidden in logs etc. Note this is not secure; it just reduces the risk of leaking info. Allows base64-encoding values using base64() or plain hide() in the config.
381,448
def write(self, h, txt='', link=''):
    "Output text in flowing mode"
    # Stripped literals (the 'cw' font key, empty-string defaults, the space
    # test and the empty border/align arguments to cell()) are restored per
    # FPDF conventions.
    txt = self.normalize_text(txt)
    cw = self.current_font['cw']
    w = self.w - self.r_margin - self.x
    wmax = (w - 2 * self.c_margin) * 1000.0 / self.font_size
    s = txt.replace("\r", '')
    nb = len(s)
    sep = -1
    i = 0
    j = 0
    l = 0
    nl = 1
    while i < nb:
        c = s[i]
        if c == "\n":
            # Explicit line break
            self.cell(w, h, substr(s, j, i - j), 0, 2, '', 0, link)
            i += 1
            sep = -1
            j = i
            l = 0
            if nl == 1:
                self.x = self.l_margin
                w = self.w - self.r_margin - self.x
                wmax = (w - 2 * self.c_margin) * 1000.0 / self.font_size
            nl += 1
            continue
        if c == ' ':
            sep = i
        if self.unifontsubset:
            l += self.get_string_width(c) / self.font_size * 1000.0
        else:
            l += cw.get(c, 0)
        if l > wmax:
            # Automatic line break
            if sep == -1:
                if self.x > self.l_margin:
                    # Move to the next line
                    self.x = self.l_margin
                    self.y += h
                    w = self.w - self.r_margin - self.x
                    wmax = (w - 2 * self.c_margin) * 1000.0 / self.font_size
                    i += 1
                    nl += 1
                    continue
                if i == j:
                    i += 1
                self.cell(w, h, substr(s, j, i - j), 0, 2, '', 0, link)
            else:
                self.cell(w, h, substr(s, j, sep - j), 0, 2, '', 0, link)
                i = sep + 1
            sep = -1
            j = i
            l = 0
            if nl == 1:
                self.x = self.l_margin
                w = self.w - self.r_margin - self.x
                wmax = (w - 2 * self.c_margin) * 1000.0 / self.font_size
            nl += 1
        else:
            i += 1
    if i != j:
        self.cell(l / 1000.0 * self.font_size, h, substr(s, j), 0, 0, '', 0, link)
Output text in flowing mode
381,449
def unique_iter(src, key=None):
    if not is_iterable(src):
        raise TypeError('expected an iterable, not %r' % type(src))
    if key is None:
        key_func = lambda x: x
    elif callable(key):
        key_func = key
    elif isinstance(key, basestring):
        key_func = lambda x: getattr(x, key, x)
    else:
        raise TypeError('"key" expected a string or callable, not %r' % key)
    seen = set()
    for i in src:
        k = key_func(i)
        if k not in seen:
            seen.add(k)
            yield i
    return
Yield unique elements from the iterable, *src*, based on *key*, in the order in which they first appeared in *src*. >>> repetitious = [1, 2, 3] * 10 >>> list(unique_iter(repetitious)) [1, 2, 3] By default, *key* is the object itself, but *key* can either be a callable or, for convenience, a string name of the attribute on which to uniqueify objects, falling back on identity when the attribute is not present. >>> pleasantries = ['hi', 'hello', 'ok', 'bye', 'yes'] >>> list(unique_iter(pleasantries, key=lambda x: len(x))) ['hi', 'hello', 'bye']
381,450
def to_boulderio(infile, outfile):
    seq_reader = sequences.file_reader(infile)
    f_out = utils.open_file_write(outfile)
    for sequence in seq_reader:
        print("SEQUENCE_ID=" + sequence.id, file=f_out)
        print("SEQUENCE_TEMPLATE=" + sequence.seq, file=f_out)
        print("=", file=f_out)
    utils.close(f_out)
Converts input sequence file into a "Boulder-IO format", as used by primer3
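A sketch of the conversion and the record layout it produces (file names are placeholders):

# Hypothetical usage: convert a FASTA file for primer3's input format.
to_boulderio('genes.fa', 'genes.boulder')
# Each record in genes.boulder looks like:
#   SEQUENCE_ID=<name>
#   SEQUENCE_TEMPLATE=<sequence>
#   =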
381,451
def _set_cluster(self, v, load=False):
    # The YANG metadata strings (extension info, namespace, defining module
    # and the error-message dict) were stripped in extraction; the values
    # below are reconstructions following pyangbind's generated-code pattern
    # and may not match the original Brocade module exactly.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v, base=cluster.cluster, is_container='container',
            presence=False, yang_name="cluster", rest_name="cluster",
            parent=self, path_helper=self._path_helper,
            extmethods=self._extmethods, register_paths=True,
            extensions={u'tailf-common': {u'info': u'cluster',
                                          u'cli-incomplete-no': None}},
            namespace='urn:brocade.com:mgmt:brocade-mgmt-cluster',
            defining_module='brocade-mgmt-cluster', yang_type='container',
            is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': 'cluster must be of a type compatible with container',
            'defined-type': "container",
            'generated-type': 'YANGDynClass(base=cluster.cluster, ...)',
        })
    self.__cluster = t
    if hasattr(self, '_set'):
        self._set()
Setter method for cluster, mapped from YANG variable /mgmt_cluster/cluster (container) If this variable is read-only (config: false) in the source YANG file, then _set_cluster is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_cluster() directly.
381,452
def optional(name, default) -> 'Wildcard':
    return Wildcard(min_count=1, fixed_size=True, variable_name=name,
                    optional=default)
Create a `Wildcard` that matches a single argument with a default value. If the wildcard does not match, the substitution will contain the default value instead. Args: name: The name for the wildcard. default: The default value of the wildcard. Returns: An optional wildcard.
381,453
def train(self, jsondocs, model_dir):
    modelUtil = ModelStorageUtil(model_dir)
    modelUtil.makedir()
    modelUtil.copy_settings(self.settings)
    nerdocs = [json_document_to_estner_document(jsondoc)
               for jsondoc in jsondocs]
    self.fex.prepare(nerdocs)
    self.fex.process(nerdocs)
    self.trainer.train(nerdocs, modelUtil.model_filename)
Train a NER model using given documents. Each word in the documents must have a "label" attribute, which denote the named entities in the documents. Parameters ---------- jsondocs: list of JSON-style documents. The documents used for training the CRF model. model_dir: str A directory where the model will be saved.
381,454
def extend_peaks(self, prop_thresh=50):
    temp_peaks = [i + 1200 for i in self.peaks["peaks"][0]]
    temp_peaks.extend([i - 1200 for i in self.peaks["peaks"][0]])
    extended_peaks = []
    extended_peaks.extend(self.peaks["peaks"][0])
    for i in temp_peaks:
        nearest_ind = slope.find_nearest_index(self.peaks["peaks"][0], i)
        diff = abs(self.peaks["peaks"][0][nearest_ind] - i)
        diff = np.mod(diff, 1200)
        if diff > prop_thresh:
            extended_peaks.append(i)
    return extended_peaks
Each peak in the peaks of the object is checked for its presence in other octaves. If it does not exist, it is created. prop_thresh is the cent range within which the peak in the other octave is expected to be present, i.e., only if there is a peak within this cent range in other octaves, then the peak is considered to be present in that octave. Note that this does not change the peaks of the object. It just returns the extended peaks.
381,455
def mach2tas(Mach, H):
    a = vsound(H)
    Vtas = Mach * a
    return Vtas
Mach number to True Airspeed
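A quick numeric check, assuming vsound(H) returns the speed of sound in m/s for an altitude H in metres: at 11,000 m the ISA speed of sound is about 295 m/s.

# Illustrative only; vsound() is provided by the same aero module.
tas = mach2tas(0.8, 11000)   # ~ 0.8 * 295 m/s ~ 236 m/s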
381,456
def deploy(verbose, app):
    config = PsiturkConfig()
    config.load_config()
    config.set("Experiment Configuration", "mode", "deploy")
    config.set("Server Parameters", "logfile", "-")
    config.set("Shell Parameters", "launch_in_sandbox_mode", "false")
    deploy_sandbox_shared_setup(verbose=verbose, app=app)
Deploy app using Heroku to MTurk.
381,457
def reduce(self, dimensions=[], function=None, spreadfn=None, **reductions):
    if any(dim in self.vdims for dim in dimensions):
        raise Exception("Reduce cannot be applied to value dimensions")
    function, dims = self._reduce_map(dimensions, function, reductions)
    dims = [d for d in self.kdims if d not in dims]
    return self.aggregate(dims, function, spreadfn)
Applies reduction along the specified dimension(s). Allows reducing the values along one or more key dimension with the supplied function. Supports two signatures: Reducing with a list of dimensions, e.g.: ds.reduce(['x'], np.mean) Defining a reduction using keywords, e.g.: ds.reduce(x=np.mean) Args: dimensions: Dimension(s) to apply reduction on Defaults to all key dimensions function: Reduction operation to apply, e.g. numpy.mean spreadfn: Secondary reduction to compute value spread Useful for computing a confidence interval, spread, or standard deviation. **reductions: Keyword argument defining reduction Allows reduction to be defined as keyword pair of dimension and function Returns: The Dataset after reductions have been applied.
381,458
def all_files(models=[]):
    # A stray 'rmodels' docstring artifact has been removed, and the stripped
    # split separator is restored as '.' since the sort compares
    # dot-separated filename components.
    def nsort(a, b):
        fa = os.path.basename(a).split('.')
        fb = os.path.basename(b).split('.')
        elements_to_remove = []
        assert len(fa) == len(fb)
        for i in range(0, len(fa)):
            if fa[i] == fb[i]:
                elements_to_remove.append(fa[i])
        for e in elements_to_remove:
            fa.remove(e)
            fb.remove(e)
        assert len(fa) == len(fb)
        assert len(fa) == 1
        fa = keep_only_digits(fa[0])
        fb = keep_only_digits(fb[0])
        if fa < fb:
            return -1
        if fa == fb:
            return 0
        if fa > fb:
            return 1

    base = list(map(lambda x: os.path.abspath(x), maybe_inspect_zip(models)))
    base.sort(cmp=nsort)
    return base
Return a list of full paths of files matching 'models', sorted in human numerical order (i.e., 0 1 2 ..., 10 11 12, ..., 100, ..., 1000). Files are supposed to be named identically except for one variable component, e.g. the list test.weights.e5.lstm1200.ldc93s1.pb test.weights.e5.lstm1000.ldc93s1.pb test.weights.e5.lstm800.ldc93s1.pb gets sorted: test.weights.e5.lstm800.ldc93s1.pb test.weights.e5.lstm1000.ldc93s1.pb test.weights.e5.lstm1200.ldc93s1.pb
381,459
def verifyUpdatewcs(fname):
    # The stripped header key is restored as 'WCSNAME' per the docstring.
    updated = True
    numsci, extname = count_sci_extensions(fname)
    for n in range(1, numsci + 1):
        hdr = fits.getheader(fname, extname=extname, extver=n, memmap=False)
        if 'WCSNAME' not in hdr:
            updated = False
            break
    return updated
Verify the existence of WCSNAME in the file. If it is not present, report this to the user and raise an exception. Returns True if WCSNAME was found in all SCI extensions.
381,460
def make_action(self, fn, schema_parser, meta):
    validate_input = validate_output = None
    if "$input" in meta:
        with MarkKey("$input"):
            validate_input = schema_parser.parse(meta["$input"])
    if "$output" in meta:
        with MarkKey("$output"):
            validate_output = schema_parser.parse(meta["$output"])

    def action(data):
        if validate_input:
            try:
                data = validate_input(data)
            except Invalid as ex:
                return abort(400, "InvalidData", str(ex))
            if isinstance(data, dict):
                rv = fn(**data)
            else:
                rv = fn(data)
        else:
            rv = fn()
        rv, status, headers = unpack(rv)
        if validate_output:
            try:
                rv = validate_output(rv)
            except Invalid as ex:
                return abort(500, "ServerError", str(ex))
        return rv, status, headers

    return action
Make resource's method an action Validate input, output by schema in meta. If no input schema, call fn without params. If no output schema, will not validate return value. Args: fn: resource's method schema_parser: for parsing schema in meta meta: meta data of the action
381,461
def get_platform():
    # The dict keys come from the docstring ('os_name', 'os_bits'); the OS
    # name mapping and message strings are reconstructions.
    platform_data = {
        'os_name': None,
        'os_bits': None
    }
    os_name = platform.system()
    normalize_os = {
        'Windows': 'windows',
        'Darwin': 'mac',
        'Linux': 'linux'
    }
    if os_name in normalize_os.keys():
        platform_data['os_name'] = normalize_os[os_name]
    else:
        raise Exception('Unknown operating system: {}'.format(os_name))
    maxsize = sys.maxsize
    if maxsize == EXPECTED_MAXSIZE_32:
        platform_data['os_bits'] = '32bits'
    elif maxsize == EXPECTED_MAXSIZE_64:
        platform_data['os_bits'] = '64bits'
    else:
        platform_data['os_bits'] = 'unknown'
        logger.warning('Could not determine the OS architecture')
    return platform_data
Get the current platform data. Returns a dictionary with keys: `os_name`, `os_bits`
381,462
def index(self, value, start=0, end=None):
    try:
        index = self._dict[value]
    except KeyError:
        raise ValueError
    else:
        start = self._fix_neg_index(start)
        end = self._fix_end_index(end)
        if start <= index and index < end:
            return index
        else:
            raise ValueError
Return the index of value between start and end. By default, the entire setlist is searched. This runs in O(1) Args: value: The value to find the index of start (int): The index to start searching at (defaults to 0) end (int): The index to stop searching at (defaults to the end of the list) Returns: int: The index of the value Raises: ValueError: If the value is not in the list or outside of start - end IndexError: If start or end are out of range
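A brief usage sketch, assuming the collections-extended setlist type this method belongs to:

# Hypothetical usage; membership is dict-backed, hence the O(1) lookup.
from collections_extended import setlist
sl = setlist(['a', 'b', 'c'])
sl.index('b')           # -> 1
sl.index('b', start=2)  # raises ValueError: index 1 is outside the slice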
381,463
def fromDataFrameRDD(cls, rdd, sql_ctx):
    result = DataFrame(None, sql_ctx)
    return result.from_rdd_of_dataframes(rdd)
Construct a DataFrame from an RDD of DataFrames. No checking or validation occurs.
381,464
def load_vm_uuid_by_name(self, si, vcenter_data_model, vm_name):
    # The path separator and error-message strings were stripped; '/' and
    # the descriptive messages below are reconstructions.
    path = VMLocation.combine([vcenter_data_model.default_datacenter, vm_name])
    paths = path.split('/')
    name = paths[len(paths) - 1]
    path = VMLocation.combine(paths[:len(paths) - 1])
    vm = self.pv_service.find_vm_by_name(si, path, name)
    if not vm:
        raise ValueError('vm not found in path: {0} name: {1}'
                         .format(path, name))
    if isinstance(vm, vim.VirtualMachine):
        return vm.config.uuid
    raise ValueError('vm is not a VirtualMachine, path: {0} name: {1}'
                     .format(path, name))
Returns the vm uuid :param si: Service instance to the vcenter :param vcenter_data_model: vcenter data model :param vm_name: the vm name :return: str uuid
381,465
def do_wordwrap(s, width=79, break_long_words=True):
    # The stripped join literal is restored as a newline, which is what
    # joining wrapped lines requires.
    import textwrap
    return u'\n'.join(textwrap.wrap(s, width=width, expand_tabs=False,
                                    replace_whitespace=False,
                                    break_long_words=break_long_words))
Return a copy of the string passed to the filter wrapped after ``79`` characters. You can override this default using the first parameter. If you set the second parameter to `false` Jinja will not split words apart if they are longer than `width`.
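A hedged sketch of the filter in use via a Jinja2 environment:

# Illustrative only; wordwrap is a built-in Jinja2 filter.
from jinja2 import Environment
tmpl = Environment().from_string('{{ s|wordwrap(20) }}')
print(tmpl.render(s='a rather long sentence that needs wrapping'))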
381,466
def upgrade_plan_list(self, subid, params=None):
    # The parameter key and endpoint literals are restored from the docstring.
    params = update_params(params, {'SUBID': subid})
    return self.request('/v1/server/upgrade_plan_list', params, 'GET')
/v1/server/upgrade_plan_list GET - account Retrieve a list of the VPSPLANIDs for which a virtual machine can be upgraded. An empty response array means that there are currently no upgrades available. Link: https://www.vultr.com/api/#server_upgrade_plan_list
381,467
def _GetDataStreams(self):
    if self._data_streams is None:
        if self._directory is None:
            self._directory = self._GetDirectory()
        self._data_streams = []
        if not self._directory and not self.link:
            data_stream = DataStream()
            self._data_streams.append(data_stream)
    return self._data_streams
Retrieves the data streams. Returns: list[DataStream]: data streams.
381,468
def set_options_values(self, options, parse=False, strict=False):
    # The %s placeholders were stripped from the error messages and are
    # restored here.
    if strict:
        for opt_name in options.keys():
            if not self.has_option(opt_name):
                raise ValueError("'%s' is not an option of the component"
                                 % opt_name)
            elif self.option_is_hidden(opt_name):
                raise ValueError("'%s' is hidden, you can't set it"
                                 % opt_name)
    for opt_name, opt in self._options.items():
        if opt.hidden:
            continue
        if opt_name in options:
            opt.set(options[opt_name], parse=parse)
Set the options from a dict of values (in string). :param option_values: the values of options (in format `{"opt_name": "new_value"}`) :type option_values: dict :param parse: whether to parse the given value :type parse: bool :param strict: if True the given `option_values` dict should only contains existing options (no other key) :type strict: bool
381,469
def write_reaction(self, value_dict):
    # The SQL fragments and dict keys were stripped in extraction; the table
    # and key names below follow the surrounding helper calls and docstring,
    # but the exact SQL text is a reconstruction.
    con = self.connection or self._connect()
    self._initialize(con)
    cur = con.cursor()
    ase_ids = value_dict['ase_ids']
    energy_corrections = value_dict.get('energy_corrections', {})
    key_list = get_key_list(start_index=1)
    values = [value_dict[key] for key in key_list]
    key_str = get_key_str('reaction', start_index=1)
    value_str = get_value_str(values)
    insert_command = \
        'INSERT INTO reaction ({0}) VALUES ({1}) RETURNING id;' \
        .format(key_str, value_str)
    cur.execute(insert_command)
    id = cur.fetchone()[0]
    reaction_system_values = []
    for name, ase_id in ase_ids.items():
        if name in energy_corrections:
            energy_correction = energy_corrections[name]
        else:
            energy_correction = 0
        reaction_system_values += [tuple([name, energy_correction,
                                          ase_id, id])]
    key_str = get_key_str('reaction_system')
    insert_command = 'INSERT INTO reaction_system ({0}) VALUES %s;' \
        .format(key_str)
    execute_values(cur=cur, sql=insert_command,
                   argslist=reaction_system_values, page_size=1000)
    if self.connection is None:
        con.commit()
        con.close()
    return id
Write to reaction_system tables
381,470
def kill_current_session(ctx: Context_T) -> None:
    ctx_id = context_id(ctx)
    if ctx_id in _sessions:
        del _sessions[ctx_id]
Force kill current session of the given context, despite whether it is running or not. :param ctx: message context
381,471
def register_postparsing_hook(
        self,
        func: Callable[[plugin.PostparsingData],
                       plugin.PostparsingData]) -> None:
    self._validate_postparsing_callable(func)
    self._postparsing_hooks.append(func)
Register a function to be called after parsing user input but before running the command
381,472
def feed(self, pred, label):
    assert pred.shape == label.shape, "{} != {}".format(pred.shape,
                                                        label.shape)
    self.nr_pos += (label == 1).sum()
    self.nr_neg += (label == 0).sum()
    self.nr_pred_pos += (pred == 1).sum()
    self.nr_pred_neg += (pred == 0).sum()
    self.corr_pos += ((pred == 1) & (pred == label)).sum()
    self.corr_neg += ((pred == 0) & (pred == label)).sum()
Args: pred (np.ndarray): binary array. label (np.ndarray): binary array of the same size.
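A small sketch of feeding binary arrays into the running statistics object; the owner class name is an assumption from context:

# Hypothetical usage; counter attribute names follow the method body above.
import numpy as np
stat = BinaryStatistics()  # assumed owner class of feed()
stat.feed(np.array([1, 0, 1, 1]), np.array([1, 0, 0, 1]))
# stat.nr_pos == 2, stat.nr_neg == 2, stat.corr_pos == 2, stat.corr_neg == 1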
381,473
def policy_exists(policy_name, region=None, key=None, keyid=None, profile=None):
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        conn.get_policy(_get_policy_arn(policy_name, region=region, key=key,
                                        keyid=keyid, profile=profile))
        return True
    except boto.exception.BotoServerError:
        return False
Check to see if policy exists. CLI Example: .. code-block:: bash salt myminion boto_iam.instance_profile_exists myiprofile
381,474
def fft_coefficient(self, x, param=None):
    # The 'attr'/'coeff' dict keys come from the tsfresh API referenced in
    # the docstring; the attr values in the default parameter list were
    # stripped, so the ones below are placeholders, not confirmed originals.
    if param is None:
        param = [{'attr': 'abs', 'coeff': 44}, {'attr': 'abs', 'coeff': 63},
                 {'attr': 'abs', 'coeff': 0}, {'attr': 'real', 'coeff': 0},
                 {'attr': 'real', 'coeff': 23}]
    _fft_coef = feature_calculators.fft_coefficient(x, param)
    logging.debug("fft coefficient by tsfresh calculated")
    return list(_fft_coef)
As in tsfresh `fft_coefficient <https://github.com/blue-yonder/tsfresh/blob/master/tsfresh/feature_extraction/\ feature_calculators.py#L852>`_ \ Calculates the fourier coefficients of the one-dimensional discrete Fourier Transform for real input by fast \ fourier transformation algorithm .. math:: A_k = \\sum_{m=0}^{n-1} a_m \\exp \\left \\{ -2 \\pi i \\frac{m k}{n} \\right \\}, \\qquad k = 0, \\ldots , n-1. The resulting coefficients will be complex; this feature calculator can return the real part (attr=="real"), \ the imaginary part (attr=="imag"), the absolute value (attr=="abs") and the angle in degrees (attr=="angle"). :param x: the time series to calculate the feature of :type x: pandas.Series :param param: contains dictionaries {"coeff": x, "attr": s} with x int and x >= 0, s str and in ["real", "imag"\ , "abs", "angle"] :type param: list :return: the different feature values :rtype: pandas.Series
381,475
def handle_stdin_request(self, timeout=0.1):
    # The stripped literals (the empty reply on EOF and the newline written
    # on interrupt) are restored here.
    msg_rep = self.km.stdin_channel.get_msg(timeout=timeout)
    self.handle_iopub()
    if self.session_id == msg_rep["parent_header"].get("session"):
        real_handler = signal.getsignal(signal.SIGINT)

        def double_int(sig, frame):
            # Call the real handler (forwards SIGINT to the kernel), then
            # raise so raw_input is interrupted locally too.
            real_handler(sig, frame)
            raise KeyboardInterrupt

        signal.signal(signal.SIGINT, double_int)
        try:
            raw_data = raw_input(msg_rep["content"]["prompt"])
        except EOFError:
            raw_data = ''
        except KeyboardInterrupt:
            sys.stdout.write('\n')
            return
        finally:
            signal.signal(signal.SIGINT, real_handler)
        if not (self.km.stdin_channel.msg_ready() or
                self.km.shell_channel.msg_ready()):
            self.km.stdin_channel.input(raw_data)
Method to capture raw_input
381,476
def modify_content(request, page_id, content_type, language_id):
    # The permission name, HTTP method, POST key and response-body literals
    # were stripped; the values below follow django-page-cms conventions.
    page = get_object_or_404(Page, pk=page_id)
    perm = request.user.has_perm('pages.change_page')
    if perm and request.method == 'POST':
        content = request.POST.get('content', False)
        if not content:
            raise Http404
        page = Page.objects.get(pk=page_id)
        if settings.PAGE_CONTENT_REVISION:
            Content.objects.create_content_if_changed(page, language_id,
                                                      content_type, content)
        else:
            Content.objects.set_or_create_content(page, language_id,
                                                  content_type, content)
        page.invalidate()
        page.save()
        return HttpResponse('ok')
    raise Http404
Modify the content of a page.
381,477
def query_by_postid(postid, limit=5):
    recs = TabPostHist.select().where(
        TabPostHist.post_id == postid
    ).order_by(
        TabPostHist.time_update.desc()
    ).limit(limit)
    return recs
Query history of certain records.
381,478
def dump_viewset(viewset_class, root_folder, folder_fn=lambda i: ".",
                 sample_size=None):
    # The serializer-context keys, request path and file mode were stripped;
    # standard DRF context keys are restored here as a reconstruction.
    if os.path.exists(root_folder):
        shutil.rmtree(root_folder)
    os.makedirs(root_folder)
    vs = viewset_class()
    vs.request = rf.get('/')
    serializer_class = vs.get_serializer_class()
    serializer = serializer_class(context={'request': vs.request,
                                           'format': 'json', 'view': vs})
    renderer = PrettyJSONRenderer()
    bar = progressbar.ProgressBar()
    for instance in bar(vs.get_queryset()[:sample_size]):
        dct = serializer.to_representation(instance)
        content = renderer.render(dct)
        folder = os.path.join(root_folder, folder_fn(instance))
        if not os.path.exists(folder):
            os.makedirs(folder)
        filename = "%s.json" % instance.slug
        f = file(os.path.join(folder, filename), 'w')
        f.write(content)
        f.close()
Dump the contents of a rest-api queryset to a folder structure. :param viewset_class: A rest-api viewset to iterate through :param root_folder: The root folder to write results to. :param folder_fn: A function to generate a subfolder name for the instance. :param sample_size: Number of items to process, for test purposes. :return:
381,479
def sdiv(a, b):
    # The error message and the cvxopt matrix typecode were stripped; 'd'
    # (double) is the usual typecode here and the message is a reconstruction.
    if len(a) != len(b):
        raise ValueError('lengths of a and b must match')
    idx = 0
    ret = matrix(0, (len(a), 1), 'd')
    for m, n in zip(a, b):
        try:
            ret[idx] = m / n
        except ZeroDivisionError:
            ret[idx] = 1
        finally:
            idx += 1
    return ret
Safe division: if a == b == 0, sdiv(a, b) == 1
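An illustrative call showing the 0/0 convention, using the cvxopt matrices this helper operates on:

# Hypothetical usage; 0.0/0.0 raises ZeroDivisionError, so it maps to 1.
from cvxopt import matrix
a = matrix([0.0, 2.0], (2, 1), 'd')
b = matrix([0.0, 4.0], (2, 1), 'd')
print(list(sdiv(a, b)))  # [1.0, 0.5]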
381,480
def stepper_step(self, motor_speed, number_of_steps):
    task = asyncio.ensure_future(self.core.stepper_step(motor_speed,
                                                        number_of_steps))
    self.loop.run_until_complete(task)
Move a stepper motor for the number of steps at the specified speed This is a FirmataPlus feature. :param motor_speed: 21 bits of data to set motor speed :param number_of_steps: 14 bits for number of steps & direction positive is forward, negative is reverse
381,481
def absolute(requestContext, seriesList):
    for series in seriesList:
        series.name = "absolute(%s)" % (series.name)
        series.pathExpression = series.name
        for i, value in enumerate(series):
            series[i] = safeAbs(value)
    return seriesList
Takes one metric or a wildcard seriesList and applies the mathematical abs function to each datapoint transforming it to its absolute value. Example:: &target=absolute(Server.instance01.threads.busy) &target=absolute(Server.instance*.threads.busy)
381,482
def _create_gates(self, inputs, memory):
    num_gates = 2 * self._calculate_gate_size()
    memory = tf.tanh(memory)
    inputs = basic.BatchFlatten()(inputs)
    gate_inputs = basic.BatchApply(basic.Linear(num_gates), n_dims=1)(inputs)
    gate_inputs = tf.expand_dims(gate_inputs, axis=1)
    gate_memory = basic.BatchApply(basic.Linear(num_gates))(memory)
    gates = tf.split(gate_memory + gate_inputs, num_or_size_splits=2, axis=2)
    input_gate, forget_gate = gates
    input_gate = tf.sigmoid(input_gate + self._input_bias)
    forget_gate = tf.sigmoid(forget_gate + self._forget_bias)
    return input_gate, forget_gate
Create input and forget gates for this step using `inputs` and `memory`. Args: inputs: Tensor input. memory: The current state of memory. Returns: input_gate: A LSTM-like insert gate. forget_gate: A LSTM-like forget gate.
381,483
def light2mass_interpol(lens_light_model_list, kwargs_lens_light, numPix=100,
                        deltaPix=0.05, subgrid_res=5, center_x=0, center_y=0):
    # The interpolation keyword names ('grid_interp_x', 'grid_interp_y',
    # 'f_', 'f_x', 'f_y', 'f_xx', 'f_xy', 'f_yy') and the 'INTERPOL' model
    # name were stripped; they are restored per lenstronomy's INTERPOL
    # profile convention.
    x_grid_sub, y_grid_sub = util.make_grid(numPix=numPix * 5,
                                            deltapix=deltaPix,
                                            subgrid_res=subgrid_res)
    import lenstronomy.Util.mask as mask_util
    mask = mask_util.mask_sphere(x_grid_sub, y_grid_sub, center_x, center_y,
                                 r=1)
    x_grid, y_grid = util.make_grid(numPix=numPix, deltapix=deltaPix)
    # compute light on the supersampled grid and normalise within the mask
    lightModel = LightModel(light_model_list=lens_light_model_list)
    flux = lightModel.surface_brightness(x_grid_sub, y_grid_sub,
                                         kwargs_lens_light)
    flux_norm = np.sum(flux[mask == 1]) / np.sum(mask)
    flux /= flux_norm
    from lenstronomy.LensModel.numerical_profile_integrals import ConvergenceIntegrals
    integral = ConvergenceIntegrals()
    # treat normalised light as convergence, integrate for deflection/potential
    convergence_sub = flux
    f_x_sub, f_y_sub = integral.deflection_from_kappa(
        convergence_sub, x_grid_sub, y_grid_sub,
        deltaPix=deltaPix / float(subgrid_res))
    f_sub = integral.potential_from_kappa(
        convergence_sub, x_grid_sub, y_grid_sub,
        deltaPix=deltaPix / float(subgrid_res))
    x_axes_sub, y_axes_sub = util.get_axes(x_grid_sub, y_grid_sub)
    # interpolation function on the lensing quantities
    from lenstronomy.LensModel.Profiles.interpol import Interpol
    interp_func = Interpol()
    interp_func.do_interp(x_axes_sub, y_axes_sub, f_sub, f_x_sub, f_y_sub)
    # evaluate lensing quantities on the sparser output grid
    x_axes, y_axes = util.get_axes(x_grid, y_grid)
    f_ = interp_func.function(x_grid, y_grid)
    f_x, f_y = interp_func.derivatives(x_grid, y_grid)
    # numerical differentials for the Hessian elements
    from lenstronomy.LensModel.numeric_lens_differentials import NumericLens
    lens_differential = NumericLens(lens_model_list=['INTERPOL'])
    kwargs = [{'grid_interp_x': x_axes_sub, 'grid_interp_y': y_axes_sub,
               'f_': f_sub, 'f_x': f_x_sub, 'f_y': f_y_sub}]
    f_xx, f_xy, f_yx, f_yy = lens_differential.hessian(x_grid, y_grid, kwargs)
    kwargs_interpol = {'grid_interp_x': x_axes, 'grid_interp_y': y_axes,
                       'f_': util.array2image(f_),
                       'f_x': util.array2image(f_x),
                       'f_y': util.array2image(f_y),
                       'f_xx': util.array2image(f_xx),
                       'f_xy': util.array2image(f_xy),
                       'f_yy': util.array2image(f_yy)}
    return kwargs_interpol
takes a lens light model and turns it numerically into a lens model (with all lensmodel quantities computed on a grid). Then provides an interpolated grid for the quantities. :param kwargs_lens_light: lens light keyword argument list :param numPix: number of pixels per axis for the return interpolation :param deltaPix: interpolation/pixel size :param center_x: center of the grid :param center_y: center of the grid :param subgrid_res: subgrid for the numerical integrals :return:
381,484
def get_queue(cls, name, priority=0, **fields_if_new):
    # The stripped dict keys are restored from the function signature.
    queue_kwargs = {'name': name, 'priority': priority}
    retries = 0
    while retries < 10:
        retries += 1
        try:
            queue, created = cls.get_or_connect(**queue_kwargs)
        except IndexError:
            # Transient retrieval failure: retry
            continue
        except ValueError:
            # More than one match: take the first one
            try:
                queue = cls.collection(**queue_kwargs).instances()[0]
            except IndexError:
                continue
            else:
                created = False
        break
    if created and fields_if_new:
        queue.set_fields(**fields_if_new)
    return queue
Get, or create, and return the wanted queue. If the queue is created, fields in fields_if_new will be set for the new queue.
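Hedged usage sketch: `Queue` stands for the model class carrying this classmethod; the queue name is illustrative:

queue = Queue.get_queue('emails', priority=1)   # created on first call
same = Queue.get_queue('emails', priority=1)    # returns the existing queue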
381,485
def destroy(self): try: self.lock() _logger.debug("Unlocked connection %r to close.", self.container_id) self._close() finally: self.release() uamqp._Platform.deinitialize()
Close the connection, and close any associated CBS authentication session.
381,486
def _fill_levenshtein_table(self, first, second, update_func, add_pred, clear_pred, threshold=None):
    m, n = len(first), len(second)
    # if no threshold is given, derive one from the element-wise costs
    if threshold is None:
        threshold = 0.0
        for a, b in zip(first, second):
            threshold += self.get_operation_cost(a, b)
        if m > n:
            for a in first[n:]:
                threshold += self.get_operation_cost(a, '')
        elif m < n:
            for b in second[m:]:
                threshold += self.get_operation_cost('', b)
        threshold *= 2
    # initialize the cost table and backtraces
    costs = np.zeros(shape=(m + 1, n + 1), dtype=np.float64)
    costs[:] = np.inf
    backtraces = [None] * (m + 1)
    for i in range(m + 1):
        backtraces[i] = [[] for j in range(n + 1)]
    costs[0][0] = 0.0
    for i in range(m + 1):
        for i_right in range(i, min(i + self.max_up_length, m) + 1):
            up = first[i: i_right]
            max_low_length = self.max_low_lengths_by_up.get(up, -1)
            if max_low_length == -1:  # no operations with this upper element
                continue
            up_costs = self.operation_costs[up]
            for j in range(n + 1):
                if costs[i][j] > threshold:
                    continue
                if len(backtraces[i][j]) == 0 and i + j > 0:
                    continue
                for j_right in range((j if i_right > i else j + 1),
                                     min(j + max_low_length, n) + 1):
                    low = second[j: j_right]
                    curr_cost = up_costs.get(low, np.inf)
                    old_cost = costs[i_right][j_right]
                    new_cost = costs[i][j] + curr_cost
                    if new_cost > threshold:
                        continue
                    if add_pred(new_cost, old_cost):
                        if clear_pred(new_cost, old_cost):
                            backtraces[i_right][j_right] = []
                        costs[i_right][j_right] = update_func(new_cost, old_cost)
                        backtraces[i_right][j_right].append((i, j))
    return costs, backtraces
Dynamic-programming routine that fills the table costs of transduction
costs: costs[i][j] is the minimal cost of a transduction that maps
first[:i] to second[:j].

Arguments:
----------
first, second : string
    The upper and lower elements of the transduction
update_func : callable, float*float -> float
    update_func(x, y) returns the new value of a cell in the costs
    table when its old value is y and the candidate new value is x;
    in practice update_func = min everywhere
add_pred : callable : float*float -> bool
    add_pred(x, y) returns whether a new element p with cost x is
    appended to the cell backtraces[i][j], given costs[i][j] = y and
    the current cost x
clear_pred : callable : float*float -> bool
    clear_pred(x, y) returns whether the cell backtraces[i][j] is
    cleared, given costs[i][j] = y and the current cost x of the
    element p being added to that cell

Returns:
--------
costs : array, dtype=float, shape=(len(first)+1, len(second)+1)
    array whose cell with indices i, j holds the minimal cost of a
    transduction mapping first[:i] to second[:j]
backtraces : array, dtype=list, shape=(len(first)+1, len(second)+1)
    array whose cell with indices i, j holds the back references to
    the previous cell on an optimal transduction ending in
    backtraces[i][j]
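Illustrative arguments matching the description above (the `transducer` instance is assumed; `min` plus these predicates keep exactly the minimal-cost backtraces):

costs, backtraces = transducer._fill_levenshtein_table(
    'word', 'ward',
    update_func=min,                        # a cell keeps the cheaper cost
    add_pred=lambda new, old: new <= old,   # record a path if not worse
    clear_pred=lambda new, old: new < old)  # strictly better: drop old paths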
381,487
def helioX(self,*args,**kwargs): out= self._orb.helioX(*args,**kwargs) if len(out) == 1: return out[0] else: return out
NAME: helioX PURPOSE: return Heliocentric Galactic rectangular x-coordinate (aka "X") INPUT: t - (optional) time at which to get X (can be Quantity) obs=[X,Y,Z] - (optional) position of observer in the Galactocentric frame (in kpc and km/s) (default=[8.0,0.,0.]; entries can be Quantity) OR Orbit object that corresponds to the orbit of the observer Y is ignored and always assumed to be zero ro= (Object-wide default) physical scale for distances to use to convert (can be Quantity) OUTPUT: helioX(t) in kpc HISTORY: 2011-02-24 - Written - Bovy (NYU)
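A minimal usage sketch with galpy (orbit initial conditions are illustrative, in internal units):

from galpy.orbit import Orbit

o = Orbit([1.0, 0.1, 1.1, 0.0, 0.1, 0.0])  # [R, vR, vT, z, vz, phi]
print(o.helioX())  # heliocentric X in kpc for the default observer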
381,488
def processors(self): return self.preprocessors + list(reversed(self.compilers)) + self.postprocessors
The list of all processors (preprocessors, compilers, postprocessors) used to build asset.
381,489
def decode(string, base): base = int(base) code_string = get_code_string(base) result = 0 if base == 16: string = string.lower() while len(string) > 0: result *= base result += code_string.find(string[0]) string = string[1:] return result
Given a string (string) and a numeric base (base), decode the string into an integer. Returns the integer
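Because the function is pure, the described behaviour can be spot-checked directly (values follow from the code above):

assert decode('ff', 16) == 255   # hex input is lower-cased first
assert decode('101', 2) == 5
assert decode('0', 10) == 0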
381,490
def get_github_login(self, user, rol, commit_hash, repo):
    login = None
    try:
        login = self.github_logins[user]
    except KeyError:
        # look the login up through the GitHub commits API
        GITHUB_API_URL = "https://api.github.com"
        commit_url = GITHUB_API_URL + "/repos/%s/commits/%s" % (repo, commit_hash)
        headers = {'Authorization': 'token ' + self.github_token}

        r = self.requests.get(commit_url, headers=headers)

        try:
            r.raise_for_status()
        except requests.exceptions.ConnectionError as ex:
            # commit not reachable (or rate limit exhausted)
            logger.error("Can't find commit %s %s", commit_url, ex)
            return login

        commit_json = r.json()
        author_login = None
        if 'author' in commit_json and commit_json['author']:
            author_login = commit_json['author']['login']
        else:
            self.github_logins_author_not_found += 1

        user_login = None
        if 'committer' in commit_json and commit_json['committer']:
            user_login = commit_json['committer']['login']
        else:
            self.github_logins_committer_not_found += 1

        if rol == "author":
            login = author_login
        elif rol == "committer":
            login = user_login
        else:
            logger.error("Wrong rol: %s" % (rol))
            raise RuntimeError

        self.github_logins[user] = login
        logger.debug("%s is %s in github (not found %i authors %i committers)", user, login,
                     self.github_logins_author_not_found,
                     self.github_logins_committer_not_found)

    return login
rol: author or committer
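Hedged call sketch: `enricher` stands for whatever object carries this method and a configured `github_token`; every argument value is a placeholder:

login = enricher.get_github_login(
    'Jane Doe <jane@example.com>',               # user string, used as the cache key
    'author',                                    # rol: 'author' or 'committer'
    '7fd1a60b01f91b314f59955a4e4d4e80d8edf11d',  # placeholder commit hash
    'octocat/Hello-World')                       # placeholder owner/repo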
381,491
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_admin_status(self, **kwargs):
    config = ET.Element("config")
    fcoe_get_interface = ET.Element("fcoe_get_interface")
    config = fcoe_get_interface
    output = ET.SubElement(fcoe_get_interface, "output")
    fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list")
    fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id")
    # kwargs keys below are reconstructed from the variable names, following
    # the pattern of this auto-generated module
    fcoe_intf_fcoe_port_id_key.text = kwargs.pop('fcoe_intf_fcoe_port_id')
    fcoe_intf_admin_status = ET.SubElement(fcoe_intf_list, "fcoe-intf-admin-status")
    fcoe_intf_admin_status.text = kwargs.pop('fcoe_intf_admin_status')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
381,492
def execute(mp): with mp.open("file1", resampling="bilinear") as raster_file: if raster_file.is_empty(): return "empty" dem = raster_file.read() return dem
Example process for testing. Inputs: ------- file1 raster file Parameters: ----------- Output: ------- np.ndarray
381,493
def hide_routemap_holder_route_map_content_match_metric_metric_rmm(self, **kwargs):
    config = ET.Element("config")
    hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
    route_map = ET.SubElement(hide_routemap_holder, "route-map")
    name_key = ET.SubElement(route_map, "name")
    # kwargs keys below are reconstructed from the variable names, following
    # the pattern of this auto-generated module
    name_key.text = kwargs.pop('name')
    action_rm_key = ET.SubElement(route_map, "action-rm")
    action_rm_key.text = kwargs.pop('action_rm')
    instance_key = ET.SubElement(route_map, "instance")
    instance_key.text = kwargs.pop('instance')
    content = ET.SubElement(route_map, "content")
    match = ET.SubElement(content, "match")
    metric = ET.SubElement(match, "metric")
    metric_rmm = ET.SubElement(metric, "metric-rmm")
    metric_rmm.text = kwargs.pop('metric_rmm')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
381,494
def create_from_tuples(self, tuples, **args): amap = {} subject_label_map = {} for a in tuples: subj = a[0] subject_label_map[subj] = a[1] if subj not in amap: amap[subj] = [] amap[subj].append(a[2]) aset = AssociationSet(subject_label_map=subject_label_map, association_map=amap, **args) return aset
Creates from a list of (subj,subj_name,obj) tuples
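Illustrative call ((subject, subject label, object) triples; `factory` stands for the object defining this method, and the IDs are examples only):

aset = factory.create_from_tuples([
    ('UniProtKB:P04637', 'TP53', 'GO:0006915'),
    ('UniProtKB:P04637', 'TP53', 'GO:0005634'),
])  # both GO terms land under the same subject in the association map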
381,495
def prepare_queues(queues, lock):
    for queue in queues:
        queue._pebble_lock = lock
        with queue.mutex:
            queue._pebble_old_method = queue._put
            # MethodType is types.MethodType; new_method is the replacement
            # _put defined alongside this helper in the same module
            queue._put = MethodType(new_method, queue)
Replaces queue._put() method in order to notify the waiting Condition.
381,496
def _BuildMessageFromTypeName(type_name, descriptor_pool): from google.protobuf import symbol_database database = symbol_database.Default() try: message_descriptor = descriptor_pool.FindMessageTypeByName(type_name) except KeyError: return None message_type = database.GetPrototype(message_descriptor) return message_type()
Returns a protobuf message instance. Args: type_name: Fully-qualified protobuf message type name string. descriptor_pool: DescriptorPool instance. Returns: A Message instance of type matching type_name, or None if a Descriptor wasn't found matching type_name.
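Usage sketch against the default pool; importing a generated module (here the well-known Timestamp) is what registers its descriptor:

from google.protobuf import descriptor_pool
from google.protobuf import timestamp_pb2  # registers google.protobuf.Timestamp

msg = _BuildMessageFromTypeName('google.protobuf.Timestamp',
                                descriptor_pool.Default())
assert msg is not None and msg.DESCRIPTOR.name == 'Timestamp'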
381,497
def add_arguments(self, parser): test_command = TestCommand() test_command.add_arguments(parser) for option in OPTIONS: parser.add_argument(*option[0], **option[1])
Command line arguments for Django 1.8+
381,498
def load(self):
    self.meta.resolved_path = self.find_data(self.meta.path)
    if not self.meta.resolved_path:
        raise ImproperlyConfigured("Data file '{}' not found".format(self.meta.path))

    print("Loading:", self.meta.path)

    # text mode, per the docstring
    with open(self.meta.resolved_path, 'r') as fd:
        return fd.read()
Load a file in text mode
381,499
def fit(self, X, y, groups=None, **fit_params): self.scorer_ = check_scoring(self.estimator, scoring=self.scoring) if pandas_available and isinstance(X, pd.DataFrame): self.scorer_ = self._wrap_scorer(self.scorer_, X.columns) if self.cv != "prefit" and self.refit: self.estimator_ = clone(self.estimator) self.estimator_.fit(X, y, **fit_params) X = check_array(X) if self.cv not in (None, "prefit"): si = self._cv_scores_importances(X, y, groups=groups, **fit_params) else: si = self._non_cv_scores_importances(X, y) scores, results = si self.scores_ = np.array(scores) self.results_ = results self.feature_importances_ = np.mean(results, axis=0) self.feature_importances_std_ = np.std(results, axis=0) return self
Compute ``feature_importances_`` attribute and optionally fit the base estimator. Parameters ---------- X : array-like of shape (n_samples, n_features) The training input samples. y : array-like, shape (n_samples,) The target values (integers that correspond to classes in classification, real numbers in regression). groups : array-like, with shape (n_samples,), optional Group labels for the samples used while splitting the dataset into train/test set. **fit_params : Other estimator specific parameters Returns ------- self : object Returns self.
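A minimal end-to-end sketch, assuming this `fit` belongs to an eli5-style PermutationImportance wrapper constructed as below:

from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor

X, y = make_regression(n_samples=200, n_features=5, random_state=0)
perm = PermutationImportance(RandomForestRegressor(random_state=0), cv=3)
perm.fit(X, y)
print(perm.feature_importances_)  # mean importance per feature across CV folds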