Unnamed: 0 (int64): values 0 – 389k
code (string): lengths 26 – 79.6k
docstring (string): lengths 1 – 46.9k
6,400
def section(node):
    title = ''
    if node.first_child is not None:
        # the node-type literal was lost in extraction; 'heading' is a guess based on CommonMark node types
        if node.first_child.t == u'heading':
            title = node.first_child.first_child.literal
    o = nodes.section(ids=[title], names=[title])
    for n in MarkDown(node):
        o += n
    return o
A section in reStructuredText, which needs a title (the first child). This is a custom type.
6,401
def data(self, column, role):
    return self.columns[column](self._atype, role)
Return the data for the specified column and role The column addresses one attribute of the data. :param column: the data column :type column: int :param role: the data role :type role: QtCore.Qt.ItemDataRole :returns: data depending on the role :rtype: :raises: None
6,402
def cookies(self, url):
    part = urlparse(url)
    _domain = part.hostname
    cookie_dict = {}
    now = utc_now()
    for _, a in list(self.cookiejar._cookies.items()):
        for _, b in a.items():
            for cookie in list(b.values()):
                if cookie.expires and cookie.expires <= now:
                    continue
                if not re.search("%s$" % cookie.domain, _domain):
                    continue
                if not re.match(cookie.path, part.path):
                    continue
                cookie_dict[cookie.name] = cookie.value
    return cookie_dict
Return cookies that are matching the path and are still valid :param url: :return:
6,403
def get_or_create_stream(self, stream_id, try_create=True):
    stream_id = get_stream_id(stream_id)
    if stream_id in self.streams:
        logging.debug("found {}".format(stream_id))
        return self.streams[stream_id]
    elif try_create:
        logging.debug("creating {}".format(stream_id))
        return self.create_stream(stream_id=stream_id)
Helper function to get a stream or create one if it's not already defined :param stream_id: The stream id :param try_create: Whether to try to create the stream if not found :return: The stream object
6,404
def _fill_role_cache(self, principal, overwrite=False):
    if not self.app_state.use_cache:
        return None
    if not self._has_role_cache(principal) or overwrite:
        self._set_role_cache(principal, self._all_roles(principal))
    return self._role_cache(principal)
Fill role cache for `principal` (User or Group), in order to avoid too many queries when checking role access with 'has_role'. Return role_cache of `principal`
6,405
def authenticate_client_id(self, client_id, request, *args, **kwargs):
    if client_id is None:
        client_id, _ = self._get_client_creds_from_request(request)
    # original log messages were lost in extraction; the texts below are placeholders
    log.debug('Authenticate client %r', client_id)
    client = request.client or self._clientgetter(client_id)
    if not client:
        log.debug('Authenticate client failed, client not found.')
        return False
    request.client = client
    return True
Authenticate a non-confidential client. :param client_id: Client ID of the non-confidential client :param request: The Request object passed by oauthlib
6,406
def select_template_from_string(arg):
    # the comma literal was stripped in extraction; restored from the docstring
    # ("multiple template paths separated by commas")
    if ',' in arg:
        tpl = loader.select_template(
            [tn.strip() for tn in arg.split(',')])
    else:
        tpl = loader.get_template(arg)
    return tpl
Select a template from a string, which can include multiple template paths separated by commas.
6,407
def unshare_me(self, keys=None, auto_update=False, draw=None,
               update_other=True):
    auto_update = auto_update or not self.no_auto_update
    keys = self._set_sharing_keys(keys)
    to_update = []
    for key in keys:
        fmto = getattr(self, key)
        try:
            other_fmto = self._shared.pop(key)
        except KeyError:
            pass
        else:
            other_fmto.shared.remove(fmto)
            if update_other:
                other_fmto.plotter._register_update(
                    force=[other_fmto.key])
                to_update.append(other_fmto.plotter)
    self.update(force=keys, draw=draw, auto_update=auto_update)
    if update_other and auto_update:
        for plotter in to_update:
            plotter.start_update(draw=draw)
Close the sharing connection of this plotter with others This method undoes the sharing connections made by the :meth:`share` method and release this plotter again. Parameters ---------- keys: string or iterable of strings The formatoptions to unshare, or group names of formatoptions to unshare all formatoptions of that group (see the :attr:`fmt_groups` property). If None, all formatoptions of this plotter are unshared. %(InteractiveBase.start_update.parameters.draw)s %(InteractiveBase.update.parameters.auto_update)s See Also -------- share, unshare
6,408
def load_plugins(self):
    from dyndnsc.plugins.builtin import PLUGINS
    for plugin in PLUGINS:
        self.add_plugin(plugin())
    super(BuiltinPluginManager, self).load_plugins()
Load plugins from `dyndnsc.plugins.builtin`.
6,409
def get_authenticated_user(self, callback):
    args = dict((k, v) for k, v in request.args.items())
    args["openid.mode"] = u"check_authentication"
    r = requests.post(self._OPENID_ENDPOINT, data=args)
    return self._on_authentication_verified(callback, r)
Fetches the authenticated user data upon redirect. This method should be called by the handler that receives the redirect from the authenticate_redirect() or authorize_redirect() methods.
6,410
def visit_literal_block(self, node): language = node.get(, None) is_code_node = False if not language: is_code_node = True classes = node.get() if in classes: language = classes[-1] else: return if language in self.ignore[]: return if language == or ( language == and node.rawsource.lstrip().startswith()): self.visit_doctest_block(node) raise docutils.nodes.SkipNode checker = { : bash_checker, : c_checker, : cpp_checker, : lambda source, _: lambda: check_json(source), : lambda source, _: lambda: check_xml(source), : lambda source, _: lambda: check_python(source), : lambda source, _: lambda: check_rst(source, ignore=self.ignore) }.get(language) if checker: run = checker(node.rawsource, self.working_directory) self._add_check(node=node, run=run, language=language, is_code_node=is_code_node) raise docutils.nodes.SkipNode
Check syntax of code block.
6,411
def check_model_permission(self, app, model): if self.apps_dict.get(app, False) and model in self.apps_dict[app][]: return True return False
Checks if model is listed in apps_dict Since apps_dict is derived from the app_list given by django admin, it lists only the apps and models the user can view
6,412
def mergeAllLayers(self):
    start = time.time()
    while len(self.layers) > 1:
        self.mergeBottomLayers()
    # the label string printed before the elapsed time was lost in extraction
    print(str(time.time() - start))
    return self.layers[0]
Merge all the layers together. :rtype: The result :py:class:`Layer` object.
6,413
def addObject(self, object, name=None):
    if name is None:
        name = len(self.objects)
    self.objects[name] = object
Adds an object to the Machine. Objects should be PhysicalObjects.
6,414
def eval_in_new(cls, expr, *args, **kwargs):
    ctx = cls(*args, **kwargs)
    ctx.env.rec_new(expr)
    return ctx.eval(expr)
:meth:`eval` an expression in a new, temporary :class:`Context`. This should be safe to use directly on user input. Args: expr (LispVal): The expression to evaluate. *args: Args for the :class:`Context` constructor. **kwargs: Kwargs for the :class:`Context` constructor.
6,415
def _on_merge(self, other):
    self._inputs.extend(other._inputs)
    self._outputs.extend(other._outputs)
    if self._sensitivity is not None:
        self._sensitivity.extend(other._sensitivity)
    else:
        assert other._sensitivity is None
    if self._enclosed_for is not None:
        self._enclosed_for.update(other._enclosed_for)
    else:
        assert other._enclosed_for is None
    other_was_top = other.parentStm is None
    if other_was_top:
        other._get_rtl_context().statements.remove(other)
    for s in other._inputs:
        s.endpoints.discard(other)
        s.endpoints.append(self)
    for s in other._outputs:
        s.drivers.discard(other)
        s.drivers.append(self)
After merging statements update IO, sensitivity and context :attention: rank is not updated
6,416
def systematic_resample(weights):
    N = len(weights)
    # make N subdivisions and pick positions with a single random offset
    positions = (random() + np.arange(N)) / N
    # dtype literal lost in extraction; the docstring says the result is an array of ints
    indexes = np.zeros(N, 'i')
    cumulative_sum = np.cumsum(weights)
    i, j = 0, 0
    while i < N:
        if positions[i] < cumulative_sum[j]:
            indexes[i] = j
            i += 1
        else:
            j += 1
    return indexes
Performs the systematic resampling algorithm used by particle filters. This algorithm separates the sample space into N divisions. A single random offset is used to choose where to sample from for all divisions. This guarantees that every sample is exactly 1/N apart. Parameters ---------- weights : list-like of float list of weights as floats Returns ------- indexes : ndarray of ints array of indexes into the weights defining the resample. i.e. the index of the zeroth resample is indexes[0], etc.
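A minimal usage sketch (assumes numpy imported as np and normalized weights; the weight values below are made up):
    import numpy as np
    weights = np.array([0.1, 0.1, 0.4, 0.2, 0.2])
    idx = systematic_resample(weights / weights.sum())
    # idx is an int array of length 5; higher-weight particles appear more often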
6,417
def mass_enclosed_3d(self, r, kwargs_profile): kwargs = copy.deepcopy(kwargs_profile) try: del kwargs[] del kwargs[] except: pass out = integrate.quad(lambda x: self._profile.density(x, **kwargs)*4*np.pi*x**2, 0, r) return out[0]
computes the mass enclosed within a sphere of radius r :param r: radius (arcsec) :param kwargs_profile: keyword argument list with lens model parameters :return: 3d mass enclosed of r
6,418
async def try_sending(self, msg, timeout_secs, max_attempts):
    if timeout_secs is None:
        timeout_secs = self.timeout
    if max_attempts is None:
        max_attempts = self.retry_count
    attempts = 0
    while attempts < max_attempts:
        if msg.seq_num not in self.message:
            return
        event = aio.Event()
        self.message[msg.seq_num][1] = event
        attempts += 1
        if self.transport:
            self.transport.sendto(msg.packed_message)
        try:
            myresult = await aio.wait_for(event.wait(), timeout_secs)
            break
        except Exception as inst:
            if attempts >= max_attempts:
                if msg.seq_num in self.message:
                    callb = self.message[msg.seq_num][2]
                    if callb:
                        callb(self, None)
                    del(self.message[msg.seq_num])
                self.unregister()
Coroutine used to send a message to the device when a response or ack is needed. This coroutine will try to send the message up to max_attempts times, waiting timeout_secs for an answer. If no answer is received, it will consider that the device is no longer accessible and will unregister it. :param msg: The message to send :type msg: aiolifx.Message :param timeout_secs: Number of seconds to wait for a response or ack :type timeout_secs: int :param max_attempts: Maximum number of send attempts before giving up :type max_attempts: int :returns: a coroutine to be scheduled :rtype: coroutine
6,419
def case(*, to, **kwargs): if len(kwargs) != 1: raise ValueError("expect exactly one source string argument") [(typ, string)] = kwargs.items() types = {, , , } if typ not in types: raise ValueError(f"source string keyword must be one of {types}") if to not in types: raise ValueError(f"\"to\" argument must be one of {types}") def pascal_iter(string): yield from (m.group(0) for m in re.finditer(r, string)) def snake_iter(string): yield from (m.group(2) for m in re.finditer(r, string)) inputs = { : pascal_iter, : pascal_iter, : snake_iter, : snake_iter, } def out_fun(sep, case=None, case_fst=None): if case is None: case = lambda x: x if case_fst is None: case_fst = case return lambda tokens: sep.join(case_fst(token) if i == 0 else case(token) for i, token in enumerate(tokens)) outputs = { : out_fun(, str.capitalize), : out_fun(, str.capitalize, str.lower), : out_fun(, str.lower), : out_fun(, str.upper), } tokens = inputs[typ](string) return outputs[to](tokens)
Converts an identifier from one case type to another. An identifier is an ASCII string consisting of letters, digits and underscores, not starting with a digit. The supported case types are camelCase, PascalCase, snake_case, and CONSTANT_CASE, identified as camel, pascal, snake, and constant. The input identifier is given as a keyword argument with one of these names, and the output type is given as a string in the `to` keyword argument. If a given string does not conform to the specified case type (such as underscores in camel or pascal case strings, or double__underscores in general), the result may not be as desired, although things like snaKe_casE or CONStaNT_CASe will generally work.
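A short usage sketch inferred from the docstring (the identifiers below are made-up examples):
    case(snake='parse_http_request', to='pascal')    # -> 'ParseHttpRequest'
    case(camel='parseHttpRequest', to='constant')    # -> 'PARSE_HTTP_REQUEST'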
6,420
def next_listing(self, limit=None): if self.after: return self._reddit._limit_get(self._path, params={: self.after}, limit=limit or self._limit) elif self._has_literally_more: more = self[-1] data = dict( link_id=self[0].parent_id, id=more.name, children=.join(more.children) ) j = self._reddit.post(, , data=data) d = j[] d[] = d[][] = d[][] del d[][] return self._reddit._thingify(d, path=self._path) else: raise NoMoreError()
GETs next :class:`Listing` directed to by this :class:`Listing`. Returns :class:`Listing` object. :param limit: max number of entries to get :raise UnsupportedError: raised when trying to load more comments
6,421
def set_element(self, index, e):
    if index > len(self._chain):
        raise IndexError("tried to access element %i, but chain has only %i"
                         " elements" % (index, len(self._chain)))
    if type(index) is not int:
        # the '%s' placeholder was lost in extraction; restored so the % formatting works
        raise ValueError(
            "index is not a integer but %s" % str(type(index)))
    if self._chain[index] is e:
        return
    replaced = self._chain.pop(index)
    if not replaced.is_reader:
        replaced.data_producer = None
    self._chain.insert(index, e)
    if index == 0:
        e.data_producer = e
    else:
        e.data_producer = self._chain[index - 1]
    try:
        successor = self._chain[index + 1]
        successor.data_producer = e
    except IndexError:
        pass
    self._chain[index]._estimated = False
    return replaced
r""" Replaces a pipeline stage. Replace an element in chain and return replaced element.
6,422
def exec_args(args, in_data=b'', chdir=None, shell=None, emulate_tty=False):
    # the debug format string was lost in extraction; placeholder text below
    LOG.debug('exec_args(%r, chdir=%r)', args, chdir)
    assert isinstance(args, list)
    if emulate_tty:
        stderr = subprocess.STDOUT
    else:
        stderr = subprocess.PIPE
    proc = subprocess.Popen(
        args=args,
        stdout=subprocess.PIPE,
        stderr=stderr,
        stdin=subprocess.PIPE,
        cwd=chdir,
    )
    stdout, stderr = proc.communicate(in_data)
    if emulate_tty:
        # per the docstring, LF is translated to CRLF when emulating a TTY;
        # the byte literals were lost in extraction
        stdout = stdout.replace(b'\n', b'\r\n')
    return proc.returncode, stdout, stderr or b''
Run a command in a subprocess, emulating the argument handling behaviour of SSH. :param list[str] args: Argument vector. :param bytes in_data: Optional standard input for the command. :param bool emulate_tty: If :data:`True`, arrange for stdout and stderr to be merged into the stdout pipe and for LF to be translated into CRLF, emulating the behaviour of a TTY. :return: (return code, stdout bytes, stderr bytes)
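A small usage sketch (hypothetical; assumes a POSIX system with an echo binary on PATH):
    rc, out, err = exec_args(['echo', 'hello'])
    assert rc == 0 and out.strip() == b'hello'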
6,423
def model_ids(self, protocol=None, groups=None):
    return [client.subid for client in self.models(protocol, groups)]
Returns a list of model ids for the specific query by the user. Models correspond to Clients for the XM2VTS database (At most one model per identity). Keyword Parameters: protocol Ignored. groups The groups to which the subjects attached to the models belong ('dev', 'eval', 'world') Note that 'dev', 'eval' and 'world' are aliases for 'client'. If no groups are specified, then both clients and impostors are listed. Returns: A list containing all the model ids (model <-> client in XM2VTS) belonging to the given group.
6,424
def melt(expr, id_vars=None, value_vars=None, var_name=, value_name=, ignore_nan=False): id_vars = id_vars or [] id_vars = [expr._get_field(r) for r in utils.to_list(id_vars)] if not value_vars: id_names = set([c.name for c in id_vars]) value_vars = [expr._get_field(c) for c in expr.schema.names if c not in id_names] else: value_vars = [expr._get_field(c) for c in value_vars] col_type = utils.highest_precedence_data_type(*[c.dtype for c in value_vars]) col_names = [c.name for c in value_vars] id_names = [r.name for r in id_vars] names = id_names + [var_name, value_name] dtypes = [r.dtype for r in id_vars] + [types.string, col_type] @output(names, dtypes) def mapper(row): for cn in col_names: col_value = getattr(row, cn) if ignore_nan and col_value is None: continue vals = [getattr(row, rn) for rn in id_names] yield tuple(vals + [cn, col_value]) return expr.map_reduce(mapper)
“Unpivots” a DataFrame from wide format to long format, optionally leaving identifier variables set. This function is useful to massage a DataFrame into a format where one or more columns are identifier variables (id_vars), while all other columns, considered measured variables (value_vars), are “unpivoted” to the row axis, leaving just two non-identifier columns, ‘variable’ and ‘value’. :param expr: collection :param id_vars: column(s) to use as identifier variables. :param value_vars: column(s) to unpivot. If not specified, uses all columns that are not set as id_vars. :param var_name: name to use for the ‘variable’ column. If None it uses frame.columns.name or ‘variable’. :param value_name: name to use for the ‘value’ column. :param ignore_nan: whether to ignore NaN values in data. :return: collection :Example: >>> df.melt(id_vars='id', value_vars=['col1', 'col2']) >>> df.melt(id_vars=['id', 'id2'], value_vars=['col1', 'col2'], var_name='variable')
6,425
def _convert_agg_to_wx_image(agg, bbox):
    if bbox is None:
        image = wx.EmptyImage(int(agg.width), int(agg.height))
        image.SetData(agg.tostring_rgb())
        return image
    else:
        return wx.ImageFromBitmap(_WX28_clipped_agg_as_bitmap(agg, bbox))
Convert the region of the agg buffer bounded by bbox to a wx.Image. If bbox is None, the entire buffer is converted. Note: agg must be a backend_agg.RendererAgg instance.
6,426
def one(self):
    result, count = self._get_buffered_response()
    if count == 0:
        raise NoResults("No records found")
    elif count > 1:
        raise MultipleResults("Expected single-record result, got multiple")
    return result[0]
Return exactly one record or raise an exception. :return: - Dictionary containing the only item in the response content :raise: - MultipleResults: If more than one record is present in the content - NoResults: If the result is empty
6,427
def on_ok(self): def popup(thr, add_type, title): thr.start() tool_str = if add_type == : tool_str = npyscreen.notify_wait(tool_str, title=title) while thr.is_alive(): time.sleep(1) return if self.image.value and self.link_name.value: api_action = Tools() api_image = Image(System().manifest) api_system = System() thr = threading.Thread(target=api_image.add, args=(), kwargs={: self.image.value, : self.link_name.value, : self.tag.value, : self.registry.value, : self.groups.value}) popup(thr, , ) npyscreen.notify_confirm(, title=) editor_args = {: self.image.value, : self.tag.value, : api_system.get_configure, : api_system.save_configure, : api_system.restart_tools, : api_action.start, : True, : True, : self.link_name.value, : self.groups.value} self.parentApp.addForm(, EditorForm, name= , **editor_args) self.parentApp.change_form() elif self.image.value: npyscreen.notify_confirm( , title=, form_color=) elif self.repo.value: self.parentApp.repo_value[] = self.repo.value.lower() api_repo = Repository(System().manifest) api_repo.repo = self.repo.value.lower() thr = threading.Thread(target=api_repo._clone, args=(), kwargs={: self.user.value, : self.pw.value}) popup(thr, , ) self.parentApp.addForm(, AddOptionsForm, name= , color=) self.parentApp.change_form() else: npyscreen.notify_confirm( , title=, form_color=) return
Add the repository
6,428
def quantiles(self, k=5):
    arr = self.array()
    q = list(np.linspace(0, 100, k))
    return np.percentile(arr.compressed(), q)
Returns an ndarray of quantile breaks.
6,429
def getattr(self, key):
    if key in _MethodFactoryMeta[self.classId]:
        return self.__dict__[key]
    else:
        return None
This method gets the attribute value of external method object.
6,430
def cfn_viz(template, parameters={}, outputs={}, out=sys.stdout): known_sg, open_sg = _analyze_sg(template[]) (graph, edges) = _extract_graph(template.get(, ), template[], known_sg, open_sg) graph[].extend(edges) _handle_terminals(template, graph, , , parameters) _handle_terminals(template, graph, , , outputs) graph[].append(_handle_pseudo_params(graph[])) _render(graph, out=out)
Render dot output for cloudformation.template in json format.
6,431
def users(store): user_objs = list(store.users()) total_events = store.user_events().count() for user_obj in user_objs: if user_obj.get(): user_obj[] = [store.institute(inst_id) for inst_id in user_obj.get()] else: user_obj[] = [] user_obj[] = store.user_events(user_obj).count() user_obj[] = event_rank(user_obj[]) return dict( users=sorted(user_objs, key=lambda user: -user[]), total_events=total_events, )
Display a list of all users and which institutes they belong to.
6,432
def _inner_join(left, right, left_key_fn, right_key_fn, join_fn=union_join):
    joiner = defaultdict(list)
    for ele in right:
        joiner[right_key_fn(ele)].append(ele)
    joined = []
    for ele in left:
        for other in joiner[left_key_fn(ele)]:
            joined.append(join_fn(ele, other))
    return joined
Inner join using left and right key functions :param left: left iterable to be joined :param right: right iterable to be joined :param function left_key_fn: function that produces hashable value from left objects :param function right_key_fn: function that produces hashable value from right objects :param join_fn: function called on joined left and right iterable items to complete join :rtype: list
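A brief usage sketch with made-up data, passing a dict-merging lambda as the join function:
    left = [{'id': 1, 'name': 'a'}, {'id': 2, 'name': 'b'}]
    right = [{'id': 1, 'score': 10}]
    merge = lambda l, r: {**l, **r}
    _inner_join(left, right, lambda x: x['id'], lambda x: x['id'], merge)
    # -> [{'id': 1, 'name': 'a', 'score': 10}]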
6,433
def absent(name, force=False): ret = {: name, : {}, : False, : } if name not in __salt__[](all=True): ret[] = True ret[] = {0}\.format(name) return ret pre_state = __salt__[](name) if pre_state != and not force: ret[] = ( ) return ret if __opts__[]: ret[] = None ret[] = ({0}\.format(name)) return ret try: ret[][] = __salt__[](name, force=force) except Exception as exc: ret[] = ({0}\ .format(name, exc)) return ret if name in __salt__[](all=True): ret[] = {0}\.format(name) else: if force and pre_state != : method = else: method = ret[] = {1}\.format(method, name) ret[] = True return ret
Ensure that a container is absent name Name of the container force : False Set to ``True`` to remove the container even if it is running Usage Examples: .. code-block:: yaml mycontainer: docker_container.absent multiple_containers: docker_container.absent: - names: - foo - bar - baz
6,434
def is_all_field_none(self):
    if self._id_ is not None:
        return False
    if self._created is not None:
        return False
    if self._updated is not None:
        return False
    if self._name is not None:
        return False
    if self._status is not None:
        return False
    if self._avatar is not None:
        return False
    if self._location is not None:
        return False
    if self._notification_filters is not None:
        return False
    if self._tab_text_waiting_screen is not None:
        return False
    return True
:rtype: bool
6,435
def fromSearch(text): terms = [] for term in nstr(text).split():
Generates a regular expression from 'simple' search terms. :param text | <str> :usage |>>> import projex.regex |>>> projex.regex.fromSearch('*cool*') |'^.*cool.*$' |>>> projex.projex.fromSearch('*cool*,*test*') |'^.*cool.*$|^.*test.*$' :return <str>
6,436
def delete(gandi, resource, force, background): output_keys = [, , , ] if not force: proceed = click.confirm( % resource) if not proceed: return opers = gandi.vhost.delete(resource, background) if background: for oper in opers: output_generic(gandi, oper, output_keys) return opers
Delete a vhost.
6,437
def dedicate(rh): rh.printSysLog("Enter changeVM.dedicate") parms = [ "-T", rh.userid, "-v", rh.parms[], "-r", rh.parms[], "-R", rh.parms[]] hideList = [] results = invokeSMCLI(rh, "Image_Device_Dedicate_DM", parms, hideInLog=hideList) if results[] != 0: rh.printLn("ES", results[]) rh.updateResults(results) if results[] == 0: results = isLoggedOn(rh, rh.userid) if (results[] == 0 and results[] == 0): parms = [ "-T", rh.userid, "-v", rh.parms[], "-r", rh.parms[], "-R", rh.parms[]] results = invokeSMCLI(rh, "Image_Device_Dedicate", parms) if results[] == 0: rh.printLn("N", "Dedicated device " + rh.parms[] + " to the active configuration.") else: rh.printLn("ES", results[]) rh.updateResults(results) rh.printSysLog("Exit changeVM.dedicate, rc: " + str(rh.results[])) return rh.results[]
Dedicate device. Input: Request Handle with the following properties: function - 'CHANGEVM' subfunction - 'DEDICATEDM' userid - userid of the virtual machine parms['vaddr'] - Virtual address parms['raddr'] - Real address parms['mode'] - Read only mode or not. Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error
6,438
def describe_file_extensions(self):
    (extensions, types) = self._call("describeFileExtensions")
    types = [DeviceType(a) for a in types]
    return (extensions, types)
Returns two arrays describing the supported file extensions. The first array contains the supported extensions and the second one the type each extension supports. Both have the same size. Note that some backends do not work on files, so this array may be empty. :py:func:`IMediumFormat.capabilities` out extensions of type str The array of supported extensions. out types of type :class:`DeviceType` The array which indicates the device type for every given extension.
6,439
def _config(key, mandatory=True, opts=None): name try: if opts: value = opts[.format(key)] else: value = __opts__[.format(key)] except KeyError: try: value = __defopts__[.format(key)] except KeyError: if mandatory: msg = .format(key) raise SaltInvocationError(msg) return False return value
Return a value for 'name' from master config file options or defaults.
6,440
def get_proficiency_search_session_for_objective_bank(self, objective_bank_id, proxy): if not objective_bank_id: raise NullArgument if not self.supports_proficiency_search(): raise Unimplemented() try: from . import sessions except ImportError: raise OperationFailed() proxy = self._convert_proxy(proxy) try: session = sessions.ProficiencySearchSession(objective_bank_id=objective_bank_id, proxy=proxy, runtime=self._runtime) except AttributeError: raise OperationFailed() return session
Gets the ``OsidSession`` associated with the proficiency search service for the given objective bank. :param objective_bank_id: the ``Id`` of the ``ObjectiveBank`` :type objective_bank_id: ``osid.id.Id`` :param proxy: a proxy :type proxy: ``osid.proxy.Proxy`` :return: a ``ProficiencySearchSession`` :rtype: ``osid.learning.ProficiencySearchSession`` :raise: ``NotFound`` -- no objective bank found by the given ``Id`` :raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null`` :raise: ``OperationFailed`` -- unable to complete request :raise: ``Unimplemented`` -- ``supports_proficiency_search()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_proficiency_search()`` and ``supports_visible_federation()`` are ``true``*
6,441
def create(cls, card_id, type_=None, custom_headers=None): if custom_headers is None: custom_headers = {} request_map = { cls.FIELD_TYPE: type_ } request_map_string = converter.class_to_json(request_map) request_map_string = cls._remove_field_for_request(request_map_string) api_client = client.ApiClient(cls._get_api_context()) request_bytes = request_map_string.encode() request_bytes = security.encrypt(cls._get_api_context(), request_bytes, custom_headers) endpoint_url = cls._ENDPOINT_URL_CREATE.format(cls._determine_user_id(), card_id) response_raw = api_client.post(endpoint_url, request_bytes, custom_headers) return BunqResponseInt.cast_from_bunq_response( cls._process_for_id(response_raw) )
Generate a new CVC2 code for a card. :type user_id: int :type card_id: int :param type_: The type of generated cvc2. Can be STATIC or GENERATED. :type type_: str :type custom_headers: dict[str, str]|None :rtype: BunqResponseInt
6,442
def _op_generic_StoU_saturation(self, value, min_value, max_value):
    return claripy.If(
        claripy.SGT(value, max_value),
        max_value,
        claripy.If(claripy.SLT(value, min_value), min_value, value))
Return unsigned saturated BV from signed BV. Min and max value should be unsigned.
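The same clamping logic written as a plain-Python sketch for concrete integers rather than claripy bitvectors (illustrative only):
    def saturate(value, min_value, max_value):
        # clamp a signed value into [min_value, max_value]
        if value > max_value:
            return max_value
        if value < min_value:
            return min_value
        return value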
6,443
def error(self, session=None):
    self.log.error("Recording the task instance as FAILED")
    self.state = State.FAILED
    session.merge(self)
    session.commit()
Forces the task instance's state to FAILED in the database.
6,444
def mute(self):
    response = self.rendering_control.GetMute(InstanceID=1, Channel=1)
    return response.CurrentMute == 1
get/set the current mute state
6,445
def greenlet_admin(self): if self.config["processes"] > 1: self.log.debug( "Admin server disabled because of multiple processes.") return class Devnull(object): def write(self, *_): pass from gevent import pywsgi def admin_routes(env, start_response): path = env["PATH_INFO"] status = "200 OK" res = "" if path in ["/", "/report", "/report_mem"]: report = self.get_worker_report(with_memory=(path == "/report_mem")) res = bytes(json_stdlib.dumps(report, cls=MongoJSONEncoder), ) elif path == "/wait_for_idle": self.wait_for_idle() res = bytes("idle", "utf-8") else: status = "404 Not Found" start_response(status, [(, )]) return [res] server = pywsgi.WSGIServer((self.config["admin_ip"], self.config["admin_port"]), admin_routes, log=Devnull()) try: self.log.debug("Starting admin server on port %s" % self.config["admin_port"]) server.serve_forever() except Exception as e: self.log.debug("Error in admin server : %s" % e)
This greenlet is used to get status information about the worker when --admin_port was given
6,446
def seed_aws_data(ctx, data): swag = create_swag_from_ctx(ctx) for k, v in json.loads(data.read()).items(): for account in v[]: data = { : .format(k), : account[], : [], : , : , : False, : , : k + + account[] } click.echo(click.style( .format(data[]), fg=) ) swag.create(data, dry_run=ctx.dry_run)
Seeds SWAG from a list of known AWS accounts.
6,447
def plot_raster(self, ax, xlim, x, y, pop_names=False, markersize=20., alpha=1., legend=True, marker=, rasterized=True): yoffset = [sum(self.N_X) if X== else 0 for X in self.X] for i, X in enumerate(self.X): if y[X].size > 0: ax.plot(x[X], y[X]+yoffset[i], marker, markersize=markersize, mfc=self.colors[i], mec= if marker in else self.colors[i], alpha=alpha, label=X, rasterized=rasterized, clip_on=True)
Plot network raster plot in subplot object. Parameters ---------- ax : `matplotlib.axes.AxesSubplot` object plot axes xlim : list List of floats. Spike time interval, e.g., [0., 1000.]. x : dict Key-value entries are population name and neuron spike times. y : dict Key-value entries are population name and neuron gid number. pop_names: bool If True, show population names on yaxis instead of gid number. markersize : float raster plot marker size alpha : float in [0, 1] transparency of marker legend : bool Switch on axes legends. marker : str marker symbol for matplotlib.pyplot.plot rasterized : bool if True, the scatter plot will be treated as a bitmap embedded in pdf file output Returns ------- None
6,448
def remove_my_api_key_from_groups(self, body, **kwargs): kwargs[] = True if kwargs.get(): return self.remove_my_api_key_from_groups_with_http_info(body, **kwargs) else: (data) = self.remove_my_api_key_from_groups_with_http_info(body, **kwargs) return data
Remove API key from groups. # noqa: E501 An endpoint for removing API key from groups. **Example usage:** `curl -X DELETE https://api.us-east-1.mbedcloud.com/v3/api-keys/me/groups -d '[0162056a9a1586f30242590700000000,0117056a9a1586f30242590700000000]' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.remove_my_api_key_from_groups(body, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param list[str] body: A list of IDs of the groups to be updated. (required) :return: UpdatedResponse If the method is called asynchronously, returns the request thread.
6,449
def enable(self, identifier, exclude_children=False):
    import_path = self._identifier2import_path(identifier=identifier)
    if import_path in self._disabled:
        self._enable_path(import_path)
        self._disabled.remove(import_path)
    if not exclude_children and import_path in self._children_disabled:
        self._children_disabled.remove(import_path)
        self._write_child_disabled()
Enable a previously disabled include type :param identifier: module or name of the include type :param exclude_children: disable the include type only for child processes, not the current process The ``identifier`` can be specified in multiple ways to disable an include type. See :py:meth:`~.DisabledIncludeTypes.disable` for details.
6,450
def _make_single_run(self):
    self._is_run = False
    self._new_nodes = OrderedDict()
    self._new_links = OrderedDict()
    self._is_run = True
    return self
Modifies the trajectory for single runs executed by the environment
6,451
def load_collection_from_url(resource, url, content_type=None):
    coll = create_staging_collection(resource)
    load_into_collection_from_url(coll, url, content_type=content_type)
    return coll
Creates a new collection for the registered resource and calls `load_into_collection_from_url` with it.
6,452
def close(self, force=True):
    if not self.closed:
        self.flush()
        self.fileobj.close()
        time.sleep(self.delayafterclose)
        if self.isalive():
            if not self.terminate(force):
                raise PtyProcessError()
        self.fd = -1
        self.closed = True
This closes the connection with the child application. Note that calling close() more than once is valid. This emulates standard Python behavior with files. Set force to True if you want to make sure that the child is terminated (SIGKILL is sent if the child ignores SIGHUP and SIGINT).
6,453
def shutdown_host(kwargs=None, call=None): if call != : raise SaltCloudSystemExit( ) host_name = kwargs.get() if kwargs and in kwargs else None force = _str_to_bool(kwargs.get()) if kwargs and in kwargs else False if not host_name: raise SaltCloudSystemExit( ) si = _get_si() host_ref = salt.utils.vmware.get_mor_by_property(si, vim.HostSystem, host_name) if not host_ref: raise SaltCloudSystemExit( ) if host_ref.runtime.connectionState == : raise SaltCloudSystemExit( s current state (not responding).Specified host system does not support shutdown.Specified host system is not in maintenance mode. Specify force=True to force reboot even if there are virtual machines running or other operations in progress.Error while shutting down host %s: %sfailed to shut down hostshut down host'}
Shut down the specified host system in this VMware environment .. note:: If the host system is not in maintenance mode, it will not be shut down. If you want to shut down the host system regardless of whether it is in maintenance mode, set ``force=True``. Default is ``force=False``. CLI Example: .. code-block:: bash salt-cloud -f shutdown_host my-vmware-config host="myHostSystemName" [force=True]
6,454
def update_counter(self, key, value, **kwargs):
    return self._client.update_counter(self, key, value, **kwargs)
Updates the value of a counter stored in this bucket. Positive values increment the counter, negative values decrement. See :meth:`RiakClient.update_counter() <riak.client.RiakClient.update_counter>` for options. .. deprecated:: 2.1.0 (Riak 2.0) Riak 1.4-style counters are deprecated in favor of the :class:`~riak.datatypes.Counter` datatype. :param key: the key of the counter :type key: string :param value: the amount to increment or decrement :type value: integer
6,455
def CopyFromDateTimeString(self, time_string):
    date_time_values = self._CopyDateTimeFromString(time_string)
    # the key literals were stripped in extraction; restored from the variable names
    year = date_time_values.get('year', 0)
    month = date_time_values.get('month', 0)
    day_of_month = date_time_values.get('day_of_month', 0)
    hours = date_time_values.get('hours', 0)
    minutes = date_time_values.get('minutes', 0)
    seconds = date_time_values.get('seconds', 0)
    microseconds = date_time_values.get('microseconds', 0)
    timestamp = self._GetNumberOfSecondsFromElements(
        year, month, day_of_month, hours, minutes, seconds)
    timestamp *= definitions.MILLISECONDS_PER_SECOND
    if microseconds:
        milliseconds, _ = divmod(
            microseconds, definitions.MILLISECONDS_PER_SECOND)
        timestamp += milliseconds
    self._timestamp = timestamp
    self.is_local_time = False
Copies a POSIX timestamp from a date and time string. Args: time_string (str): date and time value formatted as: YYYY-MM-DD hh:mm:ss.######[+-]##:## Where # are numeric digits ranging from 0 to 9 and the seconds fraction can be either 3 or 6 digits. The time of day, seconds fraction and time zone offset are optional. The default time zone is UTC.
6,456
def parse(self): try: data = toml.loads(self.obj.content, _dict=OrderedDict) if data: for package_type in [, ]: if package_type in data: for name, specs in data[package_type].items(): if not isinstance(specs, basestring): continue if specs == : specs = self.obj.dependencies.append( Dependency( name=name, specs=SpecifierSet(specs), dependency_type=filetypes.pipfile, line=.join([name, specs]), section=package_type ) ) except (toml.TomlDecodeError, IndexError) as e: pass
Parse a Pipfile (as seen in pipenv) :return:
6,457
def competition_submit(self, file_name, message, competition, quiet=False): if competition is None: competition = self.get_config_value(self.CONFIG_NAME_COMPETITION) if competition is not None and not quiet: print( + competition) if competition is None: raise ValueError() else: url_result = self.process_response( self.competitions_submissions_url_with_http_info( id=competition, file_name=os.path.basename(file_name), content_length=os.path.getsize(file_name), last_modified_date_utc=int(os.path.getmtime(file_name)))) if in url_result: url_result_list = url_result[].split() upload_result = self.process_response( self.competitions_submissions_upload_with_http_info( file=file_name, guid=url_result_list[-3], content_length=url_result_list[-2], last_modified_date_utc=url_result_list[-1])) upload_result_token = upload_result[] else: success = self.upload_complete(file_name, url_result[], quiet) if not success: return "Could not submit to competition" upload_result_token = url_result[] submit_result = self.process_response( self.competitions_submissions_submit_with_http_info( id=competition, blob_file_tokens=upload_result_token, submission_description=message)) return SubmitResult(submit_result)
submit a competition! Parameters ========== file_name: the competition metadata file message: the submission description competition: the competition name quiet: suppress verbose output (default is False)
6,458
def _compute_asset_ids(cls, inputs, marker_output_index, outputs, asset_quantities): if len(asset_quantities) > len(outputs) - 1: return None if len(inputs) == 0: return None result = [] issuance_asset_id = cls.hash_script(bytes(inputs[0].script)) for i in range(0, marker_output_index): value, script = outputs[i].nValue, outputs[i].scriptPubKey if i < len(asset_quantities) and asset_quantities[i] > 0: output = TransactionOutput(value, script, issuance_asset_id, asset_quantities[i], OutputType.issuance) else: output = TransactionOutput(value, script, None, 0, OutputType.issuance) result.append(output) issuance_output = outputs[marker_output_index] result.append(TransactionOutput( issuance_output.nValue, issuance_output.scriptPubKey, None, 0, OutputType.marker_output)) input_iterator = iter(inputs) input_units_left = 0 for i in range(marker_output_index + 1, len(outputs)): if i <= len(asset_quantities): output_asset_quantity = asset_quantities[i - 1] else: output_asset_quantity = 0 output_units_left = output_asset_quantity asset_id = None while output_units_left > 0: if input_units_left == 0: current_input = next(input_iterator, None) if current_input is None: return None else: input_units_left = current_input.asset_quantity if current_input.asset_id is not None: progress = min(input_units_left, output_units_left) output_units_left -= progress input_units_left -= progress if asset_id is None: asset_id = current_input.asset_id elif asset_id != current_input.asset_id: return None result.append(TransactionOutput( outputs[i].nValue, outputs[i].scriptPubKey, asset_id, output_asset_quantity, OutputType.transfer)) return result
Computes the asset IDs of every output in a transaction. :param list[TransactionOutput] inputs: The outputs referenced by the inputs of the transaction. :param int marker_output_index: The position of the marker output in the transaction. :param list[CTxOut] outputs: The outputs of the transaction. :param list[int] asset_quantities: The list of asset quantities of the outputs. :return: A list of outputs with asset ID and asset quantity information. :rtype: list[TransactionOutput]
6,459
async def connect(self): request = stun.Message(message_method=stun.Method.ALLOCATE, message_class=stun.Class.REQUEST) request.attributes[] = self.lifetime request.attributes[] = UDP_TRANSPORT try: response, _ = await self.request(request) except exceptions.TransactionFailed as e: response = e.response if response.attributes[][0] == 401: self.nonce = response.attributes[] self.realm = response.attributes[] self.integrity_key = make_integrity_key(self.username, self.realm, self.password) request.transaction_id = random_transaction_id() response, _ = await self.request(request) self.relayed_address = response.attributes[] logger.info(, self.relayed_address) self.refresh_handle = asyncio.ensure_future(self.refresh()) return self.relayed_address
Create a TURN allocation.
6,460
def set(self, r, g, b, intensity=None):
    self.r = r
    self.g = g
    self.b = b
    if intensity:
        self.intensity = intensity
Set the R/G/B and optionally intensity in one call
6,461
def json_worker(self, mask, cache_id=None, cache_method="string", cache_section="www"): use_cache = cache_id is not None def wrapper(fun): lock = threading.RLock() tasks = {} cargo = {} cargo_cleaner = [None] def is_done(cur_key): with lock: if cur_key not in tasks: return True if "running" not in tasks[cur_key]: return False return not tasks[cur_key]["running"] def start_cargo_cleaner(): def get_next_cargo(): with lock: next_ttl = None for value in cargo.values(): ttl, _ = value if next_ttl is None or ttl < next_ttl: next_ttl = ttl return next_ttl def clean_for(timestamp): with lock: keys = [] for (key, value) in cargo.items(): ttl, _ = value if ttl > timestamp: continue keys.append(key) for k in keys: cargo.pop(k) msg("purged cargo that was never read ({0})", k) def remove_cleaner(): with lock: if get_next_cargo() is not None: return False cargo_cleaner[0] = None return True def clean(): while True: next_ttl = get_next_cargo() if next_ttl is None: if remove_cleaner(): break else: continue time_until = next_ttl - time.time() if time_until > 0: time.sleep(time_until) clean_for(time.time()) with lock: if cargo_cleaner[0] is not None: return cleaner = self._thread_factory( target=clean, name="{0}-Cargo-Cleaner".format(self.__class__)) cleaner.daemon = True cargo_cleaner[0] = cleaner cleaner.start() def add_cargo(content): with lock: mcs = self.max_chunk_size if mcs < 1: raise ValueError("invalid chunk size: {0}".format(mcs)) ttl = time.time() + 10 * 60 chunks = [] while len(content) > 0: chunk = content[:mcs] content = content[mcs:] cur_key = get_key() cargo[cur_key] = (ttl, chunk) chunks.append(cur_key) start_cargo_cleaner() return chunks def remove_cargo(cur_key): with lock: _, result = cargo.pop(cur_key) return result def remove_worker(cur_key): with lock: task = tasks.pop(cur_key, None) if task is None: err_msg = "Task {0} not found!".format(cur_key) return None, (ValueError(err_msg), None) if task["running"]: th = task["thread"] if th.is_alive(): tid = None for tk, tobj in threading._active.items(): if tobj is th: tid = tk break if tid is not None: papi = ctypes.pythonapi pts_sae = papi.PyThreadState_SetAsyncExc res = pts_sae(ctypes.c_long(tid), ctypes.py_object(WorkerDeath)) if res == 0: msg("invalid thread id for " + "killing worker {0}", cur_key) elif res != 1: pts_sae(ctypes.c_long(tid), None) msg("killed too many ({0}) workers? 
{1}", res, cur_key) else: if self.verbose_workers: msg("killed worker {0}", cur_key) err_msg = "Task {0} is still running!".format(cur_key) return None, (ValueError(err_msg), None) return task["result"], task["exception"] def start_worker(args, cur_key, get_thread): try: with lock: task = { "running": True, "result": None, "exception": None, "thread": get_thread(), } tasks[cur_key] = task if use_cache: cache_obj = cache_id(args) if cache_obj is not None and self.cache is not None: with self.cache.get_hnd( cache_obj, section=cache_section, method=cache_method) as hnd: if hnd.has(): result = hnd.read() else: result = hnd.write(json_dumps(fun(args))) else: result = json_dumps(fun(args)) else: result = json_dumps(fun(args)) with lock: task["running"] = False task["result"] = result except (KeyboardInterrupt, SystemExit): raise except Exception as e: with lock: task["running"] = False task["exception"] = (e, traceback.format_exc()) return try: time.sleep(120) finally: _result, err = remove_worker(cur_key) if err is not None: e, tb = err if tb is not None: msg("Error in purged worker for {0}: {1}\n{2}", cur_key, e, tb) return msg("purged result that was never read ({0})", cur_key) def get_key(): with lock: crc32 = zlib.crc32(repr(get_time()).encode()) cur_key = int(crc32 & 0xFFFFFFFF) while cur_key in tasks or cur_key in cargo: key = int(cur_key + 1) if key == cur_key: key = 0 cur_key = key return cur_key def reserve_worker(): with lock: cur_key = get_key() tasks[cur_key] = {} return cur_key def run_worker(req, args): post = args["post"] try: action = post["action"] cur_key = None if action == "stop": cur_key = post["token"] remove_worker(cur_key) return { "token": cur_key, "done": True, "result": None, "continue": False, } if action == "start": cur_key = reserve_worker() inner_post = post.get("payload", {}) th = [] wname = "{0}-Worker-{1}".format(self.__class__, cur_key) worker = self._thread_factory( target=start_worker, name=wname, args=(inner_post, cur_key, lambda: th[0])) th.append(worker) worker.start() time.sleep(0.1) if action == "cargo": cur_key = post["token"] result = remove_cargo(cur_key) return { "token": cur_key, "result": result, } if action == "get": cur_key = post["token"] if cur_key is None: raise ValueError("invalid action: {0}".format(action)) if is_done(cur_key): result, exception = remove_worker(cur_key) if exception is not None: e, tb = exception if tb is None: return { "token": cur_key, "done": False, "result": None, "continue": False, } if isinstance(e, PreventDefaultResponse): raise e msg("Error in worker for {0}: {1}\n{2}", cur_key, e, tb) raise PreventDefaultResponse(500, "worker error") if len(result) > self.max_chunk_size: cargo_keys = add_cargo(result) return { "token": cur_key, "done": True, "result": cargo_keys, "continue": True, } return { "token": cur_key, "done": True, "result": result, "continue": False, } return { "token": cur_key, "done": False, "result": None, "continue": True, } except: msg("Error processing worker command: {0}", post) raise self.add_json_post_mask(mask, run_worker) self.set_file_argc(mask, 0) return fun return wrapper
A function annotation that adds a worker request. A worker request is a POST request that is computed asynchronously. That is, the actual task is performed in a different thread and the network request returns immediately. The client side uses polling to fetch the result and can also cancel the task. The worker javascript client side must be linked and used for accessing the request. Parameters ---------- mask : string The URL that must be matched to perform this request. cache_id : function(args) or None Optional function for caching the result. If set the worker must be idempotent. Requires a `cache` object for the server. The function needs to return an object constructed from the function arguments to uniquely identify the result. Results are cached verbatim. cache_method : string or None Optional cache method string. Gets passed to get_hnd() of the cache. Defaults to "string" which requires a JSON serializable cache_id. cache_section : string or None Optional cache section string. Gets passed to get_hnd() of the cache. Defaults to "www". fun : function(args); (The annotated function) A function returning a (JSON-able) object. The function takes one argument which is the dictionary containing the payload from the client side. If the result is None a 404 error is sent.
6,462
def get_bits_from_int(val_int, val_size=16):
    bits = [None] * val_size
    for i, item in enumerate(bits):
        bits[i] = bool((val_int >> i) & 0x01)
    return bits
Get the list of bits of val_int integer (default size is 16 bits) Return bits list, least significant bit first. Use list.reverse() if needed. :param val_int: integer value :type val_int: int :param val_size: bit size of integer (word = 16, long = 32) (optional) :type val_size: int :returns: list of boolean "bits" (least significant first) :rtype: list
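A quick usage sketch, extracting the bits of 0b1010 with an 8-bit word size:
    get_bits_from_int(0b1010, val_size=8)
    # -> [False, True, False, True, False, False, False, False]  (least significant bit first)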
6,463
def generate_session_key(hmac_secret=b''):
    session_key = random_bytes(32)
    encrypted_session_key = PKCS1_OAEP.new(UniverseKey.Public, SHA1)\
                                      .encrypt(session_key + hmac_secret)
    return (session_key, encrypted_session_key)
:param hmac_secret: optional HMAC :type hmac_secret: :class:`bytes` :return: (session_key, encrypted_session_key) tuple :rtype: :class:`tuple`
6,464
def reconcile_extend(self, high): errors = [] if not in high: return high, errors ext = high.pop() for ext_chunk in ext: for name, body in six.iteritems(ext_chunk): if name not in high: state_type = next( x for x in body if not x.startswith() ) ids = find_name(name, state_type, high) if len(ids) != 1: errors.append( {0}\{1}:{2}\ {0}\ {1}\{2}\.format( name, body.get(, ), body.get(, )) ) continue else: name = ids[0][0] for state, run in six.iteritems(body): if state.startswith(): continue if state not in high[name]: high[name][state] = run continue for arg in run: update = False for hind in range(len(high[name][state])): if isinstance(arg, six.string_types) and isinstance(high[name][state][hind], six.string_types): high[name][state].pop(hind) high[name][state].insert(hind, arg) update = True continue if isinstance(arg, dict) and isinstance(high[name][state][hind], dict): argfirst = next(iter(arg)) if argfirst == next(iter(high[name][state][hind])): if argfirst in STATE_REQUISITE_KEYWORDS: high[name][state][hind][argfirst].extend(arg[argfirst]) else: high[name][state][hind] = arg update = True if (argfirst == and next(iter(high[name][state][hind])) == ): high[name][state][hind] = arg if not update: high[name][state].append(arg) return high, errors
Pull the extend data and add it to the respective high data
6,465
async def container_size(
        self, container_len=None, container_type=None, params=None
):
    if hasattr(container_type, "serialize_archive"):
        raise ValueError("not supported")
    if self.writing:
        return await self._dump_container_size(
            self.iobj, container_len, container_type, params
        )
    else:
        raise ValueError("Not supported")
Container size :param container_len: :param container_type: :param params: :return:
6,466
def get_flair_choices(self, subreddit, link=None): data = {: six.text_type(subreddit), : link} return self.request_json(self.config[], data=data)
Return available flair choices and current flair. :param link: If link is given, return the flair options for this submission. Not normally given directly, but instead set by calling the flair_choices method for Submission objects. Use the default for the session's user. :returns: A dictionary with 2 keys. 'current' containing current flair settings for the authenticated user and 'choices' containing a list of possible flair choices.
6,467
def reorientate(self, override_exif=True): orientation = self.get_orientation() if orientation is None: return if orientation == 2: self.flip_horizontally() elif orientation == 3: self.rotate(180) elif orientation == 4: self.flip_vertically() elif orientation == 5: self.flip_vertically() self.rotate(270) elif orientation == 6: self.rotate(270) elif orientation == 7: self.flip_horizontally() self.rotate(270) elif orientation == 8: self.rotate(90) if orientation != 1 and override_exif: exif_dict = self._get_exif_segment() if exif_dict and piexif.ImageIFD.Orientation in exif_dict["0th"]: exif_dict["0th"][piexif.ImageIFD.Orientation] = 1 try: self.exif = piexif.dump(exif_dict) except Exception as e: msg = % e logger.error(msg)
Rotates the image in the buffer so that it is oriented correctly. If override_exif is True (default) then the metadata orientation is adjusted as well. :param override_exif: If the metadata should be adjusted as well. :type override_exif: Boolean
6,468
def get_pool_context(self): context = {self.current.lane_id: self.current.role, : self.current.role} for lane_id, role_id in self.current.pool.items(): if role_id: context[lane_id] = lazy_object_proxy.Proxy( lambda: self.role_model(super_context).objects.get(role_id)) return context
Builds context for the WF pool. Returns: Context dict.
6,469
def zpopmax(self, name, count=None):
    args = (count is not None) and [count] or []
    # the literals were stripped in extraction; 'withscores' and 'ZPOPMAX'
    # follow the usual redis-py command pattern
    options = {'withscores': True}
    return self.execute_command('ZPOPMAX', name, *args, **options)
Remove and return up to ``count`` members with the highest scores from the sorted set ``name``.
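A hedged usage sketch with redis-py (assumes a Redis server on localhost and made-up member names):
    import redis
    r = redis.Redis()
    r.zadd('scores', {'a': 1, 'b': 5, 'c': 3})
    r.zpopmax('scores', count=2)
    # -> [(b'b', 5.0), (b'c', 3.0)]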
6,470
def perceptual_weighting(S, frequencies, **kwargs):
    offset = time_frequency.A_weighting(frequencies).reshape((-1, 1))
    return offset + power_to_db(S, **kwargs)
Perceptual weighting of a power spectrogram: `S_p[f] = A_weighting(f) + 10*log(S[f] / ref)` Parameters ---------- S : np.ndarray [shape=(d, t)] Power spectrogram frequencies : np.ndarray [shape=(d,)] Center frequency for each row of `S` kwargs : additional keyword arguments Additional keyword arguments to `power_to_db`. Returns ------- S_p : np.ndarray [shape=(d, t)] perceptually weighted version of `S` See Also -------- power_to_db Notes ----- This function caches at level 30. Examples -------- Re-weight a CQT power spectrum, using peak power as reference >>> y, sr = librosa.load(librosa.util.example_audio_file()) >>> C = np.abs(librosa.cqt(y, sr=sr, fmin=librosa.note_to_hz('A1'))) >>> freqs = librosa.cqt_frequencies(C.shape[0], ... fmin=librosa.note_to_hz('A1')) >>> perceptual_CQT = librosa.perceptual_weighting(C**2, ... freqs, ... ref=np.max) >>> perceptual_CQT array([[ -80.076, -80.049, ..., -104.735, -104.735], [ -78.344, -78.555, ..., -103.725, -103.725], ..., [ -76.272, -76.272, ..., -76.272, -76.272], [ -76.485, -76.485, ..., -76.485, -76.485]]) >>> import matplotlib.pyplot as plt >>> plt.figure() >>> plt.subplot(2, 1, 1) >>> librosa.display.specshow(librosa.amplitude_to_db(C, ... ref=np.max), ... fmin=librosa.note_to_hz('A1'), ... y_axis='cqt_hz') >>> plt.title('Log CQT power') >>> plt.colorbar(format='%+2.0f dB') >>> plt.subplot(2, 1, 2) >>> librosa.display.specshow(perceptual_CQT, y_axis='cqt_hz', ... fmin=librosa.note_to_hz('A1'), ... x_axis='time') >>> plt.title('Perceptually weighted log CQT') >>> plt.colorbar(format='%+2.0f dB') >>> plt.tight_layout()
6,471
def visit_Assign(self, node):
    self.generic_visit(node)
    if node.value not in self.fixed_size_list:
        return node
    node.value = self.convert(node.value)
    return node
Replace list calls by static_list calls when possible >>> import gast as ast >>> from pythran import passmanager, backend >>> node = ast.parse("def foo(n): x = __builtin__.list(n); x[0] = 0; return __builtin__.tuple(x)") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(ListToTuple, node) >>> print(pm.dump(backend.Python, node)) def foo(n): x = __builtin__.pythran.static_list(n) x[0] = 0 return __builtin__.tuple(x) >>> node = ast.parse("def foo(n): x = __builtin__.list(n); x[0] = 0; return x") >>> pm = passmanager.PassManager("test") >>> _, node = pm.apply(ListToTuple, node) >>> print(pm.dump(backend.Python, node)) def foo(n): x = __builtin__.list(n) x[0] = 0 return x
6,472
def start(self, priority_queue, resource_queue): self._kill_event = threading.Event() self._priority_queue_pull_thread = threading.Thread(target=self._migrate_logs_to_internal, args=( priority_queue, , self._kill_event,) ) self._priority_queue_pull_thread.start() self._resource_queue_pull_thread = threading.Thread(target=self._migrate_logs_to_internal, args=( resource_queue, , self._kill_event,) ) self._resource_queue_pull_thread.start() inserted_tasks = set() left_messages = {} while (not self._kill_event.is_set() or self.pending_priority_queue.qsize() != 0 or self.pending_resource_queue.qsize() != 0 or priority_queue.qsize() != 0 or resource_queue.qsize() != 0): self.logger.debug(.format( self._kill_event.is_set(), self.pending_priority_queue.qsize() != 0, self.pending_resource_queue.qsize() != 0, priority_queue.qsize() != 0, resource_queue.qsize() != 0)) first_messages = [] messages = self._get_messages_in_batch(self.pending_priority_queue, interval=self.batching_interval, threshold=self.batching_threshold) if messages: self.logger.debug( "Got {} messages from priority queue".format(len(messages))) update_messages, insert_messages, all_messages = [], [], [] for msg_type, msg in messages: if msg_type.value == MessageType.WORKFLOW_INFO.value: if "python_version" in msg: self.logger.debug( "Inserting workflow start info to WORKFLOW table") self._insert(table=WORKFLOW, messages=[msg]) else: self.logger.debug( "Updating workflow end info to WORKFLOW table") self._update(table=WORKFLOW, columns=[, , , , ], messages=[msg]) else: all_messages.append(msg) if msg[] is not None: update_messages.append(msg) else: inserted_tasks.add(msg[]) insert_messages.append(msg) if msg[] in left_messages: first_messages.append( left_messages.pop(msg[])) self.logger.debug( "Updating and inserting TASK_INFO to all tables") self._update(table=WORKFLOW, columns=[, , ], messages=update_messages) if insert_messages: self._insert(table=TASK, messages=insert_messages) self.logger.debug( "There are {} inserted task records".format(len(inserted_tasks))) if update_messages: self._update(table=TASK, columns=[, , , ], messages=update_messages) self._insert(table=STATUS, messages=all_messages) messages = self._get_messages_in_batch(self.pending_resource_queue, interval=self.batching_interval, threshold=self.batching_threshold) if messages or first_messages: self.logger.debug( "Got {} messages from resource queue".format(len(messages))) self._insert(table=RESOURCE, messages=messages) for msg in messages: if msg[]: msg[] = States.running.name msg[] = msg[] if msg[] in inserted_tasks: first_messages.append(msg) else: left_messages[msg[]] = msg if first_messages: self._insert(table=STATUS, messages=first_messages) self._update(table=TASK, columns=[, , ], messages=first_messages)
Maintain a set to track the tasks that are already INSERTED into the database, to prevent the race condition where the first resource message (indicating the 'running' state) arrives before the first task message. If the race condition happens, add the message to left_messages and process it later.
6,473
def params_size(event_size, num_components, name=None): with tf.compat.v1.name_scope( name, , [event_size, num_components]): return MixtureSameFamily.params_size( num_components, OneHotCategorical.params_size(event_size, name=name), name=name)
The number of `params` needed to create a single distribution.
6,474
def create_table(self, names=None): scan_shape = (1,) for src in self._srcs: scan_shape = max(scan_shape, src[].shape) tab = create_source_table(scan_shape) for s in self._srcs: if names is not None and s.name not in names: continue s.add_to_table(tab) return tab
Create an astropy Table object with the contents of the ROI model.
6,475
def get_preferred_path(self): if self.path in ("", "/"): return "/" if self.is_collection and not self.path.endswith("/"): return self.path + "/" return self.path
Return the preferred mapping for a resource. Different URLs may map to the same resource, e.g.: '/a/b' == '/A/b' == '/a/b/' get_preferred_path() returns the same value for all these variants, e.g.: '/a/b/' (assuming resource names are considered case-insensitive) @param path: a UTF-8 encoded, unquoted byte string. @return: a UTF-8 encoded, unquoted byte string.
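A minimal standalone sketch of the same normalization rule; the function name and the explicit is_collection flag are illustrative stand-ins, not part of the original class:

def preferred_path(path, is_collection):
    # Root maps to "/", collections always carry a trailing slash.
    if path in ("", "/"):
        return "/"
    if is_collection and not path.endswith("/"):
        return path + "/"
    return path

preferred_path("/a/b", is_collection=True)   # '/a/b/'
preferred_path("/a/b", is_collection=False)  # '/a/b'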
6,476
def _prepare_facet_field_spies(self, facets): spies = [] for facet in facets: slot = self.column[facet] spy = xapian.ValueCountMatchSpy(slot) spy.slot = slot spies.append(spy) return spies
Returns a list of spies based on the facets used to count frequencies.
6,477
def java_potential_term(mesh, instructions): faces = to_java_ints(mesh.indexed_faces) edges = to_java_ints(mesh.indexed_edges) coords = to_java_doubles(mesh.coordinates) return _parse_field_arguments([instructions], faces, edges, coords)
java_potential_term(mesh, instructions) yields a Java object that implements the potential field described in the given list of instructions. Generally, this should not be invoked directly and should only be called by mesh_register. Note: this expects a single term's description, not a series of descriptions.
6,478
def scan(self, pattern): if self.eos: raise EndOfText() if pattern not in self._re_cache: self._re_cache[pattern] = re.compile(pattern, self.flags) self.last = self.match m = self._re_cache[pattern].match(self.data, self.pos) if m is None: return False self.start_pos = m.start() self.pos = m.end() self.match = m.group() return True
Scan the text for the given pattern and update pos/match and related fields. The return value is a boolean that indicates if the pattern matched. The matched value is stored on the instance as ``match``, the last value is stored as ``last``. ``start_pos`` is the position of the pointer before the pattern was matched, ``pos`` is the end position.
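A self-contained sketch that mirrors the scan() semantics above for experimentation; MiniScanner and its attributes are illustrative stand-ins for the original class, and the end-of-text error is omitted:

import re

class MiniScanner:
    # Minimal sketch mirroring the scan() behaviour shown above; not the original class.
    def __init__(self, data, flags=0):
        self.data, self.pos, self.flags = data, 0, flags
        self.match = self.last = None
        self.start_pos = 0
        self._re_cache = {}

    def scan(self, pattern):
        if pattern not in self._re_cache:
            self._re_cache[pattern] = re.compile(pattern, self.flags)
        self.last = self.match
        m = self._re_cache[pattern].match(self.data, self.pos)
        if m is None:
            return False
        self.start_pos, self.pos, self.match = m.start(), m.end(), m.group()
        return True

s = MiniScanner("foo 123")
s.scan(r"\w+") and s.match   # 'foo'
s.scan(r"\s+")               # True
s.scan(r"\d+") and s.match   # '123'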
6,479
def use_dev_config_dir(use_dev_config_dir=USE_DEV_CONFIG_DIR): if use_dev_config_dir is not None: if use_dev_config_dir.lower() in {, }: use_dev_config_dir = False else: use_dev_config_dir = DEV or not is_stable_version(__version__) return use_dev_config_dir
Return whether the dev configuration directory should be used.
6,480
def get_sequence_rules_for_assessment(self, assessment_id): def get_all_children_part_ids(part): child_ids = [] if part.has_children(): child_ids = list(part.get_child_assessment_part_ids()) for child in part.get_child_assessment_parts(): child_ids += get_all_children_part_ids(child) return child_ids all_assessment_part_ids = [] mgr = self._get_provider_manager(, local=True) lookup_session = mgr.get_assessment_lookup_session(proxy=self._proxy) lookup_session.use_federated_bank_view() assessment = lookup_session.get_assessment(assessment_id) if assessment.has_children(): mgr = self._get_provider_manager(, local=True) lookup_session = mgr.get_assessment_part_lookup_session(proxy=self._proxy) lookup_session.use_federated_bank_view() all_assessment_part_ids = list(assessment.get_child_ids()) for child_part_id in assessment.get_child_ids(): child_part = lookup_session.get_assessment_part(child_part_id) all_assessment_part_ids += get_all_children_part_ids(child_part) id_strs = [str(part_id) for part_id in all_assessment_part_ids] collection = JSONClientValidated(, collection=, runtime=self._runtime) result = collection.find( dict({: {: id_strs}}, **self._view_filter())) return objects.SequenceRuleList(result, runtime=self._runtime)
Gets a ``SequenceRuleList`` for an entire assessment. arg: assessment_id (osid.id.Id): an assessment ``Id`` return: (osid.assessment.authoring.SequenceRuleList) - the returned ``SequenceRule`` list raise: NullArgument - ``assessment_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
6,481
def configure(self, user_dn, group_dn, url=, case_sensitive_names=False, starttls=False, tls_min_version=, tls_max_version=, insecure_tls=False, certificate=None, bind_dn=None, bind_pass=None, user_attr=, discover_dn=False, deny_null_bind=True, upn_domain=None, group_filter=DEFAULT_GROUP_FILTER, group_attr=, mount_point=DEFAULT_MOUNT_POINT): params = { : user_dn, : group_dn, : url, : case_sensitive_names, : starttls, : tls_min_version, : tls_max_version, : insecure_tls, : certificate, : user_attr, : discover_dn, : deny_null_bind, : group_filter, : group_attr, } if upn_domain is not None: params[] = upn_domain if bind_dn is not None: params[] = bind_dn if bind_pass is not None: params[] = bind_pass if certificate is not None: params[] = certificate api_path = .format(mount_point=mount_point) return self._adapter.post( url=api_path, json=params, )
Configure the LDAP auth method. Supported methods: POST: /auth/{mount_point}/config. Produces: 204 (empty body) :param user_dn: Base DN under which to perform user search. Example: ou=Users,dc=example,dc=com :type user_dn: str | unicode :param group_dn: LDAP search base to use for group membership search. This can be the root containing either groups or users. Example: ou=Groups,dc=example,dc=com :type group_dn: str | unicode :param url: The LDAP server to connect to. Examples: ldap://ldap.myorg.com, ldaps://ldap.myorg.com:636. Multiple URLs can be specified with commas, e.g. ldap://ldap.myorg.com,ldap://ldap2.myorg.com; these will be tried in-order. :type url: str | unicode :param case_sensitive_names: If set, user and group names assigned to policies within the backend will be case sensitive. Otherwise, names will be normalized to lower case. Case will still be preserved when sending the username to the LDAP server at login time; this is only for matching local user/group definitions. :type case_sensitive_names: bool :param starttls: If true, issues a StartTLS command after establishing an unencrypted connection. :type starttls: bool :param tls_min_version: Minimum TLS version to use. Accepted values are tls10, tls11 or tls12. :type tls_min_version: str | unicode :param tls_max_version: Maximum TLS version to use. Accepted values are tls10, tls11 or tls12. :type tls_max_version: str | unicode :param insecure_tls: If true, skips LDAP server SSL certificate verification - insecure, use with caution! :type insecure_tls: bool :param certificate: CA certificate to use when verifying LDAP server certificate, must be x509 PEM encoded. :type certificate: str | unicode :param bind_dn: Distinguished name of object to bind when performing user search. Example: cn=vault,ou=Users,dc=example,dc=com :type bind_dn: str | unicode :param bind_pass: Password to use along with binddn when performing user search. :type bind_pass: str | unicode :param user_attr: Attribute on user attribute object matching the username passed when authenticating. Examples: sAMAccountName, cn, uid :type user_attr: str | unicode :param discover_dn: Use anonymous bind to discover the bind DN of a user. :type discover_dn: bool :param deny_null_bind: This option prevents users from bypassing authentication when providing an empty password. :type deny_null_bind: bool :param upn_domain: The userPrincipalDomain used to construct the UPN string for the authenticating user. The constructed UPN will appear as [username]@UPNDomain. Example: example.com, which will cause vault to bind as [email protected]. :type upn_domain: str | unicode :param group_filter: Go template used when constructing the group membership query. The template can access the following context variables: [UserDN, Username]. The default is `(|(memberUid={{.Username}})(member={{.UserDN}})(uniqueMember={{.UserDN}}))`, which is compatible with several common directory schemas. To support nested group resolution for Active Directory, instead use the following query: (&(objectClass=group)(member:1.2.840.113556.1.4.1941:={{.UserDN}})). :type group_filter: str | unicode :param group_attr: LDAP attribute to follow on objects returned by groupfilter in order to enumerate user group membership. Examples: for groupfilter queries returning group objects, use: cn. For queries returning user objects, use: memberOf. The default is cn. :type group_attr: str | unicode :param mount_point: The "path" the method/backend was mounted on. 
:type mount_point: str | unicode :return: The response of the configure request. :rtype: requests.Response
6,482
def allowed_domains(self): if self._allowed_domains is None: uri = "/loadbalancers/alloweddomains" resp, body = self.method_get(uri) dom_list = body["allowedDomains"] self._allowed_domains = [itm["allowedDomain"]["name"] for itm in dom_list] return self._allowed_domains
This property lists the allowed domains for a load balancer. The allowed domains are restrictions set for the allowed domain names used for adding load balancer nodes. In order to submit a domain name as an address for the load balancer node to add, the user must verify that the domain is valid by using the List Allowed Domains call. Once verified, simply supply the domain name in place of the node's address in the add_nodes() call.
6,483
def rate_limit(self, name, limit=5, per=60, debug=False): return RateLimit(self, name, limit, per, debug)
Rate limit implementation. Allows up to `limit` events every `per` seconds. See :ref:`rate-limit` for more information.
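For illustration only, an in-memory sliding-window limiter with the same "limit events per per seconds" behaviour; this is not the backing RateLimit implementation, just a sketch of the idea:

import time
from collections import deque

class SimpleRateLimit:
    def __init__(self, name, limit=5, per=60):
        self.name, self.limit, self.per = name, limit, per
        self._events = deque()

    def allowed(self):
        now = time.monotonic()
        # Drop events that have fallen out of the window, then check capacity.
        while self._events and now - self._events[0] > self.per:
            self._events.popleft()
        if len(self._events) < self.limit:
            self._events.append(now)
            return True
        return False

rl = SimpleRateLimit('api', limit=2, per=1.0)
[rl.allowed() for _ in range(3)]   # [True, True, False]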
6,484
def Issue(self, state, results): result = CheckResult() if results and all(isinstance(r, CheckResult) for r in results): result.ExtendAnomalies(results) else: result.anomaly = [ rdf_anomaly.Anomaly( type=anomaly_pb2.Anomaly.AnomalyType.Name( anomaly_pb2.Anomaly.ANALYSIS_ANOMALY), symptom=self.hint.Problem(state), finding=self.hint.Render(results), explanation=self.hint.Fix()) ] return result
Collect anomalous findings into a CheckResult. Comparisons with anomalous conditions collect anomalies into a single CheckResult message. The contents of the result varies depending on whether the method making the comparison is a Check, Method or Probe. - Probes evaluate raw host data and generate Anomalies. These are condensed into a new CheckResult. - Checks and Methods evaluate the results of probes (i.e. CheckResults). If there are multiple probe results, all probe anomalies are aggregated into a single new CheckResult for the Check or Method. Args: state: A text description of what combination of results were anomalous (e.g. some condition was missing or present.) results: Anomalies or CheckResult messages. Returns: A CheckResult message.
6,485
def print_runs(query): if query is None: return for tup in query: print(("{0} @ {1} - {2} id: {3} group: {4}".format( tup.end, tup.experiment_name, tup.project_name, tup.experiment_group, tup.run_group)))
Print all rows in this result query.
6,486
def render_source(self): return SOURCE_TABLE_HTML % u.join(line.render() for line in self.get_annotated_lines())
Render the sourcecode.
6,487
def get_accessible_time(self, plugin_override=True): vals = self._hook_manager.call_hook(, course=self.get_course(), task=self, default=self._accessible) return vals[0] if len(vals) and plugin_override else self._accessible
Get the accessible time of this task
6,488
def _try_get_state_scope(name, mark_name_scope_used=True): tmp_scope_name = tf_v1.get_variable_scope().name if tmp_scope_name: tmp_scope_name += "/" with tf.name_scope(tmp_scope_name): with tf_v1.variable_scope( None, default_name=name, auxiliary_name_scope=False) as vs: abs_state_scope = vs.name + "/" graph = tf_v1.get_default_graph() unique_name_scope = graph.unique_name(name, mark_name_scope_used) + "/" if unique_name_scope != abs_state_scope: raise RuntimeError( "variable_scope %s was unused but the corresponding " "name_scope was already taken." % abs_state_scope) return abs_state_scope
Returns a fresh variable/name scope for a module's state. In order to import a module into a given scope without major complications we require the scope to be empty. This function deals with deciding an unused scope where to define the module state. This is non trivial in cases where name_scope and variable_scopes are out of sync, e.g. tpus or re-entering scopes. Args: name: A string with the name of the module as supplied by the client. mark_name_scope_used: a boolean, indicating whether to mark the name scope of the returned value as used. Raises: RuntimeError: if the name scope of the freshly created variable scope is already used.
6,489
def tmp_configuration_copy(chmod=0o600): cfg_dict = conf.as_dict(display_sensitive=True, raw=True) temp_fd, cfg_path = mkstemp() with os.fdopen(temp_fd, 'w') as temp_file: if chmod is not None: os.fchmod(temp_fd, chmod) json.dump(cfg_dict, temp_file) return cfg_path
Returns a path for a temporary file including a full copy of the configuration settings. :return: a path to a temporary file
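A hedged usage sketch: since the file holds a JSON dump of the settings, the caller can read it back with json.load; removing the temporary file afterwards is assumed to be the caller's responsibility.

import json, os

cfg_path = tmp_configuration_copy(chmod=0o600)
try:
    with open(cfg_path) as fh:
        cfg = json.load(fh)   # full copy of the configuration as a dict
finally:
    os.remove(cfg_path)       # assumed cleanup; the function itself does not delete the file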
6,490
def add_advisor(self, name, ids=None, degree_type=None, record=None, curated=False): new_advisor = {} new_advisor[] = normalize_name(name) if ids: new_advisor[] = force_list(ids) if degree_type: new_advisor[] = degree_type if record: new_advisor[] = record new_advisor[] = curated self._append_to(, new_advisor)
Add an advisor. Args: :param name: full name of the advisor. :type name: string :param ids: list with the IDs of the advisor. :type ids: list :param degree_type: one of the allowed types of degree the advisor helped with. :type degree_type: string :param record: URI for the advisor. :type record: string :param curated: if the advisor relation has been curated i.e. has been verified. :type curated: boolean
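A minimal call sketch using only the documented parameters; the builder variable and the literal values are placeholders:

builder.add_advisor('Curie, Marie', degree_type='phd', curated=True)
# Appends a normalized advisor entry; ids and record can also be supplied when known.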
6,491
def parse_metadata(section): metadata = {} metadata_lines = section.split('\n') for line in metadata_lines: colon_index = line.find(':') if colon_index != -1: key = line[:colon_index].strip() val = line[colon_index + 1:].strip() metadata[key] = val return metadata
Given the first part of a slide, returns metadata associated with it.
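A usage sketch, assuming the newline/colon splitting restored above; lines without a colon are simply skipped:

section = "title: Intro to Widgets\nauthor: Jane Doe\nno colon here"
parse_metadata(section)
# {'title': 'Intro to Widgets', 'author': 'Jane Doe'}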
6,492
def take_while(self, predicate): if self.closed(): raise ValueError("Attempt to call take_while() on a closed " "Queryable.") if not is_callable(predicate): raise TypeError("take_while() parameter predicate={0} is " "not callable".format(repr(predicate))) return self._create(self._generate_take_while_result(predicate))
Returns elements from the start while the predicate is True. Note: This method uses deferred execution. Args: predicate: A function returning True or False with which elements will be tested. Returns: A Queryable over the elements from the beginning of the source sequence for which predicate is True. Raises: ValueError: If the Queryable is closed() TypeError: If the predicate is not callable.
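The semantics match the standard library's itertools.takewhile, which makes a quick mental model; the Queryable wrapper additionally provides deferred execution and the closed/callable checks shown above:

from itertools import takewhile

list(takewhile(lambda x: x < 5, [1, 2, 9, 3]))   # [1, 2] - stops at the first failing element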
6,493
def _ignore_path(cls, path, ignore_list=None, white_list=None): ignore_list = ignore_list or [] white_list = white_list or [] return (cls._matches_patterns(path, ignore_list) and not cls._matches_patterns(path, white_list))
Returns whether a path should be ignored or not.
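A standalone sketch of the same decision rule, assuming shell-style pattern matching for _matches_patterns (the actual matching helper is not shown here):

import fnmatch

def ignore_path(path, ignore_list=(), white_list=()):
    matches = lambda p, pats: any(fnmatch.fnmatch(p, pat) for pat in pats)
    # Ignored only if it hits the ignore list and is not rescued by the white list.
    return matches(path, ignore_list) and not matches(path, white_list)

ignore_path("build/app.log", ["*.log"])                # True
ignore_path("build/app.log", ["*.log"], ["build/*"])   # False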
6,494
def split_cl_function(cl_str): class Semantics: def __init__(self): self._return_type = self._function_name = self._parameter_list = [] self._cl_body = def result(self, ast): return self._return_type, self._function_name, self._parameter_list, self._cl_body def address_space(self, ast): self._return_type = ast.strip() + return ast def data_type(self, ast): self._return_type += .join(ast).strip() return ast def function_name(self, ast): self._function_name = ast.strip() return ast def arglist(self, ast): if ast != : self._parameter_list = ast return ast def body(self, ast): def join(items): result = for item in items: if isinstance(item, str): result += item else: result += join(item) return result self._cl_body = join(ast).strip()[1:-1] return ast return _split_cl_function_parser.parse(cl_str, semantics=Semantics())
Split a CL function into a return type, function name, parameter list and body. Args: cl_str (str): the CL code to parse and split into components Returns: tuple: string elements for the return type, function name, parameter list and the body
6,495
def ListField(field): original_get_from_instance = field.get_from_instance def get_from_instance(self, instance): for value in original_get_from_instance(instance): yield value field.get_from_instance = MethodType(get_from_instance, field) return field
This wraps a field so that when get_from_instance is called, the field's values are iterated over
6,496
def _get_from_c_api(): from ctypes import pythonapi, py_object PyDictProxy_New = pythonapi.PyDictProxy_New PyDictProxy_New.argtypes = (py_object,) PyDictProxy_New.restype = py_object class dictproxy(object): def __new__(cls, d): if not isinstance(d, dict): raise TypeError("dictproxy can only proxy to a real dict") return PyDictProxy_New(d) dictproxy(dict()) return _add_isinstance_tomfoolery(dictproxy)
dictproxy does exist in previous versions, but the Python constructor refuses to create new objects, so we must be underhanded and sneaky with ctypes.
6,497
def group_variant(self): v_mapping = {symdata.index: symdata.variant for symdata in self._symboldata_list} return v_mapping[self.group_num] or ""
Current group variant (get-only). :getter: Returns current group variant :type: str
6,498
def savetofile(self, filelike, sortkey = True): filelike.writelines(k + '=' + repr(v) + '\n' for k,v in self.config_items(sortkey))
Save configurations to a file-like object which supports `writelines`
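A usage sketch with an in-memory buffer; it assumes the key=value line format restored above and an object exposing config_items():

import io

buf = io.StringIO()
cfg.savetofile(buf)        # cfg is a placeholder for the configuration object
print(buf.getvalue())      # one "key=repr(value)" line per configuration item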
6,499
def _prep_vcf(in_file, region_bed, sample, new_sample, stats, work_dir, data): in_file = vcfutils.bgzip_and_index(in_file, data, remove_orig=False) out_file = os.path.join(work_dir, "%s-vprep.vcf.gz" % utils.splitext_plus(os.path.basename(in_file))[0]) if not utils.file_uptodate(out_file, in_file): callable_bed = _prep_callable_bed(region_bed, work_dir, stats, data) with file_transaction(data, out_file) as tx_out_file: ann_remove = _get_anns_to_remove(in_file) ann_str = " | bcftools annotate -x {ann_remove}" if ann_remove else "" cmd = ("bcftools view -T {callable_bed} -f --min-ac -s {sample} {in_file} " + ann_str + r"| sed " "| bgzip -c > {out_file}") do.run(cmd.format(**locals()), "Create SV validation VCF for %s" % new_sample) return vcfutils.bgzip_and_index(out_file, data["config"])
Prepare VCF for SV validation:
- Subset to passing variants
- Subset to genotyped variants -- removes reference and no calls
- Selects and names samples
- Subset to callable regions
- Remove larger annotations which slow down VCF processing