Columns: Unnamed: 0 (int64, values 0 to 389k) · code (string, lengths 26 to 79.6k) · docstring (string, lengths 1 to 46.9k)
7,800
def firsts(properties):
    result = {}
    for name in properties:
        elt_properties = properties[name]
        result[name] = elt_properties[0][1]
    return result
Transform a dictionary of {name: [(elt, value)+]} (resulting from get_properties) to a dictionary of {name: value} where each value is the first one encountered in the input properties. :param dict properties: properties to transform. :return: dictionary of parameter values by name. :rtype: dict
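A quick illustration of the transform this row documents; the input below is invented to match the docstring's {name: [(elt, value)+]} shape:

    properties = {'color': [('node1', 'red'), ('node2', 'blue')],
                  'size': [('node1', 10)]}
    firsts(properties)  # -> {'color': 'red', 'size': 10}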
7,801
def position_pnl(self):
    last_price = self._data_proxy.get_last_price(self._order_book_id)
    if self._direction == POSITION_DIRECTION.LONG:
        price_spread = last_price - self._last_price
    else:
        price_spread = self._last_price - last_price
    return self._logical_old_quantity * self._contract_multiplier * price_spread
[float] P&L from the previous day's position: the portion of the P&L the strategy generates on the current trading day that comes from positions carried over from the previous day.
7,802
def check_apps_permission(self, apps):
    for app in apps:
        if app in self.apps_dict:
            return True
    return False
Checks if one of apps is listed in apps_dict. Since apps_dict is derived from the app_list given by Django admin, it lists only the apps the user can view.
7,803
def _png(code, version, file, scale=1, module_color=(0, 0, 0, 255),
         background=(255, 255, 255, 255), quiet_zone=4, debug=False):
    import png

    # Coerce scale parameter into an integer
    try:
        scale = int(scale)
    except ValueError:
        raise ValueError('The scale parameter must be an integer')

    def scale_code(size):
        # Scale the code rows up by the given factor
        black = [0] * scale
        white = [1] * scale
        colors = (white, black, (([2] * scale) if debug else black))
        border_module = white * quiet_zone
        border_row = [[1] * size] * scale * quiet_zone

    # NOTE: the colour/palette setup between these fragments was stripped
    # from this row; greyscale, transparent_color, palette, bitdepth, f,
    # code_rows and autoclose are defined in that missing part.
    w = png.Writer(width=size, height=size, greyscale=greyscale,
                   transparent=transparent_color, palette=palette,
                   bitdepth=bitdepth)
    try:
        w.write(f, code_rows)
    finally:
        if autoclose:
            f.close()
See: pyqrcode.QRCode.png() This function was abstracted away from QRCode to allow for the output of QR codes during the build process, i.e. for debugging. It works just the same except you must specify the code's version. This is needed to calculate the PNG's size. This method will write the given file out as a PNG file. Note, it depends on the PyPNG module to do this. :param module_color: Color of the QR code (default: ``(0, 0, 0, 255)`` (black)) :param background: Optional background color. If set to ``None`` the PNG will have a transparent background. (default: ``(255, 255, 255, 255)`` (white)) :param quiet_zone: Border around the QR code (also known as quiet zone) (default: ``4``). Set to zero (``0``) if the code shouldn't have a border. :param debug: Indicates if errors in the QR code should be added (as red modules) to the output (default: ``False``).
7,804
def authenticate(self, request):
    try:
        oauth_request = oauth_provider.utils.get_oauth_request(request)
    except oauth.Error as err:
        raise exceptions.AuthenticationFailed(err.message)

    if not oauth_request:
        return None

    oauth_params = oauth_provider.consts.OAUTH_PARAMETERS_NAMES

    found = any(param for param in oauth_params if param in oauth_request)
    missing = list(param for param in oauth_params if param not in oauth_request)

    if not found:
        # OAuth authentication was not attempted.
        return None

    if missing:
        # OAuth was attempted but missing parameters.
        msg = 'Missing parameters: %s' % (', '.join(missing))
        raise exceptions.AuthenticationFailed(msg)

    if not self.check_nonce(request, oauth_request):
        msg = 'Nonce check failed'
        raise exceptions.AuthenticationFailed(msg)

    try:
        consumer_key = oauth_request.get_parameter('oauth_consumer_key')
        consumer = oauth_provider_store.get_consumer(request, oauth_request, consumer_key)
    except oauth_provider.store.InvalidConsumerError:
        msg = 'Invalid consumer token: %s' % oauth_request.get_parameter('oauth_consumer_key')
        raise exceptions.AuthenticationFailed(msg)

    if consumer.status != oauth_provider.consts.ACCEPTED:
        msg = 'Invalid consumer key status: %s' % consumer.get_status_display()
        raise exceptions.AuthenticationFailed(msg)

    try:
        token_param = oauth_request.get_parameter('oauth_token')
        token = oauth_provider_store.get_access_token(request, oauth_request, consumer, token_param)
    except oauth_provider.store.InvalidTokenError:
        msg = 'Invalid access token: %s' % oauth_request.get_parameter('oauth_token')
        raise exceptions.AuthenticationFailed(msg)

    try:
        self.validate_token(request, consumer, token)
    except oauth.Error as err:
        raise exceptions.AuthenticationFailed(err.message)

    user = token.user
    if not user.is_active:
        msg = 'User inactive or deleted: %s' % user.username
        raise exceptions.AuthenticationFailed(msg)

    return (token.user, token)
Returns two-tuple of (user, token) if authentication succeeds, or None otherwise.
7,805
def stonith_show(stonith_id, extra_args=None, cibfile=None):
    return item_show(item='stonith', item_id=stonith_id,
                     extra_args=extra_args, cibfile=cibfile)
Show the value of a cluster stonith.

stonith_id
    name for the stonith resource
extra_args
    additional options for the pcs stonith command
cibfile
    use cibfile instead of the live CIB

CLI Example:

.. code-block:: bash

    salt '*' pcs.stonith_show stonith_id='eps_fence' cibfile='/tmp/2_node_cluster.cib'
7,806
def as_view(cls, action_map=None, **initkwargs):
    if not action_map:
        raise TypeError("action_map is a required argument.")

    def view(request):
        self = cls(**initkwargs)
        self.request = request
        self.lookup_url_kwargs = self.request.matchdict
        self.action_map = action_map
        self.action = self.action_map.get(self.request.method.lower())
        for method, action in action_map.items():
            handler = getattr(self, action)
            setattr(self, method, handler)
        return self.dispatch(self.request, **self.request.matchdict)

    return view
Allows custom request to method routing based on given ``action_map`` kwarg.
7,807
def find_lexer_class_by_name(_alias):
    if not _alias:
        raise ClassNotFound('no lexer for alias %r found' % _alias)
    # lookup builtin lexers
    for module_name, name, aliases, _, _ in itervalues(LEXERS):
        if _alias.lower() in aliases:
            if name not in _lexer_cache:
                _load_lexers(module_name)
            return _lexer_cache[name]
    # continue with lexers from setuptools entrypoints
    for cls in find_plugin_lexers():
        if _alias.lower() in cls.aliases:
            return cls
    raise ClassNotFound('no lexer for alias %r found' % _alias)
Lookup a lexer class by alias. Like `get_lexer_by_name`, but does not instantiate the class. .. versionadded:: 2.2
7,808
async def release_lease_async(self, lease):
    lease_id = None
    try:
        _logger.info("Releasing lease %r %r", self.host.guid, lease.partition_id)
        lease_id = lease.token
        released_copy = AzureBlobLease()
        released_copy.with_lease(lease)
        released_copy.token = None
        released_copy.owner = None
        released_copy.state = None
        await self.host.loop.run_in_executor(
            self.executor,
            functools.partial(
                self.storage_client.create_blob_from_text,
                self.lease_container_name,
                lease.partition_id,
                json.dumps(released_copy.serializable()),
                lease_id=lease_id))
        await self.host.loop.run_in_executor(
            self.executor,
            functools.partial(
                self.storage_client.release_blob_lease,
                self.lease_container_name,
                lease.partition_id,
                lease_id))
    except Exception as err:
        _logger.error("Failed to release lease %r %r %r",
                      err, lease.partition_id, lease_id)
        return False
    return True
Give up a lease currently held by this host. If the lease has been stolen, or expired, releasing it is unnecessary, and will fail if attempted. :param lease: The stored lease to be released. :type lease: ~azure.eventprocessorhost.lease.Lease :return: `True` if the lease was released successfully, `False` if not. :rtype: bool
7,809
def get_customs_properties_by_inheritance(self, obj):
    for t_id in obj.templates:
        template = self.templates[t_id]
        tpl_cv = self.get_customs_properties_by_inheritance(template)
        if tpl_cv:
            for prop in tpl_cv:
                if prop not in obj.customs:
                    value = tpl_cv[prop]
                else:
                    value = obj.customs[prop]
                if obj.has_plus(prop):
                    value.insert(0, obj.get_plus_and_delete(prop))
                obj.customs[prop] = value
    for prop in obj.customs:
        value = obj.customs[prop]
        if obj.has_plus(prop):
            value.insert(0, obj.get_plus_and_delete(prop))
            obj.customs[prop] = value
    cust_in_plus = obj.get_all_plus_and_delete()
    for prop in cust_in_plus:
        obj.customs[prop] = cust_in_plus[prop]
    return obj.customs
Get custom properties from the templates defined in this object :param obj: the object to search the property :type obj: alignak.objects.item.Item :return: dictionary of custom properties :rtype: dict
7,810
def key_bytes(self):
    return self.key.private_bytes(
        encoding=serialization.Encoding.DER,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption(),
    )
Returns the raw signing key. :rtype: bytes
7,811
def add(self, host=None, f_community=None, f_access=None, f_version=None):
    return self.send.snmp_add(host, f_community, f_access, f_version)
Add an SNMP community string to a host :param host: t_hosts.id or t_hosts.f_ipaddr :param f_community: Community string to add :param f_access: READ or WRITE :param f_version: v1, v2c or v3 :return: (True/False, t_snmp.id/Error string)
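For reference, a call might look like this; the `api` client object and all argument values are illustrative, not taken from the project's docs:

    ok, snmp_id = api.add(host='10.0.0.1', f_community='public',
                          f_access='READ', f_version='v2c')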
7,812
def cwd(self):
    path = self.urlparts[2].encode(self.filename_encoding, 'replace')
    dirname = path.strip('/')
    dirs = dirname.split('/')
    filename = dirs.pop()
    self.url_connection.cwd('/')
    for d in dirs:
        self.url_connection.cwd(d)
    return filename
Change to URL parent directory. Return filename of last path component.
7,813
def get_func(name, argtypes=None, restype=c_int, lib=libNLPIR):
    # the exact wording of the log messages was stripped from this row
    logger.debug("Getting NLPIR API function: name: {0}, argtypes: {1},"
                 " restype: {2}.".format(name, argtypes, restype))
    func = getattr(lib, name)
    if argtypes is not None:
        func.argtypes = argtypes
    if restype is not c_int:
        func.restype = restype
    logger.debug("NLPIR API function {0} retrieved.".format(name))
    return func
Retrieves the corresponding NLPIR function. :param str name: The name of the NLPIR function to get. :param list argtypes: A list of :mod:`ctypes` data types that correspond to the function's argument types. :param restype: A :mod:`ctypes` data type that corresponds to the function's return type (only needed if the return type isn't :class:`ctypes.c_int`). :param lib: A :class:`ctypes.CDLL` instance for the NLPIR API library where the function will be retrieved from (defaults to :data:`libNLPIR`). :returns: The exported function. It can be called like any other Python callable.
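A hedged sketch of how such a lookup is typically used; the NLPIR export name and its signature below are illustrative assumptions, not guaranteed symbols of the real library:

    from ctypes import c_bool, c_char_p
    # process = get_func('NLPIR_ParagraphProcess', [c_char_p, c_bool], c_char_p)
    # process(b'some text', True) would then call into the shared library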
7,814
def rot1(theta):
    return np.array([
        [1, 0, 0],
        [0, np.cos(theta), np.sin(theta)],
        [0, -np.sin(theta), np.cos(theta)]
    ])
Args: theta (float): Angle in radians Return: Rotation matrix of angle theta around the X-axis
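A quick numerical check of the sign convention (the +sin term above the diagonal makes this the passive/frame-rotation form):

    import numpy as np
    v = np.array([0.0, 1.0, 0.0])
    rot1(np.pi / 2).dot(v)  # -> approximately [0., 0., -1.]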
7,815
def _emit(self, s):
    if os.path.exists(self._html_dir):
        self._report_file.write(s)
        self._report_file.flush()
Append content to the main report file.
7,816
def scalarmult_B(e):
    e %= L
    P = IDENT
    for i in range(253):
        if e & 1:
            P = edwards_add(P=P, Q=Bpow[i])
        e //= 2
    assert e == 0, e
    return P
Implements scalarmult(B, e) more efficiently.
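The loop above is a precomputed double-and-add: Bpow[i] is assumed to hold 2**i * B, so summing the entries selected by the bits of e yields e*B in at most 253 group additions instead of e-1 of them. The same structure sketched over plain integers, where addition stands in for edwards_add:

    def scalarmult_sketch(base, e, bits=253):
        pow2 = [base << i for i in range(bits)]  # stand-in for Bpow
        acc = 0                                  # stand-in for IDENT
        for i in range(bits):
            if e & 1:
                acc += pow2[i]
            e //= 2
        return acc

    assert scalarmult_sketch(7, 41) == 7 * 41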
7,817
def _setup(self):
    # 'roles-path' key reconstructed from molecule's ansible-galaxy options
    role_directory = os.path.join(self._config.scenario.directory,
                                  self.options['roles-path'])
    if not os.path.isdir(role_directory):
        os.makedirs(role_directory)
Prepare the system for using ``ansible-galaxy`` and return None. :return: None
7,818
def bkg_calc_interp1d(self, analytes=None, kind=1, n_min=10, n_max=None,
                      cstep=None, bkg_filter=False, f_win=7, f_n_lim=3,
                      focus_stage='despiked'):
    # NOTE: the string literals (focus stage default, Bunch keys and column
    # labels) were stripped from this row; the ones below are a best guess.
    if analytes is None:
        analytes = self.analytes
        self.bkg = Bunch()
    elif isinstance(analytes, str):
        analytes = [analytes]

    self.get_background(n_min=n_min, n_max=n_max,
                        bkg_filter=bkg_filter,
                        f_win=f_win, f_n_lim=f_n_lim,
                        focus_stage=focus_stage)

    def pad(a, lo=None, hi=None):
        if lo is None:
            lo = [a[0]]
        if hi is None:
            hi = [a[-1]]
        return np.concatenate((lo, a, hi))

    if 'calc' not in self.bkg.keys():
        bkg_t = pad(self.bkg['summary'].loc[:, ('uTime', 'mean')],
                    [0], [self.max_time])
        self.bkg['calc'] = Bunch()
        self.bkg['calc']['uTime'] = bkg_t

    d = self.bkg['summary']
    with self.pbar.set(total=len(analytes), desc='Calculating backgrounds') as prog:
        for a in analytes:
            self.bkg['calc'][a] = {'mean': pad(d.loc[:, (a, 'mean')].values),
                                   'std': pad(d.loc[:, (a, 'std')].values),
                                   'stderr': pad(d.loc[:, (a, 'stderr')].values)}
            prog.update()
    return
Background calculation using a 1D interpolation.

scipy.interpolate.interp1D is used for interpolation.

Parameters
----------
analytes : str or iterable
    Which analyte or analytes to calculate.
kind : str or int
    Integer specifying the order of the spline interpolation used, or
    string specifying a type of interpolation. Passed to
    `scipy.interpolate.interp1D`.
n_min : int
    Background regions with fewer than n_min points will not be included
    in the fit.
cstep : float or None
    The interval between calculated background points.
filter : bool
    If true, apply a rolling filter to the isolated background regions to
    exclude regions with anomalously high values. If True, two parameters
    alter the filter's behaviour:
f_win : int
    The size of the rolling window.
f_n_lim : float
    The number of standard deviations above the rolling mean to set the
    threshold.
focus_stage : str
    Which stage of analysis to apply processing to. Defaults to
    'despiked' if present, or 'rawdata' if not. Can be one of:
    * 'rawdata': raw data, loaded from csv file.
    * 'despiked': despiked data.
    * 'signal'/'background': isolated signal and background data.
      Created by self.separate, after signal and background regions
      have been identified by self.autorange.
    * 'bkgsub': background subtracted data, created by self.bkg_correct
    * 'ratios': element ratio data, created by self.ratio.
    * 'calibrated': ratio data calibrated to standards, created by self.calibrate.
7,819
def configure_delete(self, ns, definition):
    request_schema = definition.request_schema or Schema()

    @self.add_route(ns.instance_path, Operation.Delete, ns)
    @qs(request_schema)
    @wraps(definition.func)
    def delete(**path_data):
        headers = dict()
        request_data = load_query_string_data(request_schema)
        response_data = require_response_data(
            definition.func(**merge_data(path_data, request_data)))
        definition.header_func(headers, response_data)
        response_format = self.negotiate_response_content(definition.response_formats)
        return dump_response_data(
            "",
            None,
            status_code=Operation.Delete.value.default_code,
            headers=headers,
            response_format=response_format,
        )

    delete.__doc__ = "Delete a {} by id".format(ns.subject_name)
Register a delete endpoint. The definition's func should be a delete function, which must: - accept kwargs for path data - return truthy/falsey :param ns: the namespace :param definition: the endpoint definition
7,820
def visitObjectDef(self, ctx: jsgParser.ObjectDefContext):
    name = as_token(ctx)
    self._context.grammarelts[name] = JSGObjectExpr(self._context, ctx.objectExpr(), name)
objectDef: ID objectExpr
7,821
def select_sample(in_file, sample, out_file, config, filters=None):
    if not utils.file_exists(out_file):
        with file_transaction(config, out_file) as tx_out_file:
            if len(get_samples(in_file)) == 1:
                shutil.copy(in_file, tx_out_file)
            else:
                if in_file.endswith(".gz"):
                    bgzip_and_index(in_file, config)
                bcftools = config_utils.get_program("bcftools", config)
                output_type = "z" if out_file.endswith(".gz") else "v"
                filter_str = "-f %s" % filters if filters is not None else ""
                cmd = "{bcftools} view -O {output_type} {filter_str} {in_file} -s {sample} > {tx_out_file}"
                do.run(cmd.format(**locals()), "Select sample: %s" % sample)
    if out_file.endswith(".gz"):
        bgzip_and_index(out_file, config)
    return out_file
Select a single sample from the supplied multisample VCF file.
7,822
def find_external_urls(self, entry):
    soup = BeautifulSoup(entry.html_content, 'html.parser')
    external_urls = [a['href'] for a in soup.find_all('a')
                     if self.is_external_url(
                         a['href'], self.ressources.site_url)]
    return external_urls
Find external URLs in an entry.
7,823
def _getCharWidths(self, xref, bfname, ext, ordering, limit, idx=0):
    if self.isClosed or self.isEncrypted:
        raise ValueError("operation illegal for closed / encrypted doc")
    return _fitz.Document__getCharWidths(self, xref, bfname, ext,
                                         ordering, limit, idx)
Return list of glyphs and glyph widths of a font.
7,824
def parsedeglat(latstr):
    deg = _parsesexagesimal(latstr, 'latitude', True)
    if abs(deg) > 90:
        raise ValueError('illegal latitude specification: ' + latstr)
    return deg * D2R
Parse a latitude formatted as sexagesimal degrees into an angle. This function converts a textual representation of a latitude, measured in degrees, into a floating point value measured in radians. The format of *latstr* is very limited: it may not have leading or trailing whitespace, and the components of the sexagesimal representation must be separated by colons. The input must therefore resemble something like ``"-00:12:34.5"``. A :exc:`ValueError` will be raised if the input does not resemble this template. Latitudes greater than 90 or less than -90 degrees are not allowed.
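Assuming the helper behaves as the docstring describes, a call using its own template input looks like:

    parsedeglat('-00:12:34.5')  # -> about -0.003658 rad (-0.20958 deg converted to radians)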
7,825
def apply_config(self, config):
    self.haproxy_config_path = config["config_file"]

    global_stanza = Stanza("global")
    global_stanza.add_lines(config.get("global", []))
    global_stanza.add_lines([
        "stats socket %s mode 600 level admin" % config["socket_file"],
        "stats timeout 2m"
    ])

    defaults_stanza = Stanza("defaults")
    defaults_stanza.add_lines(config.get("defaults", []))

    proxy_stanzas = [
        ProxyStanza(
            name, proxy["port"], proxy["upstreams"],
            proxy.get("options", []), proxy.get("bind_address")
        )
        for name, proxy in six.iteritems(config.get("proxies", {}))
    ]

    stats_stanza = None
    if "stats" in config:
        stats_stanza = StatsStanza(
            config["stats"]["port"], config["stats"].get("uri", "/")
        )
        for timeout in ("client", "connect", "server"):
            if timeout in config["stats"].get("timeouts", {}):
                stats_stanza.add_line(
                    "timeout %s %d" % (
                        timeout, config["stats"]["timeouts"][timeout]
                    )
                )

    self.config_file = HAProxyConfig(
        global_stanza, defaults_stanza,
        proxy_stanzas=proxy_stanzas,
        stats_stanza=stats_stanza,
        meta_clusters=config.get("meta_clusters", {}),
        bind_address=config.get("bind_address")
    )

    self.control = HAProxyControl(
        config["config_file"], config["socket_file"], config["pid_file"],
    )
Constructs HAProxyConfig and HAProxyControl instances based on the contents of the config. This is mostly a matter of constructing the configuration stanzas.
7,826
def rename_feature(self, old_feature, new_feature):
    self._check_label(new_feature)
    self._rename_label('feature', old_feature, new_feature)
Change the label of a feature attached to the Bundle :parameter str old_feature: the current name of the feature (must exist) :parameter str new_feature: the desired new name of the feature (must not exist) :return: None :raises ValueError: if the new_feature is forbidden
7,827
def _compute_scale(self, instruction_id, svg_dict):
    bbox = list(map(float, svg_dict["svg"]["@viewBox"].split()))
    scale = self._zoom / (bbox[3] - bbox[1])
    self._symbol_id_to_scale[instruction_id] = scale
Compute the scale of an instruction svg. Compute the scale using the bounding box stored in the :paramref:`svg_dict`. The scale is saved in a dictionary using :paramref:`instruction_id` as key. :param str instruction_id: id identifying a symbol in the defs :param dict svg_dict: dictionary containing the SVG for the instruction currently processed
7,828
def write(self, writer):
    multiline = bool(self._children)
    newline_start = multiline and not bool(self.data)
    writer.start(self.tagname, self.attrs, newline=newline_start)
    if self.data:
        writer.data(self.data, newline=bool(self._children))
    for c in self._children:
        c.write(writer)
    writer.end(self.tagname, indent=multiline)
Writes an XML representation of this node (including descendants) to the specified file-like object. :param writer: An :class:`XmlWriter` instance to write this node to
7,829
def get_install_names(filename):
    lines = _cmd_out_err(['otool', '-L', filename])
    if not _line0_says_object(lines[0], filename):
        return ()
    names = tuple(parse_install_name(line)[0] for line in lines[1:])
    install_id = get_install_id(filename)
    if not install_id is None:
        assert names[0] == install_id
        return names[1:]
    return names
Return install names from library named in `filename`.

Returns tuple of install names; tuple will be empty if no install names,
or if this is not an object file.

Parameters
----------
filename : str
    filename of library

Returns
-------
install_names : tuple
    tuple of install names for library `filename`
7,830
def add_access_list(self, loadbalancer, access_list):
    req_body = {"accessList": access_list}
    uri = "/loadbalancers/%s/accesslist" % utils.get_id(loadbalancer)
    resp, body = self.api.method_post(uri, body=req_body)
    return body
Adds the access list provided to the load balancer. The 'access_list' should be a list of dicts in the following format: [{"address": "192.0.43.10", "type": "DENY"}, {"address": "192.0.43.11", "type": "ALLOW"}, ... {"address": "192.0.43.99", "type": "DENY"}, ] If no access list exists, it is created. If an access list already exists, it is updated with the provided list.
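A call shaped after the docstring's own payload; the `manager` and `lb` variables are placeholders:

    manager.add_access_list(lb, [{"address": "192.0.43.10", "type": "DENY"},
                                 {"address": "192.0.43.11", "type": "ALLOW"}])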
7,831
def make(parser):
    # NOTE: the sub-command names, help strings and option flags below were
    # stripped from this row; the literals used here are placeholders.
    s = parser.add_subparsers(
        title='subcommands',
        metavar='COMMAND',
        help='description',
    )

    def gen_pass_f(args):
        gen_pass()
    gen_pass_parser = s.add_parser('gen_pass', help='generate password')
    gen_pass_parser.set_defaults(func=gen_pass_f)

    def cmd_f(args):
        cmd(args.user, args.hosts.split(), args.key_filename,
            args.password, args.run)
    cmd_parser = s.add_parser('cmd', help='run command on remote hosts')
    cmd_parser.add_argument('--run', help='command to run', action='store',
                            default=None, dest='run')
    cmd_parser.set_defaults(func=cmd_f)
DEPRECATED prepare OpenStack basic environment
7,832
def ReadSerializedDict(cls, json_dict):
    if json_dict:
        json_object = cls._ConvertDictToObject(json_dict)
        if not isinstance(json_object, containers_interface.AttributeContainer):
            raise TypeError('unsupported serialized JSON object: {0!s}'.format(
                type(json_object)))
        return json_object
    return None
Reads an attribute container from serialized dictionary form. Args: json_dict (dict[str, object]): JSON serialized objects. Returns: AttributeContainer: attribute container or None. Raises: TypeError: if the serialized dictionary does not contain an AttributeContainer.
7,833
def get_context_data(self, **kwargs):
    context = super().get_context_data(**kwargs)
    context['poster'] = self.poster
    return context
Returns the context data to provide to the template.
7,834
def _needed_markup_bot(self):
    if not isinstance(self.reply_markup, (
            types.ReplyInlineMarkup, types.ReplyKeyboardMarkup)):
        return None
    for row in self.reply_markup.rows:
        for button in row.buttons:
            if isinstance(button, types.KeyboardButtonSwitchInline):
                if button.same_peer:
                    bot = self.input_sender
                    if not bot:
                        # error messages below reconstructed; the original
                        # strings were stripped from this row
                        raise ValueError('No input sender')
                    return bot
                else:
                    try:
                        return self._client._entity_cache[self.via_bot_id]
                    except KeyError:
                        raise ValueError('Bot not found in cache') from None
Returns the input peer of the bot that's needed for the reply markup. This is necessary for :tl:`KeyboardButtonSwitchInline` since we need to know what bot we want to start. Raises ``ValueError`` if the bot cannot be found but is needed. Returns ``None`` if it's not needed.
7,835
def more_like_this(self, query, fields, columns=None, start=0, rows=30):
    if isinstance(fields, basestring):
        mlt_fields = fields
    else:
        mlt_fields = ",".join(fields)
    if columns is None:
        columns = ["*", "score"]
    # NOTE: the Solr parameter names below were stripped from this row and
    # are reconstructed from the standard MoreLikeThis request parameters.
    fields = {
        'q': query,
        'qt': 'mlt',
        'mlt.fl': mlt_fields,
        'fl': ",".join(columns),
        'start': str(start),
        'rows': str(rows),
        'wt': "json"}
    if len(self.endpoints) > 1:
        fields["shards"] = self._get_shards()
    assert self.default_endpoint in self.endpoints
    request_url = _get_url(self.endpoints[self.default_endpoint], "mlt")
    results = self._send_solr_query(request_url, fields)
    if not results:
        return None
    assert "responseHeader" in results
    if not results.get("responseHeader").get("status") == 0:
        logger.error("Server error while retrieving results: %s", results)
        return None
    assert "response" in results
    result_obj = self._parse_response(results)
    return result_obj
Retrieves "more like this" results for a passed query document query - query for a document on which to base similar documents fields - fields on which to base similarity estimation (either comma delimited string or a list) columns - columns to return (list of strings) start - start number for first result (used in pagination) rows - number of rows to return (used for pagination, defaults to 30)
7,836
def update_device(name, **kwargs):
    kwargs = __utils__['args.clean_kwargs'](**kwargs)
    nb_device = _get('dcim', 'devices', auth_required=True, name=name)
    for k, v in kwargs.items():
        setattr(nb_device, k, v)
    try:
        nb_device.save()
        return {'dcim': {'devices': kwargs}}
    except RequestError as e:
        log.error('%s, %s, %s', e.req.request.headers, e.request_body, e.error)
        return False
.. versionadded:: 2019.2.0

Add attributes to an existing device, identified by name.

name
    The name of the device, e.g., ``edge_router``
kwargs
    Arguments to change in device, e.g., ``serial=JN2932930``

CLI Example:

.. code-block:: bash

    salt myminion netbox.update_device edge_router serial=JN2932920
7,837
def solubility_eutectic(T, Tm, Hm, Cpl=0, Cps=0, gamma=1):
    dCp = Cpl - Cps
    x = exp(-Hm/R/T*(1 - T/Tm) + dCp*(Tm - T)/R/T - dCp/R*log(Tm/T))/gamma
    return x
r'''Returns the maximum solubility of a solute in a solvent.

.. math::
    \ln x_i^L \gamma_i^L = \frac{\Delta H_{m,i}}{RT}\left(
    1 - \frac{T}{T_{m,i}}\right) - \frac{\Delta C_{p,i}(T_{m,i}-T)}{RT}
    + \frac{\Delta C_{p,i}}{R}\ln\frac{T_m}{T}

    \Delta C_{p,i} = C_{p,i}^L - C_{p,i}^S

Parameters
----------
T : float
    Temperature of the system [K]
Tm : float
    Melting temperature of the solute [K]
Hm : float
    Heat of melting at the melting temperature of the solute [J/mol]
Cpl : float, optional
    Molar heat capacity of the solute as a liquid [J/mol/K]
Cps : float, optional
    Molar heat capacity of the solute as a solid [J/mol/K]
gamma : float, optional
    Activity coefficient of the solute as a liquid [-]

Returns
-------
x : float
    Mole fraction of solute at maximum solubility [-]

Notes
-----
gamma is of the solute in liquid phase.

Examples
--------
From [1]_, matching example:

>>> solubility_eutectic(T=260., Tm=278.68, Hm=9952., Cpl=0, Cps=0, gamma=3.0176)
0.24340068761677464

References
----------
.. [1] Gmehling, Jurgen. Chemical Thermodynamics: For Process Simulation.
   Weinheim, Germany: Wiley-VCH, 2012.
7,838
def rotate_x(self, deg):
    rad = math.radians(deg)
    mat = numpy.array([
        [1, 0, 0, 0],
        [0, math.cos(rad), math.sin(rad), 0],
        [0, -math.sin(rad), math.cos(rad), 0],
        [0, 0, 0, 1]
    ])
    self.vectors = self.vectors.dot(mat)
    return self
Rotate mesh around x-axis :param float deg: Rotation angle (degree) :return:
7,839
def remove_account(self, name):
    acc_to_remove = None
    for a in self.accounts:
        if a.name == name:
            acc_to_remove = a
    if acc_to_remove is not None:
        self.accounts.remove(acc_to_remove)
Remove an account from the account's sub accounts. :param name: The name of the account to remove.
7,840
def build(self, builder):
    params = dict(MetaDataVersionOID=str(self.metadata_version_oid),
                  StudyOID="%s (%s)" % (self.projectname, self.environment,),
                  )
    self.mixin_params(params)
    builder.start("ClinicalData", params)
    if self.subject_data:
        for subject in self.subject_data:
            subject.build(builder)
    if self.annotations is not None:
        self.annotations.build(builder)
    builder.end("ClinicalData")
Build XML by appending to builder
7,841
def createdb():
    manager.db.engine.echo = True
    manager.db.create_all()
    set_alembic_revision()
Create database tables from sqlalchemy models
7,842
def _parse_header(line):
    parts = _parseparam(';' + line)
    key = parts.next()
    pdict = {}
    for p in parts:
        i = p.find('=')
        if i >= 0:
            name = p[:i].strip().lower()
            value = p[i+1:].strip()
            if len(value) >= 2 and value[0] == value[-1] == '"':
                value = value[1:-1]
                value = value.replace('\\\\', '\\').replace('\\"', '"')
            pdict[name] = value
    return key, pdict
Parse a Content-type like header. Return the main content-type and a dictionary of options.
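The behaviour mirrors the standard library's old cgi.parse_header helper, e.g.:

    key, pdict = _parse_header('form-data; name="files"; filename="f.txt"')
    # key == 'form-data', pdict == {'name': 'files', 'filename': 'f.txt'}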
7,843
def _write_section(self, fp, section_name, section_items, delimiter):
    fp.write("[{0}]\n".format(section_name))
    for key, value in section_items:
        value = self._interpolation.before_write(self, section_name, key,
                                                 value)
        if value is not None or not self._allow_no_value:
            value = delimiter + str(value).replace('\n', '\n\t')
        else:
            value = ""
        fp.write("{0}{1}\n".format(key, value))
    fp.write("\n")
Write a single section to the specified `fp'.
7,844
def coupling_efficiency(mode_solver, fibre_mfd, fibre_offset_x=0,
                        fibre_offset_y=0, n_eff_fibre=1.441):
    etas = []
    gaus = _make_gaussian(mode_solver._structure.xc,
                          mode_solver._structure.yc,
                          fibre_mfd, fibre_offset_x, fibre_offset_y)
    for mode, n_eff in zip(mode_solver.modes, mode_solver.n_effs):
        o = abs(_overlap(mode, gaus))
        t = abs(transmission(n_eff, n_eff_fibre))
        eta = o * t
        etas.append(eta)
    return etas
Finds the coupling efficiency between a solved fundamental mode and a fibre of given MFD. Args: mode_solver (_ModeSolver): Mode solver that has found a fundamental mode. fibre_mfd (float): The mode-field diameter (MFD) of the fibre. fibre_offset_x (float): Offset the fibre from the centre position of the window in x. Default is 0 (no offset). fibre_offset_y (float): Offset the fibre from the centre position of the window in y. Default is 0 (no offset). n_eff_fibre (float): The effective index of the fibre mode. Default is 1.441. Returns: float: The power coupling efficiency.
7,845
def get_default_config(self):
    config = super(NetfilterAccountingCollector, self).get_default_config()
    # NOTE: the option names and string values were stripped from this row;
    # the keys below are placeholders, not the collector's real options.
    config.update({
        'path': 'nfacct',
        'bin': 'nfacct',
        'use_sudo': False,
        'reset': True,
        'sudo_cmd': '/usr/bin/sudo',
        'method': 'Threaded',
    })
    return config
Returns default configuration options.
7,846
def fingerprint(dirnames, prefix=None, previous=[]):
    results = []
    for dirname in dirnames:
        for filename in os.listdir(dirname):
            fullpath = os.path.join(dirname, filename)
            if os.path.isdir(fullpath):
                results += fingerprint(
                    [fullpath], prefix=filename, previous=previous)
            else:
                fullname = fullpath
                if prefix and fullname.startswith(prefix):
                    fullname = fullname[len(prefix):]
                found = False
                for prevpath in previous:
                    if fullname == prevpath['Key']:
                        found = True
                        break
                if not found:
                    mtime = datetime.datetime.fromtimestamp(
                        os.path.getmtime(fullpath), tz=utc)
                    # strftime format reconstructed from the docstring example
                    results += [{"Key": fullname,
                                 "LastModified": mtime.strftime(
                                     '%a, %d %b %Y %H:%M:%S %Z')}]
    return results
Returns a list of paths available from *dirname*. When previous is specified, returns a list of additional files only. Example: [{ "Key": "abc.txt", "LastModified": "Mon, 05 Jan 2015 12:00:00 UTC"}, { "Key": "def.txt", "LastModified": "Mon, 05 Jan 2015 12:00:001 UTC"}, ]
7,847
def using(self, client):
    s = self._clone()
    s._using = client
    return s
Associate the search request with an elasticsearch client. A fresh copy will be returned with current instance remaining unchanged. :arg client: an instance of ``elasticsearch.Elasticsearch`` to use or an alias to look up in ``elasticsearch_dsl.connections``
7,848
def getWorkDirs():
    # NOTE: several string literals were stripped from this row; the path
    # components and log message marked below are reconstructed guesses.
    caller_fullurl = inspect.stack()[1][1]
    caller_relurl = os.path.relpath(caller_fullurl)
    caller_modurl = os.path.splitext(caller_relurl)[0]
    dirs = caller_modurl.split('/')
    dirs[0] = 'output'  # guessed
    outDir = os.path.join(*(['output'] + dirs[1:]))
    if not os.path.exists(outDir):
        os.makedirs(outDir)
    dirs.append('input')  # guessed
    inDir = os.path.join(*dirs)
    if not os.path.exists(inDir):
        logging.critical('create input dir %s first!' % inDir)
        sys.exit(1)
    return inDir, outDir
get input/output dirs (same input/output layout as for package)
7,849
def get_name_history(self, name, offset=None, count=None, reverse=False):
    cur = self.db.cursor()
    name_hist = namedb_get_history(cur, name, offset=offset, count=count,
                                   reverse=reverse)
    return name_hist
Get the historic states for a name, grouped by block height.
7,850
def remove_root_bank(self, bank_id):
    if self._catalog_session is not None:
        return self._catalog_session.remove_root_catalog(catalog_id=bank_id)
    return self._hierarchy_session.remove_root(id_=bank_id)
Removes a root bank from this hierarchy. arg: bank_id (osid.id.Id): the ``Id`` of a bank raise: NotFound - ``bank_id`` not a parent of ``child_id`` raise: NullArgument - ``bank_id`` or ``child_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
7,851
async def minizinc(
    mzn, *dzn_files, args=None, data=None, include=None, stdlib_dir=None,
    globals_dir=None, declare_enums=True, allow_multiple_assignments=False,
    keep=False, output_vars=None, output_base=None, output_mode='dict',
    solver=None, timeout=None, two_pass=None, pre_passes=None,
    output_objective=False, non_unique=False, all_solutions=False,
    num_solutions=None, free_search=False, parallel=None, seed=None,
    rebase_arrays=True, keep_solutions=True, return_enums=False,
    max_queue_size=0, **kwargs
):
    mzn_file, dzn_files, data_file, data, keep, _output_mode, types = \
        _minizinc_preliminaries(
            mzn, *dzn_files, args=args, data=data, include=include,
            stdlib_dir=stdlib_dir, globals_dir=globals_dir,
            output_vars=output_vars, keep=keep, output_base=output_base,
            output_mode=output_mode, declare_enums=declare_enums,
            allow_multiple_assignments=allow_multiple_assignments
        )

    if not solver:
        solver = config.get('solver', gecode)

    solver_args = {**kwargs, **config.get('solver_args', {})}

    proc = await solve(
        solver, mzn_file, *dzn_files, data=data, include=include,
        stdlib_dir=stdlib_dir, globals_dir=globals_dir,
        output_mode=_output_mode, timeout=timeout, two_pass=two_pass,
        pre_passes=pre_passes, output_objective=output_objective,
        non_unique=non_unique, all_solutions=all_solutions,
        num_solutions=num_solutions, free_search=free_search,
        parallel=parallel, seed=seed,
        allow_multiple_assignments=allow_multiple_assignments, **solver_args
    )

    if output_mode == 'raw':
        solns = asyncio.Queue(maxsize=max_queue_size)
        task = asyncio.create_task(_collect(proc, solns))
    else:
        parser = AsyncSolutionParser(
            solver, output_mode=output_mode, rebase_arrays=rebase_arrays,
            types=types, keep_solutions=keep_solutions,
            return_enums=return_enums, max_queue_size=max_queue_size
        )
        solns = await parser.parse(proc)
        task = parser.parse_task

    if not keep:
        task.add_done_callback(partial(_cleanup_cb, [mzn_file, data_file]))

    return solns
Coroutine version of the ``pymzn.minizinc`` function. Parameters ---------- max_queue_size : int Maximum number of solutions in the queue between the solution parser and the returned solution stream. When the queue is full, the solver execution will halt until an item of the queue is consumed. This option is useful for memory management in cases where the solution stream gets very large and the caller cannot consume solutions as fast as they are produced. Use with care, if the full solution stream is not consumed before the execution of the Python program ends it may result in the solver becoming a zombie process. Default is ``0``, meaning an infinite queue.
7,852
def single_run_arrays(spanning_cluster=True, **kwargs):
    # (dict keys recovered from the docstring: 'N', 'M', 'max_cluster_size',
    # 'has_spanning_cluster', 'moments')
    kwargs['copy_result'] = False
    ret = dict()

    for n, state in enumerate(sample_states(
            spanning_cluster=spanning_cluster, **kwargs
    )):
        if 'N' in ret:
            assert ret['N'] == state['N']
        else:
            ret['N'] = state['N']

        if 'M' in ret:
            assert ret['M'] == state['M']
        else:
            ret['M'] = state['M']
            number_of_states = state['M'] + 1
            max_cluster_size = np.empty(number_of_states)
            if spanning_cluster:
                has_spanning_cluster = np.empty(number_of_states, dtype=np.bool)
            moments = np.empty((5, number_of_states))

        max_cluster_size[n] = state['max_cluster_size']
        for k in range(5):
            moments[k, n] = state['moments'][k]
        if spanning_cluster:
            has_spanning_cluster[n] = state['has_spanning_cluster']

    ret['max_cluster_size'] = max_cluster_size
    ret['moments'] = moments
    if spanning_cluster:
        ret['has_spanning_cluster'] = has_spanning_cluster

    return ret
r''' Generate statistics for a single run

This is a stand-alone helper function to evolve a single sample state
(realization) and return the cluster statistics.

Parameters
----------
spanning_cluster : bool, optional
    Whether to detect a spanning cluster or not. Defaults to ``True``.
kwargs : keyword arguments
    Piped through to :func:`sample_states`

Returns
-------
ret : dict
    Cluster statistics
ret['N'] : int
    Total number of sites
ret['M'] : int
    Total number of bonds
ret['max_cluster_size'] : 1-D :py:class:`numpy.ndarray` of int, size ``ret['M'] + 1``
    Array of the sizes of the largest cluster (absolute number of sites) at
    the respective occupation number.
ret['has_spanning_cluster'] : 1-D :py:class:`numpy.ndarray` of bool, size ``ret['M'] + 1``
    Array of booleans for each occupation number. The respective entry is
    ``True`` if there is a spanning cluster, ``False`` otherwise. Only
    exists if `spanning_cluster` argument is set to ``True``.
ret['moments'] : 2-D :py:class:`numpy.ndarray` of int
    Array of shape ``(5, ret['M'] + 1)``. The ``(k, m)``-th entry is the
    ``k``-th raw moment of the (absolute) cluster size distribution, with
    ``k`` ranging from ``0`` to ``4``, at occupation number ``m``.

See Also
--------
sample_states
7,853
def get_transcript(self, gene_pk, refseq_id):
    "Get a transcript from the cache or add a new record."
    if not refseq_id:
        return

    transcript_pk = self.transcripts.get(refseq_id)
    if transcript_pk:
        return transcript_pk

    gene = Gene(pk=gene_pk)
    transcript = Transcript(refseq_id=refseq_id, gene=gene)
    try:
        transcript.save()
    except IntegrityError:
        transcript = Transcript.objects.get(refseq_id=refseq_id, gene=gene)
    self.transcripts[refseq_id] = transcript.pk
    return transcript.pk
Get a transcript from the cache or add a new record.
7,854
def get_partstudio_tessellatededges(self, did, wid, eid):
    # URL fragments reconstructed from the Onshape REST route for
    # tessellated edges; the literals were stripped from this row.
    return self._api.request('get', '/api/partstudios/d/' + did + '/w/' +
                             wid + '/e/' + eid + '/tessellatededges')
Gets the tessellation of the edges of all parts in a part studio. Args: - did (str): Document ID - wid (str): Workspace ID - eid (str): Element ID Returns: - requests.Response: Onshape response data
7,855
def defer(self, func, *args, **kwargs):
    # debug message texts reconstructed; originals were stripped
    if thread.get_ident() == self.broker_ident:
        _vv and IOLOG.debug('%r.defer() [immediate]', self)
        return func(*args, **kwargs)
    if self._broker._exitted:
        raise Error(self.broker_shutdown_msg)

    _vv and IOLOG.debug('%r.defer() [fd=%r]', self, self.transmit_side.fd)
    self._lock.acquire()
    try:
        if not self._deferred:
            self._wake()
        self._deferred.append((func, args, kwargs))
    finally:
        self._lock.release()
Arrange for `func()` to execute on the broker thread. This function returns immediately without waiting the result of `func()`. Use :meth:`defer_sync` to block until a result is available. :raises mitogen.core.Error: :meth:`defer` was called after :class:`Broker` has begun shutdown.
7,856
def from_data(room, conn, data):
    files = list()
    rooms = dict()
    msg = str()
    for part in data["message"]:
        ptype = part["type"]
        if ptype == "text":
            val = part["value"]
            msg += val
        elif ptype == "break":
            msg += "\n"
        elif ptype == "file":
            fileid = part["id"]
            fileobj = room.filedict.get(fileid)
            if fileobj:
                files += (fileobj,)
            fileid = f"@{fileid}"
            msg += fileid
        elif ptype == "room":
            roomid = part["id"]
            rooms[roomid] = part["name"]
            roomid = f"#{roomid}"
            msg += roomid
        elif ptype == "url":
            msg += part["text"]
        elif ptype == "raw":
            msg += html_to_text(part["value"])
        else:
            import warnings
            warnings.warn(f"unknown message type {ptype}", Warning)
    nick = data.get("nick") or data.get("user")
    options = data.get("options", dict())
    data = data.get("data", dict())
    message = ChatMessage(
        room, conn, nick, msg,
        roles=Roles.from_options(options),
        options=options,
        data=data,
        files=files,
        rooms=rooms,
    )
    return message
Construct a ChatMessage instance from raw protocol data
7,857
def create_record_sets(self, record_set_dicts):
    record_set_objects = []
    # the popped key name was stripped from this row; 'persist' is a guess
    for record_set_dict in record_set_dicts:
        if record_set_dict.pop('persist', True):
            record_set_objects.append(
                self.create_record_set(record_set_dict)
            )
    return record_set_objects
Accept list of record_set dicts. Return list of record_set objects.
7,858
def create_route(self, item, routes):
    for route in routes:
        self._routes.setdefault(route, set()).add(item)
    return item
Stores a new item in routing map
7,859
def parse_conditional_derived_variable(self, node):
    if 'name' in node.lattrib:
        name = node.lattrib['name']
    elif 'exposure' in node.lattrib:
        name = node.lattrib['exposure']
    else:
        self.raise_error('<ConditionalDerivedVariable> must specify a name')

    if 'exposure' in node.lattrib:
        exposure = node.lattrib['exposure']
    else:
        exposure = None

    if 'dimension' in node.lattrib:
        dimension = node.lattrib['dimension']
    else:
        dimension = None

    conditional_derived_variable = ConditionalDerivedVariable(name, dimension, exposure)
    self.current_regime.add_conditional_derived_variable(conditional_derived_variable)

    self.current_conditional_derived_variable = conditional_derived_variable
    self.process_nested_tags(node)
Parses <ConditionalDerivedVariable> @param node: Node containing the <ConditionalDerivedVariable> element @type node: xml.etree.Element @raise ParseError: Raised when no name or value is specified for the conditional derived variable.
7,860
def _groupby_consecutive(txn, max_delta=pd.Timedelta('8h')):
    def vwap(transaction):
        if transaction.amount.sum() == 0:
            warnings.warn('Zero transacted shares, setting vwap to nan.')
            return np.nan
        return (transaction.amount * transaction.price).sum() / \
            transaction.amount.sum()

    out = []
    for sym, t in txn.groupby('symbol'):
        t = t.sort_index()
        t.index.name = 'dt'
        t = t.reset_index()

        t['order_sign'] = t.amount > 0
        t['block_dir'] = (t.order_sign.shift(
            1) != t.order_sign).astype(int).cumsum()
        t['block_time'] = ((t.dt.sub(t.dt.shift(1))) >
                           max_delta).astype(int).cumsum()
        grouped_price = (t.groupby(('block_dir', 'block_time'))
                         .apply(vwap))
        grouped_price.name = 'price'
        grouped_rest = t.groupby(('block_dir', 'block_time')).agg({
            'amount': 'sum',
            'symbol': 'first',
            'dt': 'first'})

        grouped = grouped_rest.join(grouped_price)

        out.append(grouped)

    out = pd.concat(out)
    out = out.set_index('dt')
    return out
Merge transactions of the same direction separated by less than
max_delta time duration.

Parameters
----------
transactions : pd.DataFrame
    Prices and amounts of executed round_trips. One row per trade.
    - See full explanation in tears.create_full_tear_sheet
max_delta : pandas.Timedelta (optional)
    Merge transactions in the same direction separated by less than
    max_delta time duration.

Returns
-------
transactions : pd.DataFrame
7,861
async def plonks(self, ctx):
    # config key, join separator and failure message reconstructed;
    # the original literals were stripped from this row
    plonks = self.config.get('plonks', {})
    guild = ctx.message.server
    db = plonks.get(guild.id, [])
    members = '\n'.join(map(str, filter(None, map(guild.get_member, db))))
    if members:
        await self.bot.responses.basic(title="Plonked Users:", message=members)
    else:
        await self.bot.responses.failure(message="No users are plonked.")
Shows members banned from the bot.
7,862
def get_schema(self, dataset_id, table_id):
    tables_resource = self.service.tables() \
        .get(projectId=self.project_id, datasetId=dataset_id, tableId=table_id) \
        .execute(num_retries=self.num_retries)
    return tables_resource['schema']
Get the schema for a given dataset.table. see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource :param dataset_id: the dataset ID of the requested table :param table_id: the table ID of the requested table :return: a table schema
7,863
def remove(self, interval):
    done = []
    return self.remove_interval_helper(interval, done, should_raise_error=True)
Returns self after removing the interval and balancing. If interval is not present, raise ValueError.
7,864
def getFixedStars(self):
    IDs = const.LIST_FIXED_STARS
    return ephem.getFixedStarList(IDs, self.date)
Returns a list with all fixed stars.
7,865
def _set_target_root_count_in_runtracker(self):
    target_count = len(self._target_roots)
    self.run_tracker.pantsd_stats.set_target_root_size(target_count)
    return target_count
Sets the target root count in the run tracker's daemon stats object.
7,866
def addSuccess(self, test: unittest.case.TestCase) -> None:
    self.add_result(TestState.success, test)
Transforms the test in a serializable version of it and sends it to a queue for further analysis :param test: the test to save
7,867
def register_work(self, work, deps=None, manager=None, workdir=None):
    if getattr(self, "workdir", None) is not None:
        # The flow has a working directory; build one for the work.
        work_workdir = None
        if workdir is None:
            work_workdir = os.path.join(self.workdir, "w" + str(len(self)))
        else:
            work_workdir = os.path.join(self.workdir, os.path.basename(workdir))
        work.set_workdir(work_workdir)

    if manager is not None:
        work.set_manager(manager)

    self.works.append(work)

    if deps:
        deps = [Dependency(node, exts) for node, exts in deps.items()]
        work.add_deps(deps)

    return work
Register a new :class:`Work` and add it to the internal list, taking into account possible dependencies. Args: work: :class:`Work` object. deps: List of :class:`Dependency` objects specifying the dependency of this node. An empty list of deps implies that this node has no dependencies. manager: The :class:`TaskManager` responsible for the submission of the task. If manager is None, we use the `TaskManager` specified during the creation of the work. workdir: The name of the directory used for the :class:`Work`. Returns: The registered :class:`Work`.
7,868
def restore_app_connection(self, port=None):
    self.host_port = port or utils.get_available_host_port()
    self._adb.forward(
        ['tcp:%d' % self.host_port, 'tcp:%d' % self.device_port])
    try:
        self.connect()
    except:
        # Log the original error and raise AppRestoreConnectionError.
        self.log.exception('Failed to re-connect to app.')
        raise jsonrpc_client_base.AppRestoreConnectionError(
            self._ad,
            ('Failed to restore app connection for %s at host port %s, '
             'device port %s') % (self.package, self.host_port,
                                  self.device_port))

    self._proc = None
    self._restore_event_client()
Restores the app after device got reconnected. Instead of creating new instance of the client: - Uses the given port (or find a new available host_port if none is given). - Tries to connect to remote server with selected port. Args: port: If given, this is the host port from which to connect to remote device port. If not provided, find a new available port as host port. Raises: AppRestoreConnectionError: When the app was not able to be started.
7,869
def export_account_state(self, account_state):
    # NOTE: the dict keys and format strings were stripped from this row
    # and are not recoverable; the gaps are left in place rather than
    # invented.
    return {
        : account_state[],
        : account_state[],
        : .format(account_state[]),
        : .format(account_state[]),
        : account_state[],
        : account_state[],
        : account_state[],
        : account_state[],
    }
Make an account state presentable to external consumers
7,870
def grantSystemPermission(self, login, user, perm):
    self.send_grantSystemPermission(login, user, perm)
    self.recv_grantSystemPermission()
Parameters: - login - user - perm
7,871
def survey_loader(sur_dir=SUR_DIR, sur_file=SUR_FILE):
    survey_path = os.path.join(sur_dir, sur_file)
    survey = None
    with open(survey_path) as survey_file:
        survey = Survey(survey_file.read())
    return survey
Loads up the given survey in the given dir.
7,872
def list_settings(self):
    # NOTE: the default string values were stripped from this row; the
    # ' ' and '{}' style defaults below are minimal guesses.
    return [
        (self.SETTING_FLAG_PLAIN, False),
        (self.SETTING_FLAG_ASCII, False),
        (self.SETTING_WIDTH, 0),
        (self.SETTING_ALIGN, '<'),
        (self.SETTING_TEXT_FORMATING, {}),
        (self.SETTING_DATA_FORMATING, '{}'),
        (self.SETTING_DATA_TYPE, None),
        (self.SETTING_PADDING, None),
        (self.SETTING_PADDING_CHAR, ' '),
        (self.SETTING_PADDING_LEFT, None),
        (self.SETTING_PADDING_RIGHT, None),
        (self.SETTING_MARGIN, None),
        (self.SETTING_MARGIN_CHAR, ' '),
        (self.SETTING_MARGIN_LEFT, None),
        (self.SETTING_MARGIN_RIGHT, None),
    ]
Get list of all appropriate settings and their default values. The returned list is then used in setup() and get_setup() methods to setup the widget internal settings.
7,873
def stdout(self):
    stdout = self._args['--stdout']
    if stdout:
        return True

    stdout_env = os.getenv('CROSSPM_STDOUT', None)
    if stdout_env is not None:
        return True

    return False
The --stdout flag can also be taken from the CROSSPM_STDOUT environment variable. Any value present in CROSSPM_STDOUT is interpreted as True. :return:
7,874
def _actionsFreqsAngles(self, *args, **kwargs):
    delta = kwargs.pop('delta', self._delta)
    order = kwargs.get('order', self._order)
    if ((self._c and not ('c' in kwargs and not kwargs['c']))
            or (ext_loaded and (('c' in kwargs and kwargs['c'])))) \
            and _check_c(self._pot):
        if len(args) == 5:  # R, vR, vT, z, vz
            raise IOError("Must specify phi")
        elif len(args) == 6:  # R, vR, vT, z, vz, phi
            R, vR, vT, z, vz, phi = args
        else:
            self._parse_eval_args(*args)
            R = self._eval_R
            vR = self._eval_vR
            vT = self._eval_vT
            z = self._eval_z
            vz = self._eval_vz
            phi = self._eval_phi
        if isinstance(R, float):
            R = nu.array([R])
            vR = nu.array([vR])
            vT = nu.array([vT])
            z = nu.array([z])
            vz = nu.array([vz])
            phi = nu.array([phi])
        Lz = R * vT
        if self._useu0:
            # Use u0 provided, otherwise compute a good value
            if 'u0' in kwargs:
                u0 = nu.asarray(kwargs['u0'])
            else:
                E = nu.array([_evaluatePotentials(self._pot, R[ii], z[ii])
                              + vR[ii]**2./2. + vz[ii]**2./2. + vT[ii]**2./2.
                              for ii in range(len(R))])
                u0 = actionAngleStaeckel_c.actionAngleStaeckel_calcu0(
                    E, Lz, self._pot, delta)[0]
            kwargs.pop('u0', None)
        else:
            u0 = None
        jr, jz, Omegar, Omegaphi, Omegaz, angler, anglephi, anglez, err = \
            actionAngleStaeckel_c.actionAngleFreqAngleStaeckel_c(
                self._pot, delta, R, vR, vT, z, vz, phi, u0=u0, order=order)
        # Fall back on epicycle frequencies for near-circular orbits where
        # the C code returned NaN
        indx = nu.isnan(Omegar)*(jr < 10.**-3.) + nu.isnan(Omegaz)*(jz < 10.**-3.)
        if nu.sum(indx) > 0:
            Omegar[indx] = [epifreq(self._pot, r, use_physical=False)
                            for r in R[indx]]
            Omegaphi[indx] = [omegac(self._pot, r, use_physical=False)
                              for r in R[indx]]
            Omegaz[indx] = [verticalfreq(self._pot, r, use_physical=False)
                            for r in R[indx]]
        if err == 0:
            return (jr, Lz, jz, Omegar, Omegaphi, Omegaz,
                    angler, anglephi, anglez)
        else:
            raise RuntimeError("C-code for calculation actions failed; try with c=False")
    else:
        if 'c' in kwargs and kwargs['c'] and not self._c:
            warnings.warn("C module not used because potential does not have a C implementation", galpyWarning)
        raise NotImplementedError("actionsFreqs with c=False not implemented")
NAME:

    actionsFreqsAngles (_actionsFreqsAngles)

PURPOSE:

    evaluate the actions, frequencies, and angles (jr,lz,jz,Omegar,Omegaphi,Omegaz,angler,anglephi,anglez)

INPUT:

    Either:

        a) R,vR,vT,z,vz[,phi]:
            1) floats: phase-space value for single object (phi is optional) (each can be a Quantity)
            2) numpy.ndarray: [N] phase-space values for N objects (each can be a Quantity)
        b) Orbit instance: initial condition used if that's it, orbit(t) if there is a time given as well as the second argument

    delta= (object-wide default) can be used to override the object-wide focal length; can also be an array with length N to allow different delta for different phase-space points

    u0= (None) if object-wide option useu0 is set, u0 to use (if useu0 and useu0 is None, a good value will be computed)

    c= (object-wide default, bool) True/False to override the object-wide setting for whether or not to use the C implementation

    order= (10) number of points to use in the Gauss-Legendre numerical integration of the relevant action, frequency, and angle integrals

    When not using C:

        fixed_quad= (False) if True, use Gaussian quadrature (scipy.integrate.fixed_quad instead of scipy.integrate.quad)

        scipy.integrate.fixed_quad or .quad keywords

OUTPUT:

    (jr,lz,jz,Omegar,Omegaphi,Omegaz,angler,anglephi,anglez)

HISTORY:

    2013-08-28 - Written - Bovy (IAS)
7,875
def mask_by_linear_ind(self, linear_inds):
    inds = self.linear_to_ij(linear_inds)
    return self.mask_by_ind(inds)
Create a new image by zeroing out data at locations not in the given indices.

Parameters
----------
linear_inds : :obj:`numpy.ndarray` of int
    A list of linear coordinates.

Returns
-------
:obj:`Image`
    A new Image of the same type, with data not indexed by inds set to zero.
7,876
def add_comes_from(self, basic_block):
    if basic_block is None:
        return
    if self.lock:
        return
    if basic_block in self.comes_from:
        return
    self.lock = True
    self.comes_from.add(basic_block)
    basic_block.add_goes_to(self)
    self.lock = False
This simulates a set. Adds the basic_block to the comes_from list if not done already.
7,877
def libvlc_video_set_key_input(p_mi, on):
    f = _Cfunctions.get('libvlc_video_set_key_input', None) or \
        _Cfunction('libvlc_video_set_key_input', ((1,), (1,),), None,
                   None, MediaPlayer, ctypes.c_uint)
    return f(p_mi, on)
Enable or disable key press events handling, according to the LibVLC hotkeys configuration. By default and for historical reasons, keyboard events are handled by the LibVLC video widget. @note: On X11, there can be only one subscriber for key press and mouse click events per window. If your application has subscribed to those events for the X window ID of the video widget, then LibVLC will not be able to handle key presses and mouse clicks in any case. @warning: This function is only implemented for X11 and Win32 at the moment. @param p_mi: the media player. @param on: true to handle key press events, false to ignore them.
7,878
def extract(self, doc):
    if isinstance(self.jsonpaths, JSONPath):
        input_field = self.extractor.get_renamed_input_fields()
        if isinstance(self.extractor.get_renamed_input_fields(), list):
            input_field = input_field[0]
        jsonpath = self.jsonpaths
        renamed_inputs = dict()
        if self.flat_map_inputs:
            flat_mapped = itertools.chain.from_iterable(
                [iter(match.value)
                 if hasattr(match.value, '__iter__')
                 and not isinstance(match.value, dict)
                 and not isinstance(match.value, basestring)
                 else iter([match.value])
                 for match in jsonpath.find(doc)])
            renamed_inputs[input_field] = flat_mapped
            if input_field in renamed_inputs:
                self.extract_from_renamed_inputs(doc, renamed_inputs)
        else:
            for value in [match.value for match in jsonpath.find(doc)]:
                renamed_inputs[input_field] = value
                self.extract_from_renamed_inputs(doc, renamed_inputs)
    elif isinstance(self.jsonpaths, types.ListType):
        renamed_inputs_lists = dict()
        for jsonpath, renamed_input in \
                itertools.izip(
                    iter(self.jsonpaths),
                    iter(self.extractor.get_renamed_input_fields())):
            renamed_inputs_lists[renamed_input] = [
                match.value for match in jsonpath.find(doc)]
        if self.flat_map_inputs:
            renamed_inputs_tuple_lists = [
                (x, itertools.chain.from_iterable(
                    [iter(z)
                     if hasattr(z, '__iter__')
                     and not isinstance(z, dict)
                     and not isinstance(z, basestring)
                     else iter([z]) for z in y]))
                for x, y in renamed_inputs_lists.iteritems()]
            renamed_inputs = reduce(
                ExtractorProcessor.add_tuple_to_doc,
                renamed_inputs_tuple_lists, dict())
            self.extract_from_renamed_inputs(doc, renamed_inputs)
        else:
            renamed_inputs_lists_lists = [
                [(x, z) for z in y]
                for x, y in renamed_inputs_lists.iteritems()]
            for i in itertools.product(*renamed_inputs_lists_lists):
                renamed_inputs = reduce(
                    ExtractorProcessor.add_tuple_to_doc, i, dict())
                self.extract_from_renamed_inputs(doc, renamed_inputs)
    else:
        raise ValueError("input_fields must be a string or a list")
    return doc
From the defined JSONPath(s), pull out the values and insert them into a document with renamed field(s) then apply the Extractor and return the doc with the extracted values
7,879
def finished(experiment_name, reset=True):
    if _exclude_visitor():
        return
    redis = _get_redis_connection()
    try:
        experiment = Experiment.find(redis, experiment_name)
        if not experiment:
            return
        alternative_name = _get_session().get(experiment.key)
        if alternative_name:
            split_finished = set(session.get('split_finished', []))
            if experiment.key not in split_finished:
                alternative = Alternative(
                    redis, alternative_name, experiment_name)
                alternative.increment_completion()
            if reset:
                _get_session().pop(experiment.key, None)
                try:
                    split_finished.remove(experiment.key)
                except KeyError:
                    pass
            else:
                split_finished.add(experiment.key)
            session['split_finished'] = list(split_finished)
    except ConnectionError:
        if not current_app.config['SPLIT_DB_FAILOVER']:
            raise
Track a conversion. :param experiment_name: Name of the experiment. :param reset: If set to `True` current user's session is reset so that they may start the test again in the future. If set to `False` the user will always see the alternative they started with. Defaults to `True`.
7,880
def set_copy_mode(self, use_copy: bool):
    for group in self.rootItem.children:
        for proto in group.children:
            proto.copy_data = use_copy
Set all protocols in copy mode. They will return a copy of their protocol. This is used for writable mode in CFC. :param use_copy: :return:
7,881
def stop(self):
    if self._stop_event.ready():
        return
    self._stop_event.set()
    self._global_send_event.set()

    for retrier in self._address_to_retrier.values():
        if retrier:
            retrier.notify()

    self._client.set_presence_state(UserPresence.OFFLINE.value)
    self._client.stop_listener_thread()

    del self.log
Try to gracefully stop the greenlet synchronously Stop isn't expected to re-raise greenlet _run exception (use self.greenlet.get() for that), but it should raise any stop-time exception
7,882
def parse_args(args=None):
    # NOTE: the option strings and the keys into the ds.* constants were
    # stripped from this row; the flags and keys below are placeholders.
    parser = argparse.ArgumentParser(description=ds.ARGPARSER['description'])
    parser.add_argument('input', help=ds.ARGPARSE_INPUT['help'])
    parser.add_argument('output', nargs='?', help=ds.ARGPARSE_OUTPUT['help'],
                        default=ds.ARGPARSE_OUTPUT['default'])
    parser.add_argument('-o', '--overwrite', help=ds.ARGPARSE_OVERWRITE['help'],
                        action='store_true')
    parser.add_argument('-e', '--extension', nargs='?',
                        default=ds.ARGPARSE_EXTENSION['default'],
                        help=ds.ARGPARSE_EXTENSION['help'])
    parser.add_argument('-w', '--wrapper', help=ds.ARGPARSE_WRAPPER['help'],
                        default=ds.ARGPARSE_WRAPPER['default'])
    parser.add_argument('-v', '--verbose', help=ds.ARGPARSE_VERBOSE['help'],
                        action='store_true')
    parser.add_argument('-r', '--recursive', help=ds.ARGPARSE_RECURSIVE['help'],
                        action='store_true', dest='recursive')
    parser.add_argument('--version', action='version', version=ah.__version__)
    if args is not None:
        return parser.parse_args(args)
    else:
        return parser.parse_args()
Parse arguments provided as a list of strings, and return a namespace with parameter names matching the arguments :param args: List of strings to be parsed as command-line arguments. If none, reads in sys.argv as the values. :return: a namespace containing arguments values
7,883
def emit(self, record):
    try:
        # trim the "rafcon." prefix from the logger name
        if sys.version_info >= (2, 7):
            record.__setattr__("name", record.name.replace("rafcon.", ""))
        msg = self.format(record)
        fs = "%s"
        try:
            ufs = u"%s"
            try:
                entry = ufs % msg
            except UnicodeEncodeError:
                entry = fs % msg
        except UnicodeError:
            entry = fs % msg
        for logging_view in self._logging_views.values():
            logging_view.print_message(entry, record.levelno)
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        self.handleError(record)
Logs a new record If a logging view is given, it is used to log the new record to. The code is partially copied from the StreamHandler class. :param record: :return:
7,884
def setaty(self, content):
    name = 'arrayType'
    ns = (None, 'http://schemas.xmlsoap.org/soap/encoding/')
    aty = content.node.get(name, ns)
    if aty is not None:
        content.aty = aty
        parts = aty.split('[')
        ref = parts[0]
        if len(parts) == 2:
            self.applyaty(content, ref)
        else:
            pass  # (unsupported, partially qualified arrayType)
    return self
Grab the (aty) soap-enc:arrayType and attach it to the content for proper array processing later in end(). @param content: The current content being unmarshalled. @type content: L{Content} @return: self @rtype: L{Encoded}
7,885
def Main(url, similarity_mode="TfIdfCosine", similarity_limit=0.75):
    web_scrape = WebScraping()
    web_scrape.readable_web_pdf = WebPDFReading()
    document = web_scrape.scrape(url)

    if similarity_mode == "TfIdfCosine":
        similarity_filter = TfIdfCosine()
    elif similarity_mode == "Dice":
        similarity_filter = Dice()
    elif similarity_mode == "Jaccard":
        similarity_filter = Jaccard()
    elif similarity_mode == "Simpson":
        similarity_filter = Simpson()
    else:
        raise ValueError("similarity_mode is not supported.")

    nlp_base = NlpBase()
    nlp_base.tokenizable_doc = MeCabTokenizer()
    similarity_filter.nlp_base = nlp_base
    similarity_filter.similarity_limit = similarity_limit

    auto_abstractor = AutoAbstractor()
    auto_abstractor.tokenizable_doc = MeCabTokenizer()
    abstractable_doc = TopNRankAbstractor()
    result_dict = auto_abstractor.summarize(document, abstractable_doc, similarity_filter)
    [print(result_dict["summarize_result"][i])
     for i in range(len(result_dict["summarize_result"])) if i < 3]
Entry Point. Args: url: PDF url.
7,886
def predraw(self):
    self.cam = self.view.cam
    super(LayerWorld, self).predraw()
Sets up the attributes used by :py:class:`Layer3D()` and calls :py:meth:`Layer3D.predraw()`\ .
7,887
def ratelimit_remaining(self):
    json = self._json(self._get(self._github_url + '/rate_limit'), 200)
    core = json.get('resources', {}).get('core', {})
    self._remaining = core.get('remaining', 0)
    return self._remaining
Number of requests before GitHub imposes a ratelimit. :returns: int
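For context, the property reads GitHub's standard /rate_limit payload, whose relevant shape (abridged) is:

    payload = {"resources": {"core": {"limit": 5000, "remaining": 4987}}}
    payload.get("resources", {}).get("core", {}).get("remaining", 0)  # -> 4987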
7,888
def hlen(key, host=None, port=None, db=None, password=None):
    server = _connect(host, port, db, password)
    return server.hlen(key)
Returns number of fields of a hash.

.. versionadded:: 2017.7.0

CLI Example:

.. code-block:: bash

    salt '*' redis.hlen foo_hash
7,889
def addLineWidget(self, query=None):
    widget = XQueryLineWidget(self)
    widget.setTerms(sorted(self._rules.keys()))
    widget.setQuery(query)

    index = self._container.layout().count() - 1
    self._container.layout().insertWidget(index, widget)

    widget.addRequested.connect(self.addLineWidget)
    widget.removeRequested.connect(self.removeLineWidget)

    self.updateRemoveEnabled()
Adds a new line widget to the system with the given values. :param query | (<str> term, <str> operator, <str> value) || None
7,890
def _merge_defaults(self, config):
    # the package name passed to resource_filename was stripped from this
    # row; __name__ is a stand-in. The data path comes from the docstring.
    fn = resource_filename(__name__, join('data', 'default_config.yaml'))
    with open(fn) as f:
        default = parse(f)
    return reduce(dict_merge, [default, config])
The config object loads its values from two sources, with the following precedence: 1. data/default_config.yaml 2. The config file itself, passed in to this object in the constructor as `path`. in case of conflict, the config file dominates.
7,891
def create_vlan(self, id_vlan):
    # the map key, HTTP method and URL were stripped from this row;
    # the literals below are reconstructed guesses.
    vlan_map = dict()
    vlan_map['vlan_id'] = id_vlan
    code, xml = self.submit({'vlan': vlan_map}, 'PUT', 'vlan/create/')
    return self.response(code, xml)
Set column 'ativada = 1'. :param id_vlan: VLAN identifier. :return: None
7,892
def fetch_chunk_data(self):
    data = []
    counter = (relativedelta(self.end_date, self.start_date).months / 6) + 1
    months = 0
    for month in range(counter):
        chunk_start_date = self.start_date + relativedelta(months=months)
        chunk_end_date = self.start_date + relativedelta(months=months + 6)
        months += 6
        if chunk_end_date > self.end_date:
            chunk_end_date = self.end_date
        data = data + self.request.send(self.symbol, chunk_start_date,
                                        chunk_end_date)
    return data
If period of time between start end end is bigger then one year We have to create and fetch chunks dates (6 months chunks).
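One detail worth noting: relativedelta(end, start).months is only the months component of the difference, and the chunk counter relies on Python 2 integer division. A minimal check (dates invented):

    from datetime import date
    from dateutil.relativedelta import relativedelta

    start, end = date(2015, 1, 1), date(2016, 3, 1)
    counter = (relativedelta(end, start).months // 6) + 1  # // needed on Python 3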
7,893
def _setup_serializers(self):
    acceptable_offers = self.request.accept.acceptable_offers(
        self.response.supported_mime_types)
    if len(acceptable_offers) > 0:
        best_accept_match = acceptable_offers[0][0]
    else:
        best_accept_match = self.response.default_serializer.content_type()
    self.logger.info("%s determined as best match for accept header: %s" % (
        best_accept_match, self.request.accept))
    self.response.content_type = best_accept_match
Auto set the return serializer based on Accept headers http://docs.webob.org/en/latest/reference.html#header-getters Intersection of requested types and supported types tells us if we can in fact respond in one of the request formats
7,894
def list_dir(self):
    bucket = self.blob.bucket
    prefix = self.blob.name
    if not prefix.endswith('/'):
        prefix += '/'
    for blob in bucket.list_blobs(prefix=prefix, delimiter='/'):
        yield 'gs://{}/{}'.format(blob.bucket.name, blob.name)
Non-recursive file listing. :returns: A generator over files in this "directory" for efficiency.
7,895
def pack(header, s):
    header = IRHeader(*header)
    if isinstance(header.label, numbers.Number):
        header = header._replace(flag=0)
    else:
        label = np.asarray(header.label, dtype=np.float32)
        header = header._replace(flag=label.size, label=0)
        s = label.tostring() + s
    s = struct.pack(_IR_FORMAT, *header) + s
    return s
Pack a string into MXImageRecord.

Parameters
----------
header : IRHeader
    Header of the image record. ``header.label`` can be a number or an
    array. See more detail in ``IRHeader``.
s : str
    Raw image string to be packed.

Returns
-------
s : str
    The packed string.

Examples
--------
>>> label = 4 # label can also be a 1-D array, for example: label = [1,2,3]
>>> id = 2574
>>> header = mx.recordio.IRHeader(0, label, id, 0)
>>> with open(path, 'r') as file:
...     s = file.read()
>>> packed_s = mx.recordio.pack(header, s)
7,896
def clear_learning_objectives(self):
    if (self.get_learning_objectives_metadata().is_read_only() or
            self.get_learning_objectives_metadata().is_required()):
        raise errors.NoAccess()
    self._my_map['learningObjectiveIds'] = self._learning_objectives_default
Clears the learning objectives. raise: NoAccess - ``Metadata.isRequired()`` or ``Metadata.isReadOnly()`` is ``true`` *compliance: mandatory -- This method must be implemented.*
7,897
def edges_unique(self):
    unique, inverse = grouping.unique_rows(self.edges_sorted)
    edges_unique = self.edges_sorted[unique]
    # cache the indices and inverse mapping for later reuse
    self._cache['edges_unique_idx'] = unique
    self._cache['edges_unique_inverse'] = inverse
    return edges_unique
The unique edges of the mesh.

Returns
-------
edges_unique : (n, 2) int
    Vertex indices for unique edges
7,898
def get_changes(self, extracted_name, similar=False, global_=False):
    info = _ExtractInfo(
        self.project, self.resource, self.start_offset, self.end_offset,
        extracted_name, variable=self.kind == 'variable',
        similar=similar, make_global=global_)
    new_contents = _ExtractPerformer(info).extract()
    changes = ChangeSet('Extract %s <%s>' % (self.kind, extracted_name))
    changes.add_change(ChangeContents(self.resource, new_contents))
    return changes
Get the changes this refactoring makes :parameters: - `similar`: if `True`, similar expressions/statements are also replaced. - `global_`: if `True`, the extracted method/variable will be global.
7,899
def schedule(self, task: Schedulable, *args, **kwargs):
    at = datetime.now(timezone.utc)
    self.schedule_at(task, at, *args, **kwargs)
Add a job to be executed ASAP to the batch. :arg task: the task or its name to execute in the background :arg args: args to be passed to the task function :arg kwargs: kwargs to be passed to the task function