Dataset preview columns: Unnamed: 0 (int64, values 0-389k), code (string, lengths 26-79.6k), docstring (string, lengths 1-46.9k).
26,600
def get_umi_consensus(data):
    consensus_choices = (["fastq_name"])
    umi = tz.get_in(["config", "algorithm", "umi_type"], data)
    if tz.get_in(["analysis"], data, "").lower() == "scrna-seq":
        return False
    if umi and (umi in consensus_choices or os.path.exists(umi)):
        assert tz.get_in(["config", "algorithm", "mark_duplicates"], data, True), \
            "Using consensus UMI inputs requires marking duplicates"
        return umi
Retrieve UMI for consensus based preparation. We specify this either as a separate fastq file or embedded in the read name as `fastq_name`.
26,601
def node_is_on_list(self, node):
    next = self.node_next(node)
    if next == node or next is None:
        assert(self.node_prev(node) is next)
        return False
    return True
Returns True if this node is on *some* list. A node is not on any list if it is linked to itself, or if it does not have the next and/or prev attributes at all.
26,602
def precip(self, start, end, **kwargs):
    # The 'start', 'end', and 'token' keys are restored from the docstring;
    # the endpoint literal passed to _get_response() was stripped from this
    # sample and is left elided.
    self._check_geo_param(kwargs)
    kwargs['start'] = start
    kwargs['end'] = end
    kwargs['token'] = self.token
    return self._get_response(..., kwargs)
r""" Returns precipitation observations at a user specified location for a specified time. Users must specify at least one geographic search parameter ('stid', 'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone', 'gacc', or 'subgacc') to obtain observation data. Other parameters may also be included. See below mandatory and optional parameters. Also see the metadata() function for station IDs. Arguments: ---------- start: string, mandatory Start date in form of YYYYMMDDhhmm. MUST BE USED WITH THE END PARAMETER. Default time is UTC e.g., start='201306011800' end: string, mandatory End date in form of YYYYMMDDhhmm. MUST BE USED WITH THE START PARAMETER. Default time is UTC e.g., end='201306011800' obtimezone: string, optional Set to either UTC or local. Sets timezone of obs. Default is UTC. e.g. obtimezone='local' showemptystations: string, optional Set to '1' to show stations even if no obs exist that match the time period. Stations without obs are omitted by default. stid: string, optional Single or comma separated list of MesoWest station IDs. e.g. stid='kden,kslc,wbb' county: string, optional County/parish/borough (US/Canada only), full name e.g. county='Larimer' state: string, optional US state, 2-letter ID e.g. state='CO' country: string, optional Single or comma separated list of abbreviated 2 or 3 character countries e.g. country='us,ca,mx' radius: list, optional Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or [stid, radius (mi)]. e.g. radius="-120,40,20" bbox: list, optional Stations within a [lon/lat] box in the order [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41" cwa: string, optional NWS county warning area. See http://www.nws.noaa.gov/organization.php for CWA list. e.g. cwa='LOX' nwsfirezone: string, optional NWS fire zones. See http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm for a shapefile containing the full list of zones. e.g. nwsfirezone='LOX241' gacc: string, optional Name of Geographic Area Coordination Center e.g. gacc='EBCC' See http://gacc.nifc.gov/ for a list of GACCs. subgacc: string, optional Name of Sub GACC e.g. subgacc='EB07' vars: string, optional Single or comma separated list of sensor variables. Will return all stations that match one of provided variables. Useful for filtering all stations that sense only certain vars. Do not request vars twice in the query. e.g. vars='wind_speed,pressure' Use the variables function to see a list of sensor vars. status: string, optional A value of either active or inactive returns stations currently set as active or inactive in the archive. Omitting this param returns all stations. e.g. status='active' units: string, optional String or set of strings and pipes separated by commas. Default is metric units. Set units='ENGLISH' for FREEDOM UNITS ;) Valid other combinations are as follows: temp|C, temp|F, temp|K; speed|mps, speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m, height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg. e.g. units='temp|F,speed|kph,metric' groupby: string, optional Results can be grouped by key words: state, county, country, cwa, nwszone, mwsfirezone, gacc, subgacc e.g. groupby='state' timeformat: string, optional A python format string for returning customized date-time groups for observation times. Can include characters. e.g. timeformat='%m/%d/%Y at %H:%M' Returns: -------- Dictionary of precipitation observations. Raises: ------- None.
26,603
def console_init_root(
    w: int,
    h: int,
    title: Optional[str] = None,
    fullscreen: bool = False,
    renderer: Optional[int] = None,
    order: str = "C",
) -> tcod.console.Console:
    if title is None:
        title = os.path.basename(sys.argv[0])
    if renderer is None:
        warnings.warn(
            "A renderer should be given, see the online documentation.",
            DeprecationWarning,
            stacklevel=2,
        )
        renderer = tcod.constants.RENDERER_SDL
    elif renderer in (
        tcod.constants.RENDERER_SDL,
        tcod.constants.RENDERER_OPENGL,
        tcod.constants.RENDERER_GLSL,
    ):
        warnings.warn(
            "The SDL, OPENGL, and GLSL renderers are deprecated.",
            DeprecationWarning,
            stacklevel=2,
        )
    lib.TCOD_console_init_root(w, h, _bytes(title), fullscreen, renderer)
    console = tcod.console.Console._get_root(order)
    console.clear()
    return console
Set up the primary display and return the root console.

`w` and `h` are the columns and rows of the new window (in tiles.)

`title` is an optional string to display on the window's title bar.

`fullscreen` determines if the window will start in fullscreen. Fullscreen
mode is unreliable unless the renderer is set to `tcod.RENDERER_SDL2` or
`tcod.RENDERER_OPENGL2`.

`renderer` is the rendering back-end that libtcod will use. If you don't know
which to pick, then use `tcod.RENDERER_SDL2`. Options are:

* `tcod.RENDERER_SDL`: A deprecated software/SDL2 renderer.
* `tcod.RENDERER_OPENGL`: A deprecated SDL2/OpenGL1 renderer.
* `tcod.RENDERER_GLSL`: A deprecated SDL2/OpenGL2 renderer.
* `tcod.RENDERER_SDL2`: The recommended SDL2 renderer. Rendering is decided
  by SDL2 and can be changed by using an SDL2 hint.
* `tcod.RENDERER_OPENGL2`: An SDL2/OPENGL2 renderer. Usually faster than
  regular SDL2. Requires OpenGL 2.0 Core.

`order` will affect how the array attributes of the returned root console are
indexed. `order='C'` is the default, but `order='F'` is recommended.

.. versionchanged:: 4.3
    Added `order` parameter. `title` parameter is now optional.

.. versionchanged:: 8.0
    The default `renderer` is now automatic instead of always being
    `RENDERER_SDL`.
26,604
def DeleteCronJob(self, cronjob_id):
    if cronjob_id not in self.cronjobs:
        raise db.UnknownCronJobError("Cron job %s not known." % cronjob_id)
    del self.cronjobs[cronjob_id]
    try:
        del self.cronjob_leases[cronjob_id]
    except KeyError:
        pass
    for job_run in self.ReadCronJobRuns(cronjob_id):
        del self.cronjob_runs[(cronjob_id, job_run.run_id)]
Deletes a cronjob along with all its runs.
26,605
def _sectors(self, ignore_chunk=None):
    sectorsize = self._bytes_to_sector(self.size)
    sectors = [[] for s in range(sectorsize)]
    sectors[0] = True
    sectors[1] = True
    for m in self.metadata.values():
        if not m.is_created():
            continue
        if ignore_chunk == m:
            continue
        if m.blocklength and m.blockstart:
            blockend = m.blockstart + max(m.blocklength, m.requiredblocks())
            for b in range(max(m.blockstart, 2), min(blockend, sectorsize)):
                sectors[b].append(m)
    return sectors
Return a list of all sectors; each sector is a list of the chunks occupying that block.
26,606
def related_records2marc(self, key, value):
    # NOTE: the single-quoted string literals (dict keys, relation names and
    # MARC field tags) were stripped from this sample; they are left elided
    # as `...` rather than guessed.
    if value.get(...):
        return {
            ...: value.get(...),
            ...: get_recid_from_ref(value.get(...)),
        }
    elif value.get(...) == ...:
        self.setdefault(..., []).append({
            ...: ...,
            ...: get_recid_from_ref(value.get(...)),
        })
    elif value.get(...) == ...:
        self.setdefault(..., []).append({
            ...: ...,
            ...: get_recid_from_ref(value.get(...)),
        })
    else:
        raise NotImplementedError(
            u"Unhandled relation in related_records: {}".format(value.get(...)))
Populate the ``78708`` MARC field. Also populates the ``78002`` and ``78502`` MARC fields through side effects.
26,607
def print_msg(contentlist):
    # The stripped literal is assumed to be '\n', based on the docstring
    # ("with line feed").
    if isinstance(contentlist, (list, tuple)):
        return '\n'.join(contentlist)
    else:
        if len(contentlist) > 1 and contentlist[-1] != '\n':
            contentlist += '\n'
        return contentlist
Concatenate a message list into a single string joined by line feeds.
26,608
def count(self, axis='major'):
    # The default axis and sum dtype were stripped from this sample; 'major'
    # and 'int64' are assumptions consistent with the docstring's axis names.
    i = self._get_axis_number(axis)
    values = self.values
    mask = np.isfinite(values)
    result = mask.sum(axis=i, dtype='int64')
    return self._wrap_result(result, axis)
Return number of observations over requested axis. Parameters ---------- axis : {'items', 'major', 'minor'} or {0, 1, 2} Returns ------- count : DataFrame
26,609
def _get_stream_id(self, text):
    m = self._image_re.search(text)
    if m:
        return m.group("stream_id")
Try to find a stream_id.
26,610
async def input(dev: Device, input, output):
    inputs = await dev.get_inputs()
    if input:
        click.echo("Activating %s" % input)
        try:
            input = next((x for x in inputs if x.title == input))
        except StopIteration:
            click.echo("Unable to find input %s" % input)
            return
        zone = None
        if output:
            zone = await dev.get_zone(output)
            if zone.uri not in input.outputs:
                click.echo("Input %s not valid for zone %s" % (input.title, output))
                return
        await input.activate(zone)
    else:
        click.echo("Inputs:")
        for input in inputs:
            act = False
            if input.active:
                act = True
            click.echo(" * " + click.style(str(input), bold=act))
            for out in input.outputs:
                click.echo("   - %s" % out)
Get and change outputs.
26,611
def groupby_with_null(data, *args, **kwargs):
    # 'by' is restored from context; the placeholder value written into the
    # null cells was stripped from this sample and is left elided.
    by = kwargs.get('by', args[0])
    altered_columns = {}
    if not isinstance(by, (list, tuple)):
        by = [by]
    for col in by:
        bool_idx = pd.isnull(data[col])
        idx = bool_idx.index[bool_idx]
        if idx.size:
            altered_columns[col] = (idx, data[col].dtype)
            data.loc[idx, col] = ...
    for group, df in data.groupby(*args, **kwargs):
        for col, (orig_idx, orig_dtype) in altered_columns.items():
            sub_idx = orig_idx.intersection(df[col].index)
            if sub_idx.size:
                df.loc[sub_idx, col] = None
                if df[col].dtype != orig_dtype:
                    df[col] = df[col].astype(orig_dtype)
        yield group, df
    for col, (orig_idx, orig_dtype) in altered_columns.items():
        data.loc[orig_idx, col] = None
        if data[col].dtype != orig_dtype:
            data[col] = data[col].astype(orig_dtype)
Groupby on columns with NaN/None/Null values. Pandas currently does not have proper support for groupby on columns with null values; the nulls are discarded and so not grouped on.
26,612
def from_xml(cls, xml_val):
    # The '%s' placeholder is restored; it was stripped from this sample but
    # is implied by the `% xml_val` formatting.
    if xml_val not in cls._xml_to_member:
        raise InvalidXmlError(
            "attribute value '%s' not valid for this type" % xml_val
        )
    return cls._xml_to_member[xml_val]
Return the enumeration member corresponding to the XML value *xml_val*.
26,613
def schema_to_command(
    p, name: str, callback: callable, add_message: bool
) -> click.Command:
    params = params_factory(p.schema["properties"], add_message=add_message)
    help = p.__doc__
    cmd = click.Command(name=name, callback=callback, params=params, help=help)
    return cmd
Generates a ``notify`` :class:`click.Command` for :class:`~notifiers.core.Provider` :param p: Relevant Provider :param name: Command name :return: A ``notify`` :class:`click.Command`
26,614
def _rm_udf_link(self, rec):
    if not rec.is_file() and not rec.is_symlink():
        raise pycdlibexception.PyCdlibInvalidInput()
    logical_block_size = self.pvd.logical_block_size()
    num_bytes_to_remove = 0
    if rec.inode is not None:
        found_index = None
        for index, link in enumerate(rec.inode.linked_records):
            if id(link) == id(rec):
                found_index = index
                break
        else:
            raise pycdlibexception.PyCdlibInternalError()
        del rec.inode.linked_records[found_index]
        rec.inode.num_udf -= 1
        if not rec.inode.linked_records:
            found_index = None
            for index, ino in enumerate(self.inodes):
                if id(ino) == id(rec.inode):
                    found_index = index
                    break
            else:
                raise pycdlibexception.PyCdlibInternalError()
            del self.inodes[found_index]
            num_bytes_to_remove += rec.get_data_length()
            if rec.inode.num_udf == 0:
                num_bytes_to_remove += logical_block_size
    else:
        num_bytes_to_remove += logical_block_size
    if rec.parent is None:
        raise pycdlibexception.PyCdlibInternalError()
    if rec.file_ident is None:
        raise pycdlibexception.PyCdlibInternalError()
    return num_bytes_to_remove + self._rm_udf_file_ident(rec.parent, rec.file_ident.fi)
An internal method to remove a UDF File Entry link. Parameters: rec - The UDF File Entry to remove. Returns: The number of bytes to remove from the ISO.
26,615
def analisar(retorno):
    # 'ExtrairLogs' is restored from the docstring; the remaining stripped
    # literals (the extra field name and the success-code tuple) are left
    # elided rather than guessed.
    resposta = analisar_retorno(
        forcar_unicode(retorno),
        funcao='ExtrairLogs',
        classe_resposta=RespostaExtrairLogs,
        campos=RespostaSAT.CAMPOS + (
            (..., unicode),
        ),
        campos_alternativos=[
            RespostaSAT.CAMPOS,
        ]
    )
    if resposta.EEEEE not in (...,):
        raise ExcecaoRespostaSAT(resposta)
    return resposta
Builds a :class:`RespostaExtrairLogs` from the given return value. :param unicode retorno: Return value of the ``ExtrairLogs`` function.
26,616
def _setup_simplejson(self, responder):
    # 'simplejson' and 'compat' are restored from the docstring; the four
    # submodule/suffix literals and the 'rb' open mode are assumptions based
    # on simplejson's standard layout.
    responder.whitelist_prefix('simplejson')
    compat_path = os.path.join(os.path.dirname(__file__), 'compat')
    sys.path.append(compat_path)
    for fullname, is_pkg, suffix in (
        (u'simplejson', True, '__init__.py'),
        (u'simplejson.decoder', False, 'decoder.py'),
        (u'simplejson.encoder', False, 'encoder.py'),
        (u'simplejson.scanner', False, 'scanner.py'),
    ):
        path = os.path.join(compat_path, 'simplejson', suffix)
        fp = open(path, 'rb')
        try:
            source = fp.read()
        finally:
            fp.close()
        responder.add_source_override(
            fullname=fullname,
            path=path,
            source=source,
            is_pkg=is_pkg,
        )
We support serving simplejson for Python 2.4 targets on Ansible 2.3, at least so the package's own CI Docker scripts can run without external help. However, newer versions of simplejson no longer support Python 2.4, so we override any installed/loaded version with a 2.4-compatible version we ship in the compat/ directory.
26,617
def splitroot(self, path, sep=None):
    if sep is None:
        sep = self.filesystem.path_separator
    if self.filesystem.is_windows_fs:
        return self._splitroot_with_drive(path, sep)
    return self._splitroot_posix(path, sep)
Split path into drive, root and rest.
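A hedged usage sketch; the exact return shapes are assumptions based on the docstring's drive/root/rest split, and path_module stands in for the object exposing this method:

    # Hypothetical results; actual output depends on the fake filesystem mode.
    path_module.splitroot('C:\\Users\\me')  # Windows mode -> ('C:', '\\', 'Users\\me')
    path_module.splitroot('/usr/lib')       # POSIX mode   -> ('', '/', 'usr/lib')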
26,618
def stop_tensorboard(args):
    # Stripped literals: the config-file key, the tensorboard-pid config key,
    # and the status messages are left elided; ['kill', '-9', pid] is an
    # assumption consistent with killing a process by pid.
    experiment_id = check_experiment_id(args)
    experiment_config = Experiments()
    experiment_dict = experiment_config.get_all_experiments()
    config_file_name = experiment_dict[experiment_id][...]
    nni_config = Config(config_file_name)
    tensorboard_pid_list = nni_config.get_config(...)
    if tensorboard_pid_list:
        for tensorboard_pid in tensorboard_pid_list:
            try:
                cmds = ['kill', '-9', str(tensorboard_pid)]
                call(cmds)
            except Exception as exception:
                print_error(exception)
        nni_config.set_config(..., [])
        print_normal(...)
    else:
        print_error(...)
Stop tensorboard.
26,619
def show_G_distribution(data):
    # origin='lower' is an assumption; the literal was stripped from this sample.
    Xs, t = fitting.preprocess_data(data)
    Theta, Phi = np.meshgrid(np.linspace(0, np.pi, 50),
                             np.linspace(0, 2 * np.pi, 50))
    G = []
    for i in range(len(Theta)):
        G.append([])
        for j in range(len(Theta[i])):
            w = fitting.direction(Theta[i][j], Phi[i][j])
            G[-1].append(fitting.G(w, Xs))
    plt.imshow(G, extent=[0, np.pi, 0, 2 * np.pi], origin='lower')
    plt.show()
Show the distribution of the G function.
26,620
def smkdirs(dpath, mode=0o777):
    if not os.path.exists(dpath):
        os.makedirs(dpath, mode=mode)
Safely make a full directory path if it doesn't exist. Parameters ---------- dpath : str Path of directory/directories to create mode : int [default=0777] Permissions for the new directories See also -------- os.makedirs
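A quick usage sketch, grounded in the docstring:

    smkdirs('/tmp/a/b/c')          # creates the whole chain only if missing
    smkdirs('/tmp/a/b/c', 0o755)   # same, with explicit permissions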
26,621
async def renew_lease_async(self, lease):
    try:
        await self.host.loop.run_in_executor(
            self.executor,
            functools.partial(
                self.storage_client.renew_blob_lease,
                self.lease_container_name,
                lease.partition_id,
                lease_id=lease.token,
                timeout=self.lease_duration))
    except Exception as err:
        if "LeaseIdMismatchWithLeaseOperation" in str(err):
            _logger.info("LeaseLost on partition %r", lease.partition_id)
        else:
            _logger.error("Failed to renew lease on partition %r with token %r %r",
                          lease.partition_id, lease.token, err)
        return False
    return True
Renew a lease currently held by this host. If the lease has been stolen, or expired, or released, it is not possible to renew it. You will have to call getLease() and then acquireLease() again. :param lease: The stored lease to be renewed. :type lease: ~azure.eventprocessorhost.lease.Lease :return: `True` if the lease was renewed successfully, `False` if not. :rtype: bool
26,622
def cli(ctx, amount, index, stage, stepresult, formattype, select, where, order, outputfile, showkeys, showvalues, showalways, position): if not ctx.bubble: msg = ctx.say_yellow(msg) raise click.Abort() path = ctx.home + if stage not in STAGES: ctx.say_yellow( + stage) raise click.Abort() if stepresult not in exportables: ctx.say_yellow( + .join(exportables)) raise click.Abort() data_gen = bubble_lod_load(ctx, stepresult, stage) ctx.gbc.say(, stuff=data_gen, verbosity=20) part = get_gen_slice(ctx.gbc, data_gen, amount, index) ctx.gbc.say(, stuff=part, verbosity=20) aliases = get_pairs(ctx.gbc, select, missing_colon=True) if position or len(aliases) == 0: ctx.gbc.say(, stuff=aliases, verbosity=20) aliases.insert(0, {: buts(), : }) ctx.gbc.say(, stuff=aliases, verbosity=20) wheres = get_pairs(ctx.gbc, where) data = tablib.Dataset() data.headers = [sel[] for sel in aliases] ctx.gbc.say( + str(wheres), verbosity=20) ctx.gbc.say( + str(aliases), verbosity=20) ctx.gbc.say( + str(data.headers), verbosity=20) not_shown = True try: for ditem in part: row = [] ctx.gbc.say(, stuff=ditem, verbosity=101) flitem = flat(ctx, ditem) ctx.gbc.say(, stuff=flitem, verbosity=101) row_ok = True for wp in wheres: wcheck_key=True if wp[] not in flitem: row_ok = False wcheck_key=False if wcheck_key and wp[] not in str(flitem[wp[]]): row_ok = False if not row_ok: continue for sel in aliases: if sel[] in flitem: row.append(flitem[sel[]]) else: bnp = tempv = get_flat_path(ctx, flitem, sel[] + , bnp) if tempv != bnp: row.append(tempv) else: row.append() data.append(row) if not_shown and showkeys: if not showalways: not_shown = False ks = list(flitem.keys()) ks.sort() ctx.say( , verbosity=0) for k in ks: ctx.say( + k, verbosity=0) if showvalues: ctx.say( + str(flitem[k]) + , verbosity=0) except Exception as excpt: ctx.say_red(, stuff=excpt) raise click.Abort() if not outputfile: outputfile = path + + \ stepresult + + stage + + formattype if order: olast2 = order[-2:] ctx.gbc.say( + order + + olast2, verbosity=100) if olast2 not in [, ]: data = data.sort(order, False) else: if olast2 == : data = data.sort(order[:-2], False) if olast2 == : data = data.sort(order[:-2], True) formatted = None if formattype == : formatted = data.yaml if formattype == : formatted = data.json if formattype == : formatted = data.csv if formattype == : print(data) if formatted: enc_formatted = formatted.encode() of_path = opath.Path(outputfile) of_dir = of_path.dirname() if not of_dir.exists(): of_dir.makedirs_p() with open(outputfile, ) as f: f.write(enc_formatted) ctx.say_green( + outputfile)
Export from memory to format supported by tablib
26,623
def setColor(self, personID, color):
    self._connection._beginMessage(
        tc.CMD_SET_PERSON_VARIABLE, tc.VAR_COLOR, personID, 1 + 1 + 1 + 1 + 1)
    self._connection._string += struct.pack("!BBBBB", tc.TYPE_COLOR, int(
        color[0]), int(color[1]), int(color[2]), int(color[3]))
    self._connection._sendExact()
setColor(string, (integer, integer, integer, integer)) sets color for person with the given ID. i.e. (255,0,0,0) for the color red. The fourth integer (alpha) is only used when drawing persons with raster images
26,624
def check_type(self, type):
    # The eight alias-mapping literals and the assertion message were
    # stripped from this sample and are left elided.
    if type in TYPES:
        return type
    tdict = dict(zip(TYPES, TYPES))
    tdict.update({
        # ...: ...,  (eight stripped alias -> canonical-type pairs)
    })
    assert type in tdict  # failure message ('%s'-formatted with type) stripped
    return tdict[type]
Check to see if the type is either in TYPES or fits a known type alias. Returns the proper type.
26,625
def accept_format(*, version: str = "v3", media: Optional[str] = None,
                  json: bool = True) -> str:
    accept = f"application/vnd.github.{version}"
    if media is not None:
        accept += f".{media}"
    if json:
        accept += "+json"
    return accept
Construct the specification of the format that a request should return. The version argument defaults to v3 of the GitHub API and is applicable to all requests. The media argument along with 'json' specifies what format the request should return, e.g. requesting the rendered HTML of a comment. Do note that not all of GitHub's API supports alternative formats. The default arguments of this function will always return the latest stable version of the GitHub API in the default format that this library is designed to support.
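A short usage sketch; the outputs follow directly from the string-building in the function above:

    accept_format()                         # 'application/vnd.github.v3+json'
    accept_format(media="html")             # 'application/vnd.github.v3.html+json'
    accept_format(media="raw", json=False)  # 'application/vnd.github.v3.raw'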
26,626
def note_hz_to_midi(annotation):
    # 'pitch_midi' is restored from the docstring; the literal was stripped.
    annotation.namespace = 'pitch_midi'
    data = annotation.pop_data()
    for obs in data:
        annotation.append(time=obs.time, duration=obs.duration,
                          confidence=obs.confidence,
                          value=12 * (np.log2(obs.value) - np.log2(440.0)) + 69)
    return annotation
Convert a pitch_hz annotation to pitch_midi
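A one-line sanity check of the Hz-to-MIDI formula used above (A4 = 440 Hz should map to MIDI note 69):

    import numpy as np
    12 * (np.log2(440.0) - np.log2(440.0)) + 69   # -> 69.0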
26,627
def transaction_abort(self, transaction_id, **kwargs):
    if transaction_id not in self.__transactions:
        raise workflows.Error("Attempting to abort unknown transaction")
    self.log.debug("Aborting transaction %s", transaction_id)
    self.__transactions.remove(transaction_id)
    self._transaction_abort(transaction_id, **kwargs)
Abort a transaction and roll back all operations. :param transaction_id: ID of transaction to be aborted. :param **kwargs: Further parameters for the transport layer.
26,628
def clone(self, opts):
    topt = self.opts.copy()
    topt.update(opts)
    return self.__class__(self.modl, self.name, self.info, topt)
Create a new instance of this type with the specified options. Args: opts (dict): The type specific options for the new instance.
26,629
def _parse_docline(self, line, container):
    match = self.RE_DECOR.match(line)
    if match is not None:
        return "{}.{}".format(container.name, match.group("name"))
    else:
        return container.name
Parses a single line of code following a docblock to see if it is a valid code element that can be decorated. If so, return the name of the code element.
26,630
def cache_git_tag():
    try:
        version = __get_git_tag()
        with __open_cache_file() as vf:
            vf.write(version)
    except Exception:
        version = __default_version__
    return version
Try to read the current version from git and, if read successfully, cache it into the version cache file. If the git folder doesn't exist or if git isn't installed, this is a no-op, i.e. it won't blank out a pre-existing version cache file upon failure. :return: Project version string
26,631
def urls(self):
    url_bases = self._url_module.url_bases
    unformatted_paths = self._url_module.url_paths
    urls = {}
    for url_base in url_bases:
        for url_path, handler in unformatted_paths.items():
            url = url_path.format(url_base)
            urls[url] = handler
    return urls
A dictionary of the urls to be mocked with this service and the handlers that should be called in their place.
26,632
def streamweigths_get(self, session):
    # The API method name is restored from the docstring.
    request = TOPRequest('taobao.wangwang.eservice.streamweigths.get')
    self.create(self.execute(request, session))
    return self.staff_stream_weights
taobao.wangwang.eservice.streamweigths.get -- interface for fetching stream-distribution weights. Returns the stream-distribution weight settings for the shop of the currently logged-in user.
26,633
def comp_srcmdl_xml(self, **kwargs):
    # The stripped dict keys and kwargs names are left elided rather than
    # guessed.
    kwargs_copy = self.base_dict.copy()
    kwargs_copy.update(**kwargs)
    kwargs_copy[...] = kwargs.get(..., self.dataset(**kwargs))
    kwargs_copy[...] = kwargs.get(..., self.component(**kwargs))
    self._replace_none(kwargs_copy)
    localpath = NameFactory.comp_srcmdl_xml_format.format(**kwargs_copy)
    if kwargs.get(..., False):
        return self.fullpath(localpath=localpath)
    return localpath
Return the name of a source model file.
26,634
def delete_container(container_name, profile, **libcloud_kwargs):
    conn = _get_driver(profile=profile)
    libcloud_kwargs = salt.utils.args.clean_kwargs(**libcloud_kwargs)
    container = conn.get_container(container_name)
    return conn.delete_container(container, **libcloud_kwargs)
Delete an object container in the cloud :param container_name: Container name :type container_name: ``str`` :param profile: The profile key :type profile: ``str`` :param libcloud_kwargs: Extra arguments for the driver's delete_container method :type libcloud_kwargs: ``dict`` :return: True if an object container has been successfully deleted, False otherwise. :rtype: ``bool`` CLI Example: .. code-block:: bash salt myminion libcloud_storage.delete_container MyFolder profile1
26,635
def from_pycode(cls, co): sparse_instrs = tuple( _sparse_args( Instruction.from_opcode( b.opcode, Instruction._no_arg if b.arg is None else _RawArg(b.arg), ) for b in Bytecode(co) ), ) for idx, instr in enumerate(sparse_instrs): if instr is None: continue if instr.absjmp: instr.arg = sparse_instrs[instr.arg] elif instr.reljmp: instr.arg = sparse_instrs[instr.arg + idx + argsize + 1] elif isinstance(instr, LOAD_CONST): instr.arg = co.co_consts[instr.arg] elif instr.uses_name: instr.arg = co.co_names[instr.arg] elif instr.uses_varname: instr.arg = co.co_varnames[instr.arg] elif instr.uses_free: instr.arg = _freevar_argname( instr.arg, co.co_freevars, co.co_cellvars, ) elif instr.have_arg and isinstance(instr.arg, _RawArg): instr.arg = int(instr.arg) flags = Flag.unpack(co.co_flags) has_vargs = flags[] has_kwargs = flags[] paramnames = co.co_varnames[ :(co.co_argcount + co.co_kwonlyargcount + has_vargs + has_kwargs) ] new_paramnames = list(paramnames[:co.co_argcount]) if has_vargs: new_paramnames.append( + paramnames[-1 - has_kwargs]) new_paramnames.extend(paramnames[ co.co_argcount:co.co_argcount + co.co_kwonlyargcount ]) if has_kwargs: new_paramnames.append( + paramnames[-1]) return cls( filter(bool, sparse_instrs), argnames=new_paramnames, cellvars=co.co_cellvars, freevars=co.co_freevars, name=co.co_name, filename=co.co_filename, firstlineno=co.co_firstlineno, lnotab={ lno: sparse_instrs[off] for off, lno in findlinestarts(co) }, flags=flags, )
Create a Code object from a python code object. Parameters ---------- co : CodeType The python code object. Returns ------- code : Code The codetransformer Code object.
26,636
def _get_class_repr(cls, type_, bound, keyfunc, keyfunc_name):
    if keyfunc is not cls._default:
        return "{}.{}[{}, {}, {}]".format(
            cls.__module__,
            cls.__name__,
            cls._get_fullname(type_),
            cls._get_bound_repr(bound),
            keyfunc_name,
        )
    return "{}.{}[{}, {}]".format(
        cls.__module__,
        cls.__name__,
        cls._get_fullname(type_),
        cls._get_bound_repr(bound),
    )
Return a class representation using the slice parameters. Args: type_: The type the class was sliced with. bound: The boundaries specified for the values of type_. keyfunc: The comparison function used to check the value boundaries. keyfunc_name: The name of keyfunc. Returns: A string representing the class.
26,637
def simulate(self): self.N = int(self.simTime.days * 24) n = 0 self.ph = self.simTime.dt/3600. time = range(self.N) self.WeatherData = [None for x in range(self.N)] self.UCMData = [None for x in range(self.N)] self.UBLData = [None for x in range(self.N)] self.RSMData = [None for x in range(self.N)] self.USMData = [None for x in range(self.N)] print(.format( int(self.nDay), int(self.Month), int(self.Day))) self.logger.info("Start simulation") for it in range(1, self.simTime.nt, 1): if self.nSoil < 3: self.forc.deepTemp = sum(self.forcIP.temp)/float(len(self.forcIP.temp)) self.forc.waterTemp = sum( self.forcIP.temp)/float(len(self.forcIP.temp)) - 10. else: self.forc.deepTemp = self.Tsoil[self.soilindex1][self.simTime.month-1] self.forc.waterTemp = self.Tsoil[2][self.simTime.month-1] self.simTime.UpdateDate() self.logger.info("\n{0} m={1}, d={2}, h={3}, s={4}".format( __name__, self.simTime.month, self.simTime.day, self.simTime.secDay/3600., self.simTime.secDay)) self.ceil_time_step = int(math.ceil(it * self.ph))-1 self.forc.infra = self.forcIP.infra[self.ceil_time_step] self.forc.wind = max(self.forcIP.wind[self.ceil_time_step], self.geoParam.windMin) self.forc.uDir = self.forcIP.uDir[self.ceil_time_step] self.forc.hum = self.forcIP.hum[self.ceil_time_step] self.forc.pres = self.forcIP.pres[self.ceil_time_step] self.forc.temp = self.forcIP.temp[self.ceil_time_step] self.forc.rHum = self.forcIP.rHum[self.ceil_time_step] self.forc.prec = self.forcIP.prec[self.ceil_time_step] self.forc.dif = self.forcIP.dif[self.ceil_time_step] self.forc.dir = self.forcIP.dir[self.ceil_time_step] self.UCM.canHum = copy.copy(self.forc.hum) self.solar = SolarCalcs(self.UCM, self.BEM, self.simTime, self.RSM, self.forc, self.geoParam, self.rural) self.rural, self.UCM, self.BEM = self.solar.solarcalcs() if self.is_near_zero(self.simTime.julian % 7): self.dayType = 3 elif self.is_near_zero(self.simTime.julian % 7 - 6.): self.dayType = 2 else: self.dayType = 1 self.UCM.sensAnthrop = self.sensAnth * (self.SchTraffic[self.dayType-1][self.simTime.hourDay]) for i in range(len(self.BEM)): self.BEM[i].building.coolSetpointDay = self.Sch[i].Cool[self.dayType - 1][self.simTime.hourDay] + 273.15 self.BEM[i].building.coolSetpointNight = self.BEM[i].building.coolSetpointDay self.BEM[i].building.heatSetpointDay = self.Sch[i].Heat[self.dayType - 1][self.simTime.hourDay] + 273.15 self.BEM[i].building.heatSetpointNight = self.BEM[i].building.heatSetpointDay self.BEM[i].Elec = self.Sch[i].Qelec * self.Sch[i].Elec[self.dayType - 1][self.simTime.hourDay] self.BEM[i].Light = self.Sch[i].Qlight * self.Sch[i].Light[self.dayType - 1][self.simTime.hourDay] self.BEM[i].Nocc = self.Sch[i].Nocc * self.Sch[i].Occ[self.dayType - 1][self.simTime.hourDay] self.BEM[i].Qocc = self.sensOcc * (1 - self.LatFOcc) * self.BEM[i].Nocc self.BEM[i].SWH = self.Sch[i].Vswh * self.Sch[i].SWH[self.dayType - 1][self.simTime.hourDay] self.BEM[i].building.vent = self.Sch[i].Vent self.BEM[i].Gas = self.Sch[i].Qgas * self.Sch[i].Gas[self.dayType - 1][self.simTime.hourDay] intHeat = self.BEM[i].Light + self.BEM[i].Elec + self.BEM[i].Qocc self.BEM[i].building.intHeatDay = intHeat self.BEM[i].building.intHeatNight = intHeat self.BEM[i].building.intHeatFRad = ( self.RadFLight * self.BEM[i].Light + self.RadFEquip * self.BEM[i].Elec) / intHeat self.BEM[i].building.intHeatFLat = self.LatFOcc * \ self.sensOcc * self.BEM[i].Nocc/intHeat self.BEM[i].T_wallex = self.BEM[i].wall.layerTemp[0] self.BEM[i].T_wallin = self.BEM[i].wall.layerTemp[-1] self.BEM[i].T_roofex = 
self.BEM[i].roof.layerTemp[0] self.BEM[i].T_roofin = self.BEM[i].roof.layerTemp[-1] self.rural.infra = self.forc.infra - self.rural.emissivity * self.SIGMA * \ self.rural.layerTemp[0]**4. self.rural.SurfFlux(self.forc, self.geoParam, self.simTime, self.forc.hum, self.forc.temp, self.forc.wind, 2., 0.) self.RSM.VDM(self.forc, self.rural, self.geoParam, self.simTime) self.UCM, self.UBL, self.BEM = urbflux( self.UCM, self.UBL, self.BEM, self.forc, self.geoParam, self.simTime, self.RSM) self.UCM.UCModel(self.BEM, self.UBL.ublTemp, self.forc, self.geoParam) self.UBL.UBLModel(self.UCM, self.RSM, self.rural, self.forc, self.geoParam, self.simTime) self.logger.info("dbT = {}".format(self.UCM.canTemp-273.15)) if n > 0: logging.info("dpT = {}".format(self.UCM.Tdp)) logging.info("RH = {}".format(self.UCM.canRHum)) if self.is_near_zero(self.simTime.secDay % self.simTime.timePrint) and n < self.N: self.logger.info("{0} ----sim time step = {1}----\n\n".format(__name__, n)) self.WeatherData[n] = copy.copy(self.forc) _Tdb, _w, self.UCM.canRHum, _h, self.UCM.Tdp, _v = psychrometrics( self.UCM.canTemp, self.UCM.canHum, self.forc.pres) self.UBLData[n] = copy.copy(self.UBL) self.UCMData[n] = copy.copy(self.UCM) self.RSMData[n] = copy.copy(self.RSM) self.logger.info("dbT = {}".format(self.UCMData[n].canTemp-273.15)) self.logger.info("dpT = {}".format(self.UCMData[n].Tdp)) self.logger.info("RH = {}".format(self.UCMData[n].canRHum)) n += 1
Section 7 - uwg main section self.N # Total hours in simulation self.ph # per hour self.dayType # 3=Sun, 2=Sat, 1=Weekday self.ceil_time_step # simulation timestep (dt) fitted to weather file timestep # Output of object instance vector self.WeatherData # Nx1 vector of forc instance self.UCMData # Nx1 vector of UCM instance self.UBLData # Nx1 vector of UBL instance self.RSMData # Nx1 vector of RSM instance self.USMData # Nx1 vector of USM instance
26,638
def query(self):
    # The environ key literal was stripped; 'bottle.get' is an assumption
    # based on Bottle's caching convention.
    pairs = parse_qsl(self.query_string, keep_blank_values=True)
    get = self.environ['bottle.get'] = FormsDict()
    for key, value in pairs[:self.MAX_PARAMS]:
        get[key] = value
    return get
The :attr:`query_string` parsed into a :class:`FormsDict`. These values are sometimes called "URL arguments" or "GET parameters", but not to be confused with "URL wildcards" as they are provided by the :class:`Router`.
26,639
def parrep(self, parfile=None, enforce_bounds=True):
    if parfile is None:
        parfile = self.filename.replace(".pst", ".par")
    par_df = pst_utils.read_parfile(parfile)
    self.parameter_data.index = self.parameter_data.parnme
    par_df.index = par_df.parnme
    self.parameter_data.parval1 = par_df.parval1
    self.parameter_data.scale = par_df.scale
    self.parameter_data.offset = par_df.offset
    if enforce_bounds:
        par = self.parameter_data
        idx = par.loc[par.parval1 > par.parubnd, "parnme"]
        par.loc[idx, "parval1"] = par.loc[idx, "parubnd"]
        idx = par.loc[par.parval1 < par.parlbnd, "parnme"]
        par.loc[idx, "parval1"] = par.loc[idx, "parlbnd"]
Replicates the pest parrep util: replaces the parval1 field in the parameter
data section dataframe.

Parameters
----------
parfile : str
    parameter file to use. If None, try to use a parameter file that
    corresponds to the case name. Default is None
enforce_bounds : bool
    flag to enforce parameter bounds after parameter values are updated.
    This is useful because PEST and PEST++ round the parameter values in
    the par file, which may cause slight bound violations
26,640
def _srm(self, data): samples = data[0].shape[1] subjects = len(data) np.random.seed(self.rand_seed) w, voxels = _init_w_transforms(data, self.features) x, mu, rho2, trace_xtx = self._init_structures(data, subjects) shared_response = np.zeros((self.features, samples)) sigma_s = np.identity(self.features) for iteration in range(self.n_iter): logger.info( % (iteration + 1)) rho0 = (1 / rho2).sum() (chol_sigma_s, lower_sigma_s) = scipy.linalg.cho_factor( sigma_s, check_finite=False) inv_sigma_s = scipy.linalg.cho_solve( (chol_sigma_s, lower_sigma_s), np.identity(self.features), check_finite=False) sigma_s_rhos = inv_sigma_s + np.identity(self.features) * rho0 (chol_sigma_s_rhos, lower_sigma_s_rhos) = scipy.linalg.cho_factor( sigma_s_rhos, check_finite=False) inv_sigma_s_rhos = scipy.linalg.cho_solve( (chol_sigma_s_rhos, lower_sigma_s_rhos), np.identity(self.features), check_finite=False) wt_invpsi_x = np.zeros((self.features, samples)) trace_xt_invsigma2_x = 0.0 for subject in range(subjects): wt_invpsi_x += (w[subject].T.dot(x[subject])) / rho2[subject] trace_xt_invsigma2_x += trace_xtx[subject] / rho2[subject] log_det_psi = np.sum(np.log(rho2) * voxels) shared_response = sigma_s.dot( np.identity(self.features) - rho0 * inv_sigma_s_rhos).dot( wt_invpsi_x) sigma_s = (inv_sigma_s_rhos + shared_response.dot(shared_response.T) / samples) trace_sigma_s = samples * np.trace(sigma_s) return sigma_s, w, mu, rho2, shared_response
Expectation-Maximization algorithm for fitting the probabilistic SRM. Parameters ---------- data : list of 2D arrays, element i has shape=[voxels_i, samples] Each element in the list contains the fMRI data of one subject. Returns ------- sigma_s : array, shape=[features, features] The covariance :math:`\\Sigma_s` of the shared response Normal distribution. w : list of array, element i has shape=[voxels_i, features] The orthogonal transforms (mappings) :math:`W_i` for each subject. mu : list of array, element i has shape=[voxels_i] The voxel means :math:`\\mu_i` over the samples for each subject. rho2 : array, shape=[subjects] The estimated noise variance :math:`\\rho_i^2` for each subject s : array, shape=[features, samples] The shared response.
26,641
def compute(self, write_to_tar=True):
    data = self._get_all_data(self.start_date, self.end_date)
    # The message literal was stripped; it was formatted with
    # (self.start_date, self.end_date).
    logging.info(...)
    full, full_dt = self._compute_full_ts(data)
    full_out = self._full_to_yearly_ts(full, full_dt)
    reduced = self._apply_all_time_reductions(full_out)
    logging.info("Writing desired gridded outputs to disk.")
    for dtype_time, data in reduced.items():
        data = _add_metadata_as_attrs(data, self.var.units,
                                      self.var.description,
                                      self.dtype_out_vert)
        self.save(data, dtype_time, dtype_out_vert=self.dtype_out_vert,
                  save_files=True, write_to_tar=write_to_tar)
    return self
Perform all desired calculations on the data and save externally.
26,642
def export(self, last_checkpoint, output_dir):
    # The log message and the signature_def_map key were stripped from this
    # sample and are left elided.
    logging.info(..., output_dir)
    with tf.Session(graph=tf.Graph()) as sess:
        inputs, outputs = self.build_prediction_graph()
        signature_def_map = {
            ...: signature_def_utils.predict_signature_def(inputs, outputs)
        }
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        self.restore_from_checkpoint(sess, self.inception_checkpoint_file,
                                     last_checkpoint)
        init_op_serving = control_flow_ops.group(
            variables.local_variables_initializer(),
            tf.tables_initializer())
        builder = saved_model_builder.SavedModelBuilder(output_dir)
        builder.add_meta_graph_and_variables(
            sess, [tag_constants.SERVING],
            signature_def_map=signature_def_map,
            legacy_init_op=init_op_serving)
        builder.save(False)
Builds a prediction graph and exports the model. Args: last_checkpoint: Path to the latest checkpoint file from training. output_dir: Path to the folder to be used to output the model.
26,643
def value_from_ast_untyped(
    value_node: ValueNode, variables: Dict[str, Any] = None
) -> Any:
    func = _value_from_kind_functions.get(value_node.kind)
    if func:
        return func(value_node, variables)
    # The interpolated expression inside the f-string was stripped from this
    # sample; a generic repr is substituted here as an assumption.
    raise TypeError(f"Unexpected value node: {value_node!r}.")
Produce a Python value given a GraphQL Value AST.

Unlike `value_from_ast()`, no type is provided. The resulting Python value
will reflect the provided GraphQL value AST.

| GraphQL Value        | JSON Value | Python Value |
| -------------------- | ---------- | ------------ |
| Input Object         | Object     | dict         |
| List                 | Array      | list         |
| Boolean              | Boolean    | bool         |
| String / Enum        | String     | str          |
| Int / Float          | Number     | int / float  |
| Null                 | null       | None         |
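A hedged round-trip sketch, assuming graphql-core's parse_value helper is available alongside this function:

    # Hypothetical usage: parse a GraphQL value literal, convert it to Python.
    from graphql import parse_value
    ast = parse_value('{name: "Ada", tags: [1, 2], active: true}')
    value_from_ast_untyped(ast)  # -> {'name': 'Ada', 'tags': [1, 2], 'active': True}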
26,644
def _pop(self):
    if not self.canPop():
        raise IndexError()
    priority = self.prioritySet[-1]
    ret = self.queues[priority]._pop()
    self.outputStat = self.outputStat + 1
    self.totalSize = self.totalSize - 1
    if self.isWaited and self.canAppend():
        self.isWaited = False
        ret[1].append(QueueCanWriteEvent(self))
    if self.isWaitEmpty and not self:
        self.isWaitEmpty = False
        ret[2].append(QueueIsEmptyEvent(self))
    return ret
Actual pop
26,645
def browse_in_qt5_ui(self):
    self._render_type = "browse"
    self._tree.show(tree_style=self._get_tree_style())
Browse and edit the SubjectInfo in a simple Qt5 based UI.
26,646
def sys_call(cmd):
    p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, close_fds=True)
    return p.stdout.readlines(), p.stderr.readlines()
Execute cmd and capture stdout and stderr :param cmd: command to be executed :return: (stdout, stderr)
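A minimal usage sketch; on Python 3 the pipes yield bytes, and the output assumes a POSIX shell:

    out, err = sys_call('echo hello')
    # out == [b'hello\n'], err == []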
26,647
def read_column(self, column, where=None, start=None, stop=None):
    self.validate_version()
    if not self.infer_axes():
        return False
    if where is not None:
        raise TypeError("read_column does not currently accept a where "
                        "clause")
    for a in self.axes:
        if column == a.name:
            if not a.is_data_indexable:
                raise ValueError(
                    "column [{column}] can not be extracted individually; "
                    "it is not data indexable".format(column=column))
            c = getattr(self.table.cols, column)
            a.set_info(self.info)
            return Series(_set_tz(a.convert(c[start:stop],
                                            nan_rep=self.nan_rep,
                                            encoding=self.encoding,
                                            errors=self.errors
                                            ).take_data(),
                                  a.tz, True), name=column)
    raise KeyError(
        "column [{column}] not found in the table".format(column=column))
Return a single column from the table; generally only indexables are interesting.
26,648
def remove_by_rank(self, low, high=None):
    if high is None:
        high = low
    return self.database.zremrangebyrank(self.key, low, high)
Remove elements from the ZSet by their rank (relative position). :param low: Lower bound. :param high: Upper bound.
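A short usage sketch with a hypothetical zset instance; rank 0 is the lowest-scored member in a Redis sorted set:

    zset.remove_by_rank(0, 2)   # drop the three lowest-ranked members
    zset.remove_by_rank(5)      # drop only the member at rank 5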
26,649
def get_priority_rules(db) -> Iterable[PriorityRule]:
    cur = db.cursor()
    # The SQL statement literal was stripped from this sample.
    cur.execute(...)
    for row in cur:
        yield PriorityRule(*row)
Get file priority rules.
26,650
def execute(self, cacheable=False):
    if self.network.is_caching_enabled() and cacheable:
        response = self._get_cached_response()
    else:
        response = self._download_response()
    return minidom.parseString(_string(response).replace("opensearch:", ""))
Returns the XML DOM response of the POST Request from the server
26,651
def _server_begin_response_callback(self, response: Response):
    self._item_session.response = response
    if self._cookie_jar:
        self._cookie_jar.extract_cookies(response, self._item_session.request)
    action = self._result_rule.handle_pre_response(self._item_session)
    self._file_writer_session.process_response(response)
    return action == Actions.NORMAL
Pre-response callback handler.
26,652
def qwe(rtol, atol, maxint, inp, intervals, lambd=None, off=None, factAng=None): r def getweights(i, inpint): r return (np.atleast_2d(inpint)[:, i+1] - np.atleast_2d(inpint)[:, i])/2 if hasattr(inp, ): EM0 = inp(0, lambd, off, factAng) else: EM0 = inp[:, 0] EM0 *= getweights(0, intervals) EM = np.zeros(EM0.size, dtype=EM0.dtype) om = np.ones(EM0.size, dtype=bool) S = np.zeros((EM0.size, maxint), dtype=EM0.dtype) relErr = np.zeros((EM0.size, maxint)) extrap = np.zeros((EM0.size, maxint), dtype=EM0.dtype) kcount = 1 for i in range(1, maxint): if hasattr(inp, ): EMi = inp(i, lambd[om, :], off[om], factAng[om]) kcount += 1 else: EMi = inp[om, i] EMi *= getweights(i, intervals[om, :]) S[:, i][om] = S[:, i-1][om] + EMi aux2 = np.zeros(om.sum(), dtype=EM0.dtype) for k in range(i, 0, -1): aux1, aux2 = aux2, S[om, k-1] ddff = S[om, k] - aux2 S[om, k-1] = np.where(np.abs(ddff) < np.finfo(np.double).tiny, np.finfo(np.double).max, aux1 + 1/ddff) extrap[om, i-1] = S[om, np.mod(i, 2)] + EM0[om] if i > 1: rErr = (extrap[om, i-1] - extrap[om, i-2])/extrap[om, i-1] relErr[om, i-1] = np.abs(rErr) abserr = atol/np.abs(extrap[om, i-1]) om[om] *= relErr[om, i-1] >= rtol + abserr EM[om] = extrap[om, i-1] if (~om).all(): break conv = i+1 != maxint EM[om] = extrap[om, i-1] EM.real[EM.real == np.finfo(np.double).max] = 0 return EM, kcount, conv
r"""Quadrature-With-Extrapolation. This is the kernel of the QWE method, used for the Hankel (``hqwe``) and the Fourier (``fqwe``) Transforms. See ``hqwe`` for an extensive description. This function is based on ``qwe.m`` from the source code distributed with [Key12]_.
26,653
def get(self, subscription_id=None, stream=None, historics_id=None, page=None,
        per_page=None, order_by=None, order_dir=None, include_finished=None):
    # The stripped parameter keys are reconstructed from the docstring's
    # parameter names ('id', 'hash', 'playback_id', and the pagination/order
    # params) and should be treated as assumptions; the endpoint literal is
    # left elided.
    params = {}
    if subscription_id:
        params['id'] = subscription_id
    if stream:
        params['hash'] = stream
    if historics_id:
        params['playback_id'] = historics_id
    if page:
        params['page'] = page
    if per_page:
        params['per_page'] = per_page
    if order_by:
        params['order_by'] = order_by
    if order_dir:
        params['order_dir'] = order_dir
    if include_finished:
        params['include_finished'] = 1 if include_finished else 0
    return self.request.get(..., params=params)
Show details of the Subscriptions belonging to this user. Uses API documented at http://dev.datasift.com/docs/api/rest-api/endpoints/pushget :param subscription_id: optional id of an existing Push Subscription :type subscription_id: str :param hash: optional hash of a live stream :type hash: str :param playback_id: optional playback id of a Historics query :type playback_id: str :param page: optional page number for pagination :type page: int :param per_page: optional number of items per page, default 20 :type per_page: int :param order_by: field to order by, default request_time :type order_by: str :param order_dir: direction to order by, asc or desc, default desc :type order_dir: str :param include_finished: boolean indicating if finished Subscriptions for Historics should be included :type include_finished: bool :returns: dict with extra response data :rtype: :class:`~datasift.request.DictResponse` :raises: :class:`~datasift.exceptions.DataSiftApiException`, :class:`requests.exceptions.HTTPError`
26,654
def regex(expression, flags=re.IGNORECASE):
    return re.compile(expression, flags=flags)
Convenient shortcut to ``re.compile()`` for fast, easy to use regular expression compilation without an extra import statement. Arguments: expression (str): regular expression value. flags (int): optional regular expression flags. Defaults to ``re.IGNORECASE`` Returns: expression (str): string based regular expression. Raises: Exception: in case of regular expression compilation error Example:: (pook .get('api.com/foo') .header('Content-Type', pook.regex('[a-z]{1,4}')))
26,655
def use_args( self, argmap: ArgMap, req: typing.Optional[Request] = None, locations: typing.Iterable = None, as_kwargs: bool = False, validate: Validate = None, error_status_code: typing.Optional[int] = None, error_headers: typing.Union[typing.Mapping[str, str], None] = None, ) -> typing.Callable[..., typing.Callable]: locations = locations or self.locations request_obj = req if isinstance(argmap, Mapping): argmap = core.dict2schema(argmap, self.schema_class)() def decorator(func: typing.Callable) -> typing.Callable: req_ = request_obj if inspect.iscoroutinefunction(func): @functools.wraps(func) async def wrapper(*args, **kwargs): req_obj = req_ if not req_obj: req_obj = self.get_request_from_view_args(func, args, kwargs) parsed_args = await self.parse( argmap, req=req_obj, locations=locations, validate=validate, error_status_code=error_status_code, error_headers=error_headers, ) if as_kwargs: kwargs.update(parsed_args or {}) return await func(*args, **kwargs) else: new_args = args + (parsed_args,) return await func(*new_args, **kwargs) else: @functools.wraps(func) def wrapper(*args, **kwargs): req_obj = req_ if not req_obj: req_obj = self.get_request_from_view_args(func, args, kwargs) parsed_args = yield from self.parse( argmap, req=req_obj, locations=locations, validate=validate, error_status_code=error_status_code, error_headers=error_headers, ) if as_kwargs: kwargs.update(parsed_args) return func(*args, **kwargs) else: new_args = args + (parsed_args,) return func(*new_args, **kwargs) return wrapper return decorator
Decorator that injects parsed arguments into a view function or method. Receives the same arguments as `webargs.core.Parser.use_args`.
26,656
def is_now(s, dt=None):
    if dt is None:
        dt = datetime.now()
    minute, hour, dom, month, dow = s.split()
    weekday = dt.isoweekday()
    return _parse_arg(minute, dt.minute) \
        and _parse_arg(hour, dt.hour) \
        and _parse_arg(dom, dt.day) \
        and _parse_arg(month, dt.month) \
        and _parse_arg(dow, 0 if weekday == 7 else weekday, True)
A very simple cron-like parser to determine, if (cron-like) string is valid for this date and time. @input: s = cron-like string (minute, hour, day of month, month, day of week) dt = datetime to use as reference time, defaults to now @output: boolean of result
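A quick usage sketch, assuming standard cron semantics for _parse_arg; the field order is the one the docstring gives (minute, hour, day of month, month, day of week):

    from datetime import datetime
    is_now('* 14 1 * *', datetime(2021, 6, 1, 14, 30))  # True: any minute of 14:xx on the 1st
    is_now('0 9 * * *', datetime(2021, 6, 1, 14, 30))   # False: matches only at 09:00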
26,657
def determine_chan_detect_threshold(kal_out):
    channel_detect_threshold = ""
    while channel_detect_threshold == "":
        for line in kal_out.splitlines():
            if "channel detect threshold: " in line:
                channel_detect_threshold = str(line.split()[-1])
        if channel_detect_threshold == "":
            print("Unable to parse sample rate")
            channel_detect_threshold = None
    return channel_detect_threshold
Return channel detect threshold from kal output.
26,658
def get_all(limit=''):
    # The default and the comparison literals are restored from the
    # docstring's limit values ('upstart', 'sysvinit'); the stray '***' in
    # the sample was docstring residue.
    limit = limit.lower()
    if limit == 'upstart':
        return sorted(_upstart_services())
    elif limit == 'sysvinit':
        return sorted(_sysv_services())
    else:
        return sorted(_sysv_services() + _upstart_services())
Return all installed services. Use the ``limit`` param to restrict results to services of that type. CLI Example: .. code-block:: bash salt '*' service.get_all salt '*' service.get_all limit=upstart salt '*' service.get_all limit=sysvinit
26,659
def query(self):
    # 'fstat' is implied by the docstring; the remaining two command-line
    # literals and the dict key were stripped and are left elided.
    self._p4dict = self._connection.run(
        ['fstat', ..., ..., self._p4dict[...]])[0]
    self._head = HeadRevision(self._p4dict)
    self._filename = self.depotFile
Runs an fstat for this file and repopulates the data
26,660
def paste_mashes(sketches, pasted_mash, force=False):
    # ['mash', 'paste', ...] is implied by the docstring ("Combine mash files
    # into single sketch"); the removal command and the rsplit separator were
    # stripped and are assumptions.
    if os.path.isfile(pasted_mash):
        if force:
            subprocess.Popen(['rm', pasted_mash]).wait()
        else:
            return
    pasted_mash = pasted_mash.rsplit('.', 1)[0]
    mash_cmd = ['mash', 'paste', pasted_mash]
    mash_cmd.extend(sketches)
    process = subprocess.Popen(mash_cmd)
    process.wait()
    return
Combine mash files into single sketch Input: sketches <list[str]> -- paths to sketch files pasted_mash <str> -- path to output mash file force <boolean> -- force overwrite of the output mash file
26,661
async def destroy_models(self, *models, destroy_storage=False):
    uuids = await self.model_uuids()
    models = [uuids[model] if model in uuids else model
              for model in models]
    model_facade = client.ModelManagerFacade.from_connection(
        self.connection())
    # The stripped literals form a pluralized debug message; this
    # reconstruction is an assumption.
    log.debug(
        'Destroying model%s %s',
        '' if len(models) == 1 else 's',
        ', '.join(models))
    if model_facade.version >= 5:
        params = [
            client.DestroyModelParams(model_tag=tag.model(model),
                                      destroy_storage=destroy_storage)
            for model in models]
    else:
        params = [client.Entity(tag.model(model)) for model in models]
    await model_facade.DestroyModels(params)
Destroy one or more models. :param str *models: Names or UUIDs of models to destroy :param bool destroy_storage: Whether or not to destroy storage when destroying the models. Defaults to false.
26,662
def askopenfilename(**kwargs):
    try:
        from Tkinter import Tk
        import tkFileDialog as filedialog
    except ImportError:
        from tkinter import Tk, filedialog
    root = Tk()
    root.withdraw()
    root.update()
    filenames = filedialog.askopenfilename(**kwargs)
    root.destroy()
    return filenames
Return file name(s) from Tkinter's file open dialog.
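A small usage sketch; the kwargs pass straight through to Tk's dialog, so standard options like title and filetypes apply:

    path = askopenfilename(title='Pick a data file',
                           filetypes=[('CSV files', '*.csv'), ('All files', '*.*')])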
26,663
def make_nylas_blueprint( client_id=None, client_secret=None, scope="email", redirect_url=None, redirect_to=None, login_url=None, authorized_url=None, session_class=None, storage=None, ): nylas_bp = OAuth2ConsumerBlueprint( "nylas", __name__, client_id=client_id, client_secret=client_secret, scope=scope, base_url="https://api.nylas.com/", authorization_url="https://api.nylas.com/oauth/authorize", token_url="https://api.nylas.com/oauth/token", redirect_url=redirect_url, redirect_to=redirect_to, login_url=login_url, authorized_url=authorized_url, session_class=session_class, storage=storage, ) nylas_bp.from_config["client_id"] = "NYLAS_OAUTH_CLIENT_ID" nylas_bp.from_config["client_secret"] = "NYLAS_OAUTH_CLIENT_SECRET" @nylas_bp.before_app_request def set_applocal_session(): ctx = stack.top ctx.nylas_oauth = nylas_bp.session return nylas_bp
Make a blueprint for authenticating with Nylas using OAuth 2. This requires an API ID and API secret from Nylas. You should either pass them to this constructor, or make sure that your Flask application config defines them, using the variables :envvar:`NYLAS_OAUTH_CLIENT_ID` and :envvar:`NYLAS_OAUTH_CLIENT_SECRET`. Args: client_id (str): The client ID for your developer account on Nylas. client_secret (str): The client secret for your developer account on Nylas. scope (str, optional): comma-separated list of scopes for the OAuth token. Defaults to "email". redirect_url (str): the URL to redirect to after the authentication dance is complete redirect_to (str): if ``redirect_url`` is not defined, the name of the view to redirect to after the authentication dance is complete. The actual URL will be determined by :func:`flask.url_for` login_url (str, optional): the URL path for the ``login`` view. Defaults to ``/nylas`` authorized_url (str, optional): the URL path for the ``authorized`` view. Defaults to ``/nylas/authorized``. session_class (class, optional): The class to use for creating a Requests session. Defaults to :class:`~flask_dance.consumer.requests.OAuth2Session`. storage: A token storage class, or an instance of a token storage class, to use for this blueprint. Defaults to :class:`~flask_dance.consumer.storage.session.SessionStorage`. :rtype: :class:`~flask_dance.consumer.OAuth2ConsumerBlueprint` :returns: A :ref:`blueprint <flask:blueprints>` to attach to your Flask app.
26,664
def _prune_hit(hit, model):
    hit_id = hit["_id"]
    hit_index = hit["_index"]
    if model.objects.in_search_queryset(hit_id, index=hit_index):
        logger.debug(
            "%s with id=%s exists in the index queryset.", model, hit_id, hit_index
        )
        return None
    else:
        logger.debug(
            "%s with id=%s does not exist in the index queryset and will be pruned.",
            model,
            hit_id,
            hit_index,
        )
        return model(pk=hit_id)
Check whether a document should be pruned. This method uses the SearchDocumentManagerMixin.in_search_queryset method to determine whether a 'hit' (search document) should be pruned from an index, and if so it returns the hit as a Django object(id=hit_id). Args: hit: dict object the represents a document as returned from the scan_index function. (Contains object id and index.) model: the Django model (not object) from which the document was derived. Used to get the correct model manager and bulk action. Returns: an object of type model, with id=hit_id. NB this is not the object itself, which by definition may not exist in the underlying database, but a temporary object with the document id - which is enough to create a 'delete' action.
26,665
def _compute_error(self):
    sum_x = sum(self.x_transforms)
    err = sum((self.y_transform - sum_x) ** 2) / len(sum_x)
    return err
Compute unexplained error.
26,666
def transform(self, X):
    inverser_tranformer = self.dict_vectorizer_
    if self.feature_selection:
        inverser_tranformer = self.clone_dict_vectorizer_
    return inverser_tranformer.inverse_transform(
        self.transformer.transform(
            self.dict_vectorizer_.transform(X)))
:param X: features.
26,667
def setup(self, target=None, strict=False, minify=False, line_numbers=False,
          keep_lines=False, no_tco=False):
    if target is None:
        target = ""
    else:
        target = str(target).replace(".", "")
    if target in pseudo_targets:
        target = pseudo_targets[target]
    if target not in targets:
        # The ', ' join separator is an assumption for the stripped literal.
        raise CoconutException(
            "unsupported target Python version " + ascii(target),
            extra="supported targets are "
                  + ", ".join(ascii(t) for t in specific_targets)
                  + ", or leave blank for universal",
        )
    logger.log_vars("Compiler args:", locals())
    self.target, self.strict, self.minify, self.line_numbers, self.keep_lines, self.no_tco = (
        target, strict, minify, line_numbers, keep_lines, no_tco,
    )
Initializes parsing parameters.
26,668
def use_sequestered_assessment_part_view(self):
    # The container-view key and the provider-session lookup literals were
    # stripped from this sample and are left elided.
    self._containable_views[...] = SEQUESTERED
    self._get_sub_package_provider_session(..., ...)
    for session in self._provider_sessions:
        for provider_session_name, provider_session in self._provider_sessions[session].items():
            try:
                provider_session.use_sequestered_assessment_part_view()
            except AttributeError:
                pass
Pass through to provider AssessmentPartLookupSession.use_sequestered_assessment_part_view
26,669
def extend(self, item):
    # 'dict' and 'list' are assumptions for the stripped meta_type literals,
    # inferred from the docstring's list/object distinction.
    if self.meta_type == 'dict':
        raise AssertionError()
    if self.meta_type == 'list':
        self._list.extend(item)
    return
Extend list from object, if object is list.
26,670
def to_proper_radians(theta):
    # Note: Python's modulo maps out-of-range values into [0, pi), so the
    # result is not always in (-pi, pi] as the docstring suggests.
    if theta > pi or theta < -pi:
        theta = theta % pi
    return theta
Converts theta (radians) to be within -pi and +pi.
26,671
def GetSavename(default=None, **kwargs):
    # '--filename=%s', the generic '--%s=%s' option template, the
    # '--file-selection' mode, and the '|' split separator are assumptions
    # for the stripped zenity literals.
    args = []
    if default:
        args.append('--filename=%s' % default)
    for generic_args in kwargs_helper(kwargs):
        args.append('--%s=%s' % generic_args)
    p = run_zenity('--file-selection', *args)
    if p.wait() == 0:
        return p.stdout.read().strip().split('|')
Prompt the user for a filename to save as. This will raise a Zenity Save As Dialog. It will return the name to save a file as or None if the user hit cancel. default - The default name that should appear in the save as dialog. kwargs - Optional command line parameters for Zenity such as height, width, etc.
26,672
def move_saved_issue_data(self, issue, ns, other_ns):
    if isinstance(issue, int):
        issue_number = str(issue)
    elif isinstance(issue, basestring):
        issue_number = issue
    else:
        issue_number = issue.number
    issue_data_key = self._issue_data_key(ns)
    other_issue_data_key = self._issue_data_key(other_ns)
    issue_data = self.data.get(issue_data_key, {})
    other_issue_data = self.data.get(other_issue_data_key, {})
    _id = issue_data.pop(issue_number, None)
    if _id:
        other_issue_data[issue_number] = _id
    self.data[other_issue_data_key] = other_issue_data
    self.data[issue_data_key] = issue_data
Moves an issue_data from one namespace to another.
26,673
def close(self):
    files = self.__dict__.get("files")
    for _key, value in iter_multi_items(files or ()):
        value.close()
Closes associated resources of this request object. This closes all file handles explicitly. You can also use the request object in a with statement which will automatically close it. .. versionadded:: 0.9
26,674
def generate_name_variations(name): def _update_name_variations_with_product(set_a, set_b): name_variations.update([ unidecode((names_variation[0] + separator + names_variation[1]).strip(.join(_LASTNAME_NON_LASTNAME_SEPARATORS))).lower() for names_variation in product(set_a, set_b) for separator in _LASTNAME_NON_LASTNAME_SEPARATORS ]) parsed_name = ParsedName.loads(name) if len(parsed_name) == 1: return [parsed_name.dumps().lower()] name_variations = set() non_lastnames = [ non_lastname for non_lastname in parsed_name.first_list + parsed_name.suffix_list if non_lastname ] if len(non_lastnames) > _NAMES_MAX_NUMBER_THRESHOLD or len(parsed_name.last_list) > _NAMES_MAX_NUMBER_THRESHOLD: LOGGER.error(, name) return [name] non_lastnames_variations = \ _generate_non_lastnames_variations(non_lastnames) lastnames_variations = _generate_lastnames_variations(parsed_name.last_list) _update_name_variations_with_product(lastnames_variations, non_lastnames_variations) _update_name_variations_with_product(non_lastnames_variations, lastnames_variations) return list(name_variations)
Generate name variations for a given name. Args: name (six.text_type): The name whose variations are to be generated. Returns: list: All the name variations for the given name. Notes: Uses `unidecode` for doing unicode characters transliteration to ASCII ones. This was chosen so that we can map both full names of authors in HEP records and user's input to the same space and thus make exact queries work.
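An illustrative call; the exact output list depends on the separators and initials logic in the helpers above, so the values shown are indicative only:

variations = generate_name_variations(u'Ellis, John')
# returns a list containing variations such as:
# ['ellis john', 'ellis, john', 'ellis j', 'ellis, j', 'john ellis', 'j ellis', ...]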
26,675
def add_snippet_client(self, name, package):
    # Error messages reconstructed from context; the original literals were stripped.
    if name in self._snippet_clients:
        raise Error(
            self,
            'Name "%s" is already registered with package "%s", it cannot '
            'be used again.' % (name, self._snippet_clients[name].client.package))
    for snippet_name, client in self._snippet_clients.items():
        if package == client.package:
            raise Error(
                self,
                'Snippet package "%s" has already been loaded under name '
                '"%s".' % (package, snippet_name))
    client = snippet_client.SnippetClient(package=package, ad=self._device)
    client.start_app_and_connect()
    self._snippet_clients[name] = client
Adds a snippet client to the management. Args: name: string, the attribute name to which to attach the snippet client. E.g. `name='maps'` attaches the snippet client to `ad.maps`. package: string, the package name of the snippet apk to connect to. Raises: Error, if a duplicated name or package is passed in.
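A sketch of how this is typically reached in Mobly, via AndroidDevice.load_snippet, which delegates to a manager method like this one; the package name and RPC are placeholders:

ad.load_snippet(name='maps', package='com.example.snippets.maps')
ad.maps.doSomething()  # hypothetical RPC defined by the snippet apk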
26,676
def parse_proposal_data(self, proposal_data, dossier_pk):
    # String literals below were stripped from this sample; dict keys and log
    # messages are reconstructed from context and may differ from the original.
    proposal_display = '{} ({})'.format(
        proposal_data['title'].encode('utf-8'),
        proposal_data.get('reference', '').encode('utf-8'))
    if 'date' not in proposal_data.keys():
        logger.debug('Skipping proposal without a date: %s', proposal_data['title'])
        return
    changed = False
    try:
        proposal = Proposal.objects.get(title=proposal_data['title'])
    except Proposal.DoesNotExist:
        proposal = Proposal(title=proposal_data['title'])
        changed = True
    data_map = dict(
        title=proposal_data['title'],
        datetime=_parse_date(proposal_data['date']),
        dossier_id=dossier_pk,
        reference=proposal_data.get('reference'),
        kind=proposal_data.get('kind')
    )
    for position in ('For', 'Abstain', 'Against'):
        position_data = proposal_data.get(position, {})
        position_total = position_data.get('total', 0)
        if isinstance(position_total, str) and position_total.isdigit():
            position_total = int(position_total)
        data_map['total_%s' % position.lower()] = position_total
    for key, value in data_map.items():
        if value != getattr(proposal, key, None):
            setattr(proposal, key, value)
            changed = True
    if changed:
        proposal.save()
    responses = vote_pre_import.send(sender=self, vote_data=proposal_data)
    for receiver, response in responses:
        if response is False:
            logger.debug('Skipping proposal %s (vetoed by vote_pre_import)',
                         proposal_data.get('reference', proposal_data['title']))
            return
    positions = ['For', 'Abstain', 'Against']
    logger.info('Importing proposal {}'.format(proposal_display))
    for position in positions:
        for group_vote_data in proposal_data.get(position, {}).get('groups', {}):
            for vote_data in group_vote_data['votes']:
                if not isinstance(vote_data, dict):
                    logger.error('Skipping vote data %s in proposal %s',
                                 vote_data, proposal_data['title'])
                    continue
                representative_pk = self.get_representative(vote_data)
                if representative_pk is None:
                    logger.error('Could not find representative for %s', vote_data)
                    continue
                representative_name = vote_data.get('name', '')
                changed = False
                try:
                    vote = Vote.objects.get(
                        representative_id=representative_pk,
                        proposal_id=proposal.pk)
                except Vote.DoesNotExist:
                    vote = Vote(proposal_id=proposal.pk,
                                representative_id=representative_pk)
                    changed = True
                if vote.position != position.lower():
                    changed = True
                    vote.position = position.lower()
                if vote.representative_name != representative_name:
                    changed = True
                    vote.representative_name = representative_name
                if changed:
                    vote.save()
                    logger.debug('Saved vote %s by %s on %s (proposal %s) as %s',
                                 vote.pk, representative_pk,
                                 proposal_data['title'], proposal.pk, position)
    return proposal
Get or create a Proposal model from raw data.
26,677
def sync_sources(self):
    # Stripped string keys ('source', 'glyph_renderer', 'cds', etc.) are
    # reconstructed from context.
    get_sources = lambda x: (id(x.current_frame.data), x)
    filter_fn = lambda x: (x.shared_datasource and x.current_frame is not None and
                           not isinstance(x.current_frame.data, np.ndarray)
                           and 'source' in x.handles)
    data_sources = self.traverse(get_sources, [filter_fn])
    grouped_sources = groupby(sorted(data_sources, key=lambda x: x[0]), lambda x: x[0])
    shared_sources = []
    source_cols = {}
    plots = []
    for _, group in grouped_sources:
        group = list(group)
        if len(group) > 1:
            source_data = {}
            for _, plot in group:
                source_data.update(plot.handles['source'].data)
            new_source = ColumnDataSource(source_data)
            for _, plot in group:
                renderer = plot.handles.get('glyph_renderer')
                for callback in plot.callbacks:
                    callback.reset()
                if renderer is None:
                    continue
                elif 'data_source' in renderer.properties():
                    renderer.update(data_source=new_source)
                else:
                    renderer.update(source=new_source)
                if hasattr(renderer, 'view'):
                    renderer.view.update(source=new_source)
                plot.handles['source'] = plot.handles['cds'] = new_source
                plots.append(plot)
            shared_sources.append(new_source)
            source_cols[id(new_source)] = [c for c in new_source.data]
    for plot in plots:
        if plot.hooks and plot.finalize_hooks:
            self.param.warning(
                "Supply either hooks or finalize_hooks not both; "
                "using hooks and ignoring finalize_hooks.")
        hooks = plot.hooks or plot.finalize_hooks
        for hook in hooks:
            hook(plot, plot.current_frame)
        for callback in plot.callbacks:
            callback.initialize(plot_id=self.id)
    self.handles['shared_sources'] = shared_sources
    self.handles['source_cols'] = source_cols
Syncs data sources between Elements, which draw data from the same object.
26,678
def get_soup_response(self): if self.response is not None: if self.__response_soup is None: result = BeautifulSoup(self.response.text, "lxml") if self.decomposed: return result else: self.__response_soup = BeautifulSoup(self.response.text, "lxml") return self.__response_soup
Get the response as a cached BeautifulSoup container. Returns: obj: The BeautifulSoup container.
26,679
def center_eigenvalue_diff(mat):
    N = len(mat)
    evals = np.sort(la.eigvals(mat))
    # Use integer division so the indices stay ints under Python 3 as well.
    diff = np.abs(evals[N // 2] - evals[N // 2 - 1])
    return diff
Compute the eigvals of mat and then find the center eigval difference.
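A self-contained worked check: for a diagonal matrix the eigenvalues are the diagonal entries, so the two middle sorted eigenvalues of diag(1, 2, 4, 7) are 2 and 4:

import numpy as np
import numpy.linalg as la

mat = np.diag([1.0, 2.0, 4.0, 7.0])
evals = np.sort(la.eigvals(mat))
print(np.abs(evals[len(mat) // 2] - evals[len(mat) // 2 - 1]))  # -> 2.0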
26,680
def get_disk_cache(self, key=None):
    key = self.model.hash if key is None else key
    # Attribute name reconstructed; the original literal was stripped.
    if not getattr(self, 'disk_cache_location', False):
        self.init_disk_cache()
    disk_cache = shelve.open(self.disk_cache_location)
    self._results = disk_cache.get(key)
    disk_cache.close()
    return self._results
Return result in disk cache for key 'key' or None if not found.
26,681
def timethis(func):
    func_module, func_name = func.__module__, func.__name__

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = _time_perf_counter()
        r = func(*args, **kwargs)
        end = _time_perf_counter()
        # Format string reconstructed; the original literal was stripped.
        print('{}.{} : {}'.format(func_module, func_name, end - start))
        return r
    return wrapper
A decorator that times the wrapped function call and prints the elapsed time.
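A minimal usage sketch, assuming the decorator above is importable and that `_time_perf_counter` aliases `time.perf_counter`:

import time

_time_perf_counter = time.perf_counter  # alias used by the sample

@timethis
def slow_add(a, b):
    time.sleep(0.1)
    return a + b

slow_add(1, 2)  # prints the module, function name and elapsed seconds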
26,682
def dsa_sign(private_key, data, hash_algorithm):
    # Error message reconstructed; the original literal was stripped.
    if private_key.algorithm != 'dsa':
        raise ValueError('The key specified is not a DSA private key')
    return _sign(private_key, data, hash_algorithm)
Generates a DSA signature :param private_key: The PrivateKey to generate the signature with :param data: A byte string of the data the signature is for :param hash_algorithm: A unicode string of "md5", "sha1", "sha256", "sha384" or "sha512" :raises: ValueError - when any of the parameters contain an invalid value TypeError - when any of the parameters are of the wrong type OSError - when an error is returned by the OS crypto library :return: A byte string of the signature
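A usage sketch in the oscrypto style the function appears to come from; the key path is a placeholder:

from oscrypto import asymmetric

private_key = asymmetric.load_private_key('/path/to/dsa_private_key.pem')
signature = dsa_sign(private_key, b'message to sign', 'sha256')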
26,683
def copy_from_dict(self, attributes):
    for attribute_name, attribute_value in attributes.items():
        # Skip private attribute names.
        if attribute_name[0] == '_':
            continue
        setattr(self, attribute_name, attribute_value)
Copies the attribute container from a dictionary. Args: attributes (dict[str, object]): attribute values per name.
26,684
def ModifyInstance(self, ModifiedInstance, IncludeQualifiers=None, PropertyList=None, **extra):
    # Stripped string literals (method name, error messages) reconstructed from context.
    exc = None
    method_name = 'ModifyInstance'
    if self._operation_recorders:
        self.operation_recorder_reset()
        self.operation_recorder_stage_pywbem_args(
            method=method_name,
            ModifiedInstance=ModifiedInstance,
            IncludeQualifiers=IncludeQualifiers,
            PropertyList=PropertyList,
            **extra)
    try:
        stats = self.statistics.start_timer(method_name)
        if ModifiedInstance.path is None:
            raise ValueError(
                'ModifiedInstance parameter must have its path attribute set')
        if ModifiedInstance.path.classname is None:
            raise ValueError(
                'ModifiedInstance parameter must have the classname set in its path')
        if ModifiedInstance.classname is None:
            raise ValueError(
                'ModifiedInstance parameter must have its classname set')
        namespace = self._iparam_namespace_from_objectname(
            ModifiedInstance.path, 'ModifiedInstance.path')
        PropertyList = _iparam_propertylist(PropertyList)
        instance = ModifiedInstance.copy()
        instance.path.namespace = None
        instance.path.host = None
        self._imethodcall(
            method_name,
            namespace,
            ModifiedInstance=instance,
            IncludeQualifiers=IncludeQualifiers,
            PropertyList=PropertyList,
            has_return_value=False,
            **extra)
        return
    except (CIMXMLParseError, XMLParseError) as exce:
        exce.request_data = self.last_raw_request
        exce.response_data = self.last_raw_reply
        exc = exce
        raise
    except Exception as exce:
        exc = exce
        raise
    finally:
        self._last_operation_time = stats.stop_timer(
            self.last_request_len, self.last_reply_len,
            self.last_server_response_time, exc)
        if self._operation_recorders:
            self.operation_recorder_stage_result(None, exc)
Modify the property values of an instance. This method performs the ModifyInstance operation (see :term:`DSP0200`). See :ref:`WBEM operations` for a list of all methods performing such operations. The `PropertyList` parameter determines the set of properties that are designated to be modified (see its description for details). The properties provided in the `ModifiedInstance` parameter specify the new property values for the properties that are designated to be modified. Pywbem sends the property values provided in the `ModifiedInstance` parameter to the WBEM server as provided; it does not add any default values for properties not provided but designated to be modified, nor does it reduce the properties by those not designated to be modified. The properties that are actually modified by the WBEM server as a result of this operation depend on a number of things: * The WBEM server will reject modification requests for key properties and for properties that are not exposed by the creation class of the target instance. * The WBEM server may consider some properties as read-only, as a result of requirements at the CIM modeling level (schema or management profiles), or as a result of an implementation decision. Note that the WRITE qualifier on a property is not a safe indicator as to whether the property can actually be modified. It is an expression at the level of the CIM schema that may or may not be considered in DMTF management profiles or in implementations. Specifically, a qualifier value of True on a property does not guarantee modifiability of the property, and a value of False does not prevent modifiability. * The WBEM server may detect invalid new values or conflicts resulting from the new property values and may reject modification of a property for such reasons. If the WBEM server rejects modification of a property for any reason, it will cause this operation to fail and will not modify any property on the target instance. If this operation succeeds, all properties designated to be modified have their new values (see the description of the `ModifiedInstance` parameter for details on how the new values are determined). Note that properties (including properties not designated to be modified) may change their values as an indirect result of this operation. For example, a property that was not designated to be modified may be derived from another property that was modified, and may show a changed value due to that. If the operation succeeds, this method returns. Otherwise, this method raises an exception. Parameters: ModifiedInstance (:class:`~pywbem.CIMInstance`): A representation of the modified instance, also indicating its instance path. The `path` attribute of this object identifies the instance to be modified. Its `keybindings` attribute is required. If its `namespace` attribute is `None`, the default namespace of the connection will be used. Its `host` attribute will be ignored. The `classname` attribute of the instance path and the `classname` attribute of the instance must specify the same class name. The properties defined in this object specify the new property values (including `None` for NULL). If a property is designated to be modified but is not specified in this object, the WBEM server will use the default value of the property declaration if specified (including `None`), and otherwise may update the property to any value (including `None`). Typically, this object has been retrieved by other operations, such as :meth:`~pywbem.WBEMConnection.GetInstance`. 
IncludeQualifiers (:class:`py:bool`): Indicates that qualifiers are to be modified as specified in the `ModifiedInstance` parameter, as follows: * If `False`, qualifiers not modified. * If `True`, qualifiers are modified if the WBEM server implements support for this parameter. * If `None`, this parameter is not passed to the WBEM server, and causes the server-implemented default to be used. :term:`DSP0200` defines that the server-implemented default is `True`. This parameter has been deprecated in :term:`DSP0200`. Clients cannot rely on qualifiers to be modified. PropertyList (:term:`string` or :term:`py:iterable` of :term:`string`): This parameter defines which properties are designated to be modified. This parameter is an iterable specifying the names of the properties, or a string that specifies a single property name. In all cases, the property names are matched case insensitively. The specified properties are designated to be modified. Properties not specified are not designated to be modified. An empty iterable indicates that no properties are designated to be modified. If `None`, DSP0200 states that the properties with values different from the current values in the instance are designated to be modified, but for all practical purposes this is equivalent to stating that all properties exposed by the instance are designated to be modified. **extra : Additional keyword arguments are passed as additional operation parameters to the WBEM server. Note that :term:`DSP0200` does not define any additional parameters for this operation. Raises: Exceptions described in :class:`~pywbem.WBEMConnection`.
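A typical call pattern for this pywbem operation (sketch; the property name is a placeholder):

inst = conn.GetInstance(instance_path)   # conn is a pywbem.WBEMConnection
inst['Description'] = 'updated by script'
conn.ModifyInstance(inst, PropertyList=['Description'])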
26,685
def close(self, discard=False): if self.pool is not None: self.pool._put(self.connection, discard) self.pool = None conn, self.connection = self.connection, None return conn
Close this pool connection by releasing the underlying :attr:`connection` back to the :attr:`pool`.
26,686
def setup_environment(config: Dict[str, Any], environment_type: Environment) -> None:
    # The original nested config keys and message were stripped from this
    # sample; the keys below are illustrative placeholders only.
    if environment_type == Environment.PRODUCTION:
        config['app']['settings']['production'] = True
    config['environment'] = environment_type
    print(f'Environment set up as {environment_type}')
Sets the config depending on the environment type
26,687
def do_drag_data_received(self, drag_context, x, y, data, info, time): if not self.app.profile: return if info == TargetInfo.URI_LIST: uris = data.get_uris() source_paths = util.uris_to_paths(uris) if source_paths: self.app.upload_page.upload_files(source_paths, self.path)
Handle directories/files dragged in from other applications so they can be uploaded. Files are uploaded directly into the current directory (self.path). The drag-and-drop event is consumed here, so it will not trigger the drop handler on self.app.window.
26,688
def get_count_sql(self):
    sql = 'SELECT COUNT(*) FROM ' + self.tables
    if len(self.where_clauses) > 0:
        sql += ' WHERE '
        sql += ' AND '.join(self.where_clauses)
    return sql
Build a SELECT query which returns the count of items for an unlimited SELECT :return: A SQL SELECT query which returns the count of items for an unlimited query based on this SQLBuilder
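An illustrative run using a minimal stand-in that carries the two attributes the method reads (attribute names as in the sample):

class Builder:
    get_count_sql = get_count_sql  # reuse the method above
    tables = 'items i JOIN orders o ON o.item_id = i.id'
    where_clauses = ['o.status = ?', 'i.price > ?']

print(Builder().get_count_sql())
# -> SELECT COUNT(*) FROM items i JOIN orders o ON o.item_id = i.id WHERE o.status = ? AND i.price > ?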
26,689
def log_histogram(self, name, value, step=None):
    if isinstance(value, six.string_types):
        # Message reconstructed; the original literal was stripped.
        raise TypeError('log_histogram does not accept strings, got {}'
                        .format(type(value)))
    self._check_step(step)
    tf_name = self._ensure_tf_name(name)
    summary = self._histogram_summary(tf_name, value, step=step)
    self._log_summary(tf_name, summary, value, step=step)
Log a histogram for given name on given step. Args: name (str): name of the variable (it will be converted to a valid tensorflow summary name). value (tuple or list): either list of numbers to be summarized as a histogram, or a tuple of bin_edges and bincounts that directly define a histogram. step (int): non-negative integer used for visualization
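A usage sketch, assuming this method belongs to a tensorboard_logger-style Logger (package and log directory are assumptions):

import numpy as np
from tensorboard_logger import Logger  # assumed package

logger = Logger('runs/example')
values = np.random.normal(size=1000).tolist()
logger.log_histogram('weights/layer1', values, step=42)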
26,690
def login(self, user=None, password=None, restrict_login=None):
    if self.api_key:
        raise ValueError("cannot login when using an API key")
    if user:
        self.user = user
    if password:
        self.password = password
    if not self.user:
        raise ValueError("missing username")
    if not self.password:
        raise ValueError("missing password")
    if restrict_login:
        log.info("logging in with restrict_login=True")
    try:
        ret = self._login(self.user, self.password, restrict_login)
        self.password = ''  # clear the stored password after a successful login
        log.info("login successful for user=%s", self.user)
        return ret
    except Fault as e:
        raise BugzillaError("Login failed: %s" % str(e.faultString))
Attempt to log in using the given username and password. Subsequent method calls will use this username and password. Returns False if login fails, otherwise returns some kind of login info - typically either a numeric userid, or a dict of user info. If user is not set, the value of Bugzilla.user will be used. If *that* is not set, ValueError will be raised. If login fails, BugzillaError will be raised. The login session can be restricted to current user IP address with restrict_login argument. (Bugzilla 4.4+) This method will be called implicitly at the end of connect() if user and password are both set. So under most circumstances you won't need to call this yourself.
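A typical python-bugzilla flow (sketch; URL and credentials are placeholders):

from bugzilla import Bugzilla

bz = Bugzilla(url='https://bugzilla.example.com/xmlrpc.cgi')
try:
    bz.login(user='user@example.com', password='secret')
except BugzillaError as e:
    print('Login failed:', e)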
26,691
def plot_sens_center(self, frequency=2):
    # Stripped string literals (messages, axis labels, plot options) are
    # reconstructed from context; the labels are best guesses. Also fixes the
    # original reference to an undefined `volt_file` name.
    try:
        colors = np.loadtxt(self.volt_file, skiprows=1)
    except IOError:
        print('Could not load voltage file: {0}'.format(self.volt_file))
        exit()
    if len(colors.shape) > 1:
        print('Multi-frequency data found, selecting requested frequency column')
        colors = colors[:, frequency].flatten()
    colors = colors[~np.isnan(colors)]
    elem.load_elem_file(self.elem_file)
    elem.load_elec_file(self.elec_file)
    nr_elements = len(elem.element_type_list[0])
    elem.element_data = np.zeros((nr_elements, 1)) * np.nan
    elem.plt_opt.title = ''
    elem.plt_opt.reverse = True
    elem.plt_opt.cbmin = -1
    elem.plt_opt.cbmax = 1
    elem.plt_opt.cblabel = self.cblabel
    elem.plt_opt.xlabel = 'x (m)'
    elem.plt_opt.ylabel = 'z (m)'
    fig = plt.figure(figsize=(5, 7))
    ax = fig.add_subplot(111)
    ax, pm, cb = elem.plot_element_data_to_ax(0, ax, scale='linear', no_cb=True)
    ax.scatter(self.sens_centers[:, 0], self.sens_centers[:, 1],
               c=colors, s=100, edgecolors='k')
    cb_pos = mpl_get_cb_bound_next_to_plot(ax)
    ax1 = fig.add_axes(cb_pos, frame_on=True)
    cmap = mpl.cm.jet_r
    norm = mpl.colors.Normalize(vmin=np.nanmin(colors), vmax=np.nanmax(colors))
    mpl.colorbar.ColorbarBase(ax1, cmap=cmap, norm=norm, orientation='vertical')
    fig.savefig(self.output_file, bbox_inches='tight', dpi=300)
plot sensitivity center distribution for all configurations in config.dat. The centers of mass are colored by the data given in volt_file.
26,692
def list_contains(list_of_strings, substring, return_true_false_array=False): key_tf = [keyi.find(substring) != -1 for keyi in list_of_strings] if return_true_false_array: return key_tf keys_to_remove = list_of_strings[key_tf] return keys_to_remove
Get the strings in a list which contain the given substring.
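Note that the boolean-mask indexing (`list_of_strings[key_tf]`) implies a numpy array input, not a plain list. A worked example under that assumption:

import numpy as np

keys = np.array(['temp_air', 'temp_soil', 'wind_speed'])
print(list_contains(keys, 'temp'))
# -> array(['temp_air', 'temp_soil'], dtype='<U10')
print(list_contains(keys, 'temp', return_true_false_array=True))
# -> [True, True, False]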
26,693
def dist_mlipns(src, tar, threshold=0.25, max_mismatches=2): return MLIPNS().dist(src, tar, threshold, max_mismatches)
Return the MLIPNS distance between two strings. This is a wrapper for :py:meth:`MLIPNS.dist`. Parameters ---------- src : str Source string for comparison tar : str Target string for comparison threshold : float A number [0, 1] indicating the maximum similarity score, below which the strings are considered 'similar' (0.25 by default) max_mismatches : int A number indicating the allowable number of mismatches to remove before declaring two strings not similar (2 by default) Returns ------- float MLIPNS distance Examples -------- >>> dist_mlipns('cat', 'hat') 0.0 >>> dist_mlipns('Niall', 'Neil') 1.0 >>> dist_mlipns('aluminum', 'Catalan') 1.0 >>> dist_mlipns('ATCG', 'TAGC') 1.0
26,694
def ISBNValidator(raw_isbn):
    # Validation messages reconstructed; the original literals were stripped.
    isbn_to_check = raw_isbn.replace('-', '').replace(' ', '')
    if not isinstance(isbn_to_check, string_types):
        raise ValidationError(_(u'Invalid ISBN: not a string'))
    if len(isbn_to_check) != 10 and len(isbn_to_check) != 13:
        raise ValidationError(_(u'Invalid ISBN: wrong length'))
    if not isbn.is_valid(isbn_to_check):
        raise ValidationError(_(u'Invalid ISBN: failed checksum'))
    if isbn_to_check != isbn_to_check.upper():
        raise ValidationError(_(u'Invalid ISBN: only upper case allowed'))
    return True
Check string is a valid ISBN number
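Illustrative checks (the validator strips hyphens and spaces before validating):

ISBNValidator(u'978-0-306-40615-7')  # returns True (valid ISBN-13)
ISBNValidator(u'0-306-40615-2')      # returns True (valid ISBN-10)
# ISBNValidator(u'978-0-306-40615-0') would raise ValidationError (bad checksum)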
26,695
def _generate_AES_CBC_cipher(cek, iv): backend = default_backend() algorithm = AES(cek) mode = CBC(iv) return Cipher(algorithm, mode, backend)
Generates and returns an encryption cipher for AES CBC using the given cek and iv. :param bytes[] cek: The content encryption key for the cipher. :param bytes[] iv: The initialization vector for the cipher. :return: A cipher for encrypting in AES256 CBC. :rtype: ~cryptography.hazmat.primitives.ciphers.Cipher
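A minimal sketch calling the helper directly; key and iv sizes follow AES-256 CBC requirements (32-byte key, 16-byte iv), and the plaintext is exactly one block so no padding is needed:

import os

cek = os.urandom(32)
iv = os.urandom(16)
encryptor = _generate_AES_CBC_cipher(cek, iv).encryptor()
ciphertext = encryptor.update(b'16-byte-aligned!') + encryptor.finalize()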
26,696
def uri(host='localhost', port=5432, dbname='postgres', user='postgres', password=None):
    # Defaults and format strings reconstructed from the docstring; the
    # original literals were stripped.
    if port:
        host = '%s:%s' % (host, port)
    if password:
        return 'postgresql://%s:%s@%s/%s' % (user, password, host, dbname)
    return 'postgresql://%s@%s/%s' % (user, host, dbname)
Return a PostgreSQL connection URI for the specified values. :param str host: Host to connect to :param int port: Port to connect on :param str dbname: The database name :param str user: User to connect as :param str password: The password to use, None for no password :return str: The PostgreSQL connection URI
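Worked examples under the reconstructed defaults and URI format above:

print(uri())
# -> postgresql://postgres@localhost:5432/postgres
print(uri('db.example.com', 5432, 'app', 'svc', 'secret'))
# -> postgresql://svc:secret@db.example.com:5432/app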
26,697
def __cache_point(self, index): if self.__cache_points: if self.__points is None: self.__points = [] self.__points.append(index)
! @brief Store index points. @param[in] index (uint): Index point that should be stored.
26,698
def create(self, table_id, schema): from google.cloud.bigquery import SchemaField from google.cloud.bigquery import Table if self.exists(table_id): raise TableCreationError( "Table {0} already " "exists".format(table_id) ) if not _Dataset(self.project_id, credentials=self.credentials).exists( self.dataset_id ): _Dataset( self.project_id, credentials=self.credentials, location=self.location, ).create(self.dataset_id) table_ref = self.client.dataset(self.dataset_id).table(table_id) table = Table(table_ref) for field in schema["fields"]: if "mode" not in field: field["mode"] = "NULLABLE" table.schema = [ SchemaField.from_api_repr(field) for field in schema["fields"] ] try: self.client.create_table(table) except self.http_error as ex: self.process_http_error(ex)
Create a table in Google BigQuery given a table and schema Parameters ---------- table : str Name of table to be written schema : str Use the generate_bq_schema to generate your table schema from a dataframe.
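A usage sketch in the pandas-gbq style, assuming a _Table helper bound to a project and dataset; the project, dataset, and table names are placeholders:

schema = {'fields': [
    {'name': 'name', 'type': 'STRING'},
    {'name': 'count', 'type': 'INTEGER'},
]}
table = _Table('my-project', 'my_dataset')
table.create('my_table', schema)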
26,699
def ignore(mapping): if isinstance(mapping, Mapping): return AsDict(mapping) elif isinstance(mapping, list): return [ignore(item) for item in mapping] return mapping
Use ignore to prevent a mapping from being mapped to a namedtuple.
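Illustrative behavior, assuming AsDict wraps a mapping so that downstream conversion leaves it as a plain dict rather than a namedtuple:

ignore({'a': 1})        # -> AsDict({'a': 1})
ignore([{'a': 1}, 2])   # -> [AsDict({'a': 1}), 2]
ignore('plain string')  # -> 'plain string' (non-mapping values pass through)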