Unnamed: 0 (int64, 0-389k) | code (string, lengths 26-79.6k) | docstring (string, lengths 1-46.9k)
---|---|---|
9,900 | def _call_pyfftw(self, x, out, **kwargs):
kwargs.pop('axes', None)
kwargs.pop('halfcomplex', None)
kwargs.pop('normalise_idft', None)
if self.halfcomplex:
preproc = self._preprocess(x)
assert is_real_dtype(preproc.dtype)
else:
preproc = self._preprocess(x, out=out)
assert is_complex_floating_dtype(preproc.dtype)
self._fftw_plan = pyfftw_call(
preproc, out, direction=direction, halfcomplex=self.halfcomplex,
axes=self.axes, normalise_idft=False, **kwargs)
assert is_complex_floating_dtype(out.dtype)
out = self._postprocess(out, out=out)
assert is_complex_floating_dtype(out.dtype)
return out | Implement ``self(x[, out, **kwargs])`` for pyfftw back-end.
Parameters
----------
x : `numpy.ndarray`
Array representing the function to be transformed
out : `numpy.ndarray`
Array to which the output is written
planning_effort : {'estimate', 'measure', 'patient', 'exhaustive'}
Flag for the amount of effort put into finding an optimal
FFTW plan. See the `FFTW doc on planner flags
<http://www.fftw.org/fftw3_doc/Planner-Flags.html>`_.
planning_timelimit : float or ``None``, optional
Limit planning time to roughly this many seconds.
Default: ``None`` (no limit)
threads : int, optional
Number of threads to use. Default: 1
Returns
-------
out : `numpy.ndarray`
Result of the transform. The returned object is a reference
to the input parameter ``out``. |
9,901 | def get_full_pipe(sol, base=()):
pipe, i = DspPipe(), len(base)
for p in sol._pipe:
n, s = p[-1]
d = s.dsp
p = {: p}
if n in s._errors:
p[] = s._errors[n]
node_id = s.full_name + (n,)
assert base == node_id[:i], '%s != %s' % (node_id[:i], base)
n_id = node_id[i:]
n, path = d.get_node(n, node_attr=None)
if n[] == and in n:
try:
sub_sol = s.workflow.node[path[-1]][]
sp = get_full_pipe(sub_sol, base=node_id)
if sp:
p[] = sp
except KeyError:
pass
pipe[bypass(*n_id)] = p
return pipe | Returns the full pipe of a dispatch run.
:param sol:
A Solution object.
:type sol: schedula.utils.Solution
:param base:
Base node id.
:type base: tuple[str]
:return:
Full pipe of a dispatch run.
:rtype: DspPipe |
9,902 | def get_upstream_paths(self, port):
base_paths = {
'replay': self.REPLAY_API % port,
'cdx-server': self.CDX_API % port,
}
if self.recorder_path:
base_paths['record'] = self.recorder_path
return base_paths | Retrieve a dictionary containing the full URLs of the upstream apps
:param int port: The port used by the replay and cdx servers
:return: A dictionary containing the upstream paths (replay, cdx-server, record [if enabled])
:rtype: dict[str, str] |
9,903 | def _cromwell_debug(metadata):
def get_failed_calls(cur, key=None):
if key is None: key = []
out = []
if isinstance(cur, dict) and "failures" in cur and "callRoot" in cur:
out.append((key, cur))
elif isinstance(cur, dict):
for k, v in cur.items():
out.extend(get_failed_calls(v, key + [k]))
elif isinstance(cur, (list, tuple)):
for i, v in enumerate(cur):
out.extend(get_failed_calls(v, key + [i]))
return out
print("Failed bcbio Cromwell run")
print("-------------------------")
for fail_k, fail_call in get_failed_calls(metadata["calls"]):
root_dir = os.path.join("cromwell_work", os.path.relpath(fail_call["callRoot"]))
print("Failure in step: %s" % ".".join([str(x) for x in fail_k]))
print(" bcbio log file : %s" % os.path.join(root_dir, "execution", "log", "bcbio-nextgen-debug.log"))
print(" bcbio commands file: %s" % os.path.join(root_dir, "execution", "log",
"bcbio-nextgen-commands.log"))
print(" Cromwell directory : %s" % root_dir)
print() | Format Cromwell failures to make debugging easier. |
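The nested `get_failed_calls` helper above is just a recursive walk over dicts and lists; a self-contained sketch of the same traversal (with an invented metadata snippet, not real Cromwell output) looks like this:

```python
# Minimal sketch of the recursive failure search; the metadata dict below is
# invented for illustration and is not real Cromwell output.
def find_failed(cur, key=None):
    key = key or []
    out = []
    if isinstance(cur, dict) and "failures" in cur and "callRoot" in cur:
        out.append((key, cur))            # a failed call node
    elif isinstance(cur, dict):
        for k, v in cur.items():
            out.extend(find_failed(v, key + [k]))
    elif isinstance(cur, (list, tuple)):
        for i, v in enumerate(cur):
            out.extend(find_failed(v, key + [i]))
    return out

metadata = {"calls": {"wf.align": [{"failures": ["err"], "callRoot": "/tmp/wf/call-align"}]}}
print(find_failed(metadata["calls"]))
# [(['wf.align', 0], {'failures': ['err'], 'callRoot': '/tmp/wf/call-align'})]
```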
9,904 | def credentials_loader(self, in_credentials: str = "client_secrets.json") -> dict:
accepted_extensions = (".ini", ".json")
if not path.isfile(in_credentials):
raise IOError("Credentials file doesninstalled'
auth_settings = in_auth.get("installed")
out_auth = {
"auth_mode": "user",
"client_id": auth_settings.get("client_id"),
"client_secret": auth_settings.get("client_secret"),
"scopes": auth_settings.get("scopes", ["resources:read"]),
"uri_auth": auth_settings.get("auth_uri"),
"uri_token": auth_settings.get("token_uri"),
"uri_base": self.get_url_base_from_url_token(
auth_settings.get("token_uri")
),
"uri_redirect": auth_settings.get("redirect_uris", None),
}
else:
ini_parser = ConfigParser()
ini_parser.read(in_credentials)
if "auth" in ini_parser._sections:
auth_settings = ini_parser["auth"]
else:
raise ValueError(
"Input INI structure is not as expected."
" Section of credentials must be named: auth"
)
out_auth = {
"auth_mode": auth_settings.get("CLIENT_TYPE"),
"client_id": auth_settings.get("CLIENT_ID"),
"client_secret": auth_settings.get("CLIENT_SECRET"),
"uri_auth": auth_settings.get("URI_AUTH"),
"uri_token": auth_settings.get("URI_TOKEN"),
"uri_base": self.get_url_base_from_url_token(
auth_settings.get("URI_TOKEN")
),
"uri_redirect": auth_settings.get("URI_REDIRECT"),
}
return out_auth | Loads API credentials from a file, JSON or INI.
:param str in_credentials: path to the credentials file. By default,
look for a client_secrets.json file. |
9,905 | def addItem(self, item, message=None):
if message is None:
message = % item.path
try:
v = Version.new(repo=self)
v.addItem(item)
v.save(message)
except VersionError, e:
raise RepoError(e) | add a new Item class object |
9,906 | def elixir_decode(elixir_filename):
import re, pyfits
parts_RE=re.compile(r)
dataset_name = parts_RE.findall(elixir_filename)
if not dataset_name or len(dataset_name)<5 :
raise ValueError(
% elixir_filename )
comments={: ,
: ,
: ,
: ,
: ,
:
}
keywords={}
keywords[]=elixir_filename
keywords[]=dataset_name[0]
keywords[]=dataset_name[1]
keywords[]=None
keywords[]=None
if re.match(r,dataset_name[2]):
keyword[]=int(dataset_name[2])
else:
keyword[]=dataset_name[2]
keywords[]=dataset_name[3]
keywords[]=dataset_name[4]
header=pyfits.Header()
for keyword in keywords.keys():
if keywords[keyword]:
header.update(keyword,keywords[keyword],comment=comment[keyword])
return header | Takes an elixir style file name and decodes it's content.
Values returned as a dictionary. Elixir filenames have the format
RUNID.TYPE.FILTER/EXPTIME.CHIPID.VERSION.fits |
9,907 | def wrap(self, stream, name=None, filename=None):
for lineno, token, value in stream:
if token in ignored_tokens:
continue
elif token == 'linestatement_begin':
token = 'block_begin'
elif token == 'linestatement_end':
token = 'block_end'
elif token in ('raw_begin', 'raw_end'):
continue
elif token == 'data':
value = self._normalize_newlines(value)
elif token == 'keyword':
token = value
elif token == 'name':
value = str(value)
if check_ident and not value.isidentifier():
raise TemplateSyntaxError(
'Invalid character in identifier',
lineno, name, filename)
elif token == 'string':
try:
value = self._normalize_newlines(value[1:-1]) \
.encode('ascii', 'backslashreplace') \
.decode('unicode-escape')
except Exception as e:
msg = str(e).split(':')[-1].strip()
raise TemplateSyntaxError(msg, lineno, name, filename)
elif token == 'integer':
value = int(value)
elif token == 'float':
value = float(value)
elif token == 'operator':
token = operators[value]
yield Token(lineno, token, value) | This is called with the stream as returned by `tokenize` and wraps
every token in a :class:`Token` and converts the value. |
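As a rough illustration of what `wrap` does, here is a stripped-down, standalone sketch that converts raw `(lineno, token, value)` triples into typed tokens; the token names and conversions are simplified and are not Jinja2's full set:

```python
# Simplified sketch of the wrap() idea: turn raw (lineno, token, value)
# triples into typed Token objects. Token names here are illustrative only.
from collections import namedtuple

Token = namedtuple("Token", "lineno type value")

def wrap(stream):
    for lineno, token, value in stream:
        if token == "integer":
            value = int(value)
        elif token == "float":
            value = float(value)
        elif token == "name":
            value = str(value)
        yield Token(lineno, token, value)

raw = [(1, "name", "x"), (1, "operator", "+"), (1, "integer", "2")]
print(list(wrap(raw)))
```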
9,908 | def print_http_nfc_lease_info(info):
print \
.format(info)
device_number = 1
if info.deviceUrl:
for device_url in info.deviceUrl:
print \
\
\
\
\
.format(device_url,
device_number)
if not device_url.targetId:
print "No targetId found for this device"
print "Device is not eligible for export. This could be a mounted iso or img of some sort"
print "It will NOT be downloaded\n"
device_number += 1
else:
print | Prints information about the lease,
such as the entity covered by the lease,
and HTTP URLs for up/downloading file backings.
:param info:
:type info: vim.HttpNfcLease.Info
:return: |
9,909 | def _brace_key(self, key):
if isinstance(key, six.integer_types):
t = str
key = t(key)
else:
t = type(key)
return t(u'{') + key + t(u'}') | key: 'x' -> '{x}' |
9,910 | def clean(cls, path):
for pth in os.listdir(path):
pth = os.path.abspath(os.path.join(path, pth))
if os.path.isdir(pth):
logger.debug( % pth)
shutil.rmtree(pth)
else:
logger.debug( % pth)
os.remove(pth) | Clean up all the files in a provided path |
9,911 | def _apply_sub_frames(cls, documents, subs):
for path, projection in subs.items():
child_document = document
keys = cls._path_to_keys(path)
for key in keys[:-1]:
child_document = child_document[key]
child_document[keys[-1]] = value
if projection:
sub._apply_projection(raw_subs, projection) | Convert embedded documents to sub-frames for one or more documents |
9,912 | def _getKeyForUrl(url, existing=None):
(keyName, bucketName))
elif existing is None:
pass
else:
assert False
if key is None:
key = bucket.new_key(keyName)
except:
with panic():
s3.close()
else:
return key | Extracts a key from a given s3:// URL. On return, but not on exceptions, this method
leaks an S3Connection object. The caller is responsible to close that by calling
key.bucket.connection.close().
:param bool existing: If True, key is expected to exist. If False, key is expected not to
exists and it will be created. If None, the key will be created if it doesn't exist.
:rtype: Key |
9,913 | def get_client_address(self):
return "amqps://{}:{}@{}.{}:5671/{}".format(
urllib.parse.quote_plus(self.policy),
urllib.parse.quote_plus(self.sas_key),
self.sb_name,
self.namespace_suffix,
self.eh_name) | Returns an auth token dictionary for making calls to eventhub
REST API.
:rtype: str |
9,914 | def sql_program_name_func(command):
args = command.split()
for prog in args:
if '=' not in prog:
return prog
return args[0] | Extract program name from `command`.
>>> sql_program_name_func('ls')
'ls'
>>> sql_program_name_func('git status')
'git'
>>> sql_program_name_func('EMACS=emacs make')
'make'
:type command: str |
9,915 | def get_replacement_method(method_to_patch, side_effect=UNDEFINED, rvalue=UNDEFINED, ignore=UNDEFINED, callback=UNDEFINED, context=UNDEFINED, subsequent_rvalue=UNDEFINED):
def patch_with(*args, **kwargs):
if side_effect != UNDEFINED:
return execute_side_effect(side_effect, args, kwargs)
if rvalue != UNDEFINED:
return rvalue
return cache(method_to_patch, args=args, kwargs=kwargs, ignore=ignore, call_stack=context.stack, callback=callback, subsequent_rvalue=subsequent_rvalue)
return patch_with | Returns the method to be applied in place of an original method. This method either executes a side effect, returns an rvalue, or implements caching in place of the method_to_patch
:param function method_to_patch: A reference to the method that will be patched.
:param mixed side_effect: The side effect to execute. Either a callable with the same parameters as the target, or an exception.
:param mixed rvalue: The value that should be immediately returned without executing the target.
:param caliendo.Ignore ignore: The parameters that should be ignored when determining cachekeys. These are typically the dynamic values such as datetime.datetime.now() or a setting from an environment specific file.
:param function callback: A pickleable callback to execute when the patched method is called and the cache is hit. (has to have been cached the first time).
:param caliendo.hooks.Context ctxt: The context this patch should be executed under. Generally reserved for internal use. The vast majority of use cases should leave this parameter alone.
:param mixed subsequent_rvalue: If passed; this will be the return value each time this method is run regardless of what is returned when it is initially cached. Caching for this method will be skipped. This is useful when the method returns something unpickleable but we still need to stub it out.
:rtype: function
:returns: The function to replace all references to method_to_patch with. |
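A minimal standalone sketch of the same closure pattern, with a plain passthrough standing in for caliendo's cache layer (the names below are illustrative, not caliendo's API):

```python
# Closure-based replacement: prefer a side effect, then a fixed return
# value, then fall through to the real call.
UNDEFINED = object()

def make_replacement(target, side_effect=UNDEFINED, rvalue=UNDEFINED):
    def patched(*args, **kwargs):
        if side_effect is not UNDEFINED:
            if isinstance(side_effect, Exception):
                raise side_effect
            return side_effect(*args, **kwargs)
        if rvalue is not UNDEFINED:
            return rvalue
        return target(*args, **kwargs)   # stand-in for the cached call
    return patched

double = lambda x: 2 * x
print(make_replacement(double, rvalue=42)(10))   # 42
print(make_replacement(double)(10))              # 20
```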
9,916 | def get_port(self, id_or_uri, port_id_or_uri):
uri = self._client.build_subresource_uri(id_or_uri, port_id_or_uri, "ports")
return self._client.get(uri) | Gets an interconnect port.
Args:
id_or_uri: Can be either the interconnect id or uri.
port_id_or_uri: The interconnect port id or uri.
Returns:
dict: The interconnect port. |
9,917 | def list():
infos = manager.get_all()
if not infos:
print("No known TensorBoard instances running.")
return
print("Known TensorBoard instances:")
for info in infos:
template = " - port {port}: {data_source} (started {delta} ago; pid {pid})"
print(template.format(
port=info.port,
data_source=manager.data_source_from_info(info),
delta=_time_delta_from_info(info),
pid=info.pid,
)) | Print a listing of known running TensorBoard instances.
TensorBoard instances that were killed uncleanly (e.g., with SIGKILL
or SIGQUIT) may appear in this list even if they are no longer
running. Conversely, this list may be missing some entries if your
operating system's temporary directory has been cleared since a
still-running TensorBoard instance started. |
9,918 | def make_config_file(guided=False):
config_path = _make_config_location(guided=guided)
config_data = make_config_data(guided=guided)
write_config_file(config_path, config_data) | Options: --auto, --guided, --manual
Places for the file: --inplace, --user |
9,919 | def draw(self):
self.screen.clear()
x, y = 1, 1
max_y, max_x = self.screen.getmaxyx()
max_rows = max_y - y
lines, current_line = self.get_lines()
scroll_top = getattr(self, 'scroll_top', 0)
if current_line <= scroll_top:
scroll_top = 0
elif current_line - scroll_top > max_rows:
scroll_top = current_line - max_rows
self.scroll_top = scroll_top
lines_to_draw = lines[scroll_top:scroll_top+max_rows]
for line in lines_to_draw:
if type(line) is tuple:
self.screen.addnstr(y, x, line[0], max_x-2, line[1])
else:
self.screen.addnstr(y, x, line, max_x-2)
y += 1
self.screen.refresh() | draw the curses ui on the screen, handle scroll if needed |
9,920 | def saveJSON(g, data, backup=False):
if not backup:
fname = filedialog.asksaveasfilename(
defaultextension=,
filetypes=[(, ), ],
initialdir=g.cpars[]
)
else:
fname = os.path.join(os.path.expanduser(), )
if not fname:
g.clog.warn()
return False
with open(fname, 'w') as of:
of.write(
json.dumps(data, sort_keys=True, indent=4,
separators=(',', ': '))
)
g.clog.info( + fname)
return True | Saves the current setup to disk.
g : hcam_drivers.globals.Container
Container with globals
data : dict
The current setup in JSON compatible dictionary format.
backup : bool
If we are saving a backup on close, don't prompt for filename |
9,921 | def addvFunc(self,solution,EndOfPrdvP):
self.makeEndOfPrdvFunc(EndOfPrdvP)
solution.vFunc = self.makevFunc(solution)
return solution | Creates the value function for this period and adds it to the solution.
Parameters
----------
solution : ConsumerSolution
The solution to this single period problem, likely including the
consumption function, marginal value function, etc.
EndOfPrdvP : np.array
Array of end-of-period marginal value of assets corresponding to the
asset values in self.aNrmNow.
Returns
-------
solution : ConsumerSolution
The single period solution passed as an input, but now with the
value function (defined over market resources m) as an attribute. |
9,922 | def build_kal_scan_channel_string(kal_bin, channel, args):
option_mapping = {"gain": "-g",
"device": "-d",
"error": "-e"}
base_string = "%s -v -c %s" % (kal_bin, channel)
base_string += options_string_builder(option_mapping, args)
return(base_string) | Return string for CLI invocation of kal, for channel scan. |
9,923 | def previous_song(self):
if self.current_song is None:
return self._get_good_song(base=-1, direction=-1)
if self.playback_mode == PlaybackMode.random:
previous_song = self._get_good_song(direction=-1)
else:
current_index = self._songs.index(self.current_song)
previous_song = self._get_good_song(base=current_index - 1, direction=-1)
return previous_song | previous song for player to play
NOTE: not the last played song |
9,924 | def finalize(self):
self.global_scope.close()
name_generator = NameGenerator(skip=self.reserved_keywords)
self.global_scope.build_remap_symbols(
name_generator,
children_only=not self.obfuscate_globals,
) | Finalize the run - build the name generator and use it to build
the remap symbol tables. |
9,925 | def _run_get_data_background(macs, queue, shared_data, bt_device):
run_flag = RunFlag()
def add_data(data):
if not shared_data['run_flag']:
run_flag.running = False
data[1]['time'] = datetime.utcnow().isoformat()
queue.put(data)
RuuviTagSensor.get_datas(add_data, macs, run_flag, bt_device) | Background process function for RuuviTag Sensors |
9,926 | def get_arctic_lib(connection_string, **kwargs):
m = CONNECTION_STR.match(connection_string)
if not m:
raise ValueError("connection string incorrectly formed: %s" % connection_string)
library, host = m.group(1), m.group(2)
return _get_arctic(host, **kwargs)[library] | Returns a mongo library for the given connection string
Parameters
---------
connection_string: `str`
Format must be one of the following:
library@trading for known mongo servers
library@hostname:port
Returns:
--------
Arctic library |
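A standalone sketch of the `library@host[:port]` parsing this function relies on; the regex is an approximation, not arctic's actual `CONNECTION_STR` pattern:

```python
# Approximate "library@host[:port]" splitting; hypothetical names throughout.
import re

CONNECTION_STR = re.compile(r"^([^@]+)@([^@]+)$")

def split_connection_string(conn):
    m = CONNECTION_STR.match(conn)
    if not m:
        raise ValueError("connection string incorrectly formed: %s" % conn)
    return m.group(1), m.group(2)   # (library, host)

print(split_connection_string("NASDAQ.EOD@localhost:27017"))
# ('NASDAQ.EOD', 'localhost:27017')
```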
9,927 | def filter(self, **kwargs):
from sqlalchemy import or_
Statement = self.get_model('statement')
Tag = self.get_model('tag')
session = self.Session()
page_size = kwargs.pop('page_size', 1000)
order_by = kwargs.pop('order_by', None)
tags = kwargs.pop('tags', [])
exclude_text = kwargs.pop('exclude_text', None)
exclude_text_words = kwargs.pop('exclude_text_words', [])
persona_not_startswith = kwargs.pop('persona_not_startswith', None)
search_text_contains = kwargs.pop('search_text_contains', None)
if type(tags) == str:
tags = [tags]
if len(kwargs) == 0:
statements = session.query(Statement).filter()
else:
statements = session.query(Statement).filter_by(**kwargs)
if tags:
statements = statements.join(Statement.tags).filter(
Tag.name.in_(tags)
)
if exclude_text:
statements = statements.filter(
~Statement.text.in_(exclude_text)
)
if exclude_text_words:
or_word_query = [
Statement.text.ilike('%' + word + '%') for word in exclude_text_words
]
statements = statements.filter(
~or_(*or_word_query)
)
if persona_not_startswith:
statements = statements.filter(
~Statement.persona.startswith('bot:')
)
if search_text_contains:
or_query = [
Statement.search_text.contains(word) for word in search_text_contains.split(' ')
]
statements = statements.filter(
or_(*or_query)
)
if order_by:
if 'created_at' in order_by:
index = order_by.index('created_at')
order_by[index] = Statement.created_at.asc()
statements = statements.order_by(*order_by)
total_statements = statements.count()
for start_index in range(0, total_statements, page_size):
for statement in statements.slice(start_index, start_index + page_size):
yield self.model_to_object(statement)
session.close() | Returns a list of objects from the database.
The kwargs parameter can contain any number
of attributes. Only objects which contain all
listed attributes and in which all values match
for all listed attributes will be returned. |
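The same keyword-driven filtering and paging idea, sketched over plain dictionaries without SQLAlchemy (data and field names below are invented):

```python
# Filter records by keyword arguments and yield them page by page.
def filter_records(records, page_size=2, **kwargs):
    matches = [r for r in records
               if all(r.get(k) == v for k, v in kwargs.items())]
    for start in range(0, len(matches), page_size):
        for record in matches[start:start + page_size]:
            yield record

records = [
    {"text": "hi", "conversation": "a"},
    {"text": "yo", "conversation": "a"},
    {"text": "bye", "conversation": "b"},
]
print(list(filter_records(records, conversation="a")))
```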
9,928 | def print_chain_summary(self, stream=sys.stdout, indent=""):
stream.write("%sTotal files : %i\n" %
(indent, len(self.file_dict)))
stream.write("%s Input files : %i\n" %
(indent, len(self.chain_input_files)))
stream.write("%s Output files : %i\n" %
(indent, len(self.chain_output_files)))
stream.write("%s Internal files : %i\n" %
(indent, len(self.internal_files)))
stream.write("%s Temp files : %i\n" %
(indent, len(self.temp_files))) | Print a summary of the files in this file dict.
This version uses chain_input_files and chain_output_files to
count the input and output files. |
9,929 | def id_source(source, full=False):
if source not in source_ids:
return
if full:
return source_ids[source][1]
else:
return source_ids[source][0] | Returns the name of a website-scrapping function. |
9,930 | def check_path(path, create=False):
if not os.path.exists(path):
if create:
os.makedirs(path)
return os.path.exists(path)
else:
return False
return True | Check for a path on filesystem
:param path: str - path name
:param create: bool - create if do not exist
:return: bool - path exists |
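A small usage sketch, assuming the `check_path` function above is importable:

```python
# Create a missing sub-directory on demand.
import os, tempfile

with tempfile.TemporaryDirectory() as tmp:
    target = os.path.join(tmp, "cache", "images")
    print(check_path(target))                # False: does not exist yet
    print(check_path(target, create=True))   # True: created on demand
```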
9,931 | def create_page(self, **extra_kwargs):
with translation.override(self.default_language_code):
self.default_lang_name = dict(
self.languages)[self.default_language_code]
self.slug = self.get_slug(self.default_language_code,
self.default_lang_name)
assert self.slug != ""
page = None
parent = self.get_parent_page()
if parent is not None:
assert parent.publisher_is_draft == True, "Parent page %s must be a draft!" % parent
if self.delete_first:
if self.apphook_namespace is not None:
pages = Page.objects.filter(
application_namespace=self.apphook_namespace,
parent=parent,
)
else:
pages = Page.objects.filter(
title_set__slug=self.slug,
parent=parent,
)
log.debug("Delete %i pages...", pages.count())
pages.delete()
else:
if self.apphook_namespace is not None:
queryset = Page.objects.drafts()
queryset = queryset.filter(parent=parent)
try:
page = queryset.get(
application_namespace=self.apphook_namespace)
except Page.DoesNotExist:
pass
else:
log.debug("Use existing page: %s", page)
created = False
return page, created
else:
queryset = Title.objects.filter(
language=self.default_language_code)
queryset = queryset.filter(page__parent=parent)
try:
title = queryset.filter(slug=self.slug).first()
except Title.DoesNotExist:
pass
else:
if title is not None:
log.debug("Use page from title with slug %r",
self.slug)
page = title.page
created = False
if page is None:
with translation.override(self.default_language_code):
page = create_page(
title=self.get_title(self.default_language_code,
self.default_lang_name),
menu_title=self.get_menu_title(self.default_language_code,
self.default_lang_name),
template=self.get_template(self.default_language_code,
self.default_lang_name),
language=self.default_language_code,
slug=self.slug,
published=False,
parent=parent,
in_navigation=self.in_navigation,
apphook=self.apphook,
apphook_namespace=self.apphook_namespace,
**extra_kwargs)
created = True
log.debug("Page created in %s: %s", self.default_lang_name,
page)
assert page.publisher_is_draft == True
return page, created | Create page (and page title) in default language
extra_kwargs will be pass to cms.api.create_page()
e.g.:
extra_kwargs={
"soft_root": True,
"reverse_id": my_reverse_id,
} |
9,932 | def rm_watch(self, wd, rec=False, quiet=True):
lwd = self.__format_param(wd)
if rec:
lwd = self.__get_sub_rec(lwd)
ret_ = {}
for awd in lwd:
wd_ = self._inotify_wrapper.inotify_rm_watch(self._fd, awd)
if wd_ < 0:
ret_[awd] = False
err = ( % \
(awd, self._inotify_wrapper.str_errno()))
if quiet:
log.error(err)
continue
raise WatchManagerError(err, ret_)
if awd in self._wmd:
del self._wmd[awd]
ret_[awd] = True
log.debug(, awd, self.get_path(awd))
return ret_ | Removes watch(s).
@param wd: Watch Descriptor of the file or directory to unwatch.
Also accepts a list of WDs.
@type wd: int or list of int.
@param rec: Recursively removes watches on every already watched
subdirectories and subfiles.
@type rec: bool
@param quiet: If False raises a WatchManagerError exception on
error. See example not_quiet.py
@type quiet: bool
@return: dict of watch descriptors associated to booleans values.
True if the corresponding wd has been successfully
removed, False otherwise.
@rtype: dict of {int: bool} |
9,933 | def updateData(self, axeskey, x, y):
if axeskey == 'stim':
self.stimPlot.setData(x,y)
ranges = self.viewRange()
self.rangeChange(self, ranges)
if axeskey == 'response':
self.clearTraces()
if self._traceUnit == :
y = y * self._ampScalar
if self.zeroAction.isChecked():
start_avg = np.mean(y[5:25])
y = y - start_avg
self.tracePlot.setData(x,y*self._polarity) | Replaces the currently displayed data
:param axeskey: name of data plot to update. Valid options are 'stim' or 'response'
:type axeskey: str
:param x: index values associated with y to plot
:type x: numpy.ndarray
:param y: values to plot at x
:type y: numpy.ndarray |
9,934 | def _get_timestamp_tuple(ts):
if isinstance(ts, datetime.datetime):
return Timestamp.from_datetime(ts).tuple()
elif isinstance(ts, Timestamp):
return ts
raise TypeError() | Internal method to get a timestamp tuple from a value.
Handles input being a datetime or a Timestamp. |
9,935 | def _process_uniprot_ids(self, limit=None):
LOG.info("Processing UniProt IDs")
if self.test_mode:
graph = self.testgraph
else:
graph = self.graph
line_counter = 0
model = Model(graph)
geno = Genotype(graph)
raw = .join((self.rawdir, self.files[][]))
with open(raw, , encoding="iso-8859-1") as csvfile:
filereader = csv.reader(csvfile, delimiter=, quotechar=)
for row in filereader:
line_counter += 1
(gene_id, gene_so_id, gene_symbol, uniprot_id
) = row
if self.test_mode and gene_id not in self.test_ids[]:
continue
gene_id = 'ZFIN:' + gene_id.strip()
uniprot_id = 'UniProtKB:' + uniprot_id.strip()
geno.addGene(gene_id, gene_symbol)
model.addIndividualToGraph(
uniprot_id, None, self.globaltt[])
graph.addTriple(
gene_id, self.globaltt[], uniprot_id)
if not self.test_mode and limit is not None and line_counter > limit:
break
LOG.info("Done with UniProt IDs")
return | This method processes the mappings from ZFIN gene IDs to UniProtKB IDs.
Triples created:
<zfin_gene_id> a class
<zfin_gene_id> rdfs:label gene_symbol
<uniprot_id> is an Individual
<uniprot_id> has type <polypeptide>
<zfin_gene_id> has_gene_product <uniprot_id>
:param limit:
:return: |
9,936 | def get_net(req):
try:
nxt, prev = map(
int, (req.GET.get(, 0), req.GET.get(, 0))
)
net = nxt - prev
except Exception:
net = 0
return net | Get the net of any 'next' and 'prev' querystrings. |
9,937 | def calcAspectRatioFromCorners(corners, in_plane=False):
q = corners
l0 = [q[0, 0], q[0, 1], q[1, 0], q[1, 1]]
l1 = [q[0, 0], q[0, 1], q[-1, 0], q[-1, 1]]
l2 = [q[2, 0], q[2, 1], q[3, 0], q[3, 1]]
l3 = [q[2, 0], q[2, 1], q[1, 0], q[1, 1]]
a1 = line.length(l0) / line.length(l1)
a2 = line.length(l2) / line.length(l3)
if in_plane:
if (abs(0.5 * np.pi - abs(line.angle2(l0, l1)))
< abs(0.5 * np.pi - abs(line.angle2(l2, l3)))):
return a1
else:
return a2
return 0.5 * (a1 + a2) | simple and better alg. than below
in_plane -> whether object has no tilt, but only rotation and translation |
9,938 | def validate_sceneInfo(self):
if self.sceneInfo.prefix not in self.__satellitesMap:
logger.error(
% (self.sceneInfo.name, self.sceneInfo.prefix))
raise WrongSceneNameError(
% (self.sceneInfo.name, self.sceneInfo.prefix)) | Check scene name and whether remote file exists. Raises
WrongSceneNameError if the scene name is wrong. |
9,939 | def in_scope(self, exclude_scopes=None, include_scopes=None):
if include_scopes is not None and not isinstance(include_scopes, Scope):
raise ValueError(.format(
type(include_scopes)
))
if exclude_scopes is not None and not isinstance(exclude_scopes, Scope):
raise ValueError(.format(
type(exclude_scopes)
))
if exclude_scopes and any(s in exclude_scopes for s in self):
return False
if include_scopes and not any(s in include_scopes for s in self):
return False
return True | Whether this scope should be included by the given inclusion and exclusion rules.
:param Scope exclude_scopes: An optional Scope containing scope names to exclude. None (the
default value) indicates that no filtering should be done based on exclude_scopes.
:param Scope include_scopes: An optional Scope containing scope names to include. None (the
default value) indicates that no filtering should be done based on include_scopes.
:return: True if none of the input scopes are in `exclude_scopes`, and either (a) no include
scopes are provided, or (b) at least one input scope is included in the `include_scopes` list.
:rtype: bool |
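The include/exclude rule reduces to two set checks; a standalone sketch (scope names invented):

```python
# Reject if any scope is excluded; require overlap with the include set
# when one is given.
def in_scope(scopes, exclude=None, include=None):
    scopes = set(scopes)
    if exclude and scopes & set(exclude):
        return False
    if include and not scopes & set(include):
        return False
    return True

print(in_scope({"compile", "test"}, exclude={"lint"}, include={"test"}))  # True
print(in_scope({"compile"}, include={"test"}))                            # False
```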
9,940 | def current(self):
if not has_request_context():
return self.no_req_ctx_user_stack.top
user_stack = getattr(_request_ctx_stack.top, 'user_stack', None)
if user_stack and user_stack.top:
return user_stack.top
return _get_user() | Returns the current user |
9,941 | def active_thresholds_value_maps(keywords, exposure_key):
if in keywords:
if keywords[] == layer_mode_continuous[]:
return keywords[]
else:
return keywords[]
if keywords[] == layer_mode_continuous[]:
classifications = keywords[].get(exposure_key)
else:
classifications = keywords[].get(exposure_key)
if classifications is None:
return None
for value in list(classifications.values()):
if value[]:
return value[]
return None | Helper to retrieve active value maps or thresholds for an exposure.
:param keywords: Hazard layer keywords.
:type keywords: dict
:param exposure_key: The exposure key.
:type exposure_key: str
:returns: Active thresholds or value maps.
:rtype: dict |
9,942 | def extract_audioclip_samples(d) -> dict:
ret = {}
if not d.data:
return {}
try:
from fsb5 import FSB5
except ImportError as e:
raise RuntimeError("python-fsb5 is required to extract AudioClip")
af = FSB5(d.data)
for i, sample in enumerate(af.samples):
if i > 0:
filename = "%s-%i.%s" % (d.name, i, af.get_sample_extension())
else:
filename = "%s.%s" % (d.name, af.get_sample_extension())
try:
sample = af.rebuild_sample(sample)
except ValueError as e:
print("WARNING: Could not extract %r (%s)" % (d, e))
continue
ret[filename] = sample
return ret | Extract all the sample data from an AudioClip and
convert it from FSB5 if needed. |
9,943 | def iter_variants(self):
for idx, row in self.bim.iterrows():
yield Variant(
row.name, CHROM_INT_TO_STR[row.chrom], row.pos,
[row.a1, row.a2]
) | Iterate over marker information. |
9,944 | def add_contig_to_header(line, ref_file):
if line.startswith("##fileformat=VCF"):
out = [line]
for region in ref.file_contigs(ref_file):
out.append("##contig=<ID=%s,length=%s>" % (region.name, region.size))
return "\n".join(out)
else:
return line | Streaming target to add contigs to a VCF file header. |
9,945 | def ecg_hrv(rpeaks=None, rri=None, sampling_rate=1000, hrv_features=["time", "frequency", "nonlinear"]):
if rpeaks is None and rri is None:
raise ValueError("Either rpeaks or RRIs needs to be given.")
if rpeaks is not None and rri is not None:
raise ValueError("Either rpeaks or RRIs should be given but not both.")
hrv = {}
if rpeaks is not None:
RRis = np.diff(rpeaks)
else:
RRis = rri
RRis = RRis/sampling_rate
RRis = RRis.astype(float)
for index, rr in enumerate(RRis):
if RRis[index] < RRis[index-1]*0.75:
RRis[index] = np.nan
if RRis[index] > RRis[index-1]*1.25:
RRis[index] = np.nan
RRis = pd.Series(RRis)
RRis[RRis < 0.6] = np.nan
RRis[RRis > 1.3] = np.nan
if len(RRis) <= 1:
print("NeuroKit Warning: ecg_hrv(): Not enough R peaks to compute HRV :/")
return(hrv)
hrv["n_Artifacts"] = pd.isnull(RRis).sum()/len(RRis)
artifacts_indices = RRis.index[RRis.isnull()]
RRis = RRis.drop(artifacts_indices)
RRis = RRis*1000
hrv["RR_Intervals"] = RRis
if len(RRis) <= 1:
print("NeuroKit Warning: ecg_hrv(): Not enough normal R peaks to compute HRV :/")
return(hrv)
if "time" in hrv_features:
hrv["RMSSD"] = np.sqrt(np.mean(np.diff(RRis) ** 2))
hrv["meanNN"] = np.mean(RRis)
hrv["sdNN"] = np.std(RRis, ddof=1)
hrv["cvNN"] = hrv["sdNN"] / hrv["meanNN"]
hrv["CVSD"] = hrv["RMSSD"] / hrv["meanNN"]
hrv["medianNN"] = np.median(abs(RRis))
hrv["madNN"] = mad(RRis, constant=1)
hrv["mcvNN"] = hrv["madNN"] / hrv["medianNN"]
nn50 = sum(abs(np.diff(RRis)) > 50)
nn20 = sum(abs(np.diff(RRis)) > 20)
hrv["pNN50"] = nn50 / len(RRis) * 100
hrv["pNN20"] = nn20 / len(RRis) * 100
if "frequency" in hrv_features:
beats_times = rpeaks[1:].copy()
beats_times -= list(beats_times)[0]
beats_times = np.delete(list(beats_times), artifacts_indices)
try:
RRi = interpolate(RRis, beats_times, sampling_rate)
except TypeError:
print("NeuroKit Warning: ecg_hrv(): Sequence too short to compute interpolation. Will skip many features.")
return(hrv)
hrv["df"] = RRi.to_frame("ECG_RR_Interval")
try:
bin_number = 32
for bin_number_current in range(2, 50):
bin_width = np.diff(np.histogram(RRi, bins=bin_number_current, density=True)[1])[0]
if abs(8 - bin_width) < abs(8 - np.diff(np.histogram(RRi, bins=bin_number, density=True)[1])[0]):
bin_number = bin_number_current
hrv["Triang"] = len(RRis)/np.max(np.histogram(RRi, bins=bin_number, density=True)[0])
hrv["Shannon_h"] = complexity_entropy_shannon(np.histogram(RRi, bins=bin_number, density=True)[0])
except ValueError:
hrv["Triang"] = np.nan
hrv["Shannon_h"] = np.nan
freq_bands = {
"ULF": [0.0001, 0.0033],
"VLF": [0.0033, 0.04],
"LF": [0.04, 0.15],
"HF": [0.15, 0.40],
"VHF": [0.4, 0.5]}
freq_powers = {}
for band in freq_bands:
freqs = freq_bands[band]
filtered, sampling_rate, params = biosppy.signals.tools.filter_signal(signal=RRi, ftype='butter', band='bandpass', order=1, frequency=freqs, sampling_rate=sampling_rate)
amplitude, phase = biosppy.signals.tools.analytic_signal(filtered)
freq_powers["ECG_HRV_" + band] = amplitude
freq_powers = pd.DataFrame.from_dict(freq_powers)
freq_powers.index = hrv["df"].index
hrv["df"] = pd.concat([hrv["df"], freq_powers], axis=1)
power, freq = mne.time_frequency.psd_array_multitaper(RRi, sfreq=sampling_rate, fmin=0, fmax=0.5, adaptive=False, normalization=)
def power_in_band(power, freq, band):
power = np.trapz(y=power[(freq >= band[0]) & (freq < band[1])], x=freq[(freq >= band[0]) & (freq < band[1])])
return(power)
hrv["ULF"] = power_in_band(power, freq, freq_bands["ULF"])
hrv["VLF"] = power_in_band(power, freq, freq_bands["VLF"])
hrv["LF"] = power_in_band(power, freq, freq_bands["LF"])
hrv["HF"] = power_in_band(power, freq, freq_bands["HF"])
hrv["VHF"] = power_in_band(power, freq, freq_bands["VHF"])
hrv["Total_Power"] = power_in_band(power, freq, [0, 0.5])
hrv["LFn"] = hrv["LF"]/(hrv["LF"]+hrv["HF"])
hrv["HFn"] = hrv["HF"]/(hrv["LF"]+hrv["HF"])
hrv["LF/HF"] = hrv["LF"]/hrv["HF"]
hrv["LF/P"] = hrv["LF"]/hrv["Total_Power"]
hrv["HF/P"] = hrv["HF"]/hrv["Total_Power"]
if "nonlinear" in hrv_features:
if len(RRis) > 17:
hrv["DFA_1"] = nolds.dfa(RRis, range(4, 17))
if len(RRis) > 66:
hrv["DFA_2"] = nolds.dfa(RRis, range(16, 66))
hrv["Shannon"] = complexity_entropy_shannon(RRis)
hrv["Sample_Entropy"] = nolds.sampen(RRis, emb_dim=2)
try:
hrv["Correlation_Dimension"] = nolds.corr_dim(RRis, emb_dim=2)
except AssertionError as error:
print("NeuroKit Warning: ecg_hrv(): Correlation Dimension. Error: " + str(error))
hrv["Correlation_Dimension"] = np.nan
mse = complexity_entropy_multiscale(RRis, max_scale_factor=20, m=2)
hrv["Entropy_Multiscale_AUC"] = mse["MSE_AUC"]
hrv["Entropy_SVD"] = complexity_entropy_svd(RRis, emb_dim=2)
hrv["Entropy_Spectral_VLF"] = complexity_entropy_spectral(RRis, sampling_rate, bands=np.arange(0.0033, 0.04, 0.001))
hrv["Entropy_Spectral_LF"] = complexity_entropy_spectral(RRis, sampling_rate, bands=np.arange(0.04, 0.15, 0.001))
hrv["Entropy_Spectral_HF"] = complexity_entropy_spectral(RRis, sampling_rate, bands=np.arange(0.15, 0.40, 0.001))
hrv["Fisher_Info"] = complexity_fisher_info(RRis, tau=1, emb_dim=2)
hrv["FD_Petrosian"] = complexity_fd_petrosian(RRis)
hrv["FD_Higushi"] = complexity_fd_higushi(RRis, k_max=16)
return(hrv) | Computes the Heart-Rate Variability (HRV). Shamelessly stolen from the `hrv <https://github.com/rhenanbartels/hrv/blob/develop/hrv>`_ package by Rhenan Bartels. All credits go to him.
Parameters
----------
rpeaks : list or ndarray
R-peak location indices.
rri: list or ndarray
RR intervals in the signal. If this argument is passed, rpeaks should not be passed.
sampling_rate : int
Sampling rate (samples/second).
hrv_features : list
What HRV indices to compute. Any or all of 'time', 'frequency' or 'nonlinear'.
Returns
----------
hrv : dict
Contains hrv features and percentage of detected artifacts.
Example
----------
>>> import neurokit as nk
>>> sampling_rate = 1000
>>> hrv = nk.bio_ecg.ecg_hrv(rpeaks=rpeaks, sampling_rate=sampling_rate)
Notes
----------
*Details*
- **HRV**: Heart-Rate Variability (HRV) is a finely tuned measure of heart-brain communication, as well as a strong predictor of morbidity and death (Zohar et al., 2013). It describes the complex variation of beat-to-beat intervals mainly controlled by the autonomic nervous system (ANS) through the interplay of sympathetic and parasympathetic neural activity at the sinus node. In healthy subjects, the dynamic cardiovascular control system is characterized by its ability to adapt to physiologic perturbations and changing conditions maintaining the cardiovascular homeostasis (Voss, 2015). In general, the HRV is influenced by many several factors like chemical, hormonal and neural modulations, circadian changes, exercise, emotions, posture and preload. There are several procedures to perform HRV analysis, usually classified into three categories: time domain methods, frequency domain methods and non-linear methods.
- **sdNN**: The standard deviation of the time interval between successive normal heart beats (*i.e.*, the RR intervals). Reflects all influences on HRV including slow influences across the day, circadian variations, the effect of hormonal influences such as cortisol and epinephrine. It should be noted that total variance of HRV increases with the length of the analyzed recording.
- **meanNN**: The the mean RR interval.
- **CVSD**: The coefficient of variation of successive differences (van Dellen et al., 1985), the RMSSD divided by meanNN.
- **cvNN**: The Coefficient of Variation, *i.e.* the ratio of sdNN divided by meanNN.
- **RMSSD** is the root mean square of the RR intervals (*i.e.*, square root of the mean of the squared differences in time between successive normal heart beats). Reflects high frequency (fast or parasympathetic) influences on HRV (*i.e.*, those influencing larger changes from one beat to the next).
- **medianNN**: Median of the Absolute values of the successive Differences between the RR intervals.
- **madNN**: Median Absolute Deviation (MAD) of the RR intervals.
- **mcvNN**: Median-based Coefficient of Variation, *i.e.* the ratio of madNN divided by medianNN.
- **pNN50**: The proportion derived by dividing NN50 (The number of interval differences of successive RR intervals greater than 50 ms) by the total number of RR intervals.
- **pNN20**: The proportion derived by dividing NN20 (The number of interval differences of successive RR intervals greater than 20 ms) by the total number of RR intervals.
- **Triang**: The HRV triangular index measurement is the integral of the density distribution (that is, the number of all RR intervals) divided by the maximum of the density distribution (class width of 8ms).
- **Shannon_h**: Shannon Entropy calculated on the basis of the class probabilities pi (i = 1,...,n with n—number of classes) of the NN interval density distribution (class width of 8 ms resulting in a smoothed histogram suitable for HRV analysis).
- **VLF** is the variance (*i.e.*, power) in HRV in the Very Low Frequency (.003 to .04 Hz). Reflect an intrinsic rhythm produced by the heart which is modulated by primarily by sympathetic activity.
- **LF** is the variance (*i.e.*, power) in HRV in the Low Frequency (.04 to .15 Hz). Reflects a mixture of sympathetic and parasympathetic activity, but in long-term recordings like ours, it reflects sympathetic activity and can be reduced by the beta-adrenergic antagonist propanolol (McCraty & Atkinson, 1996).
- **HF** is the variance (*i.e.*, power) in HRV in the High Frequency (.15 to .40 Hz). Reflects fast changes in beat-to-beat variability due to parasympathetic (vagal) activity. Sometimes called the respiratory band because it corresponds to HRV changes related to the respiratory cycle and can be increased by slow, deep breathing (about 6 or 7 breaths per minute) (Kawachi et al., 1995) and decreased by anticholinergic drugs or vagal blockade (Hainsworth, 1995).
- **Total_Power**: Total power of the density spectra.
- **LFHF**: The LF/HF ratio is sometimes used by some investigators as a quantitative mirror of the sympatho/vagal balance.
- **LFn**: normalized LF power LFn = LF/(LF+HF).
- **HFn**: normalized HF power HFn = HF/(LF+HF).
- **LFp**: ratio between LF and Total_Power.
- **HFp**: ratio between H and Total_Power.
- **DFA**: Detrended fluctuation analysis (DFA) introduced by Peng et al. (1995) quantifies the fractal scaling properties of time series. DFA_1 is the short-term fractal scaling exponent calculated over n = 4–16 beats, and DFA_2 is the long-term fractal scaling exponent calculated over n = 16–64 beats.
- **Shannon**: Shannon Entropy over the RR intervals array.
- **Sample_Entropy**: Sample Entropy (SampEn) over the RR intervals array with emb_dim=2.
- **Correlation_Dimension**: Correlation Dimension over the RR intervals array with emb_dim=2.
- **Entropy_Multiscale**: Multiscale Entropy over the RR intervals array with emb_dim=2.
- **Entropy_SVD**: SVD Entropy over the RR intervals array with emb_dim=2.
- **Entropy_Spectral_VLF**: Spectral Entropy over the RR intervals array in the very low frequency (0.003-0.04).
- **Entropy_Spectral_LF**: Spectral Entropy over the RR intervals array in the low frequency (0.4-0.15).
- **Entropy_Spectral_HF**: Spectral Entropy over the RR intervals array in the very high frequency (0.15-0.40).
- **Fisher_Info**: Fisher information over the RR intervals array with tau=1 and emb_dim=2.
- **Lyapunov**: Lyapunov Exponent over the RR intervals array with emb_dim=58 and matrix_dim=4.
- **FD_Petrosian**: Petrosian's Fractal Dimension over the RR intervals.
- **FD_Higushi**: Higushi's Fractal Dimension over the RR intervals array with k_max=16.
*Authors*
- `Dominique Makowski <https://dominiquemakowski.github.io/>`_
- Rhenan Bartels (https://github.com/rhenanbartels)
*Dependencies*
- scipy
- numpy
*See Also*
- RHRV: http://rhrv.r-forge.r-project.org/
References
-----------
- Heart rate variability. (1996). Standards of measurement, physiological interpretation, and clinical use. Task Force of the European Society of Cardiology and the North American Society of Pacing and Electrophysiology. Eur Heart J, 17, 354-381.
- Voss, A., Schroeder, R., Heitmann, A., Peters, A., & Perz, S. (2015). Short-term heart rate variability—influence of gender and age in healthy subjects. PloS one, 10(3), e0118308.
- Zohar, A. H., Cloninger, C. R., & McCraty, R. (2013). Personality and heart rate variability: exploring pathways from personality to cardiac coherence and health. Open Journal of Social Sciences, 1(06), 32.
- Smith, A. L., Owen, H., & Reynolds, K. J. (2013). Heart rate variability indices for very short-term (30 beat) analysis. Part 2: validation. Journal of clinical monitoring and computing, 27(5), 577-585.
- Lippman, N. E. A. L., Stein, K. M., & Lerman, B. B. (1994). Comparison of methods for removal of ectopy in measurement of heart rate variability. American Journal of Physiology-Heart and Circulatory Physiology, 267(1), H411-H418.
- Peltola, M. A. (2012). Role of editing of R–R intervals in the analysis of heart rate variability. Frontiers in physiology, 3. |
9,946 | def get_authority_key_identifier(self):
try:
ski = self.x509.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
except x509.ExtensionNotFound:
return x509.AuthorityKeyIdentifier.from_issuer_public_key(self.x509.public_key())
else:
return x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ski) | Return the AuthorityKeyIdentifier extension used in certificates signed by this CA. |
9,947 | def _get_exc_info(self, exc_tuple=None):
if exc_tuple is None:
etype, value, tb = sys.exc_info()
else:
etype, value, tb = exc_tuple
if etype is None:
if hasattr(sys, 'last_type'):
etype, value, tb = sys.last_type, sys.last_value, \
sys.last_traceback
if etype is None:
raise ValueError("No exception to find")
sys.last_type = etype
sys.last_value = value
sys.last_traceback = tb
return etype, value, tb | get exc_info from a given tuple, sys.exc_info() or sys.last_type etc.
Ensures sys.last_type,value,traceback hold the exc_info we found,
from whichever source.
raises ValueError if none of these contain any information |
9,948 | def get_schema(repo, content_type):
try:
with open(
os.path.join(repo.working_dir,
,
% (content_type,)), ) as fp:
data = fp.read()
return avro.schema.parse(data)
except IOError:
raise NotFound() | Return a schema for a content type in a repository.
:param Repo repo:
The git repository.
:returns: dict |
9,949 | def send_location(self, geo_uri, name, thumb_url=None, **thumb_info):
return self.client.api.send_location(self.room_id, geo_uri, name,
thumb_url, thumb_info) | Send a location to the room.
See http://matrix.org/docs/spec/client_server/r0.2.0.html#m-location
for thumb_info
Args:
geo_uri (str): The geo uri representing the location.
name (str): Description for the location.
thumb_url (str): URL to the thumbnail of the location.
thumb_info (): Metadata about the thumbnail, type ImageInfo. |
9,950 | def getprice(self):
target = self.bot.get("target", {})
if target.get("reference") == "feed":
assert self.market == self.market.core_quote_market(), "Wrong market for reference!"
ticker = self.market.ticker()
price = ticker.get("quoteSettlement_price")
assert abs(price["price"]) != float("inf"), "Check price feed of asset! (%s)" % str(price)
return price | Here we obtain the price for the quote and make sure it has
a feed price |
9,951 | def iter_instances(self):
for wrkey in set(self.keys()):
obj = self.get(wrkey)
if obj is None:
continue
yield wrkey, obj | Iterate over the stored objects
Yields:
wrkey: The two-tuple key used to store the object
obj: The instance or function object |
9,952 | def get_csv_from_metadata(dsn, d):
logger_csvs.info("enter get_csv_from_metadata")
_csvs = OrderedDict()
_d = copy.deepcopy(d)
try:
if "paleoData" in _d:
_d["paleoData"], _csvs = _get_csv_from_section(_d["paleoData"], "{}.paleo".format(dsn), _csvs)
if "chronData" in _d:
_d["chronData"], _csvs = _get_csv_from_section(_d["chronData"], "{}.chron".format(dsn), _csvs)
except Exception as e:
print("Error: get_csv_from_metadata: {}, {}".format(dsn, e))
logger_csvs.error("get_csv_from_metadata: {}, {}".format(dsn, e))
logger_csvs.info("exit get_csv_from_metadata")
return _d, _csvs | Two goals. Get all csv from metadata, and return new metadata with generated filenames to match files.
:param str dsn: Dataset name
:param dict d: Metadata
:return dict _csvs: Csv |
9,953 | def barycentric_to_cartesian(tri, bc):
bc = np.asarray(bc)
tri = np.asarray(tri)
if len(bc.shape) == 1:
return barycentric_to_cartesian(np.transpose(np.asarray([tri]), (1,2,0)),
np.asarray([bc]).T)[:,0]
bc = bc if bc.shape[0] == 2 else bc.T
if bc.shape[0] != 2: raise ValueError()
n = bc.shape[1]
(l1,l2) = bc
(p1, p2, p3) = tri
l3 = (1 - l1 - l2)
return np.asarray([x1*l1 + x2*l2 + x3*l3 for (x1,x2,x3) in zip(p1, p2, p3)]) | barycentric_to_cartesian(tri, bc) yields the d x n coordinate matrix of the given barycentric
coordinate matrix (also d x n) bc interpolated in the n triangles given in the array tri. See
also cartesian_to_barycentric. If tri and bc represent one triangle and coordinate, then just
the coordinate and not a matrix is returned. The value d, dimensions, must be 2 or 3. |
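A worked example for a single 2-D triangle, assuming numpy is available: the barycentric weights (l1, l2, l3), with l3 = 1 - l1 - l2, map to l1*p1 + l2*p2 + l3*p3.

```python
# Barycentric (0.25, 0.25, 0.5) in the unit right triangle.
import numpy as np

p1, p2, p3 = np.array([0.0, 0.0]), np.array([1.0, 0.0]), np.array([0.0, 1.0])
l1, l2 = 0.25, 0.25
l3 = 1 - l1 - l2
point = l1 * p1 + l2 * p2 + l3 * p3
print(point)   # [0.25 0.5 ]: a point inside the triangle
```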
9,954 | def update_thread(cls, session, conversation, thread):
data = thread.to_api()
data[] = True
return cls(
% (
conversation.id, thread.id,
),
data=data,
request_type=RequestPaginator.PUT,
singleton=True,
session=session,
) | Update a thread.
Args:
session (requests.sessions.Session): Authenticated session.
conversation (helpscout.models.Conversation): The conversation
that the thread belongs to.
thread (helpscout.models.Thread): The thread to be updated.
Returns:
helpscout.models.Conversation: Conversation including freshly
updated thread. |
9,955 | def format_all(self):
res = + str(self.name) +
res += + str(self.parent) +
res += self._get_all_children()
res += self._get_links()
return res | return a trace of parents and children of the obect |
9,956 | def rtype_to_model(rtype):
models = goldman.config.MODELS
for model in models:
if rtype.lower() == model.RTYPE.lower():
return model
raise ValueError( % rtype) | Return a model class object given a string resource type
:param rtype:
string resource type
:return:
model class object
:raise:
ValueError |
9,957 | def get_repo(name, basedir=None, **kwargs):
***
repos = list_repos(basedir)
repofile = ''
for repo in repos:
if repo == name:
repofile = repos[repo][]
if repofile:
filerepos = _parse_repo_file(repofile)[1]
return filerepos[name]
return {} | Display a repo from <basedir> (default basedir: all dirs in ``reposdir``
yum option).
CLI Examples:
.. code-block:: bash
salt '*' pkg.get_repo myrepo
salt '*' pkg.get_repo myrepo basedir=/path/to/dir
salt '*' pkg.get_repo myrepo basedir=/path/to/dir,/path/to/another/dir |
9,958 | def _set_collection(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=collection.collection, is_container=, presence=False, yang_name="collection", rest_name="collection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=True)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__collection = t
if hasattr(self, ):
self._set() | Setter method for collection, mapped from YANG variable /interface/fortygigabitethernet/rmon/collection (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_collection is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_collection() directly. |
9,959 | def save_cloud_optimized(self, dest_url, resampling=Resampling.gauss, blocksize=256,
overview_blocksize=256, creation_options=None):
src = self
with tempfile.NamedTemporaryFile(suffix=) as tf:
src.save(tf.name, overviews=False)
convert_to_cog(tf.name, dest_url, resampling, blocksize, overview_blocksize, creation_options)
geotiff = GeoRaster2.open(dest_url)
return geotiff | Save as Cloud Optimized GeoTiff object to a new file.
:param dest_url: path to the new raster
:param resampling: which Resampling to use on reading, default Resampling.gauss
:param blocksize: the size of the blocks default 256
:param overview_blocksize: the block size of the overviews, default 256
:param creation_options: dict, options that can override the source raster profile,
notice that you can't override tiled=True, and the blocksize
the list of creation_options can be found here https://www.gdal.org/frmt_gtiff.html
:return: new GeoRaster of the tiled object |
9,960 | def env():
ssh = cij.env_to_dict(PREFIX, REQUIRED)
if "KEY" in ssh:
ssh["KEY"] = cij.util.expand_path(ssh["KEY"])
if cij.ENV.get("SSH_PORT") is None:
cij.ENV["SSH_PORT"] = "22"
cij.warn("cij.ssh.env: SSH_PORT was not set, assigned: %r" % (
cij.ENV.get("SSH_PORT")
))
if cij.ENV.get("SSH_CMD_TIME") is None:
cij.ENV["SSH_CMD_TIME"] = "1"
cij.warn("cij.ssh.env: SSH_CMD_TIME was not set, assigned: %r" % (
cij.ENV.get("SSH_CMD_TIME")
))
return 0 | Verify SSH variables and construct exported variables |
9,961 | def cli(self, commands):
cli_output = dict()
if type(commands) is not list:
raise TypeError()
for command in commands:
output = self._send_command(command)
if 'Invalid input detected' in output:
raise ValueError('Unable to execute command "{}"'.format(command))
cli_output.setdefault(command, {})
cli_output[command] = output
return cli_output | Execute a list of commands and return the output in a dictionary format using the command
as the key.
Example input:
['show clock', 'show calendar']
Output example:
{ 'show calendar': u'22:02:01 UTC Thu Feb 18 2016',
'show clock': u'*22:01:51.165 UTC Thu Feb 18 2016'} |
9,962 | def run_review(*args):
email_cnt =
idx = 1
email_cnt = email_cnt +
email_cnt, idx = __get_post_review(email_cnt, idx)
email_cnt, idx = __get_page_review(email_cnt, idx)
email_cnt, idx = __get_wiki_review(email_cnt, idx)
diff_str = __get_diff_recent()
if len(diff_str) < 20000:
email_cnt = email_cnt + diff_str
email_cnt = email_cnt +
if idx > 1:
send_mail(post_emails, "{0}|{1}|{2}".format(SMTP_CFG[], , DATE_STR), email_cnt) | Get the difference of recents modification, and send the Email.
For: wiki, page, and post. |
9,963 | def update_marker(self, iid, **kwargs):
if iid not in self._markers:
raise ValueError("Unknown iid passed as argument: {}".format(iid))
self.check_kwargs(kwargs)
marker = self._markers[iid]
marker.update(kwargs)
self.delete_marker(iid)
return self.create_marker(marker["category"], marker["start"], marker["finish"], marker) | Change the options for a certain marker and redraw the marker
:param iid: identifier of the marker to change
:type iid: str
:param kwargs: Dictionary of options to update
:type kwargs: dict
:raises: ValueError |
9,964 | def failback_from_replicant(self, volume_id, replicant_id):
return self.client.call(, ,
replicant_id, id=volume_id) | Failback from a volume replicant.
:param integer volume_id: The id of the volume
:param integer replicant_id: ID of replicant to failback from
:return: Returns whether failback was successful or not |
9,965 | def make_error_response(self, cond):
if self.stanza_type == "error":
raise ValueError("Errors may not be generated in response"
" to errors")
stanza = Presence(stanza_type = "error", from_jid = self.from_jid,
to_jid = self.to_jid, stanza_id = self.stanza_id,
status = self._status, show = self._show,
priority = self._priority, error_cond = cond)
if self._payload is None:
self.decode_payload()
for payload in self._payload:
stanza.add_payload(payload)
return stanza | Create error response for the any non-error presence stanza.
:Parameters:
- `cond`: error condition name, as defined in XMPP specification.
:Types:
- `cond`: `unicode`
:return: new presence stanza.
:returntype: `Presence` |
9,966 | def get(self,
key: Text,
count: Optional[int]=None,
formatter: Formatter=None,
locale: Text=None,
params: Optional[Dict[Text, Any]]=None,
flags: Optional[Flags]=None) -> List[Text]:
if params is None:
params = {}
if count is not None:
raise TranslationError()
locale = self.choose_locale(locale)
try:
group: SentenceGroup = self.dict[locale][key]
except KeyError:
raise MissingTranslationError(
.format(key))
try:
trans = group.render(flags or {})
out = []
for line in trans:
if not formatter:
out.append(line.format(**params))
else:
out.append(formatter.format(line, **params))
except KeyError as e:
raise MissingParamError(
.format(e.args[0], key)
)
else:
return out | Get the appropriate translation given the specified parameters.
:param key: Translation key
:param count: Count for plurals
:param formatter: Optional string formatter to use
:param locale: Prefered locale to get the string from
:param params: Params to be substituted
:param flags: Flags to help choosing one version or the other |
9,967 | def get_entry_by_material_id(self, material_id, compatible_only=True,
inc_structure=None, property_data=None,
conventional_unit_cell=False):
data = self.get_entries(material_id, compatible_only=compatible_only,
inc_structure=inc_structure,
property_data=property_data,
conventional_unit_cell=conventional_unit_cell)
return data[0] | Get a ComputedEntry corresponding to a material_id.
Args:
material_id (str): Materials Project material_id (a string,
e.g., mp-1234).
compatible_only (bool): Whether to return only "compatible"
entries. Compatible entries are entries that have been
processed using the MaterialsProjectCompatibility class,
which performs adjustments to allow mixing of GGA and GGA+U
calculations for more accurate phase diagrams and reaction
energies.
inc_structure (str): If None, entries returned are
ComputedEntries. If inc_structure="final",
ComputedStructureEntries with final structures are returned.
Otherwise, ComputedStructureEntries with initial structures
are returned.
property_data (list): Specify additional properties to include in
entry.data. If None, no data. Should be a subset of
supported_properties.
conventional_unit_cell (bool): Whether to get the standard
conventional unit cell
Returns:
ComputedEntry or ComputedStructureEntry object. |
9,968 | def create(cls, tokens:Tokens, max_vocab:int, min_freq:int) -> 'Vocab':
"Create a vocabulary from a set of `tokens`."
freq = Counter(p for o in tokens for p in o)
itos = [o for o,c in freq.most_common(max_vocab) if c >= min_freq]
for o in reversed(defaults.text_spec_tok):
if o in itos: itos.remove(o)
itos.insert(0, o)
return cls(itos) | Create a vocabulary from a set of `tokens`. |
9,969 | def get_mapping_from_db3_file( db_path ):
import sqlite3
conn = sqlite3.connect(db_path)
results = conn.cursor().execute()
rosetta_residue_ids = []
mapping = {}
for r in results:
mapping["%s%s%s" % (r[0], str(r[1]).rjust(4), r[2])] = { : r[4], : r[5], : r[6]}
rosetta_residue_ids.append(r[4])
raw_residue_list = [r for r in conn.cursor().execute()]
assert(sorted([r[0] for r in raw_residue_list]) == sorted(rosetta_residue_ids))
return mapping | Does the work of reading the Rosetta SQLite3 .db3 file to retrieve the mapping |
9,970 | def get_version(module):
init_py = open('{}/__init__.py'.format(module)).read()
return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1) | Return package version as listed in `__version__`. |
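A standalone sketch of the `__version__` extraction, using an in-memory stand-in for a real `__init__.py`:

```python
# Pull the version string out of module source text with a regex.
import re

init_py = '__version__ = "1.2.3"\n'
version = re.search(r"__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
print(version)   # 1.2.3
```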
9,971 | def find_movers(choosers, rates, rate_column):
logger.debug()
relocation_rates = pd.Series(
np.zeros(len(choosers)), index=choosers.index)
for _, row in rates.iterrows():
indexes = util.filter_table(choosers, row, ignore={rate_column}).index
relocation_rates.loc[indexes] = row[rate_column]
movers = relocation_rates.index[
relocation_rates > np.random.random(len(choosers))]
logger.debug(.format(len(movers)))
logger.debug()
return movers | Returns an array of the indexes of the `choosers` that are slated
to move.
Parameters
----------
choosers : pandas.DataFrame
Table of agents from which to find movers.
rates : pandas.DataFrame
Table of relocation rates. Index is unused.
Other columns describe filters on the `choosers`
table so that different segments can have different relocation
rates. Columns that ends with '_max' will be used to create
a "less than" filters, columns that end with '_min' will be
used to create "greater than or equal to" filters.
A column with no suffix will be used to make an 'equal to' filter.
An example `rates` structure:
age_of_head_max age_of_head_min
nan 65
65 40
In this example the `choosers` table would need to have an
'age_of_head' column on which to filter.
nan should be used to flag filters that do not apply
in a given row.
rate_column : object
Name of column in `rates` table that has relocation rates.
Returns
-------
movers : pandas.Index
Suitable for indexing `choosers` by index. |
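A usage sketch with hypothetical data, mirroring the docstring's age_of_head example; it assumes find_movers and its util/logger dependencies are importable:
import numpy as np
import pandas as pd

choosers = pd.DataFrame({"age_of_head": [30, 50, 70]}, index=[10, 11, 12])
rates = pd.DataFrame({"age_of_head_min": [65, 40],
                      "age_of_head_max": [np.nan, 65],
                      "rate": [0.1, 0.3]})
movers = find_movers(choosers, rates, "rate")  # pandas.Index into choosers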
9,972 | def render_mail(self, template_prefix, email, context):
subject = render_to_string('{0}_subject.txt'.format(template_prefix),
context)
subject = " ".join(subject.splitlines()).strip()
subject = self.format_email_subject(subject)
bodies = {}
for ext in ['html', 'txt']:
try:
template_name = '{0}_message.{1}'.format(template_prefix, ext)
bodies[ext] = render_to_string(template_name,
context).strip()
except TemplateDoesNotExist:
if ext == 'txt' and not bodies:
raise
if 'txt' in bodies:
msg = EmailMultiAlternatives(subject,
bodies['txt'],
settings.DEFAULT_FROM_EMAIL,
[email])
if 'html' in bodies:
msg.attach_alternative(bodies['html'], 'text/html')
else:
msg = EmailMessage(subject,
bodies['html'],
settings.DEFAULT_FROM_EMAIL,
[email])
msg.content_subtype = 'html'
return msg | Renders an e-mail to `email`. `template_prefix` identifies the
e-mail that is to be sent, e.g. "account/email/email_confirmation" |
9,973 | def upload_all_books(book_id_start, book_id_end, rdf_library=None):
logger.info(
"starting a gitberg mass upload: {0} -> {1}".format(
book_id_start, book_id_end
)
)
for book_id in range(int(book_id_start), int(book_id_end) + 1):
cache = {}
errors = 0
try:
if int(book_id) in missing_pgid:
print(u'missing pgid: {0}'.format(book_id))
continue
upload_book(book_id, rdf_library=rdf_library, cache=cache)
except Exception as e:
print(u'error processing: {0}'.format(book_id))
logger.error(u"Error processing: {}\r{}".format(book_id, e))
errors += 1
if errors > 10:
print()
break | Uses the fetch, make, push subcommands to
mirror Project Gutenberg to a github3 api |
9,974 | def _restore_replace(self):
if PyFunceble.path.isdir(self.base + ".git"):
if "PyFunceble" not in Command("git remote show origin").execute():
return True
return False
return True | Check if we need to replace ".gitignore" to ".keep".
:return: The replacement status.
:rtype: bool |
9,975 | def getattr(self, name, default: Any = _missing):
return deep_getattr(self.clsdict, self.bases, name, default) | Convenience method equivalent to
``deep_getattr(mcs_args.clsdict, mcs_args.bases, 'attr_name'[, default])`` |
9,976 | def _fetch_stock_data(self, stock_list):
pool = multiprocessing.pool.ThreadPool(len(stock_list))
try:
res = pool.map(self.get_stocks_by_range, stock_list)
finally:
pool.close()
return [d for d in res if d is not None] | Fetch stock information. |
9,977 | def Earth(pos=(0, 0, 0), r=1, lw=1):
import os
tss = vtk.vtkTexturedSphereSource()
tss.SetRadius(r)
tss.SetThetaResolution(72)
tss.SetPhiResolution(36)
earthMapper = vtk.vtkPolyDataMapper()
earthMapper.SetInputConnection(tss.GetOutputPort())
earthActor = Actor(c="w")
earthActor.SetMapper(earthMapper)
atext = vtk.vtkTexture()
pnmReader = vtk.vtkPNMReader()
cdir = os.path.dirname(__file__)
if cdir == "":
cdir = "."
fn = settings.textures_path + "earth.ppm"
pnmReader.SetFileName(fn)
atext.SetInputConnection(pnmReader.GetOutputPort())
atext.InterpolateOn()
earthActor.SetTexture(atext)
if not lw:
earthActor.SetPosition(pos)
return earthActor
es = vtk.vtkEarthSource()
es.SetRadius(r / 0.995)
earth2Mapper = vtk.vtkPolyDataMapper()
earth2Mapper.SetInputConnection(es.GetOutputPort())
earth2Actor = Actor()
earth2Actor.SetMapper(earth2Mapper)
earth2Mapper.ScalarVisibilityOff()
earth2Actor.GetProperty().SetLineWidth(lw)
ass = Assembly([earthActor, earth2Actor])
ass.SetPosition(pos)
settings.collectable_actors.append(ass)
return ass | Build a textured actor representing the Earth.
.. hint:: |geodesic| |geodesic.py|_ |
9,978 | def _check_lock_permission(
self, url, lock_type, lock_scope, lock_depth, token_list, principal
):
assert lock_type == "write"
assert lock_scope in ("shared", "exclusive")
assert lock_depth in ("0", "infinity")
_logger.debug(
"checkLockPermission({}, {}, {}, {})".format(
url, lock_scope, lock_depth, principal
)
)
errcond = DAVErrorCondition(PRECONDITION_CODE_LockConflict)
self._lock.acquire_read()
try:
u = url
while u:
ll = self.get_url_lock_list(u)
for l in ll:
_logger.debug(" check parent {}, {}".format(u, lock_string(l)))
if u != url and l["depth"] != "infinity":
continue
elif l["scope"] == "shared" and lock_scope == "shared":
continue
_logger.debug(
" -> DENIED due to locked parent {}".format(lock_string(l))
)
errcond.add_href(l["root"])
u = util.get_uri_parent(u)
if lock_depth == "infinity":
childLocks = self.storage.get_lock_list(
url, include_root=False, include_children=True, token_only=False
)
for l in childLocks:
assert util.is_child_uri(url, l["root"])
_logger.debug(
" -> DENIED due to locked child {}".format(lock_string(l))
)
errcond.add_href(l["root"])
finally:
self._lock.release()
if len(errcond.hrefs) > 0:
raise DAVError(HTTP_LOCKED, err_condition=errcond)
return | Check, if <principal> can lock <url>, otherwise raise an error.
If locking <url> would create a conflict, DAVError(HTTP_LOCKED) is
raised. An embedded DAVErrorCondition contains the conflicting resource.
@see http://www.webdav.org/specs/rfc4918.html#lock-model
- Parent locks WILL NOT be conflicting, if they are depth-0.
- Exclusive depth-infinity parent locks WILL be conflicting, even if
they are owned by <principal>.
- Child locks WILL NOT be conflicting, if we request a depth-0 lock.
- Exclusive child locks WILL be conflicting, even if they are owned by
<principal>. (7.7)
- It is not enough to check whether a lock is owned by <principal>, but
also the token must be passed with the request. (Because <principal>
may run two different applications on his client.)
- <principal> cannot lock-exclusive, if he holds a parent shared-lock.
(This would only make sense, if he was the only shared-lock holder.)
- TODO: litmus tries to acquire a shared lock on one resource twice
(locks: 27 'double_sharedlock') and fails, when we return HTTP_LOCKED.
So we allow multi shared locks on a resource even for the same
principal.
@param url: URL that shall be locked
@param lock_type: "write"
@param lock_scope: "shared"|"exclusive"
@param lock_depth: "0"|"infinity"
@param token_list: list of lock tokens, that the user submitted in If: header
@param principal: name of the principal requesting a lock
@return: None (or raise) |
9,979 | def client_list(self, *args):
if len(self._clients) == 0:
self.log('No clients connected')
else:
self.log(self._clients, pretty=True) | Display a list of connected clients |
9,980 | def certify_enum(value, kind=None, required=True):
if certify_required(
value=value,
required=required,
):
return
if not isinstance(value, kind):
raise CertifierTypeError(
message="expected {expected!r}, but value is of type {actual!r}".format(
expected=kind.__name__, actual=value.__class__.__name__),
value=value,
required=required,
) | Certifier for enum.
:param value:
The value to be certified.
:param kind:
The enum type that value should be an instance of.
:param bool required:
Whether the value can be `None`. Defaults to True.
:raises CertifierTypeError:
The type is invalid |
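A usage sketch with a hypothetical enum; it assumes certify_enum and its exception types are importable:
import enum

class Color(enum.Enum):
    RED = 1
    GREEN = 2

certify_enum(Color.RED, kind=Color)               # ok
certify_enum(None, kind=Color, required=False)    # ok, None is allowed
# certify_enum("red", kind=Color)                 # would raise CertifierTypeError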
9,981 | def is_shortcut_in_use(self, shortcut):
for path, actionName, action in foundations.walkers.dictionaries_walker(self.__categories):
if action.shortcut() == QKeySequence(shortcut):
return True
return False | Returns if given action shortcut is in use.
:param shortcut: Action shortcut.
:type name: unicode
:return: Is shortcut in use.
:rtype: bool |
9,982 | def line_spacing_rule(self):
pPr = self._element.pPr
if pPr is None:
return None
return self._line_spacing_rule(
pPr.spacing_line, pPr.spacing_lineRule
) | A member of the :ref:`WdLineSpacing` enumeration indicating how the
value of :attr:`line_spacing` should be interpreted. Assigning any of
the :ref:`WdLineSpacing` members :attr:`SINGLE`, :attr:`DOUBLE`, or
:attr:`ONE_POINT_FIVE` will cause the value of :attr:`line_spacing`
to be updated to produce the corresponding line spacing. |
9,983 | def dequeue(self, k):
if self.j + k <= self.M:
out = self.A[self.j:(self.j + k)]
self.j += k
elif k <= self.M:
out = np.empty(k, dtype=self.A.dtype)
nextra = self.j + k - self.M
out[:(k - nextra)] = self.A[self.j:]
self.enqueue()
out[(k - nextra):] = self.A[:nextra]
self.j = nextra
else:
raise ValueError()
return out | Outputs *k* draws from the multinomial distribution. |
9,984 | def export(self, exclude=[]):
fields = ( (key, self.get_field(key)) for key in self.schema
if not key.startswith("_") and key not in exclude )
doc = {name: field.export() for name, field in fields}
return doc | returns a dictionary representation of the document |
9,985 | def wait_for(self, pids=[], status_list=process_result_statuses):
results={}
pids = self._get_pids(pids)
for pid in pids:
while(True):
try:
stat = self._call_rest_api(, +pid+, error=%pid)
if stat in status_list:
results[pid]=stat
break
time.sleep(5)
except requests.exceptions.RequestException as e:
self.session=None
raise e
return results | wait_for(self, pids=[], status_list=process_result_statuses)
Waits for a process to finish
:Parameters:
* *pids* (`list`) -- list of processes waiting to be finished
* *status_list* (`list`) -- optional - List of statuses to wait for processes to finish with
:Example:
.. code-block:: python
pid = opereto_client.create_process(service='simple_shell_command', title='Test simple shell command service')
opereto_client.wait_for([pid], ['failure', 'error'])
opereto_client.rerun_process(pid) |
9,986 | def _group_dict_set(iterator):
d = defaultdict(set)
for key, value in iterator:
d[key].add(value)
return dict(d) | Make a dict that accumulates the values for each key in an iterator of doubles.
:param iter[tuple[A,B]] iterator: An iterator
:rtype: dict[A,set[B]] |
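A short usage sketch (values repeated under one key collapse into a single set):
pairs = [("a", 1), ("a", 2), ("b", 3), ("a", 2)]
print(_group_dict_set(pairs))  # {'a': {1, 2}, 'b': {3}}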
9,987 | def profile(self, name):
self.selected_profile = self.profiles.get(name)
return self.profiles.get(name) | Return a specific profile. |
9,988 | def set_icon_data(self, base64_data, mimetype="image/png", rel="icon"):
self.add_child("favicon", %(rel, base64_data, mimetype)) | Allows to define an icon for the App
Args:
base64_data (str): base64 encoded image data (ie. "data:image/x-icon;base64,AAABAAEAEBA....")
mimetype (str): mimetype of the image ("image/png" or "image/x-icon"...)
rel (str): leave it unchanged (standard "icon") |
9,989 | def gen_postinit(self, cls: ClassDefinition, slotname: str) -> Optional[str]:
rlines: List[str] = []
slot = self.schema.slots[slotname]
if slot.alias:
slotname = slot.alias
slotname = self.python_name_for(slotname)
range_type_name = self.range_type_name(slot, cls.name)
| Generate python post init rules for slot in class |
9,990 | def indent(self, space=4):
if not isinstance(space,int):
raise TypeError("space must be an int")
if space < 0:
raise ValueError("space must be a non-negative integer")
space = ' '*space; o = []; l = 0
for c in self.newick():
if c == '(':
o.append('(\n'); l += 1; o.append(space*l)
elif c == ')':
o.append('\n'); l -= 1; o.append(space*l); o.append(')')
elif c == ',':
o.append(',\n'); o.append(space*l)
else:
o.append(c)
return ''.join(o) | Return an indented Newick string, just like ``nw_indent`` in Newick Utilities
Args:
``space`` (``int``): The number of spaces a tab should equal
Returns:
``str``: An indented Newick string |
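A self-contained sketch of the same indentation loop applied to a literal Newick string (illustrative only):
newick = "((A,B),C);"
space = " " * 4
out, level = [], 0
for c in newick:
    if c == "(":
        out.append("(\n"); level += 1; out.append(space * level)
    elif c == ")":
        out.append("\n"); level -= 1; out.append(space * level); out.append(")")
    elif c == ",":
        out.append(",\n"); out.append(space * level)
    else:
        out.append(c)
print("".join(out))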
9,991 | def minutes_from_utc(self):
offset = 0
if self.__datetime is not None and \
self.__datetime.utcoffset() is not None:
offset = self.__datetime.utcoffset().seconds / 60
if self.__datetime.utcoffset().days == -1:
offset = -((60 * 24) - offset)
return int(offset) | The timezone offset of this point in time object as +/- minutes from
UTC.
A positive value of the timezone offset indicates minutes east of UTC,
and a negative value indicates minutes west of UTC.
0, if this object represents a time interval. |
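A self-contained sketch of the same offset arithmetic using only the standard library (the -5 h zone is illustrative):
from datetime import datetime, timedelta, timezone

dt = datetime(2020, 1, 1, tzinfo=timezone(timedelta(hours=-5)))
offset = dt.utcoffset().seconds / 60
if dt.utcoffset().days == -1:
    offset = -((60 * 24) - offset)
print(int(offset))  # -300 (minutes west of UTC)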
9,992 | def handle_args_and_set_context(args):
parser = argparse.ArgumentParser()
parser.add_argument("env", help="environment")
parser.add_argument("path_to_template", help="path to the config template to process")
parser.add_argument("--no_params", help="disable loading values from params file", action="store_true", default=False)
parser.add_argument("--verbose", help="Output extra info", action="store_true", default=False)
parser.add_argument("--lint", help="Test configs for valid JSON/YAML syntax", action="store_true", default=False)
parser.add_argument("--silent", help="Suppress output of rendered template", action="store_true", default=False)
parsed = vars(parser.parse_args(args))
path_to_template = abspath(parsed["path_to_template"])
service = path_to_template.split('/')[-3]
return Context(
get_account_alias(parsed["env"]),
EFConfig.DEFAULT_REGION,
parsed["env"],
service,
path_to_template,
parsed["no_params"],
parsed["verbose"],
parsed["lint"],
parsed["silent"]
) | Args:
args: the command line args, probably passed from main() as sys.argv[1:]
Returns:
a populated Context object based on CLI args |
9,993 | def console_load_apf(con: tcod.console.Console, filename: str) -> bool:
return bool(
lib.TCOD_console_load_apf(_console(con), filename.encode("utf-8"))
) | Update a console from an ASCII Paint `.apf` file. |
9,994 | def to_bytes(self):
raw = b''
if not self._options:
return raw
for icmpv6popt in self._options:
raw += icmpv6popt.to_bytes()
return raw | Takes a list of ICMPv6Option objects and returns a packed byte string
of options, appropriately padded if necessary. |
9,995 | def _generateModel0(numCategories):
secondOrder[key] = probs
return (initProb, firstOrder, secondOrder, 3) | Generate the initial, first order, and second order transition
probabilities for 'model0'. For this model, we generate the following
set of sequences:
1-2-3 (4X)
1-2-4 (1X)
5-2-3 (1X)
5-2-4 (4X)
Parameters:
----------------------------------------------------------------------
numCategories: Number of categories
retval: (initProb, firstOrder, secondOrder, seqLen)
initProb: Initial probability for each category. This is a vector
of length len(categoryList).
firstOrder: A dictionary of the 1st order probabilities. The key
is the 1st element of the sequence, the value is
the probability of each 2nd element given the first.
secondOrder: A dictionary of the 2nd order probabilities. The key
is the first 2 elements of the sequence, the value is
the probability of each possible 3rd element given the
first two.
seqLen: Desired length of each sequence. The 1st element will
be generated using the initProb, the 2nd element by the
firstOrder table, and the 3rd and all successive
elements by the secondOrder table.
Here is an example of some return values:
initProb: [0.7, 0.2, 0.1]
firstOrder: {'[0]': [0.3, 0.3, 0.4],
'[1]': [0.3, 0.3, 0.4],
'[2]': [0.3, 0.3, 0.4]}
secondOrder: {'[0,0]': [0.3, 0.3, 0.4],
'[0,1]': [0.3, 0.3, 0.4],
'[0,2]': [0.3, 0.3, 0.4],
'[1,0]': [0.3, 0.3, 0.4],
'[1,1]': [0.3, 0.3, 0.4],
'[1,2]': [0.3, 0.3, 0.4],
'[2,0]': [0.3, 0.3, 0.4],
'[2,1]': [0.3, 0.3, 0.4],
'[2,2]': [0.3, 0.3, 0.4]} |
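A hedged sketch of how tables shaped like the return values above can drive sequence generation (the probabilities below are illustrative, not the actual model0 values):
import numpy as np

initProb = [0.7, 0.2, 0.1]
firstOrder = {'[%d]' % i: [0.3, 0.3, 0.4] for i in range(3)}
secondOrder = {'[%d,%d]' % (i, j): [0.3, 0.3, 0.4] for i in range(3) for j in range(3)}
seqLen = 4

rng = np.random.default_rng(0)
seq = [int(rng.choice(3, p=initProb))]                         # 1st element from initProb
seq.append(int(rng.choice(3, p=firstOrder['[%d]' % seq[0]])))  # 2nd from firstOrder
while len(seq) < seqLen:                                       # rest from secondOrder
    seq.append(int(rng.choice(3, p=secondOrder['[%d,%d]' % (seq[-2], seq[-1])])))
print(seq)  # a length-4 sequence of category indices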
9,996 | def adict(*classes):
a = True
for c in classes:
if isclass(c) and _infer_dict(c):
t = _dict_classes.get(c.__module__, ())
if c.__name__ not in t:
_dict_classes[c.__module__] = t + (c.__name__,)
else:
a = False
return a | Install one or more classes to be handled as dict. |
9,997 | def has_child(self, term):
for parent in self.children:
if parent.item_id == term or parent.has_child(term):
return True
return False | Return True if this GO object has a child GO ID. |
9,998 | def open511_convert(input_doc, output_format, serialize=True, **kwargs):
try:
output_format_info = FORMATS[output_format]
except KeyError:
raise ValueError("Unrecognized output format %s" % output_format)
input_doc = ensure_format(input_doc, output_format_info.input_format)
result = output_format_info.func(input_doc, **kwargs)
if serialize:
result = output_format_info.serializer(result)
return result | Convert an Open511 document between formats.
input_doc - either an lxml open511 Element or a deserialized JSON dict
output_format - short string name of a valid output format, as listed above |
9,999 | def FindLiteral(self, pattern, data):
pattern = utils.Xor(pattern, self.xor_in_key)
offset = 0
while 1:
offset = data.find(pattern, offset)
if offset < 0:
break
yield (offset, offset + len(pattern))
offset += 1 | Search the data for a hit. |
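A self-contained sketch of the same overlapping-hit scan, minus the XOR pre-step that is specific to the class above:
def find_literal(pattern, data):
    offset = 0
    while True:
        offset = data.find(pattern, offset)
        if offset < 0:
            break
        yield (offset, offset + len(pattern))
        offset += 1

print(list(find_literal(b"aa", b"aaaa")))  # [(0, 2), (1, 3), (2, 4)]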