Unnamed: 0 (int64, 0-389k) | code (stringlengths 26-79.6k) | docstring (stringlengths 1-46.9k) |
---|---|---|
8,200 | def _save_translations(sender, instance, *args, **kwargs):
if site_is_monolingual():
return False
cls = sender
if not hasattr(cls._meta, "translatable_fields"):
return False
for field in cls._meta.translatable_fields:
value = getattr(instance, field)
if value is not None:
md5_value = checksum(value)
setattr(instance, u"md5" + field, md5_value)
for lang in settings.LANGUAGES:
lang = lang[0]
if lang != settings.LANGUAGE_CODE:
context = u"Updating from object"
if hasattr(instance, "trans_context"):
context = getattr(instance, "trans_context")
trans = FieldTranslation.update(instance, field, lang, context) | This signal saves model translations. |
8,201 | def consume(self, seq):
for kmer in iter_kmers(seq, self.k, canonical=self.canonical):
self._incr(kmer) | Counts all k-mers in sequence. |
8,202 | def _find_home_or_away(self, row):
name = row().text()
if name == self._home_name.text():
return HOME
else:
return AWAY | Determine whether the player is on the home or away team.
Next to every player is their school's name. This name can be matched
with the previously parsed home team's name to determine if the player
is a member of the home or away team.
Parameters
----------
row : PyQuery object
A PyQuery object representing a single row in a boxscore table for
a single player.
Returns
-------
str
Returns a ``string`` constant denoting whether the player plays for
the home or away team. |
8,203 | def remote_server_command(command, environment, user_profile, **kwargs):
if environment.remote_server_key:
temp = tempfile.NamedTemporaryFile(mode="wb")
temp.write(environment.remote_server_key)
temp.seek(0)
known_hosts = temp.name
else:
known_hosts = get_script_path()
binds = {
user_profile.profiledir + : ,
known_hosts: ,
get_script_path():
}
if kwargs.get("include_project_dir", None):
binds[environment.target] =
del kwargs["include_project_dir"]
kwargs["ro"] = binds
try:
web_command(command, **kwargs)
except WebCommandError as e:
e.user_description =
raise e | Wraps web_command function with docker bindings needed to connect to
a remote server (such as datacats.com) and run commands there
(for example, when you want to copy your catalog to that server).
The files bound to the docker image include the user's ssh credentials:
ssh_config file,
rsa and rsa.pub user keys,
known_hosts with public keys of the remote server (if known).
The **kwargs (keyword arguments) are passed on to the web_command call
intact; see web_command's docstring for details |
8,204 | def generate_gap_bed(fname, outname):
f = Fasta(fname)
with open(outname, "w") as bed:
for chrom in f.keys():
for m in re.finditer(r"N+", f[chrom][:].seq):  # assumed gap pattern: runs of N
bed.write("{}\t{}\t{}\n".format(chrom, m.start(0), m.end(0))) | Generate a BED file with gap locations.
Parameters
----------
fname : str
Filename of input FASTA file.
outname : str
Filename of output BED file. |
8,205 | def pkgdb(opts):
pkgdbspmpkgdb'
) | Return modules for SPM's package database
.. versionadded:: 2015.8.0 |
8,206 | def uninstall(self, pkgname, *args, **kwargs):
auto_confirm = kwargs.pop("auto_confirm", True)
verbose = kwargs.pop("verbose", False)
with self.activated():
monkey_patch = next(iter(
dist for dist in self.base_working_set
if dist.project_name == "recursive-monkey-patch"
), None)
if monkey_patch:
monkey_patch.activate()
pip_shims = self.safe_import("pip_shims")
pathset_base = pip_shims.UninstallPathSet
pathset_base._permitted = PatchedUninstaller._permitted
dist = next(
iter(filter(lambda d: d.project_name == pkgname, self.get_working_set())),
None
)
pathset = pathset_base.from_dist(dist)
if pathset is not None:
pathset.remove(auto_confirm=auto_confirm, verbose=verbose)
try:
yield pathset
except Exception as e:
if pathset is not None:
pathset.rollback()
else:
if pathset is not None:
pathset.commit()
if pathset is None:
return | A context manager which allows uninstallation of packages from the environment
:param str pkgname: The name of a package to uninstall
>>> env = Environment("/path/to/env/root")
>>> with env.uninstall("pytz", auto_confirm=True, verbose=False) as uninstaller:
cleaned = uninstaller.paths
>>> if cleaned:
print("uninstalled packages: %s" % cleaned) |
8,207 | def registration_form_received(self, stanza):
self.lock.acquire()
try:
self.__register = Register(stanza.get_query())
self.registration_callback(stanza, self.__register.get_form())
finally:
self.lock.release() | Handle registration form received.
[client only]
Call self.registration_callback with the registration form received
as the argument. The value returned by the callback will be used as a
filled-in form.
:Parameters:
- `stanza`: the stanza received.
:Types:
- `stanza`: `pyxmpp.iq.Iq` |
8,208 | def JZ(cpu, target):
cpu.PC = Operators.ITEBV(cpu.address_bit_size, cpu.ZF, target.read(), cpu.PC) | Jumps short if zero.
:param cpu: current CPU.
:param target: destination operand. |
8,209 | def load(self, fileobj):
for loader in (pickle.load, json.load, csv.reader):
fileobj.seek(0)
try:
return self.initial_update(loader(fileobj))
except Exception as e:
pass
raise ValueError() | Load the dict from the file object |
8,210 | def get_stroke_glide_indices(A_g_hf, fs_a, J, t_max):
import numpy
from . import dsp
if A_g_hf.ndim > 1:
raise IndexError(
)
n_max = t_max * fs_a
zc = dsp.findzc(A_g_hf, J, n_max/2)
ind = numpy.where(zc[1:, 0] - zc[0:-1, 1] > n_max)[0]
gl_ind = numpy.vstack([zc[ind, 0] - 1, zc[ind + 1, 1] + 1]).T
gl_mean_idx = numpy.round(numpy.mean(gl_ind, 1)).astype(int)
gl_ind = numpy.round(gl_ind).astype(int)
for i in range(len(gl_mean_idx)):
col = range(gl_mean_idx[i], gl_ind[i, 0], - 1)
test = numpy.where(numpy.isnan(A_g_hf[col]))[0]
if test.size != 0:
gl_mean_idx[i] = numpy.nan
gl_ind[i,0] = numpy.nan
gl_ind[i,1] = numpy.nan
else:
over_J1 = numpy.where(abs(A_g_hf[col]) >= J)[0][0]
gl_ind[i,0] = gl_mean_idx[i] - over_J1 + 1
col = range(gl_mean_idx[i], gl_ind[i, 1])
over_J2 = numpy.where(abs(A_g_hf[col]) >= J)[0][0]
gl_ind[i,1] = gl_mean_idx[i] + over_J2 - 1
GL = gl_ind
GL = GL[numpy.where(GL[:, 1] - GL[:, 0] > n_max / 2)[0], :]
return GL | Get stroke and glide indices from high-pass accelerometer data
Args
----
A_g_hf: 1-D ndarray
Animal frame triaxial accelerometer matrix at sampling rate fs_a.
fs_a: int
Number of accelerometer samples per second
J: float
Frequency threshold for detecting a fluke stroke in m/s^2. If J is not
given, fluke strokes will not be located but the rotations signal (pry)
will be computed.
t_max: int
Maximum duration allowable for a fluke stroke in seconds. A fluke
stroke is counted whenever there is a cyclic variation in the pitch
deviation with peak-to-peak magnitude greater than +/-J and consistent
with a fluke stroke duration of less than t_max seconds, e.g., for
Mesoplodon choose t_max=4.
Returns
-------
GL: 1-D ndarray
Matrix containing the start time (first column) and end time (2nd
column) of any glides (i.e., no zero crossings in t_max or more
seconds). Times are in seconds.
Note
----
If no J or t_max is given, J=[], or t_max=[], GL returned as None |
8,211 | def AddArguments(cls, argument_group):
argument_group.add_argument(
, dest=, action=, default=False,
required=cls._DEFAULT_APPEND, help=(
))
argument_group.add_argument(
, dest=, type=str,
default=cls._DEFAULT_EVIDENCE, action=, required=False,
help=)
argument_group.add_argument(
, dest=, type=str, action=,
default=cls._DEFAULT_FIELDS, help=(
))
argument_group.add_argument(
, dest=, type=str,
action=, default=, help=(
.format(
cls._DEFAULT_FIELDS))) | Adds command line arguments the helper supports to an argument group.
This function takes an argument parser or an argument group object and adds
to it all the command line arguments this helper supports.
Args:
argument_group (argparse._ArgumentGroup|argparse.ArgumentParser):
argparse group. |
8,212 | def config_dict(config):
return dict(
(key, getattr(config, key))
for key in config.values
) | Given a Sphinx config object, return a dictionary of config
values. |
8,213 | def retry(tries, delay=0, back_off=1, raise_msg=''):
if back_off < 1:
raise ValueError()
tries = math.floor(tries)
if tries < 0:
raise ValueError()
if delay < 0:
raise ValueError()
def deco_retry(f):
def f_retry(*args, **kwargs):
max_tries, max_delay = tries, delay
while max_tries > 0:
rv = f(*args, **kwargs)
if rv:
return rv
max_tries -= 1
time.sleep(max_delay)
max_delay *= back_off
else:
if raise_msg:
raise Exception(raise_msg)
return
return f_retry
return deco_retry | Retries a function or method until it returns True.
- ``delay`` sets the initial delay in seconds
- ``back_off`` sets the factor by which the delay is multiplied after each retry
- ``raise_msg`` if not '', an Exception with this message is raised once all tries are exhausted |
8,214 | def bootstrap(ns_var_name: str = NS_VAR_NAME, core_ns_name: str = CORE_NS) -> None:
core_ns_sym = sym.symbol(core_ns_name)
ns_var_sym = sym.symbol(ns_var_name, ns=core_ns_name)
__NS = Maybe(Var.find(ns_var_sym)).or_else_raise(
lambda: RuntimeException(f"Dynamic Var {ns_var_sym} not bound!")
)
def in_ns(s: sym.Symbol):
ns = Namespace.get_or_create(s)
__NS.value = ns
return ns
Var.intern_unbound(core_ns_sym, sym.symbol("unquote"))
Var.intern_unbound(core_ns_sym, sym.symbol("unquote-splicing"))
Var.intern(
core_ns_sym, sym.symbol("in-ns"), in_ns, meta=lmap.map({_REDEF_META_KEY: True})
)
Var.intern(
core_ns_sym,
sym.symbol(_PRINT_GENERATED_PY_VAR_NAME),
False,
dynamic=True,
meta=lmap.map({_PRIVATE_META_KEY: True}),
)
Var.intern(
core_ns_sym,
sym.symbol(_GENERATED_PYTHON_VAR_NAME),
"",
dynamic=True,
meta=lmap.map({_PRIVATE_META_KEY: True}),
)
Var.intern(
core_ns_sym, sym.symbol(_PRINT_DUP_VAR_NAME), lobj.PRINT_DUP, dynamic=True
)
Var.intern(
core_ns_sym, sym.symbol(_PRINT_LENGTH_VAR_NAME), lobj.PRINT_LENGTH, dynamic=True
)
Var.intern(
core_ns_sym, sym.symbol(_PRINT_LEVEL_VAR_NAME), lobj.PRINT_LEVEL, dynamic=True
)
Var.intern(
core_ns_sym, sym.symbol(_PRINT_META_VAR_NAME), lobj.PRINT_META, dynamic=True
)
Var.intern(
core_ns_sym,
sym.symbol(_PRINT_READABLY_VAR_NAME),
lobj.PRINT_READABLY,
dynamic=True,
) | Bootstrap the environment with functions that are difficult to
express with the very minimal lisp environment. |
8,215 | def _storage_list_keys(bucket, pattern):
data = [{: item.metadata.name,
: item.metadata.content_type,
: item.metadata.size,
: item.metadata.updated_on}
for item in _storage_get_keys(bucket, pattern)]
return datalab.utils.commands.render_dictionary(data, [, , , ]) | List all storage keys in a specified bucket that match a pattern. |
8,216 | def build_slabs(self):
slabs = self.get_unreconstructed_slabs()
recon_slabs = []
for slab in slabs:
d = get_d(slab)
top_site = sorted(slab, key=lambda site: site.frac_coords[2])[-1].coords
if "points_to_remove" in self.reconstruction_json.keys():
pts_to_rm = copy.deepcopy(self.reconstruction_json["points_to_remove"])
for p in pts_to_rm:
p[2] = slab.lattice.get_fractional_coords([top_site[0], top_site[1],
top_site[2]+p[2]*d])[2]
cart_point = slab.lattice.get_cartesian_coords(p)
dist = [site.distance_from_point(cart_point) for site in slab]
site1 = dist.index(min(dist))
slab.symmetrically_remove_atoms([site1])
if "points_to_add" in self.reconstruction_json.keys():
pts_to_add = copy.deepcopy(self.reconstruction_json["points_to_add"])
for p in pts_to_add:
p[2] = slab.lattice.get_fractional_coords([top_site[0], top_site[1],
top_site[2]+p[2]*d])[2]
slab.symmetrically_add_atom(slab[0].specie, p)
slab.reconstruction = self.name
setattr(slab, "recon_trans_matrix", self.trans_matrix)
ouc = slab.oriented_unit_cell.copy()
ouc.make_supercell(self.trans_matrix)
slab.oriented_unit_cell = ouc
recon_slabs.append(slab)
return recon_slabs | Builds the reconstructed slab by:
(1) Obtaining the unreconstructed slab using the specified
parameters for the SlabGenerator.
(2) Applying the appropriate lattice transformation in the
a and b lattice vectors.
(3) Remove any specified sites from both surfaces.
(4) Add any specified sites to both surfaces.
Returns:
(Slab): The reconstructed slab. |
8,217 | def cmServicePrompt():
a = TpPd(pd=0x5)
b = MessageType(mesType=0x25)
c = PdAndSapi()
packet = a / b / c
return packet | CM SERVICE PROMPT Section 9.2.5a |
8,218 | def on_song_changed(self, song):
if song is None or song.lyric is None:
self._lyric = None
self._pos_s_map = {}
else:
self._lyric = song.lyric.content
self._pos_s_map = parse(self._lyric)
self._pos_list = sorted(list(self._pos_s_map.keys()))
self._pos = None
self.current_sentence = '' | bind song changed signal with this |
8,219 | def has_path(nodes, A, B):
x1 = np.intersect1d(nodes, A).size > 0
x2 = np.intersect1d(nodes, B).size > 0
return x1 and x2 | r"""Test if nodes from a breadth_first_order search lead from A to
B.
Parameters
----------
nodes : array_like
Nodes from a breadth_first_order search
A : array_like
The set of educt states
B : array_like
The set of product states
Returns
-------
has_path : boolean
True if there exists a path, else False |
8,220 | def add_string_label(self, str_):
if self.STRING_LABELS.get(str_, None) is None:
self.STRING_LABELS[str_] = backend.tmp_label()
return self.STRING_LABELS[str_] | Maps ("folds") the given string, returning an unique label ID.
This allows several constant labels to be initialized to the same address
thus saving memory space.
:param str_: the string to map
:return: the unique label ID |
8,221 | def rank_loss(sentence_emb, image_emb, margin=0.2):
with tf.name_scope("rank_loss"):
sentence_emb = tf.nn.l2_normalize(sentence_emb, 1)
image_emb = tf.nn.l2_normalize(image_emb, 1)
scores = tf.matmul(image_emb, tf.transpose(sentence_emb))
diagonal = tf.diag_part(scores)
cost_s = tf.maximum(0.0, margin - diagonal + scores)
cost_im = tf.maximum(
0.0, margin - tf.reshape(diagonal, [-1, 1]) + scores)
batch_size = tf.shape(sentence_emb)[0]
empty_diagonal_mat = tf.ones_like(cost_s) - tf.eye(batch_size)
cost_s *= empty_diagonal_mat
cost_im *= empty_diagonal_mat
return tf.reduce_mean(cost_s) + tf.reduce_mean(cost_im) | Experimental rank loss, thanks to kkurach@ for the code. |
8,222 | def setwinsize(self, r, c):
TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561)
if TIOCSWINSZ == 2148037735L:
TIOCSWINSZ = -2146929561
s = struct.pack('HHHH', r, c, 0, 0)  # struct winsize: four unsigned shorts
fcntl.ioctl(self.fileno(), TIOCSWINSZ, s) | This sets the terminal window size of the child tty. This will cause
a SIGWINCH signal to be sent to the child. This does not change the
physical window size. It changes the size reported to TTY-aware
applications like vi or curses -- applications that respond to the
SIGWINCH signal. |
8,223 | def estimate_pos_and_err_parabolic(tsvals):
a = tsvals[2] - tsvals[0]
bc = 2. * tsvals[1] - tsvals[0] - tsvals[2]
s = a / (2 * bc)
err = np.sqrt(2 / bc)
return s, err | Solve for the position and uncertainty of source in one dimension
assuming that you are near the maximum and the errors are parabolic
Parameters
----------
tsvals : `~numpy.ndarray`
The TS values at the maximum TS, and for each pixel on either side
Returns
-------
The position and uncertainty of the source, in pixel units
w.r.t. the center of the maximum pixel |
8,224 | def _effectinit_raise_col_padding_on_focus(self, name, **kwargs):
self._effects[name] = kwargs
if "enlarge_time" not in kwargs:
kwargs[] = 0.5
if "padding" not in kwargs:
kwargs[] = 10
kwargs[] = kwargs[] / kwargs[]
for option in self.options:
option[] = 0.0 | Init the column padding on focus effect.
Keyword arguments can contain enlarge_time and padding. |
8,225 | def eval(self, expr, n, extra_constraints=(), solver=None, model_callback=None):
if self._solver_required and solver is None:
raise BackendError("%s requires a solver for evaluation" % self.__class__.__name__)
results = self._eval(
self.convert(expr), n, extra_constraints=self.convert_list(extra_constraints),
solver=solver, model_callback=model_callback
)
results = list(results)
if type(expr) is not BV:
return results
size = expr.length
for i in range(len(results)):
results[i] &= (1 << size) - 1
solver.terminate()
return results | This function returns up to `n` possible solutions for expression `expr`.
:param expr: expression (an AST) to evaluate
:param n: number of results to return
:param solver: a solver object, native to the backend, to assist in
the evaluation (for example, a z3.Solver)
:param extra_constraints: extra constraints (as ASTs) to add to the solver for this solve
:param model_callback: a function that will be executed with recovered models (if any)
:return: A sequence of up to n results (backend objects) |
8,226 | def _is_reference(bpe):
if isinstance(bpe, _bp()) or \
isinstance(bpe, _bpimpl()) or \
isinstance(bpe, _bp()) or \
isinstance(bpe, _bpimpl()) or \
isinstance(bpe, _bp()) or \
isinstance(bpe, _bpimpl()) or \
isinstance(bpe, _bp()) or \
isinstance(bpe, _bpimpl()):
return True
else:
return False | Return True if the element is an entity reference. |
8,227 | def close(self):
self.close_graph.set()
if self.is_alive():
self.child.join(2) | close the graph |
8,228 | def get_api_client():
with _api_lock:
global _api_client
if not _api_client:
conf_file = os.path.join(os.environ.get("HOME"),
".python-grid5000.yaml")
_api_client = Client.from_yaml(conf_file)
return _api_client | Gets the reference to the API client (singleton). |
8,229 | def list_teams(profile="github", ignore_cache=False):
key = .format(
_get_config_value(profile, )
)
if key not in __context__ or ignore_cache:
client = _get_client(profile)
organization = client.get_organization(
_get_config_value(profile, )
)
teams_data = organization.get_teams()
teams = {}
for team in teams_data:
teams[team.name] = {'id': team.id}  # original dict fields were lost in extraction; 'id' kept as a minimal assumed field
__context__[key] = teams
return __context__[key] | Lists all teams with the organization.
profile
The name of the profile configuration to use. Defaults to ``github``.
ignore_cache
Bypasses the use of cached teams.
CLI Example:
.. code-block:: bash
salt myminion github.list_teams
.. versionadded:: 2016.11.0 |
8,230 | def __advice_stack_frame_protection(self, frame):
if frame is None:
logger.debug(
)
return
f_back = frame.f_back
while f_back:
if f_back.f_code is self.handle.__code__:
raise RuntimeError(
"indirect invocation of by is forbidden" %
frame.f_code.co_name,
)
f_back = f_back.f_back | Overriding of this is only permitted if and only if your name is
Megumin and you have a pet/familiar named Chomusuke. |
8,231 | def describe_policy(policyName,
region=None, key=None, keyid=None, profile=None):
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
policy = conn.get_policy(policyName=policyName)
if policy:
keys = (, , ,
)
return {: dict([(k, policy.get(k)) for k in keys])}
else:
return {: None}
except ClientError as e:
err = __utils__[](e)
if e.response.get(, {}).get() == :
return {: None}
return {: __utils__[](e)} | Given a policy name describe its properties.
Returns a dictionary of interesting properties.
CLI Example:
.. code-block:: bash
salt myminion boto_iot.describe_policy mypolicy |
8,232 | def write(self, fptr):
length = 4 + 4 + 16 + len(self.raw_data)
write_buffer = struct.pack('>I4s', length, b'uuid')  # box length + 'uuid' box type
fptr.write(write_buffer)
fptr.write(self.uuid.bytes)
fptr.write(self.raw_data) | Write a UUID box to file. |
8,233 | def _infer(self, request):
label_vocab = inference_utils.get_label_vocab(
request.args.get())
try:
if request.method != :
logger.error(, request.method)
return http_util.Respond(request, {: },
, code=405)
(inference_addresses, model_names, model_versions,
model_signatures) = self._parse_request_arguments(request)
indices_to_infer = sorted(self.updated_example_indices)
examples_to_infer = [self.examples[index] for index in indices_to_infer]
infer_objs = []
for model_num in xrange(len(inference_addresses)):
serving_bundle = inference_utils.ServingBundle(
inference_addresses[model_num],
model_names[model_num],
request.args.get(),
model_versions[model_num],
model_signatures[model_num],
request.args.get() == ,
request.args.get(),
request.args.get())
infer_objs.append(inference_utils.run_inference_for_inference_results(
examples_to_infer, serving_bundle))
resp = {: indices_to_infer, : infer_objs}
self.updated_example_indices = set()
return http_util.Respond(request, {: json.dumps(resp),
: json.dumps(label_vocab)},
)
except common_utils.InvalidUserInputError as e:
return http_util.Respond(request, {: e.message},
, code=400)
except AbortionError as e:
return http_util.Respond(request, {: e.details},
, code=400) | Returns JSON for the `vz-line-chart`s for a feature.
Args:
request: A request that should contain 'inference_address', 'model_name',
'model_type, 'model_version', 'model_signature' and 'label_vocab_path'.
Returns:
A list of JSON objects, one for each chart. |
8,234 | def compare_profiles(profile1, profile2):
length = len(profile1)
profile1 = np.array(list(profile1))
profile2 = np.array(list(profile2))
similarity_array = profile1 == profile2
matches = np.sum(similarity_array)
similarity_ratio = matches/length
return similarity_ratio | Given two profiles, determine the ratio of similarity, i.e.
the fraction of matching positions (one minus the normalized Hamming distance).
Args:
profile1/2 (str): profile string
Returns:
similarity_ratio (float): the ratio of similarity (0-1) |
8,235 | def insert_first(self, val):
self.head = Node(val, next_node=self.head)
return True | Insert in head
:param val: Object to insert
:return: True iff insertion completed successfully |
8,236 | def cleanup_lines( lines, **kwargs ):
if not isinstance( lines, list ):
raise Exception()
remove_caps = False
remove_clo = False
double_quotes = None
fix_sent_tags = False
for argName, argVal in kwargs.items() :
if argName in [, ]:
remove_caps = bool(argVal)
if argName == :
remove_clo = bool(argVal)
if argName == :
fix_sent_tags = bool(argVal)
if argName in [, ] and argVal and \
argVal.lower() in [, , , ]:
double_quotes = argVal.lower()
pat_token_line = re.compile()
pat_analysis_start = re.compile()
i = 0
to_delete = []
while ( i < len(lines) ):
line = lines[i]
isAnalysisLine = line.startswith() or line.startswith()
if not isAnalysisLine:
removeCurrentTokenAndAnalysis = False
if line.startswith():
if i+1 == len(lines) or (i+1 < len(lines) and not in lines[i+1]):
removeCurrentTokenAndAnalysis = True
if line.startswith():
if i+1 == len(lines) or (i+1 < len(lines) and not in lines[i+1]):
removeCurrentTokenAndAnalysis = True
if removeCurrentTokenAndAnalysis:
del lines[i]
j=i
while ( j < len(lines) ):
line2 = lines[j]
if line2.startswith() or line2.startswith():
del lines[j]
else:
break
continue
if double_quotes:
if pat_token_line.match( lines[i] ):
token_cleaned = (pat_token_line.match(lines[i])).group(1)
if double_quotes in [, ]:
token_cleaned = token_cleaned.replace(, )
lines[i] = +token_cleaned+
elif double_quotes in [, ]:
token_cleaned = token_cleaned.replace(, )
lines[i] = +token_cleaned+
else:
lines[i] = re.sub(, , lines[i])
lines[i] = re.sub(, , lines[i])
lines[i] = re.sub(, , lines[i])
if remove_caps:
lines[i] = lines[i].replace(, )
if double_quotes and double_quotes in [, ]:
lines[i] = lines[i].replace(, )
elif double_quotes and double_quotes in [, ]:
m = pat_analysis_start.match( lines[i] )
if m:
start = m.group(1)
content = m.group(2)
end = m.group(3)
content = content.replace(, )
lines[i] = .join([start, , content, , end])
if remove_clo and in lines[i]:
lines[i] = re.sub(, , lines[i])
lines[i] = re.sub(, , lines[i])
if fix_sent_tags:
if i-1 > -1 and ( in lines[i-1] or in lines[i-1]):
lines[i] =
i += 1
return lines | Cleans up annotation after syntactic pre-processing and processing:
-- Removes embedded clause boundaries "<{>" and "<}>";
-- Removes CLBC markings from analysis;
-- Removes additional information between < and > from analysis;
-- Removes additional information between " and " from analysis;
-- If remove_caps==True , removes 'cap' annotations from analysis;
-- If remove_clo==True , removes CLO CLC CLB markings from analysis;
-- If double_quotes=='esc' then " will be overwritten with \\";
and
if double_quotes=='unesc' then \\" will be overwritten with ";
-- If fix_sent_tags=True, then sentence tags (<s> and </s>) will be
checked for mistakenly added analysis, and found analysis will be
removed;
Returns the input list, which has been cleaned from additional information; |
8,237 | def config(self):
response = self._call(
mc_calls.Config
)
config_list = response.body.get(, {}).get(, [])
return config_list | Get a listing of mobile client configuration settings. |
8,238 | def change_puk(ctx, puk, new_puk):
controller = ctx.obj[]
if not puk:
puk = _prompt_pin(ctx, prompt=)
if not new_puk:
new_puk = click.prompt(
, default=, hide_input=True,
show_default=False, confirmation_prompt=True,
err=True)
if not _valid_pin_length(puk):
ctx.fail()
if not _valid_pin_length(new_puk):
ctx.fail()
try:
controller.change_puk(puk, new_puk)
click.echo()
except AuthenticationBlocked as e:
logger.debug(, exc_info=e)
ctx.fail()
except WrongPuk as e:
logger.debug(
, e.tries_left, exc_info=e)
ctx.fail( % e.tries_left) | Change the PUK code.
If the PIN is lost or blocked it can be reset using a PUK.
The PUK must be between 6 and 8 characters long, and supports any type of
alphanumeric characters. |
8,239 | def apply_substitutions(monomial, monomial_substitutions, pure=False):
if is_number_type(monomial):
return monomial
original_monomial = monomial
changed = True
if not pure:
substitutions = monomial_substitutions
else:
substitutions = {}
for lhs, rhs in monomial_substitutions.items():
irrelevant = False
for atom in lhs.atoms():
if atom.is_Number:
continue
if not monomial.has(atom):
irrelevant = True
break
if not irrelevant:
substitutions[lhs] = rhs
while changed:
for lhs, rhs in substitutions.items():
monomial = fast_substitute(monomial, lhs, rhs)
if original_monomial == monomial:
changed = False
original_monomial = monomial
return monomial | Helper function to remove monomials from the basis. |
8,240 | def add_router_interface(self, context, router_info):
if router_info:
self._select_dicts(router_info[])
cidr = router_info[]
subnet_mask = cidr.split()[1]
router_name = self._arista_router_name(router_info[],
router_info[])
if self._mlag_configured:
mlag_peer_failed = False
for i, server in enumerate(self._servers):
router_ip = self._get_router_ip(cidr, i,
router_info[])
try:
self.add_interface_to_router(router_info[],
router_name,
router_info[],
router_ip, subnet_mask,
server)
mlag_peer_failed = False
except Exception:
if not mlag_peer_failed:
mlag_peer_failed = True
else:
msg = (_(
) % router_name)
LOG.exception(msg)
raise arista_exc.AristaServicePluginRpcError(
msg=msg)
else:
for s in self._servers:
self.add_interface_to_router(router_info[],
router_name,
router_info[],
None, subnet_mask, s) | Adds an interface to a router created on Arista HW router.
This deals with both IPv6 and IPv4 configurations. |
8,241 | def initiate_tasks(self):
self.tasks_classes = TaskLoader().load_tasks(
paths=self.configuration[Configuration.ALGORITHM][Configuration.TASKS][Configuration.PATHS]) | Loads all tasks using `TaskLoader` from respective configuration option |
8,242 | def create_snapshot(self, datacenter_id, volume_id,
name=None, description=None):
data = {: name, : description}
response = self._perform_request(
% (
datacenter_id, volume_id),
method=,
data=urlencode(data))
return response | Creates a snapshot of the specified volume.
:param datacenter_id: The unique ID of the data center.
:type datacenter_id: ``str``
:param volume_id: The unique ID of the volume.
:type volume_id: ``str``
:param name: The name given to the volume.
:type name: ``str``
:param description: The description given to the volume.
:type description: ``str`` |
8,243 | def substitute_minor_for_major(progression, substitute_index,
ignore_suffix=False):
(roman, acc, suff) = parse_string(progression[substitute_index])
res = []
if suff == 'm' or suff == 'm7' or (suff == '' and roman in ['II', 'III',
'VI']) or ignore_suffix:
n = skip(roman, 2)
a = interval_diff(roman, n, 3) + acc
if suff == 'm' or ignore_suffix:
res.append(tuple_to_string((n, a, 'M')))
elif suff == 'm7' or ignore_suffix:
res.append(tuple_to_string((n, a, 'M7')))
elif suff == '' or ignore_suffix:
res.append(tuple_to_string((n, a, '')))  # suffix strings restored from the docstring examples
return res | Substitute minor chords for its major equivalent.
'm' and 'm7' suffixes recognized, and ['II', 'III', 'VI'] if there is no
suffix.
Examples:
>>> substitute_minor_for_major(['VI'], 0)
['I']
>>> substitute_minor_for_major(['Vm'], 0)
['bVIIM']
>>> substitute_minor_for_major(['VIm7'], 0)
['IM7'] |
8,244 | def run(path, code, params=None, ignore=None, select=None, **meta):
logger.debug()
clear_cache = params.pop(, False)
if clear_cache:
MANAGER.astroid_cache.clear()
class Reporter(BaseReporter):
def __init__(self):
self.errors = []
super(Reporter, self).__init__()
def _display(self, layout):
pass
def handle_message(self, msg):
self.errors.append(dict(
lnum=msg.line,
col=msg.column,
text="%s %s" % (msg.msg_id, msg.msg),
type=msg.msg_id[0]
))
params = _Params(ignore=ignore, select=select, params=params)
logger.debug(params)
reporter = Reporter()
try:
Run([path] + params.to_attrs(), reporter=reporter, do_exit=False)
except TypeError:
import pylint
if pylint.__version__.split()[0] != :
raise
Run([path] + params.to_attrs(), reporter=reporter, exit=False)
return reporter.errors | Pylint code checking.
:return list: List of errors. |
8,245 | def update_redirect(self):
page_history = Stack(session.get("page_history", []))
page_history.push(request.url)
session["page_history"] = page_history.to_json() | Call it on your own endpoint's to update the back history navigation.
If you bypass it, the next submit or back will go over it. |
8,246 | def get_response(self, environ=None):
response = super(SameContentException, self).get_response(
environ=environ
)
if self.etag is not None:
response.set_etag(self.etag)
if self.last_modified is not None:
response.headers[] = http_date(self.last_modified)
return response | Get a list of headers. |
8,247 | def get_catfact():
response = requests.get(CAT_FACTS_URL, verify=False)
response.raise_for_status()
json_data = response.json()
return json_data['fact'] | Get a cat fact from catfact.ninja and return it as a string.
Functions for Soundhound, Google, IBM Watson, or other APIs can be added
to create the desired functionality into this bot. |
8,248 | def compute_transformed(context):
key_composite = compute_key_composite(
password=context._._.password,
keyfile=context._._.keyfile
)
kdf_parameters = context._.header.value.dynamic_header.kdf_parameters.data.dict
if context._._.transformed_key is not None:
transformed_key = context._._.transformed_key
elif kdf_parameters[].value == kdf_uuids[]:
transformed_key = argon2.low_level.hash_secret_raw(
secret=key_composite,
salt=kdf_parameters[].value,
hash_len=32,
type=argon2.low_level.Type.D,
time_cost=kdf_parameters[].value,
memory_cost=kdf_parameters[].value // 1024,
parallelism=kdf_parameters[].value,
version=kdf_parameters[].value
)
elif kdf_parameters[].value == kdf_uuids[]:
transformed_key = aes_kdf(
kdf_parameters[].value,
kdf_parameters[].value,
key_composite
)
else:
raise Exception()
return transformed_key | Compute transformed key for opening database |
8,249 | def as_base_units(self):
b = collections.defaultdict(int)
factor = 1
for k, v in self.items():
derived = False
for d in DERIVED_UNITS.values():
if k in d:
for k2, v2 in d[k].items():
if isinstance(k2, Number):
factor *= k2 ** (v2 * v)
else:
b[k2] += v2 * v
derived = True
break
if not derived:
si, f = _get_si_unit(k)
b[si] += v
factor *= f ** v
return {k: v for k, v in b.items() if v != 0}, factor | Converts all units to base SI units, including derived units.
Returns:
(base_units_dict, scaling factor). base_units_dict will not
contain any constants, which are gathered in the scaling factor. |
8,250 | def stream(self, sha):
hexsha, typename, size, stream = self._git.stream_object_data(bin_to_hex(sha))
return OStream(hex_to_bin(hexsha), typename, size, stream) | For now, all lookup is done by git itself |
8,251 | def _enable_lock(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
if self.is_concurrent:
only_read = kwargs.get('only_read')  # key assumed from the variable name
if only_read is None or only_read:
with self._rwlock:
return func(*args, **kwargs)
else:
self._rwlock.acquire_writer()
try:
return func(*args, **kwargs)
finally:
self._rwlock.release()
else:
return func(*args, **kwargs)
return wrapper | Decorator that ensures thread safety when the current cache instance is in concurrent mode. |
8,252 | def save_post(self, title, text, user_id, tags, draft=False,
post_date=None, last_modified_date=None, meta_data=None,
post_id=None):
raise NotImplementedError("This method needs to be implemented by "
"the inheriting class") | Persist the blog post data. If ``post_id`` is ``None`` or ``post_id``
is invalid, the post must be inserted into the storage. If ``post_id``
is a valid id, then the data must be updated.
:param title: The title of the blog post
:type title: str
:param text: The text of the blog post
:type text: str
:param user_id: The user identifier
:type user_id: str
:param tags: A list of tags
:type tags: list
:param draft: If the post is a draft of if needs to be published.
:type draft: bool
:param post_date: (Optional) The date the blog was posted (default
datetime.datetime.utcnow())
:type post_date: datetime.datetime
:param last_modified_date: (Optional) The date when blog was last
modified (default datetime.datetime.utcnow())
:type last_modified_date: datetime.datetime
:param meta_data: The meta data for the blog post
:type meta_data: dict
:param post_id: The post identifier. This should be ``None`` for an
insert call, and a valid value for update.
:type post_id: int
:return: The post_id value, in case of a successful insert or update.
Return ``None`` if there were errors. |
8,253 | def _get_pretty_table(self, indent: int = 0, align: int = ALIGN_CENTER, border: bool = False) -> PrettyTable:
rows = self.rows
columns = self.columns
if self._headers_color != Printer.NORMAL and len(rows) > 0 and len(columns) > 0:
rows[0] = rows[0][:]
columns = columns[:]
columns[0] = self._headers_color + columns[0]
rows[0][0] = Printer.NORMAL + str(rows[0][0])
table = PrettyTable(columns, border=border, max_width=get_console_width() - indent)
table.align = self._ALIGN_DICTIONARY[align]
for row in rows:
table.add_row(row)
for column, max_width in self._column_size_map.items():
table.max_width[column] = max_width
return table | Returns the table format of the scheme, i.e.:
<table name>
+----------------+----------------
| <field1> | <field2>...
+----------------+----------------
| value1(field1) | value1(field2)
| value2(field1) | value2(field2)
| value3(field1) | value3(field2)
+----------------+---------------- |
8,254 | def wait(self):
logging.info("waiting for {} jobs to complete".format(len(self.submissions)))
while not self.shutdown:
time.sleep(1) | Waits for all submitted jobs to complete. |
8,255 | def get_logistic_regression_coefs_l2(self, category,
clf=RidgeClassifierCV()):
try:
from sklearn.cross_validation import cross_val_predict
except:
from sklearn.model_selection import cross_val_predict
y = self._get_mask_from_category(category)
X = TfidfTransformer().fit_transform(self._X)
clf.fit(X, y)
y_hat = cross_val_predict(clf, X, y)
acc, baseline = self._get_accuracy_and_baseline_accuracy(y, y_hat)
return clf.coef_[0], acc, baseline | Computes l2-penalized logistic regression score.
Parameters
----------
category : str
category name to score
category : str
category name to score
Returns
-------
(coefficient array, accuracy, majority class baseline accuracy) |
8,256 | def _add_err(self, exinfo):
if self._err:
return
self._err = exinfo
self.all_ok = False | Sets the error on this MultiResult. Will be ignored if an error is
already set.
:param exinfo: Return value from ``sys.exc_info()`` |
8,257 | def continuous_binary_search(f, lo, hi, gap=1e-4):
while hi - lo > gap:
mid = (lo + hi) / 2.
if f(mid):
hi = mid
else:
lo = mid
return lo | Binary search for a function
:param f: boolean monotone function with f(hi) = True
:param int lo:
:param int hi: with hi >= lo
:param float gap:
:returns: first value x in [lo,hi] such that f(x),
x is computed up to some precision
:complexity: `O(log((hi-lo)/gap))` |
8,258 | def watch(self, flag):
lib.EnvSetDeftemplateWatch(self._env, int(flag), self._tpl) | Whether or not the Template is being watched. |
8,259 | def _multi_take(self, tup):
o = self.obj
d = {axis: self._get_listlike_indexer(key, axis)
for (key, axis) in zip(tup, o._AXIS_ORDERS)}
return o._reindex_with_indexers(d, copy=True, allow_dups=True) | Create the indexers for the passed tuple of keys, and execute the take
operation. This allows the take operation to be executed all at once -
rather than once for each dimension - improving efficiency.
Parameters
----------
tup : tuple
Tuple of indexers, one per axis
Returns
-------
values: same type as the object being indexed |
8,260 | def write_Bar(file, bar, bpm=120, repeat=0, verbose=False):
m = MidiFile()
t = MidiTrack(bpm)
m.tracks = [t]
while repeat >= 0:
t.play_Bar(bar)
repeat -= 1
return m.write_file(file, verbose) | Write a mingus.Bar to a MIDI file.
Both the key and the meter are written to the file as well. |
8,261 | def score(infile, outfile, classifier, xgb_autotune, apply_weights, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank, ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap, ipf_min_transition_sn, tric_chromprob, threads, test):
if outfile is None:
outfile = infile
else:
outfile = outfile
xgb_hyperparams = {: xgb_autotune, : 10, : 100, : 10, : 0.33}
xgb_params = {: 0.3, : 0, : 6, : 1, : 1, : 1, : 1, : 1, : 1, : 0, : 1, : 1, : , : 1, : }
xgb_params_space = {: hp.uniform(, 0.0, 0.3), : hp.uniform(, 0.0, 0.5), : hp.quniform(, 2, 8, 1), : hp.quniform(, 1, 5, 1), : 1, : 1, : 1, : 1, : hp.uniform(, 0.0, 1.0), : hp.uniform(, 0.0, 1.0), : 1.0, : 1, : , : 1, : }
if not apply_weights:
PyProphetLearner(infile, outfile, classifier, xgb_hyperparams, xgb_params, xgb_params_space, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank, ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap, ipf_min_transition_sn, tric_chromprob, threads, test).run()
else:
PyProphetWeightApplier(infile, outfile, classifier, xgb_hyperparams, xgb_params, xgb_params_space, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank, ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap, ipf_min_transition_sn, tric_chromprob, threads, test, apply_weights).run() | Conduct semi-supervised learning and error-rate estimation for MS1, MS2 and transition-level data. |
8,262 | def get_container_metadata(self, container, prefix=None):
return self._manager.get_metadata(container, prefix=prefix) | Returns a dictionary containing the metadata for the container. |
8,263 | def print_topics(self, Nwords=10):
print( % Nwords)
for k, words in self.list_topics(Nwords):
print(unicode(k).ljust(3) + + .join(list(zip(*words))[0])) | Print the top ``Nwords`` words for each topic. |
8,264 | def absstart(self):
if hasattr(self, "docstart") and self.docstart > 0:
return self.docstart
else:
return self.start | Returns the absolute start of the element by including docstrings
outside of the element definition if applicable. |
8,265 | def main(mash_output, hash_cutoff, sample_id, assembly_file):
input_f = open(mash_output, "r")
master_dict = {}
for line in input_f:
tab_split = line.split("\t")
current_seq = tab_split[1].strip()
ref_accession = "_".join(tab_split[0].strip().split("_")[0:3])
mash_dist = tab_split[2].strip()
hashes_list = tab_split[-1].strip().split("/")
perc_hashes = float(hashes_list[0]) / float(hashes_list[1])
if ref_accession in master_dict.keys():
current_seq += ", {}".format(master_dict[ref_accession][-1])
if perc_hashes > float(hash_cutoff):
master_dict[ref_accession] = [
round(1 - float(mash_dist), 2),
round(perc_hashes, 2),
current_seq
]
send_to_output(master_dict, mash_output, sample_id, assembly_file) | Main function that dumps a mash dist txt file to a json file
Parameters
----------
mash_output: str
A string with the input file.
hash_cutoff: str
the percentage cutoff for the percentage of shared hashes between query
and plasmid in database that is allowed for the plasmid to be reported
to the results outputs
sample_id: str
The name of the sample. |
8,266 | def crossover_with(self, other, points=2):
assert isinstance(other, BitCondition)
assert len(self) == len(other)
template = BitString.crossover_template(len(self), points)
inv_template = ~template
bits1 = (self._bits & template) | (other._bits & inv_template)
mask1 = (self._mask & template) | (other._mask & inv_template)
bits2 = (self._bits & inv_template) | (other._bits & template)
mask2 = (self._mask & inv_template) | (other._mask & template)
return type(self)(bits1, mask1), type(self)(bits2, mask2) | Perform 2-point crossover on this bit condition and another of
the same length, returning the two resulting children.
Usage:
offspring1, offspring2 = condition1.crossover_with(condition2)
Arguments:
other: A second BitCondition of the same length as this one.
points: An int, the number of crossover points of the
crossover operation.
Return:
A tuple (condition1, condition2) of BitConditions, where the
value at each position of this BitCondition and the other is
preserved in one or the other of the two resulting conditions. |
8,267 | def clear(zpool, device=None):
target = []
target.append(zpool)
target.append(device)
res = __salt__[](
__utils__[](
command=,
target=target,
),
python_shell=False,
)
return __utils__[](res, ) | Clears device errors in a pool.
.. warning::
The device must not be part of an active pool configuration.
zpool : string
name of storage pool
device : string
(optional) specific device to clear
.. versionadded:: 2018.3.1
CLI Example:
.. code-block:: bash
salt '*' zpool.clear mypool
salt '*' zpool.clear mypool /path/to/dev |
8,268 | def _get_seqprop_to_seqprop_alignment(self, seqprop1, seqprop2):
if isinstance(seqprop1, str):
seqprop1_id = seqprop1
else:
seqprop1_id = seqprop1.id
if isinstance(seqprop2, str):
seqprop2_id = seqprop2
else:
seqprop2_id = seqprop2.id
aln_id = '{}_{}'.format(seqprop1_id, seqprop2_id)  # assumed join format for the alignment ID
if self.sequence_alignments.has_id(aln_id):
alignment = self.sequence_alignments.get_by_id(aln_id)
return alignment
else:
raise ValueError('{}: alignment not found'.format(aln_id)) | Return the alignment stored in self.sequence_alignments given a seqprop + another seqprop |
8,269 | def clip_image(image, clip_min, clip_max):
return np.minimum(np.maximum(clip_min, image), clip_max) | Clip an image, or an image batch, with upper and lower threshold. |
8,270 | def last_in_date_group(df,
data_query_cutoff_times,
assets,
reindex=True,
have_sids=True,
extra_groupers=None):
idx = [data_query_cutoff_times[data_query_cutoff_times.searchsorted(
df[TS_FIELD_NAME].values,
)]]
if have_sids:
idx += [SID_FIELD_NAME]
if extra_groupers is None:
extra_groupers = []
idx += extra_groupers
last_in_group = df.drop(TS_FIELD_NAME, axis=1).groupby(
idx,
sort=False,
).last()
for _ in range(len(idx) - 1):
last_in_group = last_in_group.unstack(-1)
if reindex:
if have_sids:
cols = last_in_group.columns
last_in_group = last_in_group.reindex(
index=data_query_cutoff_times,
columns=pd.MultiIndex.from_product(
tuple(cols.levels[0:len(extra_groupers) + 1]) + (assets,),
names=cols.names,
),
)
else:
last_in_group = last_in_group.reindex(data_query_cutoff_times)
return last_in_group | Determine the last piece of information known on each date in the date
index for each group. Input df MUST be sorted such that the correct last
item is chosen from each group.
Parameters
----------
df : pd.DataFrame
The DataFrame containing the data to be grouped. Must be sorted so that
the correct last item is chosen from each group.
data_query_cutoff_times : pd.DatetimeIndex
The dates to use for grouping and reindexing.
assets : pd.Int64Index
The assets that should be included in the column multiindex.
reindex : bool
Whether or not the DataFrame should be reindexed against the date
index. This will add back any dates to the index that were grouped
away.
have_sids : bool
Whether or not the DataFrame has sids. If it does, they will be used
in the groupby.
extra_groupers : list of str
Any extra field names that should be included in the groupby.
Returns
-------
last_in_group : pd.DataFrame
A DataFrame with dates as the index and fields used in the groupby as
levels of a multiindex of columns. |
8,271 | def get_name_init(self, name):
self._register_name(name)
return self._var_name_mappers[name].get_init() | Get initial name of symbol. |
8,272 | def get_composition_lookup_session_for_repository(self, repository_id, proxy):
if repository_id is None:
raise NullArgument()
if not self.supports_composition_lookup():
raise Unimplemented()
try:
from . import sessions
except ImportError:
raise
proxy = self._convert_proxy(proxy)
try:
session = sessions.CompositionLookupSession(repository_id, proxy, runtime=self._runtime)
except AttributeError:
raise
return session | Gets the OsidSession associated with the composition lookup
service for the given repository.
arg: repository_id (osid.id.Id): the Id of the repository
arg proxy (osid.proxy.Proxy): a proxy
return: (osid.repository.CompositionLookupSession) - the new
CompositionLookupSession
raise: NotFound - repository_id not found
raise: NullArgument - repository_id is null
raise: OperationFailed - unable to complete request
raise: Unimplemented - supports_composition_lookup() or
supports_visible_federation() is false
compliance: optional - This method must be implemented if
supports_composition_lookup() and
supports_visible_federation() are true. |
8,273 | def _open(self, mode=):
open_file = None
writeable = in mode or in mode or in mode
try:
if (self.filename.startswith()
or self.filename.startswith()):
open_file = fs.opener.fsopendir(self.filename).open(,
mode)
else:
if not hasattr(self, ):
self._pyfs, self._path = fs.opener.opener.parse(
self.filename, writeable=writeable)
if self._cache_timeout is not None:
self._pyfs = fs.remote.CacheFS(
self._pyfs, cache_timeout=self._cache_timeout)
open_file = self._pyfs.open(self._path, mode)
except fs.errors.ResourceNotFoundError:
if self._can_create:
segments = fs.opener.opener.split_segments(self.filename)
if segments:
fs_name, credentials, url1, url2, path = segments.groups()
assert fs_name,
host =
if in url2:
split_url2 = url2.split(, 1)
if len(split_url2) > 1:
url2 = split_url2[1]
else:
url2 =
host = split_url2[0]
pyfs = fs.opener.opener.opendir(
% (fs_name, host))
if self._cache_timeout is not None:
pyfs = fs.remote.CacheFS(
pyfs, cache_timeout=self._cache_timeout)
url2_path, url2_filename = os.path.split(url2)
if url2_path and not pyfs.exists(url2_path):
pyfs.makedir(url2_path, recursive=True)
else:
full_url = fs.opener._expand_syspath(self.filename)
url2_path, url2 = os.path.split(full_url)
pyfs = fs.osfs.OSFS(url2_path)
try:
self._pyfs = pyfs
self._path = url2
return pyfs.open(url2, mode)
except fs.errors.ResourceNotFoundError:
if writeable:
raise
else:
pass
if writeable:
raise
else:
pass
return open_file | Open the password file in the specified mode |
8,274 | def _heartbeat_manager(self):
if self._process and not self._process.is_alive() and not self.done:
self.start() | Heartbeat DAG file processor and start it if it is not alive.
:return: |
8,275 | def _make_readline_peeker(self):
counter = itertools.count(0)
def readline():
try:
return self._peek_buffer(next(counter))
except StopIteration:
return ''  # readline-style EOF
return readline | Make a readline-like function which peeks into the source. |
8,276 | def parse_data(self, data, msg_signature=None, timestamp=None, nonce=None):
result = {}
if isinstance(data, six.text_type):
data = data.encode()
if self.conf.encrypt_mode == :
if not (msg_signature and timestamp and nonce):
raise ParseError()
data = self.conf.crypto.decrypt_message(
msg=data,
msg_signature=msg_signature,
timestamp=timestamp,
nonce=nonce,
)
try:
xml = XMLStore(xmlstring=data)
except Exception:
raise ParseError()
result = xml.xml2dict
result[] = data
result[] = result.pop().lower()
message_type = MESSAGE_TYPES.get(result[], UnknownMessage)
self.__message = message_type(result)
self.__is_parse = True | Parse the data sent by the WeChat server and store it in this class
:param data: body data of the HTTP request
:param msg_signature: msg_signature for EncodingAESKey mode
:param timestamp: timestamp for EncodingAESKey mode
:param nonce: nonce (random value) for EncodingAESKey mode
:raises ParseError: failed to parse the WeChat server data; the data is invalid |
8,277 | def _map_type_to_dict(self, type_name):
root = self._root_instance
if type_name == RESULT:
return root._results
elif type_name == PARAMETER:
return root._parameters
elif type_name == DERIVED_PARAMETER:
return root._derived_parameters
elif type_name == CONFIG:
return root._config
elif type_name == LEAF:
return root._other_leaves
else:
raise RuntimeError() | Maps a an instance type representation string (e.g. 'RESULT')
to the corresponding dictionary in root. |
8,278 | def unix_ts(dtval):
epoch = datetime(1970, 1, 1, 0, 0, tzinfo=tzutc())
delta = (dtval - epoch)
return delta.days * 24 * 3600 + delta.seconds | Convert datetime into a unix timestamp.
This is equivalent to Python 3's int(datetime.timestamp()).
:param dt: datetime to convert |
8,279 | def Lexicon(**rules):
for (lhs, rhs) in rules.items():
rules[lhs] = [word.strip() for word in rhs.split()]
return rules | Create a dictionary mapping symbols to alternative words.
>>> Lexicon(Art = "the | a | an")
{'Art': ['the', 'a', 'an']} |
8,280 | def erfcc(x):
z = abs(x)
t = 1.0 / (1.0 + 0.5 * z)
ans = t * math.exp(
-z * z - 1.26551223 + t * (1.00002368 + t * (0.37409196 + t * (0.09678418 + t * (-0.18628806 + t * (0.27886807 + t * (-1.13520398 + t * (1.48851587 + t * (-0.82215223 + t * 0.17087277)))))))))
if x >= 0:
return ans
else:
return 2.0 - ans | Returns the complementary error function erfc(x) with fractional
error everywhere less than 1.2e-7. Adapted from Numerical Recipes.
Usage: lerfcc(x) |
8,281 | async def _wrap_gen(self, ID: str):
while True:
result = await self._gens_queue[ID].get()
if isinstance(result, StopAsyncIteration):
del self._gens_queue[ID]
break
else:
yield result | Async iterator wrapper.
Parameters:
ID (str): - the task ID
Yield:
(Any): - results fetched from the async iterator's result queue
Raise:
(StopAsyncIteration): - raised when the async iterator terminates |
8,282 | def showMenu(self, point=None):
menu = self.createMenu(self)
menu.exec_(QtGui.QCursor.pos())
menu.deleteLater() | Displays the menu for this view widget.
:param point | <QPoint> |
8,283 | def cpp_best_split_full_model(X, Uy, C, S, U, noderange, delta,
save_memory=False):
return CSP.best_split_full_model(X, Uy, C, S, U, noderange, delta) | wrapper calling the cpp splitting function |
8,284 | def split_leading_indent(line, max_indents=None):
indent = ""
while (
(max_indents is None or max_indents > 0)
and line.startswith((openindent, closeindent))
) or line.lstrip() != line:
if max_indents is not None and line.startswith((openindent, closeindent)):
max_indents -= 1
indent += line[0]
line = line[1:]
return indent, line | Split line into leading indent and main. |
8,285 | def closure(self):
def single_var(var):
"Checks if var represents a single variable"
if not hasattr(var, '__iter__'):
return True
else:
return len(var) == 1
def sg0(ind):
"Symmetry rule: -> "
return IndependenceAssertion(ind.event2, ind.event1, ind.event3)
def apply_left_and_right(func):
def symmetric_func(*args):
if len(args) == 1:
return func(args[0]) + func(sg0(args[0]))
if len(args) == 2:
return (func(*args) + func(args[0], sg0(args[1])) +
func(sg0(args[0]), args[1]) + func(sg0(args[0]), sg0(args[1])))
return symmetric_func
@apply_left_and_right
def sg1(ind):
"Decomposition rule: -> , "
if single_var(ind.event2):
return []
else:
return [IndependenceAssertion(ind.event1, ind.event2 - {elem}, ind.event3)
for elem in ind.event2]
@apply_left_and_right
def sg2(ind):
"Weak Union rule: -> , "
if single_var(ind.event2):
return []
else:
return [IndependenceAssertion(ind.event1, ind.event2 - {elem}, {elem} | ind.event3)
for elem in ind.event2]
@apply_left_and_right
def sg3(ind1, ind2):
"Contraction rule: & -> "
if ind1.event1 != ind2.event1:
return []
Y = ind2.event2
Z = ind2.event3
Y_Z = ind1.event3
if Y < Y_Z and Z < Y_Z and Y.isdisjoint(Z):
return [IndependenceAssertion(ind1.event1, ind1.event2 | Y, Z)]
else:
return []
all_independencies = set()
new_inds = set(self.independencies)
while new_inds:
new_pairs = (set(itertools.permutations(new_inds, 2)) |
set(itertools.product(new_inds, all_independencies)) |
set(itertools.product(all_independencies, new_inds)))
all_independencies |= new_inds
new_inds = set(sum([sg1(ind) for ind in new_inds] +
[sg2(ind) for ind in new_inds] +
[sg3(*inds) for inds in new_pairs], []))
new_inds -= all_independencies
return Independencies(*list(all_independencies)) | Returns a new `Independencies()`-object that additionally contains those `IndependenceAssertions`
that are implied by the the current independencies (using with the `semi-graphoid axioms
<https://en.wikipedia.org/w/index.php?title=Conditional_independence&oldid=708760689#Rules_of_conditional_independence>`_;
see (Pearl, 1989, `Conditional Independence and its representations
<http://www.cs.technion.ac.il/~dang/journal_papers/pearl1989conditional.pdf>`_)).
Might be very slow if more than six variables are involved.
Examples
--------
>>> from pgmpy.independencies import Independencies
>>> ind1 = Independencies(('A', ['B', 'C'], 'D'))
>>> ind1.closure()
(A _|_ B | D, C)
(A _|_ B, C | D)
(A _|_ B | D)
(A _|_ C | D, B)
(A _|_ C | D)
>>> ind2 = Independencies(('W', ['X', 'Y', 'Z']))
>>> ind2.closure()
(W _|_ Y)
(W _|_ Y | X)
(W _|_ Z | Y)
(W _|_ Z, X, Y)
(W _|_ Z)
(W _|_ Z, X)
(W _|_ X, Y)
(W _|_ Z | X)
(W _|_ Z, Y | X)
[..] |
8,286 | def cut_gmail_quote(html_message):
gmail_quote = cssselect('div.gmail_quote', html_message)  # selector assumed from the gmail_quote class named in the docstring
if gmail_quote and (gmail_quote[0].text is None or not RE_FWD.match(gmail_quote[0].text)):
gmail_quote[0].getparent().remove(gmail_quote[0])
return True | Cuts the outermost block element with class gmail_quote. |
8,287 | def http_basic_auth_get_user(request):
try:
if user_is_authenticated(request.user):
return request.user
except AttributeError:
pass
if 'HTTP_AUTHORIZATION' in request.META:
auth_data = request.META['HTTP_AUTHORIZATION'].split()
if len(auth_data) == 2 and auth_data[0].lower() == "basic":
uname, passwd = base64.b64decode(auth_data[1]).decode().split(':')
django_user = authenticate(username=uname, password=passwd)
if django_user is not None:
login(request, django_user)
try:
return request.user
except AttributeError:
return AnonymousUser() | Inspect the given request to find a logged user. If not found, the header HTTP_AUTHORIZATION
is read for 'Basic Auth' login and password, and try to authenticate against default UserModel.
Always return a User instance (possibly anonymous, meaning authentication failed) |
8,288 | def html2groff(data, name):
try:
data = data[data.index():]
except ValueError:
pass
for rp in pre_rps:
data = re.compile(rp[0], rp[2]).sub(rp[1], data)
for table in re.findall(r, data, re.S):
tbl = parse_table(escape_pre_section(table))
tbl = re.compile(r, re.S).sub(r, tbl)
data = data.replace(table, tbl)
for rp in rps:
data = re.compile(rp[0], rp[2]).sub(rp[1], data)
for st in re.findall(r, data):
data = data.replace(st, st.upper())
page_type = re.search(r, data)
if page_type and in page_type.group(1):
class_name = re.search(r, data).group(1)
secs = re.findall(r, data, re.S)
for sec, content in secs:
if ( in sec and
not in sec and
not in sec and
sec != ):
content2 = re.sub(r, r
% class_name, content)
content2 = re.sub(r, r % class_name,
content2)
content2 = re.sub(r, r % class_name,
content2)
data = data.replace(content, content2)
elif in sec and in sec:
inherit = re.search(r,
sec).group(1).lower()
content2 = re.sub(r, r
% inherit, content)
data = data.replace(content, content2)
data = data.replace(, )
return data | Convert HTML text from cplusplus.com to Groff-formatted text. |
8,289 | def write_Composition(composition, filename, zip=False):
text = from_Composition(composition)
if not zip:
f = open(filename + , )
f.write(text)
f.close()
else:
import zipfile
import os
zf = zipfile.ZipFile(filename + , mode=,
compression=zipfile.ZIP_DEFLATED)
zi = zipfile.ZipInfo( + os.sep + )
zi.external_attr = 0660 << 16L
zf.writestr(zi,
"<?xml version= encoding=?>"
"<container><rootfiles><rootfile full-path=/>"
"</rootfiles></container>".format(filename))
zi = zipfile.ZipInfo(filename + )
zi.external_attr = 0660 << 16L
zf.writestr(zi, text)
zf.close() | Create an XML file (or MXL if compressed) for a given composition. |
8,290 | def run(self):
x, y = 1,0
num_steps = 0
while self.s.get_state() != :
self.s.command({:, :, :[x, y]}, self.a1)
self.s.command({:, :, :[x, y+1]}, self.a2)
num_steps += 1
if num_steps >= 3:
break
for a in self.s.agents:
print(a.name, , a.coords[], a.coords[]) | This AI simple moves the characters towards the opposite
edges of the grid for 3 steps or until event halts the
simulation |
8,291 | def do_edit_settings(fake):
path = resources.user.open().name
click.echo()
for (option, _, description) in legit_settings.config_defaults:
click.echo(columns([crayons.yellow(option), 25], [description, None]))
click.echo("")
if fake:
click.echo(crayons.red(.format(path)))
else:
click.edit(path) | Opens legit settings in editor. |
8,292 | def stop(self):
if self.stream and self.stream.session.state != STATE_STOPPED:
self.stream.stop() | Stop stream. |
8,293 | def _to_bytes(self, data, key='', expired=None, noc=0, ncalls=0):
data_tuple = (data, expired, noc, ncalls)
if not can_encrypt and key:
warnings.warn("Pycrypto is not installed. The data will not be encrypted",
UserWarning)
result = encode_safely(data_tuple)
elif can_encrypt and key:
if PY3:
cipher = AESCipher(key.encode(settings.DEFAULT_ENCODING))
else:
cipher = AESCipher(key)
result = cipher.encrypt(encode_safely(data_tuple))
else:
result = encode_safely(data_tuple)
return result | Serialize (and encrypt if `key` is provided) the data and represent it as string.
**Parameters**
:param data: any python serializable (pickable) object
:param key: If the key is provided and `pycrypto` is installed, cached
data will be encrypted (If `pycrypto` is not installed, this #TODO: pycrypto or something else?!
parameter will be ignored). Empty string by default.
:param expired: exact date when the cache will be expired; It is `None` by default
:param noc: the number of allowed calls; TODO: Clarify what does it mean, exactly?!!!!
:param ncalls: What is it; I don't understand!!! TODO: clarify this!!!!
:type key: str
:type expired: `datetime` or `None`
:type noc: int
:type ncalls: int
:returns: serialized data
:rtype: str |
8,294 | def cprint(color, prefix, message):
message = message or ""
prefix = prefix or ""
print((Console.theme[color] +
prefix +
message +
Console.theme[])) | prints a message in a given color
:param color: the color as defined in the theme
:param prefix: the prefix (a string)
:param message: the message
:return: |
8,295 | def info(name):
certtls.cert_infotls.cert_infoextensionsextensionsopenssl x509 -in {0} -noout -textcmd.runquiet') | Return information about a certificate
.. note::
Will output tls.cert_info if that's available, or OpenSSL text if not
:param name: CommonName of cert
CLI example:
.. code-block:: bash
salt 'gitlab.example.com' acme.info dev.example.com |
8,296 | def nodeListGetString(self, list, inLine):
if list is None: list__o = None
else: list__o = list._o
ret = libxml2mod.xmlNodeListGetString(self._o, list__o, inLine)
return ret | Build the string equivalent to the text contained in the
Node list made of TEXTs and ENTITY_REFs |
8,297 | def heat_process(body, message):
event_type = body['event_type']
process = heat_customer_process.get(event_type)
if process is not None:
process(body, message)
else:
matched = False
process_wildcard = None
for pattern in heat_customer_process_wildcard.keys():
if pattern.match(event_type):
process_wildcard = heat_customer_process_wildcard.get(pattern)
matched = True
break
if matched:
process_wildcard(body, message)
else:
default_process(body, message)
message.ack() | This function deals with heat notifications.
First, look for a matching process in customer_process entries that do not include a wildcard.
If none is found, look for a matching process in customer_process_wildcard.
If still none is found, use ternya's default process.
:param body: dict of openstack notification.
:param message: kombu Message class
:return: |
8,298 | def walk_train_dirs(root_dir: str) -> Iterable[Tuple[str, Iterable[str]]]:
if is_train_dir(root_dir):
yield '', [root_dir]  # assumed empty parent path when root_dir itself is a training dir
return
for dir_, subdirs, _ in os.walk(root_dir, topdown=True):
train_subdirs = [subdir for subdir in subdirs if is_train_dir(path.join(dir_, subdir))]
for subdir in train_subdirs:
subdirs.remove(subdir)
yield dir_, train_subdirs | Modify os.walk with the following:
- return only root_dir and sub-dirs
- return only training sub-dirs
- stop recursion at training dirs
:param root_dir: root dir to be walked
:return: generator of (root_dir, training sub-dirs) pairs |
8,299 | def _remove_germline_filter(rec, name):
if _is_germline(rec):
if rec.FILTER and name in rec.FILTER:
return vcfutils.cyvcf_remove_filter(rec, name)
elif not _is_somatic(rec):
if rec.FILTER and name in rec.FILTER:
return vcfutils.cyvcf_remove_filter(rec, name)
return rec | Check if germline based on STATUS/SS and REJECT flag.
Handles VarDict, FreeBayes, MuTect, MuTect2 and VarScan. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.