Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k)
---|---|---|
13,700 | def load_spacy_rule(file_path: str) -> Dict:
with open(file_path) as fp:
return json.load(fp) | A spacy rule file is a json file.
Args:
file_path (str): path to a text file containing a spaCy rule set.
Returns: Dict as the representation of spacy rules |
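A minimal usage sketch, assuming the helper above and its `json` import are available; the file name and the Matcher-style pattern below are made up for illustration:

```python
import json

# Hypothetical rule set: a spaCy Matcher-style pattern keyed by rule name.
rules = {"greeting": [[{"LOWER": "hello"}, {"IS_PUNCT": True}]]}
with open("greeting_rules.json", "w") as fp:
    json.dump(rules, fp)

loaded = load_spacy_rule("greeting_rules.json")
assert loaded == rules
```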
13,701 | def _generate_union_cstor_funcs(self, union):
for field in union.all_fields:
enum_field_name = fmt_enum_name(field.name, union)
func_args = [] if is_void_type(
field.data_type) else fmt_func_args_from_fields([field])
with self.block_func(
func=self._cstor_name_from_field(field),
args=func_args,
return_type=):
self.emit()
with self.block_init():
self.emit(.format(enum_field_name))
if not is_void_type(field.data_type):
self.emit(.format(
fmt_var(field.name), fmt_var(field.name)))
self.emit() | Emits standard union constructor. |
13,702 | def pick_config_ids(device_type, os, navigator):
    if os is None:
        default_dev_types = ['desktop']
    else:
        default_dev_types = list(DEVICE_TYPE_OS.keys())
    dev_type_choices = get_option_choices(
        'device_type', device_type, default_dev_types,
        list(DEVICE_TYPE_OS.keys())
    )
    os_choices = get_option_choices('os', os, list(OS_NAVIGATOR.keys()),
                                    list(OS_NAVIGATOR.keys()))
    nav_choices = get_option_choices('navigator', navigator,
                                     list(NAVIGATOR_OS.keys()),
                                     list(NAVIGATOR_OS.keys()))
    variants = []
    for dev, os, nav in product(dev_type_choices, os_choices,
                                nav_choices):
        if (os in DEVICE_TYPE_OS[dev]
                and nav in DEVICE_TYPE_NAVIGATOR[dev]
                and nav in OS_NAVIGATOR[os]):
            variants.append((dev, os, nav))
    if not variants:
        raise InvalidOption('Options device_type, os and navigator'
                            ' conflict with each other')
device_type, os_id, navigator_id = choice(variants)
assert os_id in OS_PLATFORM
assert navigator_id in NAVIGATOR_OS
assert device_type in DEVICE_TYPE_OS
return device_type, os_id, navigator_id | Select one random pair (device_type, os_id, navigator_id) from
all possible combinations matching the given os and
navigator filters.
:param os: allowed os(es)
:type os: string or list/tuple or None
:param navigator: allowed browser engine(s)
:type navigator: string or list/tuple or None
:param device_type: limit possible oses by device type
:type device_type: list/tuple or None, possible values:
"desktop", "smartphone", "tablet", "all" |
13,703 | def gsea_compute(data, gmt, n, weighted_score_type, permutation_type,
method, pheno_pos, pheno_neg, classes, ascending,
processes=1, seed=None, single=False, scale=False):
w = weighted_score_type
subsets = sorted(gmt.keys())
es = []
RES=[]
hit_ind=[]
esnull = [ [] for a in range(len(subsets)) ]
logging.debug("Start to compute enrichment scores......................")
if permutation_type == "phenotype":
logging.debug("Start to permutate classes..............................")
rs = np.random.RandomState(seed)
genes_mat, cor_mat = ranking_metric_tensor(exprs=data, method=method,
permutation_num=n,
pos=pheno_pos, neg=pheno_neg,
classes=classes,
ascending=ascending, rs=rs)
logging.debug("Start to compute enrichment nulls.......................")
es, esnull, hit_ind, RES = enrichment_score_tensor(gene_mat=genes_mat,
cor_mat=cor_mat,
gene_sets=gmt,
weighted_score_type=w,
nperm=n, rs=rs,
single=False, scale=False,)
else:
gl, cor_vec = data.index.values, data.values
logging.debug("Start to compute es and esnulls........................")
temp_esnu=[]
pool_esnu = Pool(processes=processes)
for subset in subsets:
rs = np.random.RandomState(seed)
temp_esnu.append(pool_esnu.apply_async(enrichment_score,
args=(gl, cor_vec, gmt.get(subset), w,
n, rs, single, scale)))
pool_esnu.close()
pool_esnu.join()
for si, temp in enumerate(temp_esnu):
e, enu, hit, rune = temp.get()
esnull[si] = enu
es.append(e)
RES.append(rune)
hit_ind.append(hit)
return gsea_significance(es, esnull), hit_ind, RES, subsets | compute enrichment scores and enrichment nulls.
:param data: preprocessed expression dataframe or a pre-ranked file if prerank=True.
:param dict gmt: all gene sets in .gmt file. need to call load_gmt() to get results.
:param int n: permutation number. default: 1000.
:param str method: ranking_metric method. see above.
:param str pheno_pos: one of labels of phenotype's names.
:param str pheno_neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what category of phenotype.
:param float weighted_score_type: default:1
:param bool ascending: sorting order of rankings. Default: False.
:param seed: random seed. Default: np.random.RandomState()
:param bool scale: if true, scale es by gene number.
:return: a tuple contains::
| zipped results of es, nes, pval, fdr.
| nested list of hit indices of input gene_list.
| nested list of ranked enrichment score of each input gene_sets.
| list of enriched terms |
13,704 | def count(cls, slug):
from .models import Content
cnt = cls._cache.get(slug)
if cnt is None:
cnt = Content.search_objects.search(tags=[slug]).count()
cls._cache[slug] = cnt
return cnt | get the number of objects in the cache for a given slug
:param slug: cache key
:return: `int` |
13,705 | def find_sanitizer(self, name):
name_parts = name.split(".")
if len(name_parts) < 2:
        raise ConfigurationError(
            "Unable to separate module name from function name in '%s'" % (
                name,
            ),
        )
module_name_suffix = ".".join(name_parts[:-1])
function_name = "sanitize_%s" % (name_parts[-1],)
module_name = "sanitizers.%s" % (module_name_suffix,)
callback = self.find_sanitizer_from_module(
module_name=module_name,
function_name=function_name,
)
if callback:
return callback
for addon_package_name in self.addon_packages:
module_name = "%s.%s" % (
addon_package_name,
module_name_suffix,
)
callback = self.find_sanitizer_from_module(
module_name=module_name,
function_name=function_name,
)
if callback:
return callback
module_name = "database_sanitizer.sanitizers.%s" % (module_name_suffix,)
callback = self.find_sanitizer_from_module(
module_name=module_name,
function_name=function_name,
)
if callback:
return callback
raise ConfigurationError("Unable to find sanitizer called " % (
name,
)) | Searches for a sanitizer function with given name. The name should
contain two parts separated from each other with a dot, the first
part being the module name while the second being name of the function
contained in the module, when it's being prefixed with "sanitize_".
The lookup process consists from three attempts, which are:
1. First package to look the module will be top level package called
"sanitizers".
2. Module will be looked under the "addon" packages, if they have been
defined.
3. Finally the sanitation function will be looked from the builtin
sanitizers located in "database_sanitizer.sanitizers" package.
If none of these provide any results, ConfigurationError will be
thrown.
:param name: "Full name" of the sanitation function containing name
of the module as well as name of the function.
:type name: str
:return: First function which can be imported with the given name.
:rtype: callable |
13,706 | def GetList(self):
soap_request = soap()
    soap_request.add_parameter('listName', self.listName)
self.last_request = str(soap_request)
response = self._session.post(url=self._url(),
headers=self._headers(),
data=str(soap_request),
verify=self._verify_ssl,
timeout=self.timeout)
if response.status_code == 200:
envelope = etree.fromstring(response.text.encode(), parser=etree.XMLParser(huge_tree=self.huge_tree))
_list = envelope[0][0][0][0]
info = {key: value for (key, value) in _list.items()}
for row in _list[0].getchildren():
self.fields.append({key: value for (key, value) in row.items()})
for setting in _list[1].getchildren():
self.regional_settings[
setting.tag.strip()] = setting.text
for setting in _list[2].getchildren():
self.server_settings[
setting.tag.strip()] = setting.text
fields = envelope[0][0][0][0][0]
else:
raise Exception("ERROR:", response.status_code, response.text) | Get Info on Current List
This is run in __init__ so you don't
have to run it again.
Access from self.schema |
13,707 | def com_google_fonts_check_name_postscriptname(ttFont, style, familyname):
from fontbakery.utils import name_entry_id
failed = False
    for name in ttFont['name'].names:
        if name.nameID == NameID.POSTSCRIPT_NAME:
            expected_value = f"{familyname}-{style}"
            string = name.string.decode(name.getEncoding()).strip()
            if string != expected_value:
                failed = True
                yield FAIL, ("Entry {} on the 'name' table: "
                             "Expected '{}' "
                             "but got '{}'.").format(name_entry_id(name),
                                                     expected_value,
                                                     string)
    if not failed:
        yield PASS, "POSTSCRIPT_NAME entries are all good." | Check name table: POSTSCRIPT_NAME entries. |
13,708 | def hydrate(self, values):
def hydrate_(obj):
if isinstance(obj, Structure):
try:
f = self.hydration_functions[obj.tag]
except KeyError:
return obj
else:
return f(*map(hydrate_, obj.fields))
elif isinstance(obj, list):
return list(map(hydrate_, obj))
elif isinstance(obj, dict):
return {key: hydrate_(value) for key, value in obj.items()}
else:
return obj
return tuple(map(hydrate_, values)) | Convert PackStream values into native values. |
13,709 | def get_alias(self,
alias=None,
manifest=None,
verify=True,
sizes=False,
dcd=None):
return self._get_alias(alias, manifest, verify, sizes, dcd, False) | Get the blob hashes assigned to an alias.
:param alias: Alias name. You almost definitely will only need to pass this argument.
:type alias: str
:param manifest: If you previously obtained a manifest, specify it here instead of ``alias``. You almost definitely won't need to do this.
:type manifest: str
:param verify: (v1 schema only) Whether to verify the integrity of the alias definition in the registry itself. You almost definitely won't need to change this from the default (``True``).
:type verify: bool
:param sizes: Whether to return sizes of the blobs along with their hashes
:type sizes: bool
:param dcd: (if ``manifest`` is specified) The Docker-Content-Digest header returned when getting the manifest. If present, this is checked against the manifest.
:type dcd: str
:rtype: list
:returns: If ``sizes`` is falsey, a list of blob hashes (strings) which are assigned to the alias. If ``sizes`` is truthy, a list of (hash,size) tuples for each blob. |
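A hedged usage sketch with the `dxf` registry client this method appears to belong to; the registry host, repository, credentials and tag are placeholders, and the auth callback follows the pattern shown in the dxf documentation:

```python
from dxf import DXF  # assumed installed

def auth(dxf_obj, response):
    # Placeholder credentials; supply whatever your registry requires.
    dxf_obj.authenticate("myuser", "mypassword", response=response)

registry = DXF("registry.example.com", "myorg/myrepo", auth)
print(registry.get_alias("latest"))              # blob hashes behind the tag
print(registry.get_alias("latest", sizes=True))  # (hash, size) tuples
```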
13,710 | def parse_value(self, tup_tree):
    self.check_node(tup_tree, 'VALUE', (), (), (), allow_pcdata=True)
return self.pcdata(tup_tree) | Parse a VALUE element and return its text content as a unicode string.
Whitespace is preserved.
The conversion of the text representation of the value to a CIM data
type object requires CIM type information which is not available on the
VALUE element and therefore will be done when parsing higher level
elements that have that information.
::
<!ELEMENT VALUE (#PCDATA)> |
13,711 | def html(self):
try:
from lxml import html
return html.fromstring(self.content)
except ImportError as ie:
raise DependencyException(ie) | Create an ``lxml``-based HTML DOM from the response. The tree
will not have a root, so all queries need to be relative
(i.e. start with a dot). |
13,712 | def _execute_callback(self, status, message, job, res, err, stacktrace):
if self._callback is not None:
try:
self._logger.info()
self._callback(status, message, job, res, err, stacktrace)
except Exception as e:
self._logger.exception(
.format(e)) | Execute the callback.
:param status: Job status. Possible values are "invalid" (job could not
be deserialized or was malformed), "failure" (job raised an error),
"timeout" (job timed out), or "success" (job finished successfully
and returned a result).
:type status: str
:param message: Kafka message.
:type message: :doc:`kq.Message <message>`
:param job: Job object, or None if **status** was "invalid".
:type job: kq.Job
:param res: Job result, or None if an exception was raised.
:type res: object | None
:param err: Exception raised by job, or None if there was none.
:type err: Exception | None
:param stacktrace: Exception traceback, or None if there was none.
:type stacktrace: str | None |
13,713 | def parse(self):
result = []
z = None
before_line_number, after_line_number = 0, 0
position = 0
for line in self.diff_text.splitlines():
match = re.search(r
r, line)
if match is not None:
if z is not None:
result.append(z)
z = Entry(match.group(),
match.group())
position = 0
continue
if self.should_skip_line(line):
continue
header = diff_re.search(line)
if header is not None:
before_line_number = int(header.group())
after_line_number = int(header.group())
position += 1
continue
        if line.startswith('-'):
z.new_removed(Line(before_line_number, position, line[1:]))
z.new_origin(Line(before_line_number, position, line[1:]))
before_line_number += 1
        elif line.startswith('+'):
z.new_added(Line(after_line_number, position, line[1:]))
z.new_result(Line(after_line_number, position, line[1:]))
after_line_number += 1
else:
z.new_origin(Line(before_line_number, position, line[1:]))
z.new_result(Line(after_line_number, position, line[1:]))
before_line_number += 1
after_line_number += 1
position += 1
if z is not None:
result.append(z)
    return result | Parses everything into a data structure that looks like:
result = [{
'origin_filename': '',
'result_filename': '',
'origin_lines': [], // all lines of the original file
'result_lines': [], // all lines of the newest file
'added_lines': [], // all lines added to the result file
'removed_lines': [], // all lines removed from the result file
}, ...] |
13,714 | def check_tweet(tweet, validation_checking=False):
if "id" not in tweet:
        raise NotATweetError("This text has no 'id' key")
original_format = is_original_format(tweet)
if original_format:
_check_original_format_tweet(tweet, validation_checking=validation_checking)
else:
_check_activity_streams_tweet(tweet, validation_checking=validation_checking)
return original_format | Ensures a tweet is valid and determines the type of format for the tweet.
Args:
tweet (dict/Tweet): the tweet payload
validation_checking (bool): check for valid key structure in a tweet. |
13,715 | def _marshall_value(value):
    if PYTHON3 and isinstance(value, bytes):
        return {'B': base64.b64encode(value).decode()}
    elif PYTHON3 and isinstance(value, str):
        return {'S': value}
    elif not PYTHON3 and isinstance(value, str):
        if is_binary(value):
            return {'B': base64.b64encode(value).decode()}
        return {'S': value}
    elif not PYTHON3 and isinstance(value, unicode):
        return {'S': value.encode('utf-8')}
    elif isinstance(value, dict):
        return {'M': marshall(value)}
    elif isinstance(value, bool):
        return {'BOOL': value}
    elif isinstance(value, (int, float)):
        return {'N': str(value)}
    elif isinstance(value, datetime.datetime):
        return {'S': value.isoformat()}
    elif isinstance(value, uuid.UUID):
        return {'S': str(value)}
    elif isinstance(value, list):
        return {'L': [_marshall_value(v) for v in value]}
    elif isinstance(value, set):
        if PYTHON3 and all([isinstance(v, bytes) for v in value]):
            return {'BS': _encode_binary_set(value)}
        elif PYTHON3 and all([isinstance(v, str) for v in value]):
            return {'SS': sorted(list(value))}
        elif all([isinstance(v, (int, float)) for v in value]):
            return {'NS': sorted([str(v) for v in value])}
        elif not PYTHON3 and all([isinstance(v, str) for v in value]) and \
                all([is_binary(v) for v in value]):
            return {'BS': _encode_binary_set(value)}
        elif not PYTHON3 and all([isinstance(v, str) for v in value]) and \
                all([is_binary(v) is False for v in value]):
            return {'SS': sorted(list(value))}
        else:
            raise ValueError('Can not mix types in a set')
    elif value is None:
        return {'NULL': True}
    raise ValueError('Unsupported type: %s' % type(value)) | Recursively transform `value` into an AttributeValue `dict`
:param mixed value: The value to encode
:rtype: dict
:raises ValueError: for unsupported types
Return the value as dict indicating the data type and transform or
recursively process the value if required. |
13,716 | def resourceprep(string, allow_unassigned=False):
chars = list(string)
_resourceprep_do_mapping(chars)
do_normalization(chars)
check_prohibited_output(
chars,
(
stringprep.in_table_c12,
stringprep.in_table_c21,
stringprep.in_table_c22,
stringprep.in_table_c3,
stringprep.in_table_c4,
stringprep.in_table_c5,
stringprep.in_table_c6,
stringprep.in_table_c7,
stringprep.in_table_c8,
stringprep.in_table_c9,
))
check_bidi(chars)
if not allow_unassigned:
check_unassigned(
chars,
(
stringprep.in_table_a1,
)
)
return "".join(chars) | Process the given `string` using the Resourceprep (`RFC 6122`_) profile. In
the error cases defined in `RFC 3454`_ (stringprep), a :class:`ValueError`
is raised. |
13,717 | def _client_wrapper(attr, *args, **kwargs):
    catch_api_errors = kwargs.pop('catch_api_errors', True)
    func = getattr(__context__['docker.client'], attr, None)
    if func is None or not hasattr(func, '__call__'):
        raise SaltInvocationError('Invalid client function \'{0}\''.format(attr))
    if attr in ('push', 'pull'):
        try:
            # Refresh auth config from config.json
            __context__['docker.client'].reload_config()
        except AttributeError:
            pass
    err = ''
    try:
        log.debug(
            'Attempting to run docker-py\'s "%s" function with args=%s and kwargs=%s',
            attr, args, kwargs)
        ret = func(*args, **kwargs)
    except docker.errors.APIError as exc:
        if catch_api_errors:
            raise CommandExecutionError(
                'Error {0}: {1}'.format(exc.response.status_code, exc.explanation))
        raise
    except Exception as exc:
        err = exc.__str__()
    else:
        return ret
    # If we're here, it's because the API call above failed.
    msg = 'Unable to perform {0}'.format(attr)
    if err:
        msg += ': {0}'.format(err)
    raise CommandExecutionError(msg) | Common functionality for running low-level API calls |
13,718 | def __clear_bp(self, aProcess):
lpAddress = self.get_address()
flNewProtect = aProcess.mquery(lpAddress).Protect
flNewProtect = flNewProtect & (0xFFFFFFFF ^ win32.PAGE_GUARD)
aProcess.mprotect(lpAddress, self.get_size(), flNewProtect) | Restores the original permissions of the target pages.
@type aProcess: L{Process}
@param aProcess: Process object. |
13,719 | def cache_url(url, model_dir=None, progress=True):
    if model_dir is None:
        torch_home = os.path.expanduser(os.getenv('TORCH_HOME', '~/.torch'))
        model_dir = os.getenv('TORCH_MODEL_ZOO', os.path.join(torch_home, 'models'))
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    parts = urlparse(url)
    filename = os.path.basename(parts.path)
    if filename == "model_final.pkl":
        filename = parts.path.replace("/", "_")
    cached_file = os.path.join(model_dir, filename)
    if not os.path.exists(cached_file) and is_main_process():
        sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
hash_prefix = HASH_REGEX.search(filename)
if hash_prefix is not None:
hash_prefix = hash_prefix.group(1)
if len(hash_prefix) < 6:
hash_prefix = None
_download_url_to_file(url, cached_file, hash_prefix, progress=progress)
synchronize()
return cached_file | r"""Loads the Torch serialized object at the given URL.
If the object is already present in `model_dir`, it's deserialized and
returned. The filename part of the URL should follow the naming convention
``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
digits of the SHA256 hash of the contents of the file. The hash is used to
ensure unique names and to verify the contents of the file.
The default value of `model_dir` is ``$TORCH_HOME/models`` where
``$TORCH_HOME`` defaults to ``~/.torch``. The default directory can be
overridden with the ``$TORCH_MODEL_ZOO`` environment variable.
Args:
url (string): URL of the object to download
model_dir (string, optional): directory in which to save the object
progress (bool, optional): whether or not to display a progress bar to stderr
Example:
>>> cached_file = maskrcnn_benchmark.utils.model_zoo.cache_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth') |
13,720 | def extract(self, member, path="", set_attrs=True):
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
if tarinfo.islnk():
tarinfo._link_target = os.path.join(path, tarinfo.linkname)
try:
self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
set_attrs=set_attrs)
except EnvironmentError as e:
if self.errorlevel > 0:
raise
else:
if e.filename is None:
self._dbg(1, "tarfile: %s" % e.strerror)
else:
self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e) | Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a TarInfo object. You can
specify a different directory using `path'. File attributes (owner,
mtime, mode) are set unless `set_attrs' is False. |
13,721 | def is_compatible_assembly_level(self, ncbi_assembly_level):
configured_ncbi_strings = [self._LEVELS[level] for level in self.assembly_level]
return ncbi_assembly_level in configured_ncbi_strings | Check if a given ncbi assembly level string matches the configured assembly levels. |
13,722 | def add(self, game_object: Hashable, tags: Iterable[Hashable]=()) -> None:
if isinstance(tags, (str, bytes)):
raise TypeError("You passed a string instead of an iterable, this probably isn't what you intended.\n\nTry making it a tuple.")
self.all.add(game_object)
for kind in type(game_object).mro():
self.kinds[kind].add(game_object)
for tag in tags:
self.tags[tag].add(game_object) | Add a game_object to the container.
game_object: Any Hashable object. The item to be added.
tags: An iterable of Hashable objects. Values that can be used to
retrieve a group containing the game_object.
Examples:
container.add(MyObject())
container.add(MyObject(), tags=("red", "blue")) |
13,723 | def spkcov(spk, idcode, cover=None):
spk = stypes.stringToCharP(spk)
idcode = ctypes.c_int(idcode)
if cover is None:
cover = stypes.SPICEDOUBLE_CELL(2000)
else:
assert isinstance(cover, stypes.SpiceCell)
assert cover.is_double()
libspice.spkcov_c(spk, idcode, ctypes.byref(cover))
return cover | Find the coverage window for a specified ephemeris object in a
specified SPK file.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkcov_c.html
:param spk: Name of SPK file.
:type spk: str
:param idcode: ID code of ephemeris object.
:type idcode: int
:param cover: Optional SPICE Window giving coverage in "spk" for "idcode".
:type cover: spiceypy.utils.support_types.SpiceCell |
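A short usage sketch with SpiceyPy; the kernel file name is a placeholder and 399 is the NAIF ID for Earth:

```python
import spiceypy as spice

spice.furnsh("de430.bsp")               # load an SPK you actually have
cover = spice.spkcov("de430.bsp", 399)  # coverage window for Earth
for i in range(spice.wncard(cover)):
    start_et, stop_et = spice.wnfetd(cover, i)
    print(spice.et2utc(start_et, "C", 0), "->", spice.et2utc(stop_et, "C", 0))
spice.kclear()
```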
13,724 | def preTranslate(self, tx, ty):
self.e += tx * self.a + ty * self.c
self.f += tx * self.b + ty * self.d
return self | Calculate pre translation and replace current matrix. |
13,725 | def _quantile_function(self, alpha=0.5, smallest_count=None):
total = float(self.total())
smallest_observed_count = min(itervalues(self))
if smallest_count is None:
smallest_count = smallest_observed_count
else:
smallest_count = min(smallest_count, smallest_observed_count)
beta = alpha * smallest_count
debug_plot = []
cumulative_sum = 0.0
inverse = sortedcontainers.SortedDict()
for value, count in iteritems(self):
debug_plot.append((cumulative_sum / total, value))
inverse[(cumulative_sum + beta) / total] = value
cumulative_sum += count
inverse[(cumulative_sum - beta) / total] = value
debug_plot.append((cumulative_sum / total, value))
q_min = inverse.iloc[0]
q_max = inverse.iloc[-1]
def function(q):
if q < 0.0 or q > 1.0:
                msg = 'invalid quantile %s: must be between 0 and 1 inclusive' % q
raise ValueError(msg)
elif q < q_min:
q = q_min
elif q > q_max:
q = q_max
if beta > 0:
if q in inverse:
result = inverse[q]
else:
previous_index = inverse.bisect_left(q) - 1
x1 = inverse.iloc[previous_index]
x2 = inverse.iloc[previous_index + 1]
y1 = inverse[x1]
y2 = inverse[x2]
result = (y2 - y1) * (q - x1) / float(x2 - x1) + y1
else:
if q in inverse:
previous_index = inverse.bisect_left(q) - 1
x1 = inverse.iloc[previous_index]
x2 = inverse.iloc[previous_index + 1]
y1 = inverse[x1]
y2 = inverse[x2]
result = 0.5 * (y1 + y2)
else:
previous_index = inverse.bisect_left(q) - 1
x1 = inverse.iloc[previous_index]
result = inverse[x1]
return float(result)
return function | Return a function that returns the quantile values for this
histogram. |
13,726 | def catch(ignore=[],
was_doing="something important",
helpfull_tips="you should use a debugger",
gbc=None):
exc_cls, exc, tb=sys.exc_info()
if exc_cls in ignore:
msg=
gbc.say(+str(exc_cls))
return
ex_message = traceback.format_exception_only(exc_cls, exc)[-1]
ex_message = ex_message.strip()
error_frame = tb
while error_frame.tb_next is not None:
error_frame = error_frame.tb_next
file = error_frame.tb_frame.f_code.co_filename
line = error_frame.tb_lineno
stack = traceback.extract_tb(tb)
formated_stack = []
for summary in stack:
formated_stack.append({
: summary[0],
: summary[1],
: summary[2],
: summary[3]
})
event = {
:was_doing,
: ex_message,
: {
: file,
: line,
: file + + str(line)
},
: formated_stack
}
try:
gbc.cry(+pformat(event))
print()
print()
print(helpfull_tips)
except Exception as e:
print( % e) | Catch, prepare and log error
:param exc_cls: error class
:param exc: exception
:param tb: exception traceback |
13,727 | def list_resources_with_long_filters(list_method,
filter_attr, filter_values, **params):
try:
params[filter_attr] = filter_values
return list_method(**params)
except neutron_exc.RequestURITooLong as uri_len_exc:
if not isinstance(filter_values, (list, tuple, set, frozenset)):
filter_values = [filter_values]
all_filter_len = sum(len(filter_attr) + len(val) + 2
for val in filter_values)
allowed_filter_len = all_filter_len - uri_len_exc.excess
val_maxlen = max(len(val) for val in filter_values)
filter_maxlen = len(filter_attr) + val_maxlen + 2
chunk_size = allowed_filter_len // filter_maxlen
resources = []
for i in range(0, len(filter_values), chunk_size):
params[filter_attr] = filter_values[i:i + chunk_size]
resources.extend(list_method(**params))
return resources | List neutron resources with handling RequestURITooLong exception.
If filter parameters are long, list resources API request leads to
414 error (URL is too long). For such case, this method split
list parameters specified by a list_field argument into chunks
and call the specified list_method repeatedly.
:param list_method: Method used to retrieve resource list.
:param filter_attr: attribute name to be filtered. The value corresponding
to this attribute is specified by "filter_values".
If you want to specify more attributes for a filter condition,
pass them as keyword arguments like "attr2=values2".
:param filter_values: values of "filter_attr" to be filtered.
If filter_values are too long and the total URI length exceed the
maximum length supported by the neutron server, filter_values will
be split into sub lists if filter_values is a list.
:param params: parameters to pass a specified listing API call
without any changes. You can specify more filter conditions
in addition to a pair of filter_attr and filter_values. |
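An illustration of the chunking contract, assuming the function above and its imports are available; a stand-in `list_method` keeps the example self-contained, but a real Neutron client's `client.list_ports` would be called the same way:

```python
def fake_list_ports(**params):
    # Echo the requested IDs back so the result can be checked.
    return [{"id": port_id} for port_id in params.get("id", [])]

port_ids = ["port-%04d" % i for i in range(5000)]
ports = list_resources_with_long_filters(fake_list_ports, "id", port_ids)
print(len(ports))  # 5000; fetched in chunks only if the URI were too long
```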
13,728 | def all_resource_urls(query):
urls = []
next = True
while next:
response = requests.get(query)
json_data = json.loads(response.content)
        for resource in json_data['results']:
            urls.append(resource['url'])
        if bool(json_data['next']):
            query = json_data['next']
else:
next = False
return urls | Get all the URLs for every resource |
13,729 | def _create(cls, repo, path, resolve, reference, force, logmsg=None):
git_dir = _git_dir(repo, path)
full_ref_path = cls.to_full_path(path)
abs_ref_path = osp.join(git_dir, full_ref_path)
target = reference
if resolve:
target = repo.rev_parse(str(reference))
if not force and osp.isfile(abs_ref_path):
target_data = str(target)
if isinstance(target, SymbolicReference):
target_data = target.path
if not resolve:
target_data = "ref: " + target_data
        with open(abs_ref_path, 'rb') as fd:
existing_data = fd.read().decode(defenc).strip()
if existing_data != target_data:
raise OSError("Reference at %r does already exist, pointing to %r, requested was %r" %
(full_ref_path, existing_data, target_data))
ref = cls(repo, full_ref_path)
ref.set_reference(target, logmsg)
return ref | internal method used to create a new symbolic reference.
If resolve is False, the reference will be taken as is, creating
a proper symbolic reference. Otherwise it will be resolved to the
corresponding object and a detached symbolic reference will be created
instead |
13,730 | def _conf(cls, opts):
logging_conf = cls.config.get(, , None)
if logging_conf is None:
return False
if not os.path.exists(logging_conf):
raise OSError("Error: Unable to locate specified logging configuration file!")
logging.config.fileConfig(logging_conf)
return True | Setup logging via ini-file from logging_conf_file option. |
13,731 | def add_term(self,term_obj):
if self.term_layer is None:
self.term_layer = Cterms(type=self.type)
self.root.append(self.term_layer.get_node())
self.term_layer.add_term(term_obj) | Adds a term to the term layer
@type term_obj: L{Cterm}
@param term_obj: the term object |
13,732 | def get_active(cls, database, conditions=""):
    if conditions:
        conditions += ' AND '
    conditions += 'active'
return SystemPart.get(database, conditions=conditions) | Gets active data from system.parts table
:param database: A database object to fetch data from.
:param conditions: WHERE clause conditions. Database and active conditions are added automatically
:return: A list of SystemPart objects |
13,733 | def read_range(self, begin: str, end: str) -> int:
if self.read_eof():
return False
c = self._stream.peek_char
if begin <= c <= end:
self._stream.incpos()
return True
return False | Consume head byte if it is >= begin and <= end else return false
Same as 'a'..'z' in BNF |
13,734 | def wait_for_close(
        raiden: 'RaidenService',
payment_network_id: PaymentNetworkID,
token_address: TokenAddress,
channel_ids: List[ChannelID],
retry_timeout: float,
) -> None:
return wait_for_channel_in_states(
raiden=raiden,
payment_network_id=payment_network_id,
token_address=token_address,
channel_ids=channel_ids,
retry_timeout=retry_timeout,
target_states=CHANNEL_AFTER_CLOSE_STATES,
) | Wait until all channels are closed.
Note:
This does not time out, use gevent.Timeout. |
13,735 | def delete_comment(self, project, work_item_id, comment_id):
route_values = {}
if project is not None:
route_values[] = self._serialize.url(, project, )
if work_item_id is not None:
route_values[] = self._serialize.url(, work_item_id, )
if comment_id is not None:
route_values[] = self._serialize.url(, comment_id, )
self._send(http_method=,
location_id=,
version=,
route_values=route_values) | DeleteComment.
[Preview API] Delete a comment on a work item.
:param str project: Project ID or project name
:param int work_item_id: Id of a work item.
:param int comment_id: |
13,736 | def build_def_use(graph, lparams):
analysis = reach_def_analysis(graph, lparams)
UD = defaultdict(list)
for node in graph.rpo:
for i, ins in node.get_loc_with_ins():
for var in ins.get_used_vars():
if var not in analysis.def_to_loc:
continue
ldefs = analysis.defs[node]
prior_def = -1
for v in ldefs.get(var, set()):
if prior_def < v < i:
prior_def = v
if prior_def >= 0:
UD[var, i].append(prior_def)
else:
intersect = analysis.def_to_loc[var].intersection(
analysis.R[node])
UD[var, i].extend(intersect)
DU = defaultdict(list)
for var_loc, defs_loc in UD.items():
var, loc = var_loc
for def_loc in defs_loc:
DU[var, def_loc].append(loc)
return UD, DU | Builds the Def-Use and Use-Def (DU/UD) chains of the variables of the
method. |
13,737 | def ConvertValues(default_metadata, values, token=None, options=None):
batch_data = [(default_metadata, obj) for obj in values]
return ConvertValuesWithMetadata(batch_data, token=token, options=options) | Converts a set of RDFValues into a set of export-friendly RDFValues.
Args:
default_metadata: export.ExportedMetadata instance with basic information
about where the values come from. This metadata will be passed to
exporters.
values: Values to convert. They should be of the same type.
token: Security token.
options: rdfvalue.ExportOptions instance that will be passed to
ExportConverters.
Returns:
Converted values. Converted values may be of different types
(unlike the source values which are all of the same type). This is due to
the fact that multiple ExportConverters may be applied to the same value
thus generating multiple converted values of different types.
Raises:
NoConverterFound: in case no suitable converters were found for the values. |
13,738 | def login(self):
form = self._get_form()
if form.validate_on_submit():
try:
self.security_service.login_user(form.user, form.remember.data)
except AuthenticationError as e:
form._errors = {: [str(e)]}
else:
self.after_this_request(self._commit)
if request.is_json:
return self.jsonify({: form.user.get_auth_token(),
: form.user})
self.flash(_(),
category=)
return self.redirect()
else:
identity_attrs = app.config.SECURITY_USER_IDENTITY_ATTRIBUTES
msg = f"Invalid {.join(identity_attrs)} and/or password."
form._errors = {: [msg]}
for field in form._fields.values():
field.errors = None
if form.errors and request.is_json:
return self.jsonify({: form.errors.get()[0]},
code=HTTPStatus.UNAUTHORIZED)
return self.render(,
login_user_form=form,
**self.security.run_ctx_processor()) | View function to log a user in. Supports html and json requests. |
13,739 | def split(*items):
out = []
for data in [x[0] for x in items]:
dis_orgs = data["config"]["algorithm"].get("disambiguate")
if dis_orgs:
if not data.get("disambiguate", None):
data["disambiguate"] = {"genome_build": data["genome_build"],
"base": True}
out.append([data])
if isinstance(dis_orgs, six.string_types):
dis_orgs = [dis_orgs]
for dis_org in dis_orgs:
dis_data = copy.deepcopy(data)
dis_data["disambiguate"] = {"genome_build": dis_org}
dis_data["genome_build"] = dis_org
dis_data["config"]["algorithm"]["effects"] = False
dis_data = run_info.add_reference_resources(dis_data)
out.append([dis_data])
else:
out.append([data])
return out | Split samples into all possible genomes for alignment. |
13,740 | def refresh_rooms(self):
for room_id in self.user_api.get_joined_rooms()["joined_rooms"]:
self._rooms[room_id] = MatrixRoom(room_id, self.user_api) | Calls GET /joined_rooms to refresh rooms list. |
13,741 | def __get_average_inter_cluster_distance(self, entry):
linear_part_distance = sum(list_math_multiplication(self.linear_sum, entry.linear_sum));
return ( (entry.number_points * self.square_sum - 2.0 * linear_part_distance + self.number_points * entry.square_sum) / (self.number_points * entry.number_points) ) ** 0.5; | !
@brief Calculates average inter cluster distance between current and specified clusters.
@param[in] entry (cfentry): Clustering feature to which distance should be obtained.
@return (double) Average inter cluster distance. |
13,742 | def close(self):
self._logger.info("Closing")
if self._opened:
self._opened = False
else:
        self._logger.warning(
            "close() called, but connection policy was already closed")
return | Close the policy instance. |
13,743 | def setex(self, key, seconds, value):
if isinstance(seconds, float):
return self.psetex(key, int(seconds * 1000), value)
if not isinstance(seconds, int):
raise TypeError("milliseconds argument must be int")
    fut = self.execute(b'SETEX', key, seconds, value)
return wait_ok(fut) | Set the value and expiration of a key.
If seconds is float it will be multiplied by 1000
coerced to int and passed to `psetex` method.
:raises TypeError: if seconds is neither int nor float |
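A hedged usage sketch assuming the aioredis 1.x-style API this method comes from:

```python
import asyncio
import aioredis  # aioredis 1.x assumed

async def main():
    redis = await aioredis.create_redis_pool("redis://localhost")
    # A float TTL is converted to milliseconds and routed to PSETEX internally.
    await redis.setex("session:42", 1.5, "payload")
    print(await redis.ttl("session:42"))
    redis.close()
    await redis.wait_closed()

asyncio.run(main())
```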
13,744 | def process_xlsx(content):
data = {}
workbook = xlrd.open_workbook(file_contents=content)
    worksheets = [w for w in workbook.sheet_names() if not w.startswith('_')]
    for worksheet_name in worksheets:
        if worksheet_name.startswith('_'):
            continue
worksheet = workbook.sheet_by_name(worksheet_name)
merged_cells = worksheet.merged_cells
if len(merged_cells):
raise MergedCellError(worksheet.name, merged_cells)
worksheet.name = slughifi(worksheet.name)
headers = make_headers(worksheet)
worksheet_data = make_worksheet_data(headers, worksheet)
data[worksheet.name] = worksheet_data
return data | Turn Excel file contents into Tarbell worksheet data |
13,745 | def _get_ignore_from_manifest_lines(lines):
ignore = []
ignore_regexps = []
for line in lines:
try:
cmd, rest = line.split(None, 1)
except ValueError:
continue
for part in rest.split():
if part.startswith():
warning("ERROR: Leading slashes are not allowed in MANIFEST.in on Windows: %s" % part)
if part.endswith():
warning("ERROR: Trailing slashes are not allowed in MANIFEST.in on Windows: %s" % part)
if cmd == :
rest = rest.rstrip()
ignore.append(rest)
ignore.append(rest + os.path.sep + )
return ignore, ignore_regexps | Gather the various ignore patterns from a MANIFEST.in.
'lines' should be a list of strings with comments removed
and continuation lines joined.
Returns a list of standard ignore patterns and a list of regular
expressions to ignore. |
13,746 | def save_scenario(self, scenario_file_path=None):
warning_title = tr()
is_valid, warning_message = self.validate_input()
if not is_valid:
QMessageBox.warning(self, warning_title, warning_message)
return
if self.dock.extent.user_extent is not None \
and self.dock.extent.crs is not None:
user_extent = self.dock.extent.user_extent.boundingBox()
extent = extent_to_array(user_extent, self.dock.extent.crs)
else:
extent = viewport_geo_array(self.iface.mapCanvas())
    extent_string = ', '.join(('%f' % x) for x in extent)
    exposure_path = self.exposure_layer.source()
    hazard_path = self.hazard_layer.source()
    title = self.keyword_io.read_keywords(self.hazard_layer, 'title')
title = tr(title)
default_filename = title.replace(
, ).replace(, ).replace(, )
dialog_title = tr()
if scenario_file_path is None:
scenario_file_path, __ = QFileDialog.getSaveFileName(
self,
dialog_title,
os.path.join(self.output_directory, default_filename + ),
"Text files (*.txt)")
    if scenario_file_path is None or scenario_file_path == '':
return
self.output_directory = os.path.dirname(scenario_file_path)
parser = ConfigParser()
parser.add_section(title)
    parser.set(title, 'exposure', exposure_path)
    parser.set(title, 'hazard', hazard_path)
    parser.set(title, 'extent', extent_string)
if self.dock.extent.crs is None:
parser.set(title, , )
else:
parser.set(
title,
,
self.dock.extent.crs.authid())
if self.aggregation_layer is not None:
aggregation_path = self.aggregation_layer.source()
relative_aggregation_path = self.relative_path(
scenario_file_path, aggregation_path)
        parser.set(title, 'aggregation', relative_aggregation_path)
try:
        of = open(scenario_file_path, 'w')
parser.write(of)
of.close()
except Exception as e:
QMessageBox.warning(
self,
,
tr(
).format(
path=scenario_file_path, exception=str(e)))
finally:
of.close()
self.save_state() | Save current scenario to a text file.
You can use the saved scenario with the batch runner.
:param scenario_file_path: A path to the scenario file.
:type scenario_file_path: str |
13,747 | def __create_core_and_model_object_copies(self, selection, smart_selection_adaption):
all_models_selected = selection.get_all()
if not all_models_selected:
logger.warning("Nothing to copy because state machine selection is empty.")
return
parent_m = self.do_selection_reduction_to_one_parent(selection)
self.copy_parent_state_id = parent_m.state.state_id if parent_m else None
if smart_selection_adaption:
self.do_smart_selection_adaption(selection, parent_m)
selected_models_dict = {}
for state_element_attr in ContainerState.state_element_attrs:
selected_models_dict[state_element_attr] = list(getattr(selection, state_element_attr))
self.destroy_all_models_in_dict(self.model_copies)
self.model_copies = deepcopy(selected_models_dict)
new_content_of_clipboard = .join(["{0} {1}".format(len(elems), key if len(elems) > 1 else key[:-1])
for key, elems in self.model_copies.items() if elems])
logger.info("The new content is {0}".format(new_content_of_clipboard.replace(, )))
return selected_models_dict, parent_m | Copy all elements of a selection.
The method copies all objects and modifies the selection before copying the elements if the smart flag is true.
The smart selection adaption is by default enabled. In any case the selection is reduced to have one parent
state that is used as the root of copy, except a root state it self is selected.
:param Selection selection: an arbitrary selection, whose elements should be copied
:param bool smart_selection_adaption: flag to enable the smart selection adaptation mode
:return: dictionary of selected models copied, parent model of copy |
13,748 | def validate(collection, onerror: Callable[[str, List], None] = None):
BioCValidator(onerror).validate(collection) | Validate BioC data structure. |
13,749 | def fromPy(cls, val, typeObj, vldMask=None):
vld = int(val is not None)
if not vld:
assert vldMask is None or vldMask == 0
val = False
else:
if vldMask == 0:
val = False
vld = 0
else:
val = bool(val)
return cls(val, typeObj, vld) | :param val: value of python type bool or None
:param typeObj: instance of HdlType
:param vldMask: None vldMask is resolved from val,
if is 0 value is invalidated
if is 1 value has to be valid |
13,750 | def bind_env(self, action, env):
if env in self._env_actions:
raise ValueError( % env)
self._env_actions[env] = action
action.env = env | Bind an environment variable to an argument action. The env
value will traditionally be something uppercase like `MYAPP_FOO_ARG`.
Note that the ENV value is assigned using `set_defaults()` and as such
it will be overridden if the argument is set via `parse_args()` |
13,751 | def str_dict_keys(a_dict):
new_dict = {}
for key in a_dict:
if six.PY2 and isinstance(key, six.text_type):
new_dict[str(key)] = a_dict[key]
else:
new_dict[key] = a_dict[key]
return new_dict | return a modified dict where all the keys that are anything but str get
converted to str.
E.g.
>>> result = str_dict_keys({u'name': u'Peter', u'age': 99, 1: 2})
>>> # can't compare whole dicts in doctests
>>> result['name']
u'Peter'
>>> result['age']
99
>>> result[1]
2
The reason for this is that in Python <= 2.6.4 doing
``MyClass(**{u'name': u'Peter'})`` would raise a TypeError
Note that only unicode types are converted to str types.
The reason for that is you might have a class that looks like this::
class Option(object):
def __init__(self, foo=None, bar=None, **kwargs):
...
And it's being used like this::
Option(**{u'foo':1, u'bar':2, 3:4})
Then you don't want to change that {3:4} part which becomes part of
`**kwargs` inside the __init__ method.
Using integers as parameter keys is a silly example but the point is that
due to the python 2.6.4 bug only unicode keys are converted to str. |
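A quick run of the documented behaviour, assuming the function above and `six` are importable; on Python 3, where unicode literals are already `str`, the dict is simply copied:

```python
mixed = {u'name': u'Peter', u'age': 99, 1: 2}
cleaned = str_dict_keys(mixed)
print(cleaned['name'], cleaned['age'], cleaned[1])  # -> Peter 99 2
```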
13,752 | def handle_timeouts(self):
now = getnow()
while self.timeouts and self.timeouts[0].timeout <= now:
op = heapq.heappop(self.timeouts)
coro = op.coro
        if op.weak_timeout and hasattr(op, 'last_update'):
if op.last_update > op.last_checkpoint:
op.last_checkpoint = op.last_update
op.timeout = op.last_checkpoint + op.delta
heapq.heappush(self.timeouts, op)
continue
if op.state is events.RUNNING and coro and coro.running and \
op.cleanup(self, coro):
self.active.append((
CoroutineException(
events.OperationTimeout,
events.OperationTimeout(op)
),
coro
            )) | Handle timeouts. Raise timed-out operations with an OperationTimeout
in the associated coroutine (if they are still alive and the operation
hasn't actually completed successfully) or, if the operation has a
weak_timeout flag, update the timeout point and push it back into the
heapq.
weak_timeout notes:
* weak_timeout means a last_update attribute is updated with
a timestamp of the last activity in the operation - for example, an
operation may receive new data and not complete (not enough data, etc.)
* if there was activity since the last time we checked this
timeout, we push it back into the heapq with an updated timeout value so
it will be checked again
Also, we call a cleanup on the op; only if cleanup returns true do we raise
the timeout (finalized isn't enough to check if the op has completed
since finalized is set when the operation gets back in the coro - and
it might still be in the Scheduler.active queue when we get to this
timeout - well, this is certainly a problem magnet: TODO: fix_finalized) |
13,753 | def _chk_type(recdef, rec):
if len(recdef) != len(rec):
raise TypeError("Number of columns (%d) is different from RecordDef (%d)" % (len(rec), len(recdef)))
for i in xrange(len(recdef)):
try:
def_type = recdef[i].type
col_type = Type.equivalent_relshell_type(rec[i])
if col_type != def_type:
raise TypeError("Column %d has mismatched type: Got [%s] ; Expected [%s]" %
(i, rec[i], col_type, def_type))
except AttributeError as e:
try:
Type.equivalent_relshell_type(rec[i])
except NotImplementedError as e:
raise TypeError("%s" % (e)) | Checks if type of `rec` matches `recdef`
:param recdef: instance of RecordDef
:param rec: instance of Record
:raises: `TypeError` |
13,754 | def result(self, wait=0):
if self.started:
return result(self.id, wait=wait, cached=self.cached) | return the full list of results.
:param int wait: how many milliseconds to wait for a result
:return: an unsorted list of results |
13,755 | def _checkResponseWriteData(payload, writedata):
    _checkString(payload, minlength=4, description='payload')
    _checkString(writedata, minlength=2, maxlength=2, description='writedata')
    BYTERANGE_FOR_WRITEDATA = slice(2, 4)
    receivedWritedata = payload[BYTERANGE_FOR_WRITEDATA]
    if receivedWritedata != writedata:
        raise ValueError('Wrong write data in the response: {0!r}, but commanded is {1!r}. The data payload is: {2!r}'.format( \
            receivedWritedata, writedata, payload)) | Check that the write data as given in the response is correct.
The bytes 2 and 3 (zero based counting) in the payload holds the write data.
Args:
* payload (string): The payload
* writedata (string): The data to write, length should be 2 bytes.
Raises:
TypeError, ValueError |
13,756 | def stop_recording(self):
self._stop_recording.set()
with self._source_lock:
self._source.stop()
self._recording = False | Stop recording from the audio source. |
13,757 | def add_episode(self, text, text_format, title, author, summary=None,
publish_date=None, synthesizer=, synth_args=None, sentence_break=):
if title in self.episodes:
raise ValueError( + title + )
link = self.output_path + + title.replace(, ).lower() +
episode_text = convert_to_ssml(text, text_format)
new_episode = Episode(episode_text, text_format, title, author, link, summary, publish_date, synthesizer, synth_args, sentence_break)
self.episodes[title] = new_episode | Add a new episode to the podcast.
:param text:
See :meth:`Episode`.
:param text_format:
See :meth:`Episode`.
:param title:
See :meth:`Episode`.
:param author:
See :meth:`Episode`.
:param summary:
See :meth:`Episode`.
:param publish_date:
See :meth:`Episode`.
:param synthesizer:
See :meth:`typecaster.utils.text_to_speech`.
:param synth_args:
See :meth:`typecaster.utils.text_to_speech`.
:param sentence_break:
See :meth:`typecaster.utils.text_to_speech`. |
13,758 | def ls(ctx, name):
session = create_session(ctx.obj[])
client = session.client()
results = client.list_clusters(
ClusterStates=[, , , ]
)
for cluster in results[]:
click.echo("{0}\t{1}\t{2}".format(cluster[], cluster[], cluster[][])) | List EMR instances |
13,759 | def apply_vcc(self,vcc):
if not in self.constraints:
self.constraints.append()
for pop in self.poplist:
if not pop.is_specific:
try:
pop.apply_vcc(vcc)
except:
logging.info( % (pop.model)) | Applies velocity contrast curve constraint to each population
See :func:`vespa.stars.StarPopulation.apply_vcc`;
all arguments passed to that function for each population. |
13,760 | def _validate_importers(importers):
if importers is None:
return None
def _to_importer(priority, func):
assert isinstance(priority, int), priority
assert callable(func), func
return (priority, _importer_callback_wrapper(func))
return tuple(_to_importer(priority, func) for priority, func in importers) | Validates the importers and decorates the callables with our output
formatter. |
13,761 | def GetEntries(
self, parser_mediator, cookie_data=None, url=None, **kwargs):
fields = cookie_data.split()
number_of_fields = len(fields)
if number_of_fields not in (1, 4):
parser_mediator.ProduceExtractionWarning(
.format(
number_of_fields, self.COOKIE_NAME))
return
if number_of_fields == 1:
domain_hash = None
try:
last_visit_posix_time = int(fields[3], 10) / 1000
else:
last_visit_posix_time = int(fields[3], 10)
except ValueError:
last_visit_posix_time = None
if last_visit_posix_time is not None:
date_time = dfdatetime_posix_time.PosixTime(
timestamp=last_visit_posix_time)
timestamp_description = definitions.TIME_DESCRIPTION_LAST_VISITED
else:
date_time = dfdatetime_semantic_time.SemanticTime()
timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME
event_data = GoogleAnalyticsEventData()
event_data.cookie_name = self.COOKIE_NAME
event_data.domain_hash = domain_hash
event_data.pages_viewed = number_of_pages_viewed
event_data.url = url
event = time_events.DateTimeValuesEvent(date_time, timestamp_description)
parser_mediator.ProduceEventWithEventData(event, event_data) | Extracts event objects from the cookie.
Args:
parser_mediator (ParserMediator): parser mediator.
cookie_data (bytes): cookie data.
url (str): URL or path where the cookie got set. |
13,762 | def hide(self):
for widget in self.replace_widgets:
widget.hide()
QWidget.hide(self)
self.visibility_changed.emit(False)
if self.editor is not None:
self.editor.setFocus()
self.clear_matches() | Overrides Qt Method |
13,763 | def get_groups_by_userid(cls, userid, request):
try:
cache_request_user(cls, request, userid)
except Exception as ex:
log.error(str(ex))
forget(request)
else:
if request._user:
            return ['g:%s' % g for g in request._user.groups]
Used by Ticket-based auth as `callback` kwarg. |
13,764 | def get_root_url(url, warn=True):
parsed_url = urlparse(url)
    root_url = '%s://%s' % (parsed_url.scheme, parsed_url.netloc)
if warn and not parsed_url.path.startswith():
logger.warning( % root_url)
return root_url | Get the "root URL" for a URL, as described in the LuminosoClient
documentation. |
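A small example, assuming the helper above and its imports; the URL is a placeholder and `warn=False` simply skips the path-prefix warning:

```python
print(get_root_url("https://analytics.example.com/api/v5/projects/abc123",
                   warn=False))
# -> https://analytics.example.com
```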
13,765 | def seek(self, offset, whence=os.SEEK_SET):
pos = None
if whence == os.SEEK_SET:
pos = self.offset + offset
elif whence == os.SEEK_CUR:
pos = self.tell() + offset
elif whence == os.SEEK_END:
pos = self.offset + self.len + offset
else:
raise ValueError("invalid whence {}".format(whence))
if pos > self.offset + self.len or pos < self.offset:
raise ValueError("seek position beyond chunk area")
self.parent_fd.seek(pos, os.SEEK_SET) | Seek to position in stream, see file.seek |
13,766 | def as_string(self, default_from=None):
    encoding = self.charset or 'utf-8'
    attachments = self.attachments or []
    if len(attachments) == 0 and not self.html:
        msg = self._mimetext(self.body)
    elif len(attachments) > 0 and not self.html:
        msg = MIMEMultipart()
        msg.attach(self._mimetext(self.body))
    else:
        msg = MIMEMultipart()
        alternative = MIMEMultipart('alternative')
        alternative.attach(self._mimetext(self.body, 'plain'))
        alternative.attach(self._mimetext(self.html, 'html'))
        msg.attach(alternative)
    if self.charset:
        msg['Subject'] = Header(self.subject, encoding)
    else:
        msg['Subject'] = self.subject
    sender = self.sender or default_from
    if sender is not None:
        msg['From'] = sanitize_address(sender, encoding)
    msg['To'] = ', '.join(list(set(sanitize_addresses(self.recipients, encoding))))
    msg['Date'] = formatdate(self.date, localtime=True)
    msg['Message-ID'] = self.msgId
    if self.cc:
        msg['Cc'] = ', '.join(list(set(sanitize_addresses(self.cc, encoding))))
    if self.reply_to:
        msg['Reply-To'] = sanitize_address(self.reply_to, encoding)
    if self.extra_headers:
        for k, v in self.extra_headers.items():
            msg[k] = v
    for attachment in attachments:
        f = MIMEBase(*attachment.content_type.split('/'))
        f.set_payload(attachment.data)
        encode_base64(f)
        try:
            attachment.filename and attachment.filename.encode('latin-1')
        except UnicodeEncodeError:
            filename = attachment.filename
            if not PY3:
                filename = filename.encode('utf8')
            f.add_header('Content-Disposition', attachment.disposition,
                         filename=('UTF8', '', filename))
        else:
            f.add_header('Content-Disposition', '%s;filename=%s' %
                         (attachment.disposition, attachment.filename))
        for key, value in attachment.headers:
            f.add_header(key, value)
        msg.attach(f)
return msg.as_string() | Creates the email |
13,767 | def td_waveform_to_fd_waveform(waveform, out=None, length=None,
buffer_length=100):
if out is None:
if length is None:
N = pnutils.nearest_larger_binary_number(len(waveform) + \
buffer_length)
n = int(N//2) + 1
else:
n = length
N = (n-1)*2
out = zeros(n, dtype=complex_same_precision_as(waveform))
else:
n = len(out)
N = (n-1)*2
delta_f = 1. / (N * waveform.delta_t)
tmplt_length = len(waveform) * waveform.delta_t
if len(waveform) > N:
err_msg = "The time domain template is longer than the intended "
err_msg += "duration in the frequency domain. This situation is "
err_msg += "not supported in this function. Please shorten the "
err_msg += "waveform appropriately before calling this function or "
err_msg += "increase the allowed waveform length. "
err_msg += "Waveform length (in samples): {}".format(len(waveform))
err_msg += ". Intended length: {}.".format(N)
raise ValueError(err_msg)
tChirp = - float( waveform.start_time )
waveform.resize(N)
k_zero = int(waveform.start_time / waveform.delta_t)
waveform.roll(k_zero)
htilde = FrequencySeries(out, delta_f=delta_f, copy=False)
fft(waveform.astype(real_same_precision_as(htilde)), htilde)
htilde.length_in_time = tmplt_length
htilde.chirp_length = tChirp
return htilde | Convert a time domain into a frequency domain waveform by FFT.
As a waveform is assumed to "wrap" in the time domain one must be
careful to ensure the waveform goes to 0 at both "boundaries". To
ensure this is done correctly the waveform must have the epoch set such
the merger time is at t=0 and the length of the waveform should be
shorter than the desired length of the FrequencySeries (times 2 - 1)
so that zeroes can be suitably pre- and post-pended before FFTing.
If given, out is a memory array to be used as the output of the FFT.
If not given memory is allocated internally.
If present the length of the returned FrequencySeries is determined
from the length out. If out is not given the length can be provided
expicitly, or it will be chosen as the nearest power of 2. If choosing
length explicitly the waveform length + buffer_length is used when
choosing the nearest binary number so that some zero padding is always
added. |
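A hedged sketch of how this helper might be fed, assuming it and PyCBC's public waveform interface are importable; the waveform parameters are arbitrary example values:

```python
from pycbc.waveform import get_td_waveform

hp, _ = get_td_waveform(approximant="SEOBNRv4", mass1=20, mass2=20,
                        delta_t=1.0 / 4096, f_lower=30.0)
# PyCBC time-domain waveforms place the merger at t=0, as required above.
htilde = td_waveform_to_fd_waveform(hp, buffer_length=100)
print(len(htilde), htilde.delta_f)
```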
13,768 | def approve(self, peer_jid):
self.client.enqueue(
stanza.Presence(type_=structs.PresenceType.SUBSCRIBED,
to=peer_jid)
) | (Pre-)approve a subscription request from `peer_jid`.
:param peer_jid: The peer to (pre-)approve.
This sends a ``"subscribed"`` presence to the peer; if the peer has
previously asked for a subscription, this will seal the deal and create
the subscription.
If the peer has not requested a subscription (yet), it is marked as
pre-approved by the server. A future subscription request by the peer
will then be confirmed by the server automatically.
.. note::
Pre-approval is an OPTIONAL feature in :rfc:`6121`. It is announced
as a stream feature. |
13,769 | def save_state(self, fname=None):
if not fname:
date = datetime.datetime.now().strftime("%Y-%m-%dT%Hh%Mm%Ss")
fname = date + "_energy_" + str(self.energy()) + ".state"
with open(fname, "wb") as fh:
pickle.dump(self.state, fh) | Saves state to pickle |
13,770 | def _read(self):
raw_response = self.transport.receive()
response = Packet.parse(raw_response)
if response.response_type == Packet.EVENT and response.event_type == "log":
self.log_events.append(response)
self._read()
else:
return response | Get next packet from transport.
:return: parsed packet in a tuple with message type and payload
:rtype: :py:class:`collections.namedtuple` |
13,771 | def get_paths_from_to(self, goobj_start, goid_end=None, dn0_up1=True):
paths = []
working_q = cx.deque([[goobj_start]])
adjfnc = self.adjdir[dn0_up1]
while working_q:
path_curr = working_q.popleft()
goobj_curr = path_curr[-1]
go_adjlst = adjfnc(goobj_curr)
if (goid_end is not None and goobj_curr.id == goid_end) or \
(goid_end is None and not go_adjlst):
paths.append(path_curr)
else:
for go_neighbor in go_adjlst:
if go_neighbor not in path_curr:
                    working_q.append(path_curr + [go_neighbor])
return paths | Get a list of paths from goobj_start to either top or goid_end. |
13,772 | def has(self, relation, operator=">=", count=1, boolean="and", extra=None):
if relation.find(".") >= 0:
return self._has_nested(relation, operator, count, boolean, extra)
relation = self._get_has_relation_query(relation)
query = relation.get_relation_count_query(
relation.get_related().new_query(), self
)
if extra:
if callable(extra):
extra(query)
return self._add_has_where(
query.apply_scopes(), relation, operator, count, boolean
) | Add a relationship count condition to the query.
:param relation: The relation to count
:type relation: str
:param operator: The operator
:type operator: str
:param count: The count
:type count: int
:param boolean: The boolean value
:type boolean: str
:param extra: The extra query
:type extra: Builder or callable
:type: Builder |
13,773 | def _include_exclude(file_path, include=None, exclude=None):
if exclude is not None and exclude:
for pattern in exclude:
if file_path.match(pattern):
return False
if include is not None and include:
for pattern in include:
if file_path.match(pattern):
return True
return False
    return True | Check if the file matches one of the include filters and is not caught by an exclude filter.
:param file_path: Path to the file.
:param include: Tuple of patterns to include in the result.
:param exclude: Tuple of patterns to exclude from the result. |
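A quick check of the precedence implemented above (an exclude match rejects the file, otherwise an include match accepts it, otherwise the file is rejected whenever an include list was given), assuming the function is importable; note that `PurePath.match` matches patterns from the right:

```python
from pathlib import Path

f = Path("src/pkg/module.py")
print(_include_exclude(f, include=("*.py",)))                      # True
print(_include_exclude(f, include=("*.py",), exclude=("pkg/*",)))  # False
print(_include_exclude(f, include=("*.rst",)))                     # False
```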
13,774 | def make_links_absolute(self, base_url=None, resolve_base_href=True,
handle_failures=None):
if base_url is None:
base_url = self.base_url
if base_url is None:
raise TypeError(
"No base_url given, and the document has no base_url")
if resolve_base_href:
self.resolve_base_href()
    if handle_failures == 'ignore':
def link_repl(href):
try:
return urljoin(base_url, href)
except ValueError:
return href
    elif handle_failures == 'discard':
def link_repl(href):
try:
return urljoin(base_url, href)
except ValueError:
return None
elif handle_failures is None:
def link_repl(href):
return urljoin(base_url, href)
else:
raise ValueError(
"unexpected value for handle_failures: %r" % handle_failures)
self.rewrite_links(link_repl) | Make all links in the document absolute, given the
``base_url`` for the document (the full URL where the document
came from), or if no ``base_url`` is given, then the ``.base_url``
of the document.
If ``resolve_base_href`` is true, then any ``<base href>``
tags in the document are used *and* removed from the document.
If it is false then any such tag is ignored.
If ``handle_failures`` is None (default), a failure to process
a URL will abort the processing. If set to 'ignore', errors
are ignored. If set to 'discard', failing URLs will be removed. |
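A short usage sketch with lxml.html (the base URL is a placeholder):

import lxml.html

doc = lxml.html.fromstring('<p><a href="/about">About</a></p>')
doc.make_links_absolute("https://example.com/index.html",
                        handle_failures="ignore")
print(lxml.html.tostring(doc))  # href is now https://example.com/about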
13,775 | def execute_operation(self, method="GET", ops_path="", payload=""):
operation_path_URL = "".join([self.api_server, ops_path])
logging.debug("%s %s" %(method, operation_path_URL))
if payload == "":
res = requests.request(method, operation_path_URL)
else:
logging.debug("PAYLOAD:\n%s" %(payload))
res = requests.request(method, operation_path_URL, data=payload)
logging.debug("RESPONSE:\n%s" %(res.json()))
return res | Executes a Kubernetes operation using the specified method against a path.
This is part of the low-level API.
:Parameters:
- `method`: The HTTP method to use, defaults to `GET`
- `ops_path`: The path of the operation, for example, `/api/v1/events` which would result in an overall: `GET http://localhost:8080/api/v1/events`
- `payload`: The optional payload which is relevant for `POST` or `PUT` methods only |
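A hedged usage sketch (assuming `client` is an instance of the wrapper class above, pointed at a local API server; the paths and payload are illustrative):

# list events with a plain GET
res = client.execute_operation(method="GET", ops_path="/api/v1/events")
print(res.status_code, len(res.json().get("items", [])))

# create a namespace by POSTing a JSON payload
payload = '{"apiVersion": "v1", "kind": "Namespace", "metadata": {"name": "demo"}}'
res = client.execute_operation(method="POST",
                               ops_path="/api/v1/namespaces",
                               payload=payload)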
13,776 | def convertToPDF(self, from_page=0, to_page=-1, rotate=0):
if self.isClosed or self.isEncrypted:
raise ValueError("operation illegal for closed / encrypted doc")
return _fitz.Document_convertToPDF(self, from_page, to_page, rotate) | Convert document to PDF selecting page range and optional rotation. Output bytes object. |
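A hedged usage sketch with PyMuPDF (this uses the older camelCase `fitz` API seen above; file names are placeholders):

import fitz  # PyMuPDF

doc = fitz.open("input.xps")  # any non-PDF format PyMuPDF can read
pdf_bytes = doc.convertToPDF(from_page=0, to_page=4, rotate=0)
with open("output.pdf", "wb") as fh:
    fh.write(pdf_bytes)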
13,777 | def get_pages_from_id_list(id_list):
page_list = []
for id_ in id_list:
try:
page_list.append(
Page.objects.get(id=id_).specific)
except ObjectDoesNotExist:
logging.error(
"Attempted to fetch non-existent"
" page with id of {}".format(id_))
return page_list | Accepts: list of page ids
Returns: list of specific page objects |
13,778 | def is_error(self):
qstat = self._grep_qstat()
err = self._grep_status()
if qstat and err:
return True
return False | Checks to see if the job errored out. |
13,779 | def connect(self):
logger.info("Connecting to Redis on {host}:{port}...".format(
host=self.host, port=self.port))
super(RedisSubscriber, self).connect()
logger.info("Successfully connected to Redis")
self.pubsub = self.client.pubsub()
self.pubsub.subscribe(self.channel)
logger.info("Subscribed to [{channel}] Redis channel".format(
channel=self.channel))
t = Thread(target=self.listen)
t.setDaemon(True)
t.start() | Connects to Redis |
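The subscribe-then-listen-in-a-daemon-thread pattern can be sketched directly with redis-py (host, port and channel are illustrative):

import threading
import redis

client = redis.Redis(host="localhost", port=6379)
pubsub = client.pubsub()
pubsub.subscribe("events")

def listen():
    # blocks forever, printing each published message
    for message in pubsub.listen():
        if message["type"] == "message":
            print(message["data"])

t = threading.Thread(target=listen, daemon=True)
t.start()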
13,780 | def _to_dict(self):
_dict = {}
if hasattr(self, 'id') and self.id is not None:
_dict['id'] = self.id
if hasattr(self, 'metadata') and self.metadata is not None:
_dict['metadata'] = self.metadata
if hasattr(self, 'collection_id') and self.collection_id is not None:
_dict['collection_id'] = self.collection_id
if hasattr(self, 'result_metadata') and self.result_metadata is not None:
_dict['result_metadata'] = self.result_metadata._to_dict()
if hasattr(self, 'title') and self.title is not None:
_dict['title'] = self.title
if hasattr(self, '_additionalProperties'):
for _key in self._additionalProperties:
_value = getattr(self, _key, None)
if _value is not None:
_dict[_key] = _value
return _dict | Return a json dictionary representing this model. |
13,781 | def collapse_spaces(text):
if not isinstance(text, six.string_types):
return text
return COLLAPSE_RE.sub(WS, text).strip(WS) | Remove newlines, tabs and multiple spaces with single spaces. |
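With the module-level constants filled in under a plausible assumption (`WS` a single space, `COLLAPSE_RE` matching runs of whitespace, and `six.string_types` narrowed to `str` for brevity), the behaviour looks like this:

import re

WS = " "
COLLAPSE_RE = re.compile(r"\s+", re.UNICODE)

def collapse_spaces(text):
    if not isinstance(text, str):
        return text
    return COLLAPSE_RE.sub(WS, text).strip(WS)

print(collapse_spaces("a\n\t b   c "))  # -> "a b c"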
13,782 | def _reverse_index(self):
if self.y == 0:
self.display = [u" " * self.size[1]] + self.display[:-1]
else:
self.y -= 1 | Move the cursor up one row in the same column. If the cursor is at the
first row, create a new row at the top. |
13,783 | def thumbnail(self, img_url, size, crop=None, bg=None, quality=85,
storage_type=None, bucket_name=None):
width, height = [int(x) for x in size.split('x')]
thumb_size = (width, height)
url_path, img_name = os.path.split(img_url)
name, fm = os.path.splitext(img_name)
miniature = self._get_name(name, fm, size, crop, bg, quality)
original_filename = os.path.join(self.app.config[], url_path, img_name)
thumb_filename = os.path.join(self.app.config[], url_path, miniature)
thumb_url = os.path.join(self.app.config[], url_path, miniature)
if not (storage_type and bucket_name):
return self._thumbnail_local(original_filename,
thumb_filename,
thumb_size,
thumb_url,
crop=crop,
bg=bg,
quality=quality)
else:
if storage_type != 's3':
raise ValueError('Storage type "%s" is invalid, only "s3" is supported.' % storage_type)
return self._thumbnail_s3(original_filename,
thumb_filename,
thumb_size,
thumb_url,
bucket_name,
crop=crop,
bg=bg,
quality=quality) | :param img_url: url img - '/assets/media/summer.jpg'
:param size: size return thumb - '100x100'
:param crop: crop return thumb - 'fit' or None
:param bg: tuple color or None - (255, 255, 255, 0)
:param quality: JPEG quality 1-100
:param storage_type: either 's3' or None
:param bucket_name: s3 bucket name
:return: :thumb_url: |
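A hedged usage sketch (assuming a flask-thumbnails style extension instance named `thumb`; the paths and bucket are illustrative):

# local storage: returns the URL of a 100x100 fitted thumbnail
url = thumb.thumbnail("/assets/media/summer.jpg", "100x100",
                      crop="fit", quality=85)

# S3-backed storage
url = thumb.thumbnail("/assets/media/summer.jpg", "200x200",
                      storage_type="s3", bucket_name="my-media-bucket")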
13,784 | def _parse_variable(s: str, curr_row: int, curr_col: int) -> Tuple:
def parse_expression(ss, curr_row, curr_col):
ss = ss.replace('$row', str(curr_row))
ss = ss.replace('$col', str(curr_col))
ss = ExcelExtractor._re_row_identifier.sub(
lambda x: str(ExcelExtractor._row_name_to_num(x.group()[1:])) if len(x.group()) > 0 else '', ss)
ss = ExcelExtractor._re_col_identifier.sub(
lambda x: str(ExcelExtractor._col_name_to_num(x.group()[1:])) if len(x.group()) > 0 else '', ss)
return eval(ss)
ss = s.split(',')
if len(ss) == 1:
return parse_expression(ss[0], curr_row, curr_col),
elif len(ss) == 2:
rr, cc = (ss[1], ss[0])
return parse_expression(rr, curr_row, curr_col), parse_expression(cc, curr_row, curr_col)
else:
raise ValueError() | $A,$2 <- constant col and row
$row,$2 <- current col, row 2
$A+1,$2 <- col A + 1 = 2, row 2
$row+1,$2 <- current col + 1, row 2
$A,$2-1 <-- col A, row 2 - 1 = 1 |
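A minimal standalone restatement of the addressing rules, assuming `$A` maps to column 1 and `$2` to row 2 (single-letter columns only; this is an illustration, not the extractor's own helper):

import re

def parse_cell(spec, curr_row, curr_col):
    # spec is "<col expr>,<row expr>", returned as (row, col)
    def resolve(expr):
        expr = expr.replace("$row", str(curr_row)).replace("$col", str(curr_col))
        expr = re.sub(r"\$([A-Za-z])",
                      lambda m: str(ord(m.group(1).upper()) - ord("A") + 1), expr)
        expr = expr.replace("$", "")
        return eval(expr)
    col_expr, row_expr = spec.split(",")
    return resolve(row_expr), resolve(col_expr)

print(parse_cell("$A+1,$2", curr_row=5, curr_col=3))      # (2, 2)
print(parse_cell("$col,$row+1", curr_row=5, curr_col=3))  # (6, 3)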
13,785 | def edit(self, **kwargs):
if 'id' not in kwargs:
kwargs['id'] = self.ratingKey
if 'type' not in kwargs:
kwargs['type'] = utils.searchType(self.type)
part = '/library/sections/%s/all?%s' % (self.librarySectionID,
urlencode(kwargs))
self._server.query(part, method=self._server._session.put) | Edit an object.
Parameters:
kwargs (dict): Dict of settings to edit.
Example:
{'type': 1,
'id': movie.ratingKey,
'collection[0].tag.tag': 'Super',
'collection.locked': 0} |
13,786 | def extend_course(course, enterprise_customer, request):
course_run_id = course['course_runs'][0]['key']
try:
catalog_api_client = CourseCatalogApiServiceClient(enterprise_customer.site)
except ImproperlyConfigured:
error_code =
LOGGER.error(
.format(
error_code=error_code,
userid=request.user.id,
enterprise_customer=enterprise_customer.uuid,
course_run_id=course_run_id,
)
)
messages.add_generic_error_message_with_code(request, error_code)
return ({}, error_code)
course_details, course_run_details = catalog_api_client.get_course_and_course_run(course_run_id)
if not course_details or not course_run_details:
error_code =
LOGGER.error(
.format(
userid=request.user.id,
enterprise_customer=enterprise_customer.uuid,
course_run_id=course_run_id,
error_code=error_code,
)
)
messages.add_generic_error_message_with_code(request, error_code)
return ({}, error_code)
weeks_to_complete = course_run_details['weeks_to_complete']
course_run_image = course_run_details['image'] or {}
course.update({
: course_run_image.get(, ),
: course_run_details[],
: course_run_details.get(, ),
: course_run_details[] or ,
: clean_html_for_template_rendering(course_run_details[] or ),
: course_details.get(, []),
: course_run_details.get(, []),
: ungettext_min_max(
,
,
,
course_run_details[] or None,
course_run_details[] or None,
) or ,
: ungettext(
,
,
weeks_to_complete
).format(weeks_to_complete) if weeks_to_complete else ,
})
return course, None | Extend a course with more details needed for the program landing page.
In particular, we add the following:
* `course_image_uri`
* `course_title`
* `course_level_type`
* `course_short_description`
* `course_full_description`
* `course_effort`
* `expected_learning_items`
* `staff` |
13,787 | def render(self, context, instance, placeholder):
context = super(LocationListPlugin,self).render(context,instance,placeholder)
context['locations'] = Location.objects.filter(status=Location.StatusChoices.active)
return context | Allows this plugin to use templates designed for a list of locations. |
13,788 | def make_backups(self, block_id):
assert self.setup, "Not set up yet. Call .db_setup() first!"
if self.backup_frequency is not None:
if (block_id % self.backup_frequency) == 0:
backup_dir = config.get_backups_directory(self.impl, self.working_dir)
if not os.path.exists(backup_dir):
try:
os.makedirs(backup_dir)
except Exception as e:
log.exception(e)
log.error("FATAL: failed to make backup directory %s" % backup_dir)
traceback.print_stack()
os.abort()
for p in self.get_state_paths(self.impl, self.working_dir):
if os.path.exists(p):
try:
pbase = os.path.basename(p)
backup_path = os.path.join(backup_dir, pbase + (".bak.{}".format(block_id - 1)))
if not os.path.exists(backup_path):
rc = sqlite3_backup(p, backup_path)
if not rc:
log.warning("Failed to back up as an SQLite db. Falling back to /bin/cp")
shutil.copy(p, backup_path)
else:
log.error("Will not overwrite " % backup_path)
except Exception, e:
log.exception(e)
log.error("FATAL: failed to back up " % p)
traceback.print_stack()
os.abort()
return | If we're doing backups on a regular basis, then
carry them out here if it is time to do so.
This method does nothing otherwise.
Return None on success
Abort on failure |
13,789 | def run(self, host=None, port=None, debug=None, use_reloader=None,
open_browser=False):
if host is None:
host = self.config['HOST']
if port is None:
port = self.config['PORT']
if debug is None:
debug = self.debug
if use_reloader is None:
use_reloader = self.config['DEBUG_GRIP']
with self._run_mutex:
if self._shutdown_event:
raise AlreadyRunningError()
self._shutdown_event = threading.Event()
if self.auth and not self.quiet:
if isinstance(self.auth, tuple):
username, password = self.auth
auth_method = (.format(username)
if username
else )
else:
auth_method = type(self.auth).__name__
print(, auth_method, file=sys.stderr)
if port == 0 and open_browser:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((host, 0))
port = sock.getsockname()[1]
sock.close()
browser_thread = (
start_browser_when_ready(host, port, self._shutdown_event)
if open_browser else None)
super(Grip, self).run(host, port, debug=debug,
use_reloader=use_reloader,
threaded=True)
if not self.quiet:
print()
self._shutdown_event.set()
if browser_thread:
browser_thread.join()
self._shutdown_event = None | Starts a server to render the README. |
13,790 | def select_inverse(self, name="default", executor=None):
def create(current):
return selections.SelectionInvert(current)
self._selection(create, name, executor=executor) | Invert the selection, i.e. what is selected will not be, and vice versa
:param str name:
:param executor:
:return: |
13,791 | def do_set_logical_switch_config(self, line):
def f(p, args):
try:
target, lsw, key, value = args
except:
print("argument error")
return
o = p.get_config(target)
capable_switch_id = o.id
try:
capable_switch = ofc.OFCapableSwitchType(
id=capable_switch_id,
logical_switches=ofc.OFCapableSwitchLogicalSwitchesType(
switch=[ofc.OFLogicalSwitchType(
id=lsw,
**{key: value}
)]
)
)
except TypeError:
print("argument error")
return
try:
p.edit_config(target, capable_switch)
except Exception as e:
print(e)
self._request(line, f) | set_logical_switch_config <peer> <logical switch> <key> <value>
eg. set_logical_switch_config sw1 running LogicalSwitch7 \
lost-connection-behavior failStandaloneMode |
13,792 | async def load_message(obj, msg_type, msg=None, field_archiver=None):
msg = msg_type() if msg is None else msg
fields = msg_type.f_specs() if msg_type else msg.__class__.f_specs()
for field in fields:
await load_message_field(obj, msg, field, field_archiver=field_archiver)
return msg | Loads message if the given type from the object.
Supports reading directly to existing message.
:param obj:
:param msg_type:
:param msg:
:param field_archiver:
:return: |
13,793 | def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
retry_message="", cmd_env=None):
env = None
kwargs = {}
if cmd_env:
env = os.environ.copy()
env.update(cmd_env)
kwargs['env'] = env
if not retry_message:
retry_message = "Failed executing '{}'".format(" ".join(cmd))
retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY)
retry_count = 0
result = None
retry_results = (None,) + retry_exitcodes
while result in retry_results:
try:
result = subprocess.check_call(cmd, **kwargs)
except subprocess.CalledProcessError as e:
retry_count = retry_count + 1
if retry_count > max_retries:
raise
result = e.returncode
log(retry_message)
time.sleep(CMD_RETRY_DELAY) | Run a command and retry until success or max_retries is reached.
:param: cmd: str: The apt command to run.
:param: max_retries: int: The number of retries to attempt on a fatal
command. Defaults to CMD_RETRY_COUNT.
:param: retry_exitcodes: tuple: Optional additional exit codes to retry.
Defaults to retry on exit code 1.
:param: retry_message: str: Optional log prefix emitted during retries.
:param: cmd_env: dict: Environment variables to add to the command run. |
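A hedged usage sketch (the command, exit codes and environment are illustrative):

# retry "apt-get update" up to five times on exit codes 1 or 100,
# keeping the frontend non-interactive
_run_with_retries(
    ["apt-get", "--assume-yes", "update"],
    max_retries=5,
    retry_exitcodes=(1, 100),
    cmd_env={"DEBIAN_FRONTEND": "noninteractive"},
)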
13,794 | def _set_rsvp(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=rsvp.rsvp, is_container=, presence=False, yang_name="rsvp", rest_name="rsvp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=False)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__rsvp = t
if hasattr(self, '_set'):
self._set() | Setter method for rsvp, mapped from YANG variable /mpls_state/rsvp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_rsvp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_rsvp() directly.
YANG Description: MPLS RSVP Operational Information |
13,795 | def queue(p_queue, host=None):
if host is not None:
return _path(_c.FSQ_QUEUE, root=_path(host, root=hosts(p_queue)))
return _path(p_queue, _c.FSQ_QUEUE) | Construct a path to the queue dir for a queue |
13,796 | def strace_set_buffer_size(self, size):
size = ctypes.c_uint32(size)
res = self._dll.JLINK_STRACE_Control(enums.JLinkStraceCommand.SET_BUFFER_SIZE, size)
if res < 0:
raise errors.JLinkException()
return None | Sets the STRACE buffer size.
Args:
self (JLink): the ``JLink`` instance.
Returns:
``None``
Raises:
JLinkException: on error. |
13,797 | def allocate_port():
sock = socket.socket()
try:
sock.bind(("localhost", 0))
return get_port(sock)
finally:
sock.close() | Allocate an unused port.
There is a small race condition here (between the time we allocate the
port, and the time it actually gets used), but for the purposes for which
this function gets used it isn't a problem in practice. |
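A standalone equivalent (inlining the `get_port` helper as `getsockname()`); binding to port 0 asks the OS for any free ephemeral port:

import socket

def allocate_port():
    sock = socket.socket()
    try:
        sock.bind(("localhost", 0))
        return sock.getsockname()[1]  # the port the OS picked
    finally:
        sock.close()

print(allocate_port())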
13,798 | def getNetworkSummary(self, suid, verbose=None):
surl=self.___url
sv=surl.split('/')[-1]
surl=surl.rstrip(sv+'/')
response=api(url=surl++sv++str(suid)+, method="GET", verbose=verbose, parse_params=False)
return response | Returns summary of collection containing the specified network.
:param suid: Cytoscape Collection/Subnetwork SUID
:param verbose: print more
:returns: 200: successful operation |
13,799 | def js_click(self, selector, by=By.CSS_SELECTOR):
selector, by = self.__recalculate_selector(selector, by)
if by == By.LINK_TEXT:
message = (
"Pure JavaScript doesnt blocked. "
"For now, self.js_click() will use a regular WebDriver click.")
logging.debug(message)
self.click(selector, by=by)
return
element = self.wait_for_element_present(
selector, by=by, timeout=settings.SMALL_TIMEOUT)
if self.is_element_visible(selector, by=by):
self.__demo_mode_highlight_if_active(selector, by)
if not self.demo_mode:
self.__scroll_to_element(element)
css_selector = self.convert_to_css_selector(selector, by=by)
css_selector = re.escape(css_selector)
css_selector = self.__escape_quotes_if_needed(css_selector)
self.__js_click(selector, by=by)
self.__demo_mode_pause_if_active() | Clicks an element using pure JS. Does not use jQuery. |