Unnamed: 0 (int64, 0–389k) | code (string, 26–79.6k chars) | docstring (string, 1–46.9k chars) |
---|---|---|
11,200 | def maybe_gzip_open(path, *args, **kwargs):
path = path_to_str(path)
if path.endswith('.gz') or path.endswith('.gzip'):
_open = gzip.open
else:
_open = open
return _open(path, *args, **kwargs) | Open file with either open or gzip.open, depending on file extension.
This function doesn't handle json lines format, just opens a file
in a way it is decoded transparently if needed. |
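A minimal usage sketch (assuming maybe_gzip_open and its path_to_str helper are importable from the same module): a ".gz" path routes through gzip.open, anything else through the builtin open.
import gzip

with gzip.open("records.jl.gz", "wt") as f:        # create a small gzipped file
    f.write('{"a": 1}\n')

with maybe_gzip_open("records.jl.gz", "rt") as f:  # decompressed transparently
    print(f.read())

with maybe_gzip_open("records.jl", "wt") as f:     # plain path falls back to open()
    f.write('{"a": 1}\n')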
11,201 | def __get_data_en_intervalo(self, d0=None, df=None):
params = {: self.DATE_FMT,
: self.USAR_MULTITHREAD, : self.MAX_THREADS_REQUESTS,
: self.TIMEOUT, : self.NUM_RETRIES,
: self.procesa_data_dia, : self.url_data_dia,
: self.MAX_ACT_EXEC,
: {: self.HEADERS,
: self.JSON_REQUESTS,
: self.PARAMS_REQUESTS},
: self.verbose}
data_get, hay_errores, str_import = get_data_en_intervalo(d0, df, **params)
if not hay_errores:
self.integridad_data(data_get)
self.printif(str_import, )
if type(data_get) is pd.DataFrame:
data_get = {self.masterkey: data_get}
return data_get
else:
return None | Fetches the raw data from the network by making multiple requests at a time.
Processes the raw data obtained from the network, converting it into a Pandas DataFrame |
11,202 | def match(self, regex, flags=0):
if isinstance(regex, str):
regex = re.compile(regex, flags)
match = regex.match(self.text, self.index)
if not match:
return None
start, end = match.start(), match.end()
lines = self.text.count('\n', start, end)
self.index = end
if lines:
self.colno = end - self.text.rfind('\n', start, end) - 1
self.lineno += lines
else:
self.colno += end - start
return match | Matches the specified *regex* from the current character of the *scanner*
and returns the result. The Scanner's column and line numbers are updated
accordingly.
# Arguments
regex (str, Pattern): The regex to match.
flags (int): The flags to use when compiling the pattern. |
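A self-contained sketch of the line/column bookkeeping this method performs; the MiniScanner class below is an assumption for illustration, not the original Scanner API.
import re

class MiniScanner:
    def __init__(self, text):
        self.text, self.index, self.lineno, self.colno = text, 0, 1, 0

    def match(self, regex, flags=0):
        if isinstance(regex, str):
            regex = re.compile(regex, flags)
        m = regex.match(self.text, self.index)
        if not m:
            return None
        start, end = m.start(), m.end()
        lines = self.text.count('\n', start, end)
        self.index = end
        if lines:
            # column restarts after the last newline inside the match
            self.colno = end - self.text.rfind('\n', start, end) - 1
            self.lineno += lines
        else:
            self.colno += end - start
        return m

s = MiniScanner("foo\nbar baz")
s.match(r"foo\nbar ")
print(s.lineno, s.colno)   # 2 4 -- cursor is after "bar " on the second line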
11,203 | def _sample_mvn(mean, cov, cov_structure=None, num_samples=None):
mean_shape = tf.shape(mean)
S = num_samples if num_samples is not None else 1
D = mean_shape[-1]
leading_dims = mean_shape[:-2]
num_leading_dims = tf.size(leading_dims)
if cov_structure == "diag":
with tf.control_dependencies([tf.assert_equal(tf.rank(mean), tf.rank(cov))]):
eps_shape = tf.concat([leading_dims, [S], mean_shape[-2:]], 0)
eps = tf.random_normal(eps_shape, dtype=settings.float_type)
samples = mean[..., None, :, :] + tf.sqrt(cov)[..., None, :, :] * eps
elif cov_structure == "full":
with tf.control_dependencies([tf.assert_equal(tf.rank(mean) + 1, tf.rank(cov))]):
jittermat = (
tf.eye(D, batch_shape=mean_shape[:-1], dtype=settings.float_type)
* settings.jitter
)
eps_shape = tf.concat([mean_shape, [S]], 0)
eps = tf.random_normal(eps_shape, dtype=settings.float_type)
chol = tf.cholesky(cov + jittermat)
samples = mean[..., None] + tf.matmul(chol, eps)
samples = misc.leading_transpose(samples, [..., -1, -3, -2])
else:
raise NotImplementedError
if num_samples is None:
return samples[..., 0, :, :]
return samples | Returns a sample from a D-dimensional Multivariate Normal distribution
:param mean: [..., N, D]
:param cov: [..., N, D] or [..., N, D, D]
:param cov_structure: "diag" or "full"
- "diag": cov holds the diagonal elements of the covariance matrix
- "full": cov holds the full covariance matrix (without jitter)
:return: sample from the MVN of shape [..., (S), N, D], S = num_samples |
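A NumPy sketch of the same sampling idea under the "diag"/"full" convention described above; it assumes no leading batch dimensions (mean is [N, D]) and is not the original TensorFlow implementation.
import numpy as np

def sample_mvn_np(mean, cov, cov_structure="diag", num_samples=1, jitter=1e-6):
    N, D = mean.shape
    if cov_structure == "diag":                      # cov is [N, D]: per-dimension variances
        eps = np.random.randn(num_samples, N, D)
        return mean[None, :, :] + np.sqrt(cov)[None, :, :] * eps
    elif cov_structure == "full":                    # cov is [N, D, D]: full covariance per point
        chol = np.linalg.cholesky(cov + jitter * np.eye(D))
        eps = np.random.randn(N, D, num_samples)
        samples = mean[:, :, None] + chol @ eps      # [N, D, S]
        return np.transpose(samples, (2, 0, 1))      # [S, N, D]
    raise NotImplementedError(cov_structure)

mean = np.zeros((5, 2))
print(sample_mvn_np(mean, np.ones((5, 2)), "diag", num_samples=3).shape)            # (3, 5, 2)
print(sample_mvn_np(mean, np.stack([np.eye(2)] * 5), "full", num_samples=3).shape)  # (3, 5, 2)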
11,204 | def _process_witness(self, witness, matches):
tokens = witness.get_tokens()
full_text = witness.get_token_content()
fields = [constants.NGRAM_FIELDNAME, constants.SIZE_FIELDNAME]
match_slices = []
for index, (ngram, size) in matches[fields].iterrows():
pattern = re.compile(re.escape(ngram))
start = 0
while True:
match = pattern.search(full_text, start)
if match is None:
break
match_slices.append([match.start(), match.end()])
start = match.start() + 1
merged_slices = self._merge_slices(match_slices)
match_content = self._generate_text_from_slices(
full_text, merged_slices)
match_text = Text(match_content, self._tokenizer)
return len(tokens), len(match_text.get_tokens()) | Return the counts of total tokens and matching tokens in `witness`.
:param witness: witness text
:type witness: `tacl.WitnessText`
:param matches: n-gram matches
:type matches: `pandas.DataFrame`
:rtype: `tuple` of `int` |
11,205 | def append_this_package_path(depth=1):
from .caller import caller
logg.debug(, caller.modulename(depth + 1))
c = caller.abspath(depth + 1)
logg.debug(, c)
p = guess_package_path(dirname(c))
if p:
logg.debug(, p)
append_sys_path(p)
else:
logg.debug(, c) | Used in this_package.py:
import snipy.this_package |
11,206 | def getpackagepath():
moduleDirectory = os.path.dirname(__file__)
packagePath = os.path.dirname(__file__) + "/../"
return packagePath | *Get the root path for this python package - used in unit testing code* |
11,207 | def subsample_n(X, n=0, seed=0):
if n < 0:
raise ValueError()
np.random.seed(seed)
n = X.shape[0] if (n == 0 or n > X.shape[0]) else n
rows = np.random.choice(X.shape[0], size=n, replace=False)
Xsampled = X[rows]
return Xsampled, rows | Subsample n samples from rows of array.
Parameters
----------
X : np.ndarray
Data array.
seed : int
Seed for sampling.
Returns
-------
Xsampled : np.ndarray
Subsampled X.
rows : np.ndarray
Indices of rows that are stored in Xsampled. |
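Usage sketch, assuming subsample_n is in scope:
import numpy as np

X = np.arange(20).reshape(10, 2)
Xs, rows = subsample_n(X, n=3, seed=0)
print(rows, Xs.shape)             # three distinct row indices, (3, 2)
Xall, rows_all = subsample_n(X)   # n=0 (default) keeps all rows, in random order
print(Xall.shape)                 # (10, 2)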
11,208 | def load_metadata_for_topics(self, *topics, **kwargs):
if 'ignore_leadernotavailable' in kwargs:
ignore_leadernotavailable = kwargs['ignore_leadernotavailable']
else:
ignore_leadernotavailable = False
if topics:
self.reset_topic_metadata(*topics)
else:
self.reset_all_metadata()
resp = self.send_metadata_request(topics)
log.debug(, resp.brokers)
log.debug(, [topic for _, topic, _ in resp.topics])
self.brokers = dict([(nodeId, BrokerMetadata(nodeId, host, port, None))
for nodeId, host, port in resp.brokers])
for error, topic, partitions in resp.topics:
if error:
error_type = kafka.errors.kafka_errors.get(error, UnknownError)
if error_type in (UnknownTopicOrPartitionError, LeaderNotAvailableError):
log.error(,
topic, error_type, error)
if topic not in topics:
continue
elif (error_type is LeaderNotAvailableError and
ignore_leadernotavailable):
continue
raise error_type(topic)
self.topic_partitions[topic] = {}
for error, partition, leader, _, _ in partitions:
self.topic_partitions[topic][partition] = leader
topic_part = TopicPartition(topic, partition)
if error:
error_type = kafka.errors.kafka_errors.get(error, UnknownError)
if error_type is LeaderNotAvailableError:
log.error(, topic, partition)
self.topics_to_brokers[topic_part] = None
continue
elif error_type is ReplicaNotAvailableError:
log.debug(, topic, partition)
else:
raise error_type(topic_part)
if leader in self.brokers:
self.topics_to_brokers[topic_part] = self.brokers[leader]
else:
self.topics_to_brokers[topic_part] = BrokerMetadata(
leader, None, None, None
) | Fetch broker and topic-partition metadata from the server.
Updates internal data: broker list, topic/partition list, and
topic/partition -> broker map. This method should be called after
receiving any error.
Note: Exceptions *will not* be raised in a full refresh (i.e. no topic
list). In this case, error codes will be logged as errors.
Partition-level errors will also not be raised here (a single partition
w/o a leader, for example).
Arguments:
*topics (optional): If a list of topics is provided,
the metadata refresh will be limited to the specified topics
only.
ignore_leadernotavailable (bool): suppress LeaderNotAvailableError
so that metadata is loaded correctly during auto-create.
Default: False.
Raises:
UnknownTopicOrPartitionError: Raised for topics that do not exist,
unless the broker is configured to auto-create topics.
LeaderNotAvailableError: Raised for topics that do not exist yet,
when the broker is configured to auto-create topics. Retry
after a short backoff (topics/partitions are initializing). |
11,209 | def tls_session_update(self, msg_str):
super(SSLv2ServerHello, self).tls_session_update(msg_str)
s = self.tls_session
client_cs = s.sslv2_common_cs
css = [cs for cs in client_cs if cs in self.ciphers]
s.sslv2_common_cs = css
s.sslv2_connection_id = self.connection_id
s.tls_version = self.version
if self.cert is not None:
s.server_certs = [self.cert] | XXX Something should be done about the session ID here. |
11,210 | def map_alignment(self, prot_seq, nucl_seq):
if prot_seq.id != nucl_seq.id:
logging.warning(
,
prot_seq.id, nucl_seq.id)
codons = batch(str(nucl_seq.seq.ungap()), 3)
codons = [''.join(i) for i in codons]
codon_iter = iter(codons)
ungapped_prot = str(prot_seq.seq).replace('-', '')
if len(ungapped_prot) != len(codons):
table = self.translation_table.forward_table
prot_str = .join( + p + for p in ungapped_prot)
codon_str = .join(codons)
trans_str = .join( + table.get(codon, ) +
for codon in codons)
raise ValueError(.format(len(codons), len(ungapped_prot), nucl_seq.id, prot_str,
codon_str, trans_str))
try:
nucl_align = ['---' if p == '-' else next(codon_iter)
for p in str(prot_seq.seq)]
except StopIteration:
assert False
result = SeqRecord(Seq(''.join(nucl_align)), id=nucl_seq.id,
description=nucl_seq.description)
self._validate_translation(prot_seq.seq.upper(), result.seq.upper())
return result | Use aligned prot_seq to align nucl_seq |
11,211 | def lessons(self):
if hasattr(self, '_lessons'):
return self._lessons
else:
self.get_lesson()
return self._lessons | Return the lessons; if ``get_lesson()`` has not been called yet, it is called automatically.
:return: list of lessons
:rtype: list |
11,212 | def mme_matches(case_obj, institute_obj, mme_base_url, mme_token):
data = {
: institute_obj,
: case_obj,
: []
}
matches = {}
if not case_obj.get():
return None
for patient in case_obj[][]:
patient_id = patient[]
matches[patient_id] = None
url = .join([ mme_base_url, , patient_id])
server_resp = matchmaker_request(url=url, token=mme_token, method=)
if in server_resp:
pat_matches = []
if server_resp.get():
pat_matches = parse_matches(patient_id, server_resp[])
matches[patient_id] = pat_matches
else:
LOG.warning(.format(server_resp[]))
data[].append(server_resp[])
data[] = matches
return data | Show Matchmaker submission data for a sample and eventual matches.
Args:
case_obj(dict): a scout case object
institute_obj(dict): an institute object
mme_base_url(str) base url of the MME server
mme_token(str) auth token of the MME server
Returns:
data(dict): data to display in the html template |
11,213 | def rug(x, label=None, opacity=None):
x = _try_pydatetime(x)
x = np.atleast_1d(x)
data = [
go.Scatter(
x=x,
y=np.ones_like(x),
name=label,
opacity=opacity,
mode=,
marker=dict(symbol=),
)
]
layout = dict(
barmode=,
hovermode=,
legend=dict(traceorder=),
xaxis1=dict(zeroline=False),
yaxis1=dict(
domain=[0.85, 1],
showline=False,
showgrid=False,
zeroline=False,
anchor=,
position=0.0,
showticklabels=False,
),
)
return Chart(data=data, layout=layout) | Rug chart.
Parameters
----------
x : array-like, optional
label : TODO, optional
opacity : TODO, optional
Returns
-------
Chart |
11,214 | def get_application_access_token(application_id, application_secret_key, api_version=None):
graph = GraphAPI(version=api_version)
response = graph.get(
path=,
client_id=application_id,
client_secret=application_secret_key,
grant_type=
)
try:
data = parse_qs(response)
try:
return data[][0]
except KeyError:
raise GraphAPI.FacebookError()
except AttributeError:
return response[], None | Get an OAuth access token for the given application.
:param application_id: An integer describing a Facebook application's ID.
:param application_secret_key: A string describing a Facebook application's secret key. |
11,215 | def other_supplementary_files(self):
if self._other_supplementary_files is not None:
return self._other_supplementary_files
return getattr(self.nb.metadata, , None) | The supplementary files of this notebook |
11,216 | def transformToRef(self):
if in self.pars and self.pars[] == :
log.info()
self.outxy = np.column_stack([self.all_radec[0][:,np.newaxis],self.all_radec[1][:,np.newaxis]])
skypos = self.wcs.wcs_pix2world(self.all_radec[0],self.all_radec[1],self.origin)
self.all_radec[0] = skypos[0]
self.all_radec[1] = skypos[1]
else:
log.info(%self.name+
)
self.refWCS = self.wcs
outxy = self.wcs.wcs_world2pix(self.all_radec[0],self.all_radec[1],self.origin)
self.outxy = np.column_stack([outxy[0][:,np.newaxis],outxy[1][:,np.newaxis]]) | Transform reference catalog sky positions (self.all_radec)
to reference tangent plane (self.wcs) to create output X,Y positions. |
11,217 | def main():
now = datetime.datetime.now
try:
while True:
sys.stdout.write(str(now()) + )
time.sleep(1)
except KeyboardInterrupt:
pass
except IOError as exc:
if exc.errno != errno.EPIPE:
raise | Slowly writes to stdout, without emitting a newline so any output
buffering (or input for next pipeline command) can be detected. |
11,218 | def tree_build(self):
from skbio.tree import TreeNode
nodes = {}
for tax_id in self.taxonomy.index:
node = TreeNode(name=tax_id, length=1)
node.tax_name = self.taxonomy["name"][tax_id]
node.rank = self.taxonomy["rank"][tax_id]
node.parent_tax_id = self.taxonomy["parent_tax_id"][tax_id]
nodes[tax_id] = node
for tax_id in self.taxonomy.index:
try:
parent = nodes[nodes[tax_id].parent_tax_id]
except KeyError:
if tax_id != "1":
warnings.warn(
"tax_id={} has parent_tax_id={} which is not in tree"
"".format(tax_id, nodes[tax_id].parent_tax_id)
)
continue
parent.append(nodes[tax_id])
return nodes["1"] | Build a tree from the taxonomy data present in this `ClassificationsDataFrame` or
`SampleCollection`.
Returns
-------
`skbio.tree.TreeNode`, the root node of a tree that contains all the taxa in the current
analysis and their parents leading back to the root node. |
11,219 | def read_full(data, size):
default_read_size = 32768
chunk = io.BytesIO()
chunk_size = 0
while chunk_size < size:
read_size = default_read_size
if (size - chunk_size) < default_read_size:
read_size = size - chunk_size
current_data = data.read(read_size)
if not current_data or len(current_data) == 0:
break
chunk.write(current_data)
chunk_size+= len(current_data)
return chunk.getvalue() | read_full reads exactly `size` bytes from reader. returns
`size` bytes.
:param data: Input stream to read from.
:param size: Number of bytes to read from `data`.
:return: Returns :bytes:`part_data` |
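Usage sketch: read_full keeps reading 32 KiB chunks until it has `size` bytes or the stream is exhausted (assuming read_full is in scope).
import io

stream = io.BytesIO(b"x" * 100000)
print(len(read_full(stream, 65536)))   # 65536 -- exactly the requested size
print(len(read_full(stream, 65536)))   # 34464 -- stream ran out, returns what was left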
11,220 | def get_other_props(all_props, reserved_props):
if hasattr(all_props, 'items') and callable(all_props.items):
return dict([(k,v) for (k,v) in list(all_props.items()) if k not in
reserved_props])
return None | Retrieve the non-reserved properties from a dictionary of properties
@args reserved_props: The set of reserved properties to exclude |
11,221 | def add_model(self, model_type: str, model_uuid: str, meta: dict,
template_model: Template, update_default: bool=False):
if update_default or model_type not in self.meta:
self.meta[model_type] = meta["default"]
model_meta = meta["model"]
self.models.setdefault(model_type, {})[model_uuid] = model_meta
model_directory = os.path.join(self.cached_repo, model_type)
os.makedirs(model_directory, exist_ok=True)
model = os.path.join(model_directory, model_uuid + ".md")
if os.path.exists(model):
os.remove(model)
links = {}
for m_type, items in self.models.items():
for uuid in items:
if uuid in model_meta["dependencies"]:
links[uuid] = os.path.join("/", m_type, "%s.md" % uuid)
with open(model, "w") as fout:
fout.write(template_model.render(model_type=model_type, model_uuid=model_uuid,
meta=model_meta, links=links))
git.add(self.cached_repo, [model])
self._log.info("Added %s", model) | Add a new model to the registry. Call `upload()` to update the remote side. |
11,222 | def file_checksum(filepath):
checksum = hashlib.md5()
with open(filepath, 'rb') as f:
for fragment in iter(lambda: f.read(65536), b''):
checksum.update(fragment)
return checksum.hexdigest() | Calculate md5 checksum on file
:param filepath: Full path to file (e.g. /home/stack/image.qcow2)
:type filepath: string |
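Usage sketch (assuming file_checksum is in scope); the streamed digest matches hashing the whole file at once:
import hashlib, tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"hello world\n")
    path = tmp.name
print(file_checksum(path))
print(hashlib.md5(open(path, "rb").read()).hexdigest())   # same value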
11,223 | def script(script, interpreter=, suffix=, args=, **kwargs):
return SoS_ExecuteScript(script, interpreter, suffix, args).run(**kwargs) | Execute specified script using specified interpreter. This action accepts common
action arguments such as input, active, workdir, docker_image and args. In particular,
content of one or more files specified by option input would be prepended before
the specified script. |
11,224 | def execute(self, sql, args):
c = None
try:
c = self._con.cursor()
LOGGER.debug("execute sql: " + sql + " args:" + str(args))
if type(args) is tuple:
c.execute(sql, args)
elif type(args) is list:
if len(args) > 1 and type(args[0]) in (list, tuple):
c.executemany(sql, args)
else:
c.execute(sql, args)
elif args is None:
c.execute(sql)
if sql.lstrip()[:6].upper() == 'INSERT':
return c.lastrowid
return c.rowcount
finally:
c and c.close() | Execute sql
:param sql string: the sql statement, e.g. 'select * from %s'
:param args list: when set to None, dbi execute(sql) is used; otherwise
db execute(sql, args) is used. The args follow the usual DB-API rules and should be a tuple or a list of lists |
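A self-contained sqlite3 sketch of the same argument dispatch (tuple -> execute, list of rows -> executemany, None -> plain execute; INSERT returns lastrowid, everything else rowcount). It is an illustration, not the original class.
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE t (id INTEGER PRIMARY KEY, name TEXT)")

def execute(sql, args=None):
    c = con.cursor()
    try:
        if isinstance(args, tuple):
            c.execute(sql, args)
        elif isinstance(args, list) and args and isinstance(args[0], (list, tuple)):
            c.executemany(sql, args)          # list of rows -> executemany
        elif args is not None:
            c.execute(sql, args)
        else:
            c.execute(sql)
        return c.lastrowid if sql.lstrip()[:6].upper() == "INSERT" else c.rowcount
    finally:
        c.close()

print(execute("INSERT INTO t (name) VALUES (?)", ("alice",)))     # 1 (lastrowid)
execute("INSERT INTO t (name) VALUES (?)", [("bob",), ("eve",)])  # batch insert
print(execute("UPDATE t SET name = upper(name)"))                 # 3 (rowcount)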
11,225 | def xmoe_dense_4k():
hparams = mtf_transformer.mtf_transformer_base_lm()
hparams.attention_dropout = 0.0
hparams.relu_dropout = 0.0
hparams.layer_prepostprocess_dropout = 0.0
hparams.batch_size = 128
hparams.d_model = 512
hparams.d_kv = 128
hparams.num_heads = 4
hparams.decoder_layers = ["att", "drd"] * 4
hparams.shared_embedding_and_softmax_weights = False
hparams.learning_rate_schedule = "rsqrt_decay"
hparams.d_ff = 4096
hparams.layout = "batch:batch;vocab:model;d_ff:model;heads:model"
hparams.mesh_shape = "batch:8"
return hparams | Series of architectural experiments on cheap language models.
For all of these architectures, we run on languagemodel_lm1b8k_packed
for 32000 steps.
All log-perplexities are per-token - multiply by 1.298 for per-word
Results:
model params(M) einsum alltoall mxu-util log-ppl
xmoe_dense_4k 30 3.0e12 0 45% 3.31
xmoe_dense_8k 46 4.7e12 0 49% 3.24
xmoe_dense_64k 282 2.8e13 0 3.06
xmoe_top_2 282 4.0e12 3.4e8 36% 3.07
xmoe_top_2_c15 282 4.5e12 4.0e8 38% 3.07
xmoe_2d 282 5.3e12 7.6e8 34% 3.06
Trained at 4x the batch size:
xmoe_2d_88 1090 2.1e13 3.0e9 24% 3.07
Note: configurations and code are likely to change without notice.
Returns:
a hparams |
11,226 | def local_path(self, url, filename=None, decompress=False, download=False):
if download:
return self.fetch(url=url, filename=filename, decompress=decompress)
else:
filename = self.local_filename(url, filename, decompress)
return join(self.cache_directory_path, filename) | What will the full local path be if we download the given file? |
11,227 | def get_resolved_res_configs(self, rid, config=None):
resolver = ARSCParser.ResourceResolver(self, config)
return resolver.resolve(rid) | Return a list of resolved resource IDs with their corresponding configuration.
It has a similar return type as :meth:`get_res_configs` but also handles complex entries
and references.
Also instead of returning :class:`ARSCResTableEntry` in the tuple, the actual values are resolved.
This is the preferred way of resolving resource IDs to their resources.
:param int rid: the numerical ID of the resource
:param ARSCTableResConfig config: the desired configuration or None to retrieve all
:return: A list of tuples of (ARSCResTableConfig, str) |
11,228 | def save_formset_with_author(formset, user):
instances = formset.save(commit=False)
for obj in formset.deleted_objects:
obj.delete()
for instance in instances:
if user.is_authenticated() and hasattr(instance, 'author') and not instance.author:
instance.author = user
instance.save()
formset.save_m2m() | Sets the author on the models coming from the formset
:param formset: the formset
:param user: the author
:return: |
11,229 | def keep_on_one_line():
class CondensedStream:
def __init__(self):
self.sys_stdout = sys.stdout
def write(self, string):
with swap_streams(self.sys_stdout):
string = string.replace(, )
string = truncate_to_fit_terminal(string)
if string.strip():
update(string)
def flush(self):
with swap_streams(self.sys_stdout):
flush()
with swap_streams(CondensedStream()):
yield | Keep all the output generated within a with-block on one line. Whenever a
new line would be printed, instead reset the cursor to the beginning of the
line and print the new line without a line break. |
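Usage sketch, assuming keep_on_one_line is importable (it is used as a context manager, presumably decorated with contextlib.contextmanager in the original module):
import time

with keep_on_one_line():
    for i in range(5):
        print("processing item", i)   # each print overwrites the previous line
        time.sleep(0.2)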
11,230 | def create(annot=None, config=None, id=None, ui=None):
return Network(
annot=annot,
config=config,
id=id,
ui=ui,
) | :type annot: dict
:type config: NetworkConfig
:type id: str
:type ui: dict
:rtype: Network |
11,231 | def qft(circ, q, n):
for j in range(n):
for k in range(j):
circ.cu1(math.pi / float(2**(j - k)), q[j], q[k])
circ.h(q[j]) | n-qubit QFT on q in circ. |
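A dependency-free sketch that simply enumerates the gates the double loop applies for n = 3 (each qubit gets controlled-phase rotations of angle pi/2**(j-k) followed by a Hadamard):
n = 3
for j in range(n):
    for k in range(j):
        print("cu1(pi/%d) on q[%d], q[%d]" % (2 ** (j - k), j, k))
    print("h on q[%d]" % j)
# h on q[0]
# cu1(pi/2) on q[1], q[0]
# h on q[1]
# cu1(pi/4) on q[2], q[0]
# cu1(pi/2) on q[2], q[1]
# h on q[2]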
11,232 | def _ParseBooleanValue(self, byte_stream):
if byte_stream == b'\x00':
return False
if byte_stream == b'\x01':
return True
raise errors.ParseError() | Parses a boolean value.
Args:
byte_stream (bytes): byte stream.
Returns:
bool: boolean value.
Raises:
ParseError: when the boolean value cannot be parsed. |
11,233 | def delete_profile_extension_members(self, profile_extension, query_column, ids_to_delete):
profile_extension = profile_extension.get_soap_object(self.client)
result = self.call(
, profile_extension, query_column, ids_to_delete)
if hasattr(result, ):
return [DeleteResult(delete_result) for delete_result in result]
return [DeleteResult(result)] | Responsys.deleteProfileExtensionRecords call
Accepts:
InteractObject profile_extension
list field_list
list ids_to_retrieve
string query_column
default: 'RIID'
Returns list of DeleteResults |
11,234 | def load(pattern, *args, **kw):
cssmypackage:static/style/**.css
spec = pattern
if not in pattern:
raise ValueError()
pkgname, pkgpat = pattern.split(, 1)
pkgdir, pattern = globre.compile(pkgpat, split_prefix=True, flags=globre.EXACT)
if pkgdir:
idx = pkgdir.rfind()
pkgdir = pkgdir[:idx] if idx >= 0 else
group = AssetGroup(pkgname, pkgdir, pattern, spec)
if globre.iswild(pkgpat):
return group
return Asset(group, pkgname, pkgpat) | Given a package asset-spec glob-pattern `pattern`, returns an
:class:`AssetGroup` object, which in turn can act as a generator of
:class:`Asset` objects that match the pattern.
Example:
.. code-block:: python
import asset
# concatenate all 'css' files into one string:
css = asset.load('mypackage:static/style/**.css').read() |
11,235 | def main(arguments=None):
su = tools(
arguments=arguments,
docString=__doc__,
logLevel="WARNING",
options_first=False,
projectName="qubits"
)
arguments, settings, log, dbConn = su.setup()
for arg, val in arguments.iteritems():
if arg[0] == "-":
varname = arg.replace("-", "") + "Flag"
else:
varname = arg.replace("<", "").replace(">", "")
if varname == "import":
varname = "iimport"
if isinstance(val, str) or isinstance(val, unicode):
exec(varname + " = '%s'" % (val,))
else:
exec(varname + " = %s" % (val,))
if arg == "--dbConn":
dbConn = val
log.debug( % (varname, val,))
startTime = times.get_now_sql_datetime()
log.info(
%
(startTime,))
if init:
from . import workspace
ws = workspace(
log=log,
pathToWorkspace=pathToWorkspace
)
ws.setup()
return
(allSettings,
programSettings,
limitingMags,
sampleNumber,
peakMagnitudeDistributions,
explosionDaysFromSettings,
extendLightCurveTail,
relativeSNRates,
lowerRedshiftLimit,
upperRedshiftLimit,
redshiftResolution,
restFrameFilter,
kCorrectionTemporalResolution,
kCorPolyOrder,
kCorMinimumDataPoints,
extinctionType,
extinctionConstant,
hostExtinctionDistributions,
galacticExtinctionDistribution,
surveyCadenceSettings,
snLightCurves,
surveyArea,
CCSNRateFraction,
transientToCCSNRateFraction,
extraSurveyConstraints,
lightCurvePolyOrder,
logLevel) = cu.read_in_survey_parameters(
log,
pathToSettingsFile=pathToSettingsFile
)
logFilePath = pathToOutputDirectory + "/qubits.log"
del log
log = _set_up_command_line_tool(
level=str(logLevel),
logFilePath=logFilePath
)
startTime = dcu.get_now_sql_datetime()
log.info( % (startTime,))
resultsDict = {}
pathToOutputPlotDirectory = pathToOutputDirectory + "/plots/"
dcu.dryx_mkdir(
log,
directoryPath=pathToOutputPlotDirectory
)
pathToResultsFolder = pathToOutputDirectory + "/results/"
dcu.dryx_mkdir(
log,
directoryPath=pathToResultsFolder
)
if not programSettings[] and not programSettings[] and not programSettings[] and not programSettings[]:
print "All stages of the simulatation have been switched off. Please switch on at least one stage of the simulation under the in the settings file `%(pathToSettingsFile)s`" % locals()
if programSettings[]:
log.info()
dg.generate_model_lightcurves(
log=log,
pathToSpectralDatabase=pathToSpectralDatabase,
pathToOutputDirectory=pathToOutputDirectory,
pathToOutputPlotDirectory=pathToOutputPlotDirectory,
explosionDaysFromSettings=explosionDaysFromSettings,
extendLightCurveTail=extendLightCurveTail,
polyOrder=lightCurvePolyOrder
)
print "The lightcurve file can be found here: %(pathToOutputDirectory)stransient_light_curves.yaml" % locals()
print "The lightcurve plots can be found in %(pathToOutputPlotDirectory)s" % locals()
if programSettings[]:
log.info()
dg.generate_kcorrection_listing_database(
log,
pathToOutputDirectory=pathToOutputDirectory,
pathToSpectralDatabase=pathToSpectralDatabase,
restFrameFilter=restFrameFilter,
temporalResolution=kCorrectionTemporalResolution,
redshiftResolution=redshiftResolution,
redshiftLower=lowerRedshiftLimit,
redshiftUpper=upperRedshiftLimit + redshiftResolution)
log.info()
dg.generate_kcorrection_polynomial_database(
log,
pathToOutputDirectory=pathToOutputDirectory,
restFrameFilter=restFrameFilter,
kCorPolyOrder=kCorPolyOrder,
kCorMinimumDataPoints=kCorMinimumDataPoints,
redshiftResolution=redshiftResolution,
redshiftLower=lowerRedshiftLimit,
redshiftUpper=upperRedshiftLimit + redshiftResolution,
plot=programSettings[])
print "The k-correction database has been generated here: %(pathToOutputDirectory)sk_corrections" % locals()
if programSettings[]:
print "The k-correction polynomial plots can also be found in %(pathToOutputDirectory)sk_corrections" % locals()
if programSettings[]:
log.info()
redshiftArray = u.random_redshift_array(
log,
sampleNumber,
lowerRedshiftLimit,
upperRedshiftLimit,
redshiftResolution=redshiftResolution,
pathToOutputPlotDirectory=pathToOutputPlotDirectory,
plot=programSettings[])
resultsDict[] = redshiftArray.tolist()
log.info()
snTypesArray = u.random_sn_types_array(
log,
sampleNumber,
relativeSNRates,
pathToOutputPlotDirectory=pathToOutputPlotDirectory,
plot=programSettings[])
resultsDict[] = snTypesArray.tolist()
log.info()
peakMagnitudesArray = u.random_peak_magnitudes(
log,
peakMagnitudeDistributions,
snTypesArray,
plot=programSettings[])
log.info()
hostExtinctionArray = u.random_host_extinction(
log,
sampleNumber,
extinctionType,
extinctionConstant,
hostExtinctionDistributions,
plot=programSettings[])
log.info()
galacticExtinctionArray = u.random_galactic_extinction(
log,
sampleNumber,
extinctionType,
extinctionConstant,
galacticExtinctionDistribution,
plot=programSettings[])
log.info()
rawLightCurveDict = u.generate_numpy_polynomial_lightcurves(
log,
snLightCurves=snLightCurves,
pathToOutputDirectory=pathToOutputDirectory,
pathToOutputPlotDirectory=pathToOutputPlotDirectory,
plot=programSettings[])
log.info()
kCorrectionArray = u.build_kcorrection_array(
log,
redshiftArray,
snTypesArray,
snLightCurves,
pathToOutputDirectory=pathToOutputDirectory,
plot=programSettings[])
log.info()
observedFrameLightCurveInfo, peakAppMagList = u.convert_lightcurves_to_observered_frame(
log,
snLightCurves=snLightCurves,
rawLightCurveDict=rawLightCurveDict,
redshiftArray=redshiftArray,
snTypesArray=snTypesArray,
peakMagnitudesArray=peakMagnitudesArray,
kCorrectionArray=kCorrectionArray,
hostExtinctionArray=hostExtinctionArray,
galacticExtinctionArray=galacticExtinctionArray,
restFrameFilter=restFrameFilter,
pathToOutputDirectory=pathToOutputDirectory,
pathToOutputPlotDirectory=pathToOutputPlotDirectory,
polyOrder=lightCurvePolyOrder,
plot=programSettings[])
log.info()
cadenceDictionary = ss.survey_cadence_arrays(
log,
surveyCadenceSettings,
pathToOutputDirectory=pathToOutputDirectory,
pathToOutputPlotDirectory=pathToOutputPlotDirectory,
plot=programSettings[])
log.info()
discoverableList = ss.determine_if_sne_are_discoverable(
log,
redshiftArray=redshiftArray,
limitingMags=limitingMags,
observedFrameLightCurveInfo=observedFrameLightCurveInfo,
pathToOutputDirectory=pathToOutputDirectory,
pathToOutputPlotDirectory=pathToOutputPlotDirectory,
plot=programSettings[])
log.info(
)
ripeDayList = ss.determine_when_sne_are_ripe_for_discovery(
log,
redshiftArray=redshiftArray,
limitingMags=limitingMags,
discoverableList=discoverableList,
observedFrameLightCurveInfo=observedFrameLightCurveInfo,
plot=programSettings[])
log.info()
lightCurveDiscoveryDayList, surveyDiscoveryDayList, snCampaignLengthList = ss.determine_if_sne_are_discovered(
log,
limitingMags=limitingMags,
ripeDayList=ripeDayList,
cadenceDictionary=cadenceDictionary,
observedFrameLightCurveInfo=observedFrameLightCurveInfo,
extraSurveyConstraints=extraSurveyConstraints,
plot=programSettings[])
resultsDict[
] = lightCurveDiscoveryDayList
resultsDict[
] = surveyDiscoveryDayList
resultsDict[] = snCampaignLengthList
resultsDict[] = cadenceDictionary
resultsDict[] = peakAppMagList
now = datetime.now()
now = now.strftime("%Y%m%dt%H%M%S")
fileName = pathToOutputDirectory + \
"/simulation_results_%s.yaml" % (now,)
stream = file(fileName, 'w')
yamlContent = dict(allSettings.items() + resultsDict.items())
yaml.dump(yamlContent, stream, default_flow_style=False)
stream.close()
print "The simulation output file can be found here: %(fileName)s. Remember to update your settings file parameter with this filename before compiling the results." % locals()
if programSettings[]:
print "The simulation helper-plots found in %(pathToOutputPlotDirectory)s" % locals()
if programSettings[]:
pathToYamlFile = pathToOutputDirectory + "/" + \
programSettings[]
result_log = r.log_the_survey_settings(log, pathToYamlFile)
snSurveyDiscoveryTimes, lightCurveDiscoveryTimes, snTypes, redshifts, cadenceDictionary, peakAppMagList, snCampaignLengthList = r.import_results(
log, pathToYamlFile)
snRatePlotLink, totalRate, tooFaintRate, shortCampaignRate = r.determine_sn_rate(
log,
lightCurveDiscoveryTimes,
snSurveyDiscoveryTimes,
redshifts,
surveyCadenceSettings=surveyCadenceSettings,
lowerRedshiftLimit=lowerRedshiftLimit,
upperRedshiftLimit=upperRedshiftLimit,
redshiftResolution=redshiftResolution,
surveyArea=surveyArea,
CCSNRateFraction=CCSNRateFraction,
transientToCCSNRateFraction=transientToCCSNRateFraction,
peakAppMagList=peakAppMagList,
snCampaignLengthList=snCampaignLengthList,
extraSurveyConstraints=extraSurveyConstraints,
pathToOutputPlotFolder=pathToOutputPlotDirectory)
result_log += % (totalRate, tooFaintRate, shortCampaignRate, extraSurveyConstraints["Observable for at least ? number of days"])
cadenceWheelLink = r.plot_cadence_wheel(
log,
cadenceDictionary,
pathToOutputPlotFolder=pathToOutputPlotDirectory)
result_log += % (cadenceWheelLink,)
discoveryMapLink = r.plot_sn_discovery_map(
log,
snSurveyDiscoveryTimes,
peakAppMagList,
snCampaignLengthList,
redshifts,
extraSurveyConstraints,
pathToOutputPlotFolder=pathToOutputPlotDirectory)
result_log += % (discoveryMapLink,)
ratioMapLink = r.plot_sn_discovery_ratio_map(
log,
snSurveyDiscoveryTimes,
redshifts,
peakAppMagList,
snCampaignLengthList,
extraSurveyConstraints,
pathToOutputPlotFolder=pathToOutputPlotDirectory)
result_log += % (ratioMapLink,)
result_log += % (snRatePlotLink,)
now = datetime.now()
now = now.strftime("%Y%m%dt%H%M%S")
mdLogPath = pathToResultsFolder + \
"simulation_result_log_%s.md" % (now,)
mdLog = open(mdLogPath, 'w')
mdLog.write(result_log)
mdLog.close()
dmd.convert_to_html(
log=log,
pathToMMDFile=mdLogPath,
css="amblin"
)
print "Results can be found here: %(pathToResultsFolder)s" % locals()
html = mdLogPath.replace(".md", ".html")
print "Open this file in your browser: %(html)s" % locals()
if "dbConn" in locals() and dbConn:
dbConn.commit()
dbConn.close()
endTime = times.get_now_sql_datetime()
runningTime = times.calculate_time_difference(startTime, endTime)
log.info( %
(endTime, runningTime, ))
return | *The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command* |
11,236 | def str_eta(self):
if self.done:
return eta_hms(self._eta.elapsed, always_show_hours=True, hours_leading_zero=True)
if not self._eta_string:
return
return .format(self._eta_string) | Returns a formatted ETA value for the progress bar. |
11,237 | def convert_zero_consonant(pinyin):
if pinyin.startswith('y'):
no_y_py = pinyin[1:]
first_char = no_y_py[0] if len(no_y_py) > 0 else None
if first_char in U_TONES:
pinyin = UV_MAP[first_char] + pinyin[2:]
elif first_char in I_TONES:
pinyin = no_y_py
else:
pinyin = 'i' + no_y_py
return pinyin
if pinyin.startswith('w'):
no_w_py = pinyin[1:]
first_char = no_w_py[0] if len(no_w_py) > 0 else None
if first_char in U_TONES:
pinyin = pinyin[1:]
else:
pinyin = 'u' + pinyin[1:]
return pinyin
return pinyin | Zero-initial conversion: restore the original final (vowel part).
For finals in the i-row, when no initial precedes them, they are written as yi(衣), ya(呀), ye(耶), yao(腰),
you(忧), yan(烟), yin(因), yang(央), ying(英), yong(雍).
For finals in the u-row, when no initial precedes them, they are written as wu(乌), wa(蛙), wo(窝), wai(歪),
wei(威), wan(弯), wen(温), wang(汪), weng(翁).
For finals in the ü-row, when no initial precedes them, they are written as yu(迂), yue(约), yuan(冤),
yun(晕); the two dots over ü are omitted. |
11,238 | def as_tuple(ireq):
if not is_pinned_requirement(ireq):
raise TypeError("Expected a pinned InstallRequirement, got {}".format(ireq))
name = key_from_req(ireq.req)
version = first(ireq.specifier._specs)._spec[1]
extras = tuple(sorted(ireq.extras))
return name, version, extras | Pulls out the (name: str, version:str, extras:(str)) tuple from the pinned InstallRequirement. |
11,239 | def clusterStatus(self):
servers = yield self.getClusterServers()
d = {
: {},
: {},
: {}
}
now = time.time()
reverse_map = {}
for sname in servers:
last = yield self.get( % sname)
status = yield self.get( % sname)
uuid = yield self.get( % sname)
reverse_map[uuid] = sname
if not last:
last = 0
last = float(last)
if (status == ) and (now - last > 5):
status =
if not sname in d[]:
d[][sname] = []
d[][sname].append({
: last,
: status,
: uuid
})
crons = yield self.keys()
for key in crons:
segments = key.split()
queue = segments[2]
if queue not in d[]:
d[][queue] = {: {}}
if len(segments)==4:
last = yield self.get(key)
d[][queue][][segments[3]] = float(last)
else:
uid = yield self.get(key)
d[][queue][] = % (uid, reverse_map[uid])
queue_keys = yield self.keys()
for key in queue_keys:
qname = key.split()[2]
if qname not in d[]:
qlen = yield self.queueSize(qname)
stats = yield self.getQueueMessageStats(qname)
d[][qname] = {
: qlen,
: stats
}
defer.returnValue(d) | Returns a dict of cluster nodes and their status information |
11,240 | def calcKYratioDifference(self):
KYratioSim = np.mean(np.array(self.KtoYnow_hist)[self.ignore_periods:])
diff = KYratioSim - self.KYratioTarget
return diff | Returns the difference between the simulated capital to income ratio and the target ratio.
Can only be run after solving all AgentTypes and running makeHistory.
Parameters
----------
None
Returns
-------
diff : float
Difference between simulated and target capital to income ratio. |
11,241 | def SunLongitude(jdn):
T = (jdn - 2451545.0) / 36525.
T2 = T * T
dr = math.pi / 180.
M = 357.52910 + 35999.05030 * T \
- 0.0001559 * T2 - 0.00000048 * T * T2
L0 = 280.46645 + 36000.76983 * T + 0.0003032 * T2
DL = (1.914600 - 0.004817 * T - 0.000014 * T2) \
* math.sin(dr * M)
DL += (0.019993 - 0.000101 * T) * math.sin(dr * 2 * M) \
+ 0.000290 * math.sin(dr * 3 * M)
L = L0 + DL
L = L * dr
L = L - math.pi * 2 * (int(L / (math.pi * 2)))  # normalize to [0, 2*pi)
return L | def SunLongitude(jdn): Compute the longitude of the sun at any time.
Parameter: floating number jdn, the number of days since 1/1/4713 BC noon. |
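Usage sketch, assuming SunLongitude is in scope and the normalization on the final line uses int() as in the standard algorithm:
import math

L = SunLongitude(2451545.0)       # J2000.0 epoch (noon, 1 Jan 2000)
print(math.degrees(L))            # roughly 280.4 degrees at this epoch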
11,242 | def _parse_stsd(self, atom, fileobj):
assert atom.name == b"stsd"
ok, data = atom.read(fileobj)
if not ok:
raise MP4StreamInfoError("Invalid stsd")
try:
version, flags, data = parse_full_atom(data)
except ValueError as e:
raise MP4StreamInfoError(e)
if version != 0:
raise MP4StreamInfoError("Unsupported stsd version")
try:
num_entries, offset = cdata.uint32_be_from(data, 0)
except cdata.error as e:
raise MP4StreamInfoError(e)
if num_entries == 0:
return
entry_fileobj = cBytesIO(data[offset:])
try:
entry_atom = Atom(entry_fileobj)
except AtomError as e:
raise MP4StreamInfoError(e)
try:
entry = AudioSampleEntry(entry_atom, entry_fileobj)
except ASEntryError as e:
raise MP4StreamInfoError(e)
else:
self.channels = entry.channels
self.bits_per_sample = entry.sample_size
self.sample_rate = entry.sample_rate
self.bitrate = entry.bitrate
self.codec = entry.codec
self.codec_description = entry.codec_description | Sets channels, bits_per_sample, sample_rate and optionally bitrate.
Can raise MP4StreamInfoError. |
11,243 | def PrintFieldValue(self, field, value):
out = self.out
if self.pointy_brackets:
openb = '<'
closeb = '>'
else:
openb = '{'
closeb = '}'
if field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_MESSAGE:
if self.as_one_line:
out.write( % openb)
self.PrintMessage(value)
out.write(closeb)
else:
out.write( % openb)
self.indent += 2
self.PrintMessage(value)
self.indent -= 2
out.write(' ' * self.indent + closeb)
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_ENUM:
enum_value = field.enum_type.values_by_number.get(value, None)
if enum_value is not None:
out.write(enum_value.name)
else:
out.write(str(value))
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_STRING:
out.write()
if isinstance(value, six.text_type):
out_value = value.encode()
else:
out_value = value
if field.type == descriptor.FieldDescriptor.TYPE_BYTES:
out_as_utf8 = False
else:
out_as_utf8 = self.as_utf8
out.write(text_encoding.CEscape(out_value, out_as_utf8))
out.write()
elif field.cpp_type == descriptor.FieldDescriptor.CPPTYPE_BOOL:
if value:
out.write('true')
else:
out.write('false')
elif field.cpp_type in _FLOAT_TYPES and self.float_format is not None:
out.write(.format(self.float_format, value))
else:
out.write(str(value)) | Print a single field value (not including name).
For repeated fields, the value should be a single element.
Args:
field: The descriptor of the field to be printed.
value: The value of the field. |
11,244 | def restart_on_change(restart_map, stopstart=False, restart_functions=None):
def wrap(f):
@functools.wraps(f)
def wrapped_f(*args, **kwargs):
return restart_on_change_helper(
(lambda: f(*args, **kwargs)), restart_map, stopstart,
restart_functions)
return wrapped_f
return wrap | Restart services based on configuration files changing
This function is used a decorator, for example::
@restart_on_change({
'/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
'/etc/apache/sites-enabled/*': [ 'apache2' ]
})
def config_changed():
pass # your code here
In this example, the cinder-api and cinder-volume services
would be restarted if /etc/ceph/ceph.conf is changed by the
ceph_client_changed function. The apache2 service would be
restarted if any file matching the pattern got changed, created
or removed. Standard wildcards are supported, see documentation
for the 'glob' module for more information.
@param restart_map: {path_file_name: [service_name, ...]
@param stopstart: DEFAULT false; whether to stop, start OR restart
@param restart_functions: nonstandard functions to use to restart services
{svc: func, ...}
@returns result from decorated function |
11,245 | def run_benchmark(monitor):
url = urlparse(monitor.cfg.test_url)
name = slugify(url.path) or
name = % (name, monitor.cfg.workers)
monitor.logger.info(, name)
total = REQUESTS//monitor.cfg.workers
with open(name, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=FIELDNAMES)
writer.writeheader()
for pool_size in POOL_SIZES:
size = pool_size//monitor.cfg.workers
if size*monitor.cfg.workers != pool_size:
monitor.logger.error(
)
monitor._loop.stop()
requests = [monitor.send(worker, , wormup, size, total) for
worker in monitor.managed_actors]
yield from wait(requests)
requests = [monitor.send(worker, , bench) for
worker in monitor.managed_actors]
results, pending = yield from wait(requests)
assert not pending,
results = [r.result() for r in results]
summary = {: pool_size}
for name in results[0]:
summary[name] = reduce(add(name), results, 0)
writer.writerow(summary)
persec = summary[]/summary[]
monitor.logger.info(
,
pool_size,
summary[],
summary[],
summary[],
persec) | Run the benchmarks |
11,246 | def get_income_in_period(self, start: datetime, end: datetime) -> Decimal:
accounts = self.get_income_accounts()
income = Decimal(0)
for acct in accounts:
acc_agg = AccountAggregate(self.book, acct)
acc_bal = acc_agg.get_balance_in_period(start, end)
income += acc_bal
return income | Returns all income in the given period |
11,247 | def FULL_TEXT(val):
if isinstance(val, float):
val = repr(val)
elif val in (None, ):
return None
elif not isinstance(val, six.string_types):
if six.PY3 and isinstance(val, bytes):
val = val.decode()
else:
val = str(val)
r = sorted(set([x for x in [s.lower().strip(string.punctuation) for s in val.split()] if x]))
if not isinstance(val, str):
return [s.encode() for s in r]
return r | This is a basic full-text index keygen function. Words are lowercased, split
by whitespace, and stripped of punctuation from both ends before an inverted
index is created for term searching. |
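A self-contained sketch of the tokenization this keygen performs on a plain string value (lowercase, split on whitespace, strip punctuation from both ends, deduplicate and sort):
import string

val = "Hello, hello WORLD!  It's an index -- of words."
terms = sorted(set(
    x for x in (s.lower().strip(string.punctuation) for s in val.split()) if x
))
print(terms)   # ['an', 'hello', 'index', "it's", 'of', 'world', 'words']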
11,248 | def _merge_update_item(self, model_item, data):
data_item = self.edit_model_schema.dump(model_item, many=False).data
for _col in self.edit_columns:
if _col not in data.keys():
data[_col] = data_item[_col]
return data | Merge a model with a python data structure
This is useful for turning a PUT method into a PATCH as well
:param model_item: SQLA Model
:param data: python data structure
:return: python data structure |
11,249 | def retrieve(self, request, project, pk=None):
log = JobLog.objects.get(id=pk)
return Response(self._log_as_dict(log)) | Returns a job_log_url object given its ID |
11,250 | def get_project_config_file(path, default_config_file_name):
_path, _config_file_path = None, None
path = os.path.abspath(path)
if os.path.isdir(path):
_path = path
_config_file_path = os.path.join(_path, default_config_file_name)
logger.debug("Using default project configuration file path: %s", _config_file_path)
elif path.endswith(".yml"):
_path = os.path.dirname(path)
_config_file_path = path
logger.debug("Using custom project configuration file path: %s", _config_file_path)
return _path, _config_file_path | Attempts to extract the project config file's absolute path from the given path. If the path is a
directory, it automatically assumes a "config.yml" file will be in that directory. If the path is to
a .yml file, it assumes that that is the root configuration file for the project. |
11,251 | def language(self):
language_item = [subtag for subtag in self.subtags if subtag.type == ]
return language_item[0] if len(language_item) > 0 else None | Get the language :class:`language_tags.Subtag.Subtag` of the tag.
:return: language :class:`language_tags.Subtag.Subtag` that is part of the tag.
The return can be None. |
11,252 | def extract_meta(self, text):
first_line = True
metadata = []
content = []
metadata_parsed = False
for line in text.split('\n'):
if first_line:
first_line = False
if line.strip() != :
raise MetaParseException()
else:
continue
if line.strip() == and not metadata_parsed:
continue
if line.strip() == and not metadata_parsed:
metadata_parsed = True
elif not metadata_parsed:
metadata.append(line)
else:
content.append(line)
content = .join(content)
try:
metadata = yaml.load(.join(metadata))
except:
raise
content = text
metadata = yaml.load()
return content, metadata | Takes input as the entire file.
Reads the first yaml document as metadata.
and the rest of the document as text |
11,253 | def _job_completed(self, job_name, success, message):
job = self._objects[job_name][Interface[]]
action = self._action_by_operation.get(job[])
if not action:
return
object_path, = job[]
device = self[object_path]
if success:
if self._check_action_success[action](device):
event_name = self._event_by_action[action]
self.trigger(event_name, device)
else:
self.trigger(, device, action, message) | Internal method.
Called when a job of a long running task completes. |
11,254 | async def wait_event(signals: Sequence[],
filter: Callable[[T_Event], bool] = None) -> T_Event:
if sys.version_info >= (3, 5, 3):
assert check_argument_types()
async with aclosing(stream_events(signals, filter)) as events:
return await events.asend(None) | Wait until any of the given signals dispatches an event that satisfies the filter (if any).
If no filter has been given, the first event dispatched from the signal is returned.
:param signals: the signals to get events from
:param filter: a callable that takes an event object as an argument and returns ``True`` if
the event should pass, ``False`` if not
:return: the event that was dispatched |
11,255 | def _adjust_beforenext(self, real_wave_mfcc, algo_parameters):
def new_time(nsi):
delay = max(algo_parameters[0], TimeValue("0.000"))
return max(nsi.end - delay, nsi.begin)
self.log(u"Called _adjust_beforenext")
self._adjust_on_nonspeech(real_wave_mfcc, new_time) | BEFORENEXT |
11,256 | def parseprint(code, filename="<string>", mode="exec", **kwargs):
node = parse(code, mode=mode)
print(dump(node, **kwargs)) | Parse some code from a string and pretty-print it. |
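Usage sketch; parseprint relies on the module's own parse()/dump() helpers, which are assumed here to wrap the standard library ast module, so the call is roughly equivalent to the two-liner below.
parseprint("x = a + 1")

# roughly the same as:
import ast
print(ast.dump(ast.parse("x = a + 1")))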
11,257 | def _run(self):
up_cnt = 0
down_cnt = 0
check_state =
for key, value in self.config.items():
self.log.debug("%s=%s:%s", key, value, type(value))
if self._check_disabled():
return
if self.splay_startup is not None:
sleep_time = float("%.3f" % random.uniform(0, self.splay_startup))
self.log.info("delaying startup for %ssecs", sleep_time)
time.sleep(sleep_time)
interval = self.config[]
start_offset = time.time() % interval
while True:
timestamp = time.time()
if not self._ip_assigned():
up_cnt = 0
self.extra[] =
self.log.warning("status DOWN because %s isnDOWNDOWNcheck_risestatusupUPUPcheck_rises a BUG!",
up_cnt,
extra=self.extra)
down_cnt = 0
else:
if down_cnt == (self.config[] - 1):
self.extra[] =
self.log.info("status DOWN", extra=self.extra)
if check_state != :
check_state =
self.log.info("adding %s in the queue",
self.ip_with_prefixlen,
extra=self.extra)
self.action.put(self.del_operation)
elif down_cnt < self.config[]:
down_cnt += 1
self.log.info("going down %s", down_cnt, extra=self.extra)
else:
self.log.error("up_cnt is higher %s, it's a BUG!",
up_cnt,
extra=self.extra)
up_cnt = 0
self.log.info("wall clock time %.3fms",
(time.time() - timestamp) * 1000,
extra=self.extra)
sleep = start_offset - time.time() % interval
if sleep < 0:
sleep += interval
self.log.debug("sleeping for %.3fsecs", sleep, extra=self.extra)
time.sleep(sleep) | Discovers the health of a service.
Runs until it is being killed from main program and is responsible to
put an item into the queue based on the status of the health check.
The status of service is consider UP after a number of consecutive
successful health checks, in that case it asks main program to add the
IP prefix associated with service to BIRD configuration, otherwise ask
for a removal.
Rise and fail options prevent unnecessary configuration changes when
check is flapping. |
11,258 | def calc_mean_time(timepoints, weights):
timepoints = numpy.array(timepoints)
weights = numpy.array(weights)
validtools.test_equal_shape(timepoints=timepoints, weights=weights)
validtools.test_non_negative(weights=weights)
return numpy.dot(timepoints, weights)/numpy.sum(weights) | Return the weighted mean of the given timepoints.
With equal given weights, the result is simply the mean of the given
time points:
>>> from hydpy import calc_mean_time
>>> calc_mean_time(timepoints=[3., 7.],
... weights=[2., 2.])
5.0
With different weights, the resulting mean time is shifted to the larger
ones:
>>> calc_mean_time(timepoints=[3., 7.],
... weights=[1., 3.])
6.0
Or, in the most extreme case:
>>> calc_mean_time(timepoints=[3., 7.],
... weights=[0., 4.])
7.0
There will be some checks for input plausibility perfomed, e.g.:
>>> calc_mean_time(timepoints=[3., 7.],
... weights=[-2., 2.])
Traceback (most recent call last):
...
ValueError: While trying to calculate the weighted mean time, \
the following error occurred: For the following objects, at least \
one value is negative: weights. |
11,259 | def split(self, file):
with open(file, 'rb') as f:
for record in sagemaker.amazon.common.read_recordio(f):
yield record | Split a file into records using a specific strategy
This RecordIOSplitter splits the data into individual RecordIO records.
Args:
file (str): path to the file to split
Returns: generator for the individual records that were split from the file |
11,260 | def publish():
print("To be ready for release, remember:")
print(" 1) Update the version number (and associated test)")
print(" 2) Update the ChangeLog.rst (and other documentation)")
print(
" ChangeLog should have an line (title) consisting of the version number"
)
print(" 3) Tag Mercurial with ")
print(" 4) Push updates to BitBucket")
print(" 5) Set the RELEASE environment variable")
print(" $ export RELEASE=formic")
cmd = [
, ,
]
published_version = subprocess.check_output(cmd).strip()
our_version = open(os.path.join("formic", "VERSION.txt"), "r").read()
if "\n" in published_version or\
len(published_version) < 3 or len(published_version) > 10:
raise Exception(
"Published version number seems weird: " + published_version)
print("Published version:", published_version)
print("Current version: ", our_version)
if our_version == published_version:
raise Exception(
"You are attempting to republish version " + our_version)
changelog = open("CHANGELOG.rst", "r")
found = False
for line in changelog.readlines():
if line.strip() == our_version:
print("ChangeLog has an entry")
found = True
break
changelog.close()
if not found:
raise Exception(
"The ChangeLog does not appear to include comments on this release"
)
tags = subprocess.check_output("hg tags".split())
found = False
looking_for = "Release " + our_version
for line in tags.split("\n"):
match = re.match(r"^(.*)\s+[0-9]+:[0-9a-f]+$", line)
if match:
tag = match.group(1).strip()
if tag == looking_for:
print("Found tag", tag)
found = True
break
if not found:
raise Exception(
"Mercurial does not have the release tag: " + looking_for)
status = subprocess.check_output(["hg", "status"])
for line in status.split("\n"):
if len(line) > 0:
raise Exception("Uncommitted changes present")
try:
v = os.environ["RELEASE"]
if v != "formic":
raise KeyError()
except KeyError:
print("$RELEASE environment variable is not set")
raise
subprocess.check_call("python setup.py bdist_egg upload".split())
subprocess.check_call("python setup.py sdist upload".split()) | Publishes Formic to PyPi (don't run unless you are the maintainer) |
11,261 | def lambda_A_calc(classes, table, P, POP):
try:
result = 0
maxreference = max(list(P.values()))
length = POP
for i in classes:
col = []
for col_item in table.values():
col.append(col_item[i])
result += max(col)
result = (result - maxreference) / (length - maxreference)
return result
except Exception:
return "None" | Calculate Goodman and Kruskal's lambda A.
:param classes: confusion matrix classes
:type classes : list
:param table: confusion matrix table
:type table : dict
:param P: condition positive
:type P : dict
:param POP: population
:type POP : int
:return: Goodman and Kruskal's lambda A as float |
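A worked example on a 2x2 confusion matrix (rows = actual class, columns = predicted class), assuming lambda_A_calc is in scope:
classes = ["cat", "dog"]
table = {"cat": {"cat": 5, "dog": 1},   # 6 actual cats
         "dog": {"cat": 2, "dog": 4}}   # 6 actual dogs
P = {"cat": 6, "dog": 6}                # condition positive (row totals)
POP = 12
# column maxima: 5 (predicted cat) + 4 (predicted dog) = 9; largest row total = 6
# lambda A = (9 - 6) / (12 - 6) = 0.5
print(lambda_A_calc(classes, table, P, POP))   # 0.5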
11,262 | def handle_log_data(self, m):
if self.download_file is None:
return
if m.ofs != self.download_ofs:
self.download_file.seek(m.ofs)
self.download_ofs = m.ofs
if m.count != 0:
s = bytearray(m.data[:m.count])
self.download_file.write(s)
self.download_set.add(m.ofs // 90)
self.download_ofs += m.count
self.download_last_timestamp = time.time()
if m.count == 0 or (m.count < 90 and len(self.download_set) == 1 + (m.ofs // 90)):
dt = time.time() - self.download_start
self.download_file.close()
size = os.path.getsize(self.download_filename)
speed = size / (1000.0 * dt)
print("Finished downloading %s (%u bytes %u seconds, %.1f kbyte/sec %u retries)" % (
self.download_filename,
size,
dt, speed,
self.retries))
self.download_file = None
self.download_filename = None
self.download_set = set()
self.master.mav.log_request_end_send(self.target_system,
self.target_component)
if len(self.download_queue):
self.log_download_next() | handling incoming log data |
11,263 | def set_global_fontsize_from_fig(fig, scale=1.5):
fig_size_inch = fig.get_size_inches()
fig_size_len_geom_mean = (fig_size_inch[0] * fig_size_inch[1]) ** 0.5
rcParams[] = fig_size_len_geom_mean * scale
return rcParams[] | Set matplotlib.rcParams['font.size'] value so that
all texts on a plot would look nice in terms of fontsize.
[NOTE] The formula for the font size is:
fontsize = sqrt(fig_area) * 'scale'
where fig_area = fig_height * fig_width (in inch) |
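A worked example of the formula (assuming matplotlib and the function above are available): an 8 x 6 inch figure has a geometric-mean side length of sqrt(48) ≈ 6.93, so with the default scale of 1.5 the resulting font size is about 10.4.
import matplotlib.pyplot as plt

fig = plt.figure(figsize=(8, 6))
print(set_global_fontsize_from_fig(fig))   # ~10.4, and rcParams['font.size'] is updated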
11,264 | def pre_render(self):
self.add_styles()
self.add_scripts()
self.root.set(
, % (self.graph.width, self.graph.height)
)
if self.graph.explicit_size:
self.root.set(, str(self.graph.width))
self.root.set(, str(self.graph.height)) | Last things to do before rendering |
11,265 | def set_cpuid_leaf(self, idx, idx_sub, val_eax, val_ebx, val_ecx, val_edx):
if not isinstance(idx, baseinteger):
raise TypeError("idx can only be an instance of type baseinteger")
if not isinstance(idx_sub, baseinteger):
raise TypeError("idx_sub can only be an instance of type baseinteger")
if not isinstance(val_eax, baseinteger):
raise TypeError("val_eax can only be an instance of type baseinteger")
if not isinstance(val_ebx, baseinteger):
raise TypeError("val_ebx can only be an instance of type baseinteger")
if not isinstance(val_ecx, baseinteger):
raise TypeError("val_ecx can only be an instance of type baseinteger")
if not isinstance(val_edx, baseinteger):
raise TypeError("val_edx can only be an instance of type baseinteger")
self._call("setCPUIDLeaf",
in_p=[idx, idx_sub, val_eax, val_ebx, val_ecx, val_edx]) | Sets the virtual CPU cpuid information for the specified leaf. Note that these values
are not passed unmodified. VirtualBox clears features that it doesn't support.
Currently supported index values for cpuid:
Standard CPUID leaves: 0 - 0x1f
Extended CPUID leaves: 0x80000000 - 0x8000001f
VIA CPUID leaves: 0xc0000000 - 0xc000000f
The subleaf index is only applicable to certain leaves (see manuals as this is
subject to change).
See the Intel, AMD and VIA programmer's manuals for detailed information
about the cpuid instruction and its leaves.
Do not use this method unless you know exactly what you're doing. Misuse can lead to
random crashes inside VMs.
in idx of type int
CPUID leaf index.
in idx_sub of type int
CPUID leaf sub-index (ECX). Set to 0xffffffff (or 0) if not applicable.
The 0xffffffff causes it to remove all other subleaves before adding one
with sub-index 0.
in val_eax of type int
CPUID leaf value for register eax.
in val_ebx of type int
CPUID leaf value for register ebx.
in val_ecx of type int
CPUID leaf value for register ecx.
in val_edx of type int
CPUID leaf value for register edx.
raises :class:`OleErrorInvalidarg`
Invalid index. |
11,266 | def get_monitor(self, topics):
for monitor in self.get_monitors(MON_TOPIC_ATTR == ",".join(topics)):
return monitor
return None | Attempts to find a Monitor in device cloud that matches the provided topics
:param topics: a string list of topics (e.g. ``['DeviceCore[U]', 'FileDataCore'])``)
Returns a :class:`DeviceCloudMonitor` if found, otherwise None. |
11,267 | def beholder_ng(func):
@functools.wraps(func)
def behold(file, length, *args, **kwargs):
seek_cur = file.tell()
try:
return func(file, length, *args, **kwargs)
except Exception:
from pcapkit.protocols.raw import Raw
error = traceback.format_exc(limit=1).strip().split(os.linesep)[-1]
file.seek(seek_cur, os.SEEK_SET)
next_ = Raw(file, length, error=error)
return next_
return behold | Behold analysis procedure. |
11,268 | def _transpose_chars(text, pos):
if len(text) < 2 or pos == 0:
return text, pos
if pos == len(text):
return text[:pos - 2] + text[pos - 1] + text[pos - 2], pos
return text[:pos - 1] + text[pos] + text[pos - 1] + text[pos + 1:], pos + 1 | Drag the character before pos forward over the character at pos,
moving pos forward as well. If pos is at the end of text, then this
transposes the two characters before pos. |
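Usage sketch (the helper is a pure function, so it can be exercised directly, assuming it is in scope):
print(_transpose_chars("abcd", 2))   # ('acbd', 3): 'b' dragged over 'c', cursor moves right
print(_transpose_chars("abcd", 4))   # ('abdc', 4): at end of text, the last two chars swap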
11,269 | def _getLayer(self, name, **kwargs):
for layer in self.layers:
if layer.name == name:
return layer | This is the environment implementation of
:meth:`BaseFont.getLayer`. **name** will
be a :ref:`type-string`. It will have been
normalized with :func:`normalizers.normalizeLayerName`
and it will have been verified as an existing layer.
This must return an instance of :class:`BaseLayer`.
Subclasses may override this method. |
11,270 | def next(self):
try:
child = self.children[self.pos]
self.pos += 1
return child
except:
raise StopIteration() | Get the next child.
@return: The next child.
@rtype: L{Element}
@raise StopIterator: At the end. |
11,271 | def sync(self, command, arguments, tags=None, id=None):
response = self.raw(command, arguments, tags=tags, id=id)
result = response.get()
if result.state != 'SUCCESS':
raise ResultError(msg='%s' % result.data, code=result.code)
return result | Same as self.raw except it does a response.get(), waiting for the command execution to finish, and reads the result
:param command: Command name to execute supported by the node (ex: core.system, info.cpu, etc...)
check documentation for list of built in commands
:param arguments: A dict of required command arguments depends on the command name.
:param tags: job tags
:param id: job id. Generated if not supplied
:return: Result object |
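A hedged usage sketch (the client object and the command payloads are illustrative, not from the source):
# 'client' is assumed to be an instance of the class defining sync().
result = client.sync('core.ping', {})   # blocks until the job finishes
print(result.state, result.data)
try:
    client.sync('core.system', {'name': 'ls', 'args': ['/nonexistent']})
except ResultError as err:               # raised when the job did not succeed
    print('job failed:', err)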
11,272 | def get_paying_proxy_contract(w3: Web3, address=None):
return w3.eth.contract(address,
abi=PAYING_PROXY_INTERFACE['abi'],
bytecode=PAYING_PROXY_INTERFACE['bytecode']) | Get Paying Proxy Contract. This should be used just for contract creation/changing master_copy
If you want to call Safe methods you should use `get_safe_contract` with the Proxy address,
so you can access every method of the Safe
:param w3: Web3 instance
:param address: address of the proxy contract
:return: Paying Proxy Contract |
11,273 | def remove_metadata_key(self, container, key):
meta_dict = {key: ""}
return self.set_metadata(container, meta_dict) | Removes the specified key from the container's metadata. If the key
does not exist in the metadata, nothing is done. |
11,274 | def _kvmatrix2d(km,vm):
d = {}
kmwfs = get_kmwfs(km)
vmwfs = elel.get_wfs(vm)
lngth = vmwfs.__len__()
for i in range(0,lngth):
value = elel.getitem_via_pathlist(vm,vmwfs[i])
cond = elel.is_leaf(value)
if(cond):
_setitem_via_pathlist(d,kmwfs[i],value)
else:
_setdefault_via_pathlist(d,kmwfs[i])
return(d) | km = [[[1], [3]], [[1, 2], [3, 'a']], [[1, 2, 22]]]
show_kmatrix(km)
vm = [[[222]], ['b']]
show_vmatrix(vm)
d = _kvmatrix2d(km,vm) |
11,275 | def background_knowledge(self):
modeslist, getters = [self.mode(self.__target_predicate(), [(, self.db.target_table)], head=True)], []
determinations, types = [], []
for (table, ref_table) in self.db.connected.keys():
if ref_table == self.db.target_table:
continue
modeslist.append(
self.mode( % (table.lower(), ref_table), [(, table), (, ref_table)], recall=))
determinations.append(
% (self.__target_predicate(), table.lower(), ref_table))
types.extend(self.concept_type_def(table))
types.extend(self.concept_type_def(ref_table))
getters.extend(self.connecting_clause(table, ref_table))
for table, atts in self.db.cols.items():
for att in atts:
if att == self.db.target_att and table == self.db.target_table or \
att in self.db.fkeys[table] or att == self.db.pkeys[table]:
continue
modeslist.append(self.mode( % (table, att), [(, table), (, att.lower())], recall=))
determinations.append( % (self.__target_predicate(), table, att))
types.extend(self.constant_type_def(table, att))
getters.extend(self.attribute_clause(table, att))
return .join(self.user_settings() + modeslist + determinations + types + getters + self.dump_tables()) | Emits the background knowledge in prolog form for Aleph. |
11,276 | def _get_migrate_funcs(cls, orig_version, target_version):
direction = 1 if target_version > orig_version else -1
versions = range(orig_version, target_version + direction, direction)
transitions = recipes.pairwise(versions)
return itertools.starmap(cls._get_func, transitions) | >>> @Manager.register
... def v1_to_2(manager, doc):
... doc['foo'] = 'bar'
>>> @Manager.register
... def v2_to_1(manager, doc):
... del doc['foo']
>>> @Manager.register
... def v2_to_3(manager, doc):
... doc['foo'] = doc['foo'] + ' baz'
>>> funcs = list(Manager._get_migrate_funcs(1, 3))
>>> len(funcs)
2
>>> funcs == [v1_to_2, v2_to_3]
True
>>> funcs = list(Manager._get_migrate_funcs(2, 1))
>>> len(funcs)
1
>>> funcs == [v2_to_1]
True
>>> Manager._upgrade_funcs.clear() |
11,277 | def read_multiple(
self, points_list, *, points_per_request=1, discover_request=(None, 6)
):
if isinstance(points_list, list):
for each in points_list:
self.read_single(
each, points_per_request=1, discover_request=discover_request
)
else:
self.read_single(
points_list, points_per_request=1, discover_request=discover_request
) | Function to read points from a device using the ReadPropertyMultiple request.
Using individual ReadProperty requests can be very slow when reading a lot of data.
:param points_list: (list) a list of all point_name as str
:param points_per_request: (int) number of points in the request
Using too many points will create big requests needing segmentation.
It's better to use just enough points per request so the message will not require
segmentation.
:Example:
device.read_multiple(['point1', 'point2', 'point3'], points_per_request = 10) |
11,278 | def mgf1(mgf_seed, mask_len, hash_class=hashlib.sha1):
h_len = hash_class().digest_size
if mask_len > 0x10000:
raise ValueError('mask too long')
T = b''
for i in range(0, integer_ceil(mask_len, h_len)):
C = i2osp(i, 4)
T = T + hash_class(mgf_seed + C).digest()
return T[:mask_len] | Mask Generation Function v1 from the PKCS#1 v2.0 standard.
mgf_seed - the seed, a byte string
mask_len - the length of the mask to generate
hash_class - the digest algorithm to use, default is SHA1
Return value: a pseudo-random mask, as a byte string |
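For reference, the same construction restated with hashlib only, so it can be run without the i2osp/integer_ceil helpers (a sketch, not the library function itself):
import hashlib

def mgf1_reference(seed: bytes, mask_len: int) -> bytes:
    # T = SHA1(seed || C_0) || SHA1(seed || C_1) || ... truncated to mask_len
    h_len = hashlib.sha1().digest_size
    T = b''
    for i in range((mask_len + h_len - 1) // h_len):
        T += hashlib.sha1(seed + i.to_bytes(4, 'big')).digest()
    return T[:mask_len]

print(mgf1_reference(b'bar', 50).hex())  # deterministic pseudo-random mask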
11,279 | def get_url_kwargs(self, request_kwargs=None, **kwargs):
if not request_kwargs:
request_kwargs = getattr(self, , {})
for k in self.bundle.url_params:
if k in request_kwargs and not k in kwargs:
kwargs[k] = request_kwargs[k]
return kwargs | Get the kwargs needed to reverse this url.
:param request_kwargs: The kwargs from the current request. \
These keyword arguments are only retained if they are present \
in this bundle's known url_parameters.
:param kwargs: Keyword arguments that will always be kept. |
11,280 | def addFromTex(self,name,img,category):
texreg = self.categoriesTexBin[category].add(img)
self.categories[category][name]=texreg
target = texreg.target
texid = texreg.id
texcoords = texreg.tex_coords
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST_MIPMAP_LINEAR)
glGenerateMipmap(GL_TEXTURE_2D)
out = target,texid,texcoords
self.categoriesTexCache[category][name]=out
return out | Adds a new texture from the given image.
``img`` may be any object that supports Pyglet-style copying in the form of the ``blit_to_texture()`` method.
This can be used to add textures that come from non-file sources, e.g. Render-to-texture. |
11,281 | def regroup(self, group):
if util.config.future_deprecations:
self.param.warning(
% type(self).__name__)
new_items = [el.relabel(group=group) for el in self.data.values()]
return reduce(lambda x,y: x+y, new_items) | Deprecated method to apply new group to items.
Equivalent functionality possible using:
ViewableTree(tree.relabel(group='Group').values()) |
11,282 | def engagement_context(self):
if self._engagement_context is None:
self._engagement_context = EngagementContextList(
self._version,
flow_sid=self._solution['flow_sid'],
engagement_sid=self._solution['engagement_sid'],
)
return self._engagement_context | Access the engagement_context
:returns: twilio.rest.studio.v1.flow.engagement.engagement_context.EngagementContextList
:rtype: twilio.rest.studio.v1.flow.engagement.engagement_context.EngagementContextList |
11,283 | def drawing_end(self):
from MAVProxy.modules.mavproxy_map import mp_slipmap
if self.draw_callback is None:
return
self.draw_callback(self.draw_line)
self.draw_callback = None
self.map.add_object(mp_slipmap.SlipDefaultPopup(self.default_popup, combine=True))
self.map.add_object(mp_slipmap.SlipClearLayer()) | end line drawing |
11,284 | def altitudes_encode(self, time_boot_ms, alt_gps, alt_imu, alt_barometric, alt_optical_flow, alt_range_finder, alt_extra):
return MAVLink_altitudes_message(time_boot_ms, alt_gps, alt_imu, alt_barometric, alt_optical_flow, alt_range_finder, alt_extra) | The altitude measured by sensors and IMU
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
alt_gps : GPS altitude in meters, expressed as * 1000 (millimeters), above MSL (int32_t)
alt_imu : IMU altitude above ground in meters, expressed as * 1000 (millimeters) (int32_t)
alt_barometric : barometeric altitude above ground in meters, expressed as * 1000 (millimeters) (int32_t)
alt_optical_flow : Optical flow altitude above ground in meters, expressed as * 1000 (millimeters) (int32_t)
alt_range_finder : Rangefinder Altitude above ground in meters, expressed as * 1000 (millimeters) (int32_t)
alt_extra : Extra altitude above ground in meters, expressed as * 1000 (millimeters) (int32_t) |
11,285 | def apply_filter(self, expr, value):
if self.skip(value):
return expr
if not self._valid_value(value):
msg = "Invalid value {value} passed to filter {name} - ".format(
value=repr(value),
name=self.name)
if self.default is not None:
warn(msg + "defaulting to {}".format(self.default))
value = self.default
else:
warn(msg + "skipping")
return expr
return self.func(expr, value) | Returns the given expression filtered by the given value.
Args:
expr (xpath.expression.AbstractExpression): The expression to filter.
value (object): The desired value with which the expression should be filtered.
Returns:
xpath.expression.AbstractExpression: The filtered expression. |
11,286 | def GetFile(message=None, title=None, directory=None, fileName=None,
allowsMultipleSelection=False, fileTypes=None):
return dispatcher["GetFile"](message=message, title=title, directory=directory,
fileName=fileName,
allowsMultipleSelection=allowsMultipleSelection,
fileTypes=fileTypes) | A get file dialog.
Optionally a `message`, `title`, `directory`, `fileName` and
`allowsMultipleSelection` can be provided.
::
from fontParts.ui import GetFile
print(GetFile()) |
11,287 | def calculate(self, film, substrate, elasticity_tensor=None,
film_millers=None, substrate_millers=None,
ground_state_energy=0, lowest=False):
self.film = film
self.substrate = substrate
if film_millers is None:
film_millers = sorted(get_symmetrically_distinct_miller_indices(
self.film, self.film_max_miller))
if substrate_millers is None:
substrate_millers = sorted(
get_symmetrically_distinct_miller_indices(self.substrate,
self.substrate_max_miller))
surface_vector_sets = self.generate_surface_vectors(film_millers, substrate_millers)
for [film_vectors, substrate_vectors, film_miller, substrate_miller] in surface_vector_sets:
for match in self.zsl(film_vectors, substrate_vectors, lowest):
match['film_miller'] = film_miller
match['sub_miller'] = substrate_miller
if (elasticity_tensor is not None):
energy, strain = self.calculate_3D_elastic_energy(
film, match, elasticity_tensor, include_strain=True)
match["elastic_energy"] = energy
match["strain"] = strain
if ground_state_energy != 0:
match['total_energy'] = match.get('elastic_energy', 0) + ground_state_energy
yield match | Finds all topological matches for the substrate and calculates elastic
strain energy and total energy for the film if elasticity tensor and
ground state energy are provided:
Args:
film(Structure): conventional standard structure for the film
substrate(Structure): conventional standard structure for the
substrate
elasticity_tensor(ElasticTensor): elasticity tensor for the film
in the IEEE orientation
film_millers(array): film facets to consider in search as defined by
miller indices
substrate_millers(array): substrate facets to consider in search as
defined by miller indices
ground_state_energy(float): ground state energy for the film
lowest(bool): only consider lowest matching area for each surface |
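A hedged usage sketch of driving this generator from pymatgen (the import path follows older pymatgen releases and may have moved; the CIF file names are placeholders):
from pymatgen.core import Structure
from pymatgen.analysis.substrate_analyzer import SubstrateAnalyzer

film = Structure.from_file('film.cif')            # hypothetical inputs
substrate = Structure.from_file('substrate.cif')

analyzer = SubstrateAnalyzer()
for match in analyzer.calculate(film, substrate, lowest=True):
    print(match)   # one dict per topological match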
11,288 | def unicode_symbol(self, *, invert_color: bool = False) -> str:
symbol = self.symbol().swapcase() if invert_color else self.symbol()
return UNICODE_PIECE_SYMBOLS[symbol] | Gets the Unicode character for the piece. |
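A short usage sketch, assuming this is the python-chess Piece method:
import chess

piece = chess.Piece.from_symbol('K')              # white king
print(piece.unicode_symbol())                     # '♔'
print(piece.unicode_symbol(invert_color=True))    # '♚'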
11,289 | def authenticate_application(self, api_token, admin_token,
override=False, fetch=True):
if (self.context.has_auth_params() and not override):
raise OverrideError()
if (not api_token or not admin_token or
not self.context.authorize(,
api_token=api_token,
admin_token=admin_token)):
raise AuthUsageError(self.context, )
return self.application if fetch else True | Set credentials for Application authentication.
Important Note: Do not use Application auth on any end-user device.
Application auth provides read-access to all Users who have
authorized an Application. Use on a secure application server only.
Args:
api_token (str): Token issued to your Application through the Gem
Developer Console.
admin_token (str): Token issued to run an instance of your App
THIS IS A SECRET. TREAT IT LIKE A SECRET.
override (boolean): Replace existing Application credentials.
fetch (boolean): Return the authenticated Application.
Returns:
An Application object if `fetch` is True. |
11,290 | def create_server(self,
loop=None,
as_coroutine=False,
protocol_factory=None,
**server_config):
if loop is None:
import asyncio
loop = asyncio.get_event_loop()
if protocol_factory is None:
from growler.aio import GrowlerHTTPProtocol
protocol_factory = GrowlerHTTPProtocol.get_factory
create_server = loop.create_server(
protocol_factory(self, loop=loop),
**server_config
)
if as_coroutine:
return create_server
else:
return loop.run_until_complete(create_server) | Helper function which constructs a listening server, using the
default growler.http.protocol.Protocol which responds to this
app.
This function exists only to remove boilerplate code for
starting up a growler app when using asyncio.
Args:
as_coroutine (bool): If True, this function does not wait
for the server to be created, and only returns the
coroutine generator object returned by loop.create_server.
This mode should be used when already inside an async
function.
The default mode is to call :meth:`run_until_complete`
on the loop parameter, blocking until the server is
created and added to the event loop.
loop (BaseEventLoop): This is the asyncio event loop used
to provide the underlying `create_server` method, and,
if as_coroutine is False, will block until the server
is created.
protocol_factory (callable): Function returning an asyncio
protocol object (or more specifically, a
`growler.aio.GrowlerProtocol` object) to be called upon
client connection.
The default is the :class:`GrowlerHttpProtocol` factory
function.
**server_config (mixed): These keyword arguments are passed directly
to the BaseEventLoop.create_server function. Consult its
documentation for details.
Returns:
asyncio.Server: The result of asyncio.BaseEventLoop.create_server
which has been passed to the event loop and setup with
the provided parameters. This is returned if gen_coroutine
is False (default).
asyncio.coroutine: An asyncio.coroutine which will
produce the asyncio.Server from the provided configuration parameters.
This is returned if gen_coroutine is True. |
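A hedged start-up sketch (the top-level App import is assumed; host/port are passed straight through to BaseEventLoop.create_server):
import asyncio
from growler import App   # assumed import path

app = App('example')
loop = asyncio.get_event_loop()
server = app.create_server(loop=loop, host='127.0.0.1', port=8000)
try:
    loop.run_forever()
finally:
    server.close()
    loop.close()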
11,291 | def properties(obj, type=None, set=None):
if type and type not in ['s', 'subvol', 'f', 'filesystem', 'i', 'inode', 'd', 'device']:
raise CommandExecutionError("Unknown property type: \"{0}\" specified".format(type))
cmd = ['btrfs']
cmd.append('property')
cmd.append(set and 'set' or 'list')
if type:
cmd.append('-t{0}'.format(type))
cmd.append(obj)
if set:
try:
for key, value in [[item.strip() for item in keyset.split("=")]
for keyset in set.split(",")]:
cmd.append(key)
cmd.append(value)
except Exception as ex:
raise CommandExecutionError(ex)
out = __salt__['cmd.run_all'](' '.join(cmd))
salt.utils.fsutils._verify_run(out)
if not set:
ret = {}
for prop, descr in six.iteritems(_parse_proplist(out['stdout'])):
ret[prop] = {'description': descr}
value = __salt__['cmd.run_all'](
"btrfs property get {0} {1}".format(obj, prop))['stdout']
ret[prop]['value'] = value and value.split("=")[-1] or "N/A"
return ret | List properties for given btrfs object. The object can be path of BTRFS device,
mount point, or any directories/files inside the BTRFS filesystem.
General options:
* **type**: Possible types are s[ubvol], f[ilesystem], i[node] and d[evice].
* **force**: Force overwrite existing filesystem on the disk
* **set**: <key=value,key1=value1...> Options for a filesystem properties.
CLI Example:
.. code-block:: bash
salt '*' btrfs.properties /mountpoint
salt '*' btrfs.properties /dev/sda1 type=subvol set='ro=false,label="My Storage"' |
11,292 | def create(self, name, img_format=None, data=None, container=None,
obj=None, metadata=None):
return self._manager.create(name, img_format, data=data,
container=container, obj=obj) | Creates a new image with the specified name. The image data can either
be supplied directly in the 'data' parameter, or it can be an image
stored in the object storage service. In the case of the latter, you
can either supply the container and object names, or simply a
StorageObject reference. |
11,293 | def default_instance():
if ConfigManager._instance is None:
with threading.Lock():
if ConfigManager._instance is None:
ConfigManager._instance = ConfigManager()
return ConfigManager._instance | For use like a singleton, return the existing instance
of the object or a new instance |
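Note that threading.Lock() above is created anew on every call, so it does not actually serialize concurrent first-time construction; a common variant keeps one shared class-level lock. A minimal sketch of that variant (names are illustrative, not from the source):
import threading

class LazySingleton:
    _instance = None
    _lock = threading.Lock()              # one lock shared by all callers

    @classmethod
    def default_instance(cls):
        if cls._instance is None:         # fast path, no locking
            with cls._lock:               # only one thread constructs
                if cls._instance is None:
                    cls._instance = cls()
        return cls._instance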
11,294 | def remote_log_data_block_encode(self, target_system, target_component, seqno, data):
return MAVLink_remote_log_data_block_message(target_system, target_component, seqno, data) | Send a block of log data to remote location
target_system : System ID (uint8_t)
target_component : Component ID (uint8_t)
seqno : log data block sequence number (uint32_t)
data : log data block (uint8_t) |
11,295 | def dict_to_object(self, obj):
if not isinstance(obj, dict):
return obj
if 'sensor_id' in obj:
sensor = Sensor(obj['sensor_id'])
for key, val in obj.items():
setattr(sensor, key, val)
return sensor
if all(k in obj for k in ['id', 'type', 'description']):
child = ChildSensor(
obj['id'], obj['type'], obj.get('description', ''))
child.values = obj['values']
return child
if all(k.isdigit() for k in obj.keys()):
return {int(k): v for k, v in obj.items()}
return obj | Return object from dict. |
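This method is shaped like a json object_hook; a hedged sketch of how it would typically be wired up (the decoder class name and payload are illustrative):
import json

raw = '{"sensor_id": 1, "children": {}, "type": 17}'   # illustrative payload
decoder = MySensorsJSONDecoder()                        # hypothetical owner of dict_to_object
sensor = json.loads(raw, object_hook=decoder.dict_to_object)  # -> Sensor instance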
11,296 | def _get_generators(self):
generators = [ep.name for ep in
pkg_resources.iter_entry_points(self.group)]
return generators | Get installed banana plugins.
:return: list of installed generator names |
11,297 | def kill(self):
self.process.kill()
self.set_status(self.S_ERROR, "status set to Error by task.kill")
self._returncode = self.process.returncode | Kill the child. |
11,298 | def _manage_args(parser, args):
for item in data.CONFIGURABLE_OPTIONS:
action = parser._option_string_actions[item]
choices = default = ''
input_value = getattr(args, action.dest)
new_val = None
if not args.noinput:
if action.choices:
choices = .format(.join(action.choices))
if input_value:
if type(input_value) == list:
default = .format(.join(input_value))
else:
default = .format(input_value)
while not new_val:
prompt = .format(action.help, choices, default)
if action.choices in (, ):
new_val = utils.query_yes_no(prompt)
else:
new_val = compat.input(prompt)
new_val = compat.clean(new_val)
if not new_val and input_value:
new_val = input_value
if new_val and action.dest == :
if new_val != and not os.path.isdir(new_val):
sys.stdout.write()
new_val = False
if new_val and action.dest == :
action(parser, args, new_val, action.option_strings)
new_val = getattr(args, action.dest)
else:
if not input_value and action.required:
raise ValueError(
.format(action.dest)
)
new_val = input_value
if action.dest == :
action(parser, args, new_val, action.option_strings)
new_val = getattr(args, action.dest)
if action.dest == and (new_val == or not os.path.isdir(new_val)):
new_val = False
if action.dest in (, ):
new_val = (new_val == )
setattr(args, action.dest, new_val)
return args | Checks and validates the provided input |
11,299 | def patterson_f3(acc, aca, acb):
aca = AlleleCountsArray(aca, copy=False)
assert aca.shape[1] == 2, 'only biallelic variants supported'
acb = AlleleCountsArray(acb, copy=False)
assert acb.shape[1] == 2, 'only biallelic variants supported'
acc = AlleleCountsArray(acc, copy=False)
assert acc.shape[1] == 2, 'only biallelic variants supported'
check_dim0_aligned(aca, acb, acc)
sc = acc.sum(axis=1)
hc = h_hat(acc)
a = aca.to_frequencies()[:, 1]
b = acb.to_frequencies()[:, 1]
c = acc.to_frequencies()[:, 1]
T = ((c - a) * (c - b)) - (hc / sc)
B = 2 * hc
return T, B | Unbiased estimator for F3(C; A, B), the three-population test for
admixture in population C.
Parameters
----------
acc : array_like, int, shape (n_variants, 2)
Allele counts for the test population (C).
aca : array_like, int, shape (n_variants, 2)
Allele counts for the first source population (A).
acb : array_like, int, shape (n_variants, 2)
Allele counts for the second source population (B).
Returns
-------
T : ndarray, float, shape (n_variants,)
Un-normalized f3 estimates per variant.
B : ndarray, float, shape (n_variants,)
Estimates for heterozygosity in population C.
Notes
-----
See Patterson (2012), main text and Appendix A.
For un-normalized f3 statistics, ignore the `B` return value.
To compute the f3* statistic, which is normalized by heterozygosity in
population C to remove numerical dependence on the allele frequency
spectrum, compute ``np.sum(T) / np.sum(B)``. |
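A hedged usage sketch with toy biallelic allele counts, ending with the np.sum(T) / np.sum(B) normalization mentioned above (scikit-allel is assumed to expose this function as allel.patterson_f3):
import numpy as np
import allel

aca = np.array([[4, 0], [3, 1], [2, 2], [0, 4]])   # source population A
acb = np.array([[0, 4], [1, 3], [2, 2], [4, 0]])   # source population B
acc = np.array([[2, 2], [2, 2], [2, 2], [2, 2]])   # test population C

T, B = allel.patterson_f3(acc, aca, acb)
print(np.sum(T) / np.sum(B))   # normalized f3* statistic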