Unnamed: 0 (int64, 0–389k) | code (stringlengths 26–79.6k) | docstring (stringlengths 1–46.9k)
---|---|---|
23,200 | def configure(host=DEFAULT_HOST, port=DEFAULT_PORT, prefix=''):
global _client
logging.info("Reconfiguring metrics: {}:{}/{}".format(host, port, prefix))
_client = statsdclient.StatsdClient(host, port, prefix) | >>> configure()
>>> configure('localhost', 8125, 'mymetrics') |
23,201 | def shrunk_covariance(self, delta=0.2):
self.delta = delta
N = self.S.shape[1]
mu = np.trace(self.S) / N
F = np.identity(N) * mu
shrunk_cov = delta * F + (1 - delta) * self.S
return self.format_and_annualise(shrunk_cov) | Shrink a sample covariance matrix to the identity matrix (scaled by the average
sample variance). This method does not estimate an optimal shrinkage parameter,
it requires manual input.
:param delta: shrinkage parameter, defaults to 0.2.
:type delta: float, optional
:return: shrunk sample covariance matrix
:rtype: np.ndarray |
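A minimal standalone sketch of the same shrinkage step, using made-up numbers rather than the class above:

```python
import numpy as np

# Hypothetical 3-asset sample covariance matrix (illustrative values only).
S = np.array([[0.04, 0.01, 0.00],
              [0.01, 0.09, 0.02],
              [0.00, 0.02, 0.16]])
delta = 0.2

# Shrinkage target: identity scaled by the average sample variance.
mu = np.trace(S) / S.shape[1]
F = np.identity(S.shape[1]) * mu

shrunk = delta * F + (1 - delta) * S
print(shrunk)  # off-diagonals move toward 0, diagonals toward mu
```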
23,202 | def main():
mlperf_log.ROOT_DIR_GNMT = os.path.dirname(os.path.abspath(__file__))
mlperf_log.LOGGER.propagate = False
args = parse_args()
device = utils.set_device(args.cuda, args.local_rank)
distributed = utils.init_distributed(args.cuda)
gnmt_print(key=mlperf_log.RUN_START, sync=True)
args.rank = utils.get_rank()
if not args.cudnn:
torch.backends.cudnn.enabled = False
save_path = os.path.join(args.results_dir, args.save)
args.save_path = save_path
os.makedirs(save_path, exist_ok=True)
log_filename = f
utils.setup_logging(os.path.join(save_path, log_filename))
if args.env:
utils.log_env_info()
logging.info(f)
logging.info(f)
if args.train_global_batch_size is not None:
global_bs = args.train_global_batch_size
bs = args.train_batch_size
world_size = utils.get_world_size()
assert global_bs % (bs * world_size) == 0
args.train_iter_size = global_bs // (bs * world_size)
logging.info(f
f)
worker_seeds, shuffling_seeds = utils.setup_seeds(args.seed, args.epochs,
device)
worker_seed = worker_seeds[args.rank]
logging.info(f)
torch.manual_seed(worker_seed)
pad_vocab = utils.pad_vocabulary(args.math)
tokenizer = Tokenizer(os.path.join(args.dataset_dir, config.VOCAB_FNAME),
pad_vocab)
gnmt_print(key=mlperf_log.PREPROC_TOKENIZE_TRAINING, sync=False)
gnmt_print(key=mlperf_log.TRAIN_HP_MAX_SEQ_LEN,
value=args.max_length_train, sync=False)
train_data = LazyParallelDataset(
src_fname=os.path.join(args.dataset_dir, config.SRC_TRAIN_FNAME),
tgt_fname=os.path.join(args.dataset_dir, config.TGT_TRAIN_FNAME),
tokenizer=tokenizer,
min_len=args.min_length_train,
max_len=args.max_length_train,
sort=False,
max_size=args.max_size)
gnmt_print(key=mlperf_log.PREPROC_NUM_TRAIN_EXAMPLES,
value=len(train_data), sync=False)
val_data = ParallelDataset(
src_fname=os.path.join(args.dataset_dir, config.SRC_VAL_FNAME),
tgt_fname=os.path.join(args.dataset_dir, config.TGT_VAL_FNAME),
tokenizer=tokenizer,
min_len=args.min_length_val,
max_len=args.max_length_val,
sort=True)
gnmt_print(key=mlperf_log.PREPROC_TOKENIZE_EVAL, sync=False)
test_data = TextDataset(
src_fname=os.path.join(args.dataset_dir, config.SRC_TEST_FNAME),
tokenizer=tokenizer,
min_len=args.min_length_test,
max_len=args.max_length_test,
sort=True)
gnmt_print(key=mlperf_log.PREPROC_NUM_EVAL_EXAMPLES,
value=len(test_data), sync=False)
vocab_size = tokenizer.vocab_size
gnmt_print(key=mlperf_log.PREPROC_VOCAB_SIZE,
value=vocab_size, sync=False)
model_config = {'hidden_size': args.hidden_size,
'num_layers': args.num_layers,
'dropout': args.dropout, 'batch_first': False,
'share_embedding': args.share_embedding}
model = GNMT(vocab_size=vocab_size, **model_config)
logging.info(model)
batch_first = model.batch_first
criterion = build_criterion(vocab_size, config.PAD, args.smoothing)
opt_config = {'optimizer': args.optimizer, 'lr': args.lr}
opt_config.update(literal_eval(args.optimizer_extra))
logging.info(f)
scheduler_config = {'warmup_steps': args.warmup_steps,
'remain_steps': args.remain_steps,
'decay_interval': args.decay_interval,
'decay_steps': args.decay_steps,
'decay_factor': args.decay_factor}
logging.info(f)
num_parameters = sum([l.nelement() for l in model.parameters()])
logging.info(f)
batching_opt = {'shard_size': args.shard_size,
'num_buckets': args.num_buckets}
train_loader = train_data.get_loader(batch_size=args.train_batch_size,
seeds=shuffling_seeds,
batch_first=batch_first,
shuffle=True,
batching=args.batching,
batching_opt=batching_opt,
num_workers=args.train_loader_workers)
gnmt_print(key=mlperf_log.INPUT_BATCH_SIZE,
value=args.train_batch_size * utils.get_world_size(),
sync=False)
gnmt_print(key=mlperf_log.INPUT_SIZE,
value=train_loader.sampler.num_samples, sync=False)
val_loader = val_data.get_loader(batch_size=args.val_batch_size,
batch_first=batch_first,
shuffle=False,
num_workers=args.val_loader_workers)
test_loader = test_data.get_loader(batch_size=args.test_batch_size,
batch_first=batch_first,
shuffle=False,
pad=True,
num_workers=args.test_loader_workers)
gnmt_print(key=mlperf_log.EVAL_SIZE,
value=len(test_loader.dataset), sync=False)
translator = Translator(model=model,
tokenizer=tokenizer,
loader=test_loader,
beam_size=args.beam_size,
max_seq_len=args.max_length_test,
len_norm_factor=args.len_norm_factor,
len_norm_const=args.len_norm_const,
cov_penalty_factor=args.cov_penalty_factor,
cuda=args.cuda,
print_freq=args.print_freq,
dataset_dir=args.dataset_dir,
target_bleu=args.target_bleu,
save_path=args.save_path)
total_train_iters = len(train_loader) // args.train_iter_size * args.epochs
save_info = {'model_config': model_config, 'config': args,
'tokenizer': tokenizer.get_state()}
trainer_options = dict(
criterion=criterion,
grad_clip=args.grad_clip,
iter_size=args.train_iter_size,
save_path=save_path,
save_freq=args.save_freq,
save_info=save_info,
opt_config=opt_config,
scheduler_config=scheduler_config,
train_iterations=total_train_iters,
batch_first=batch_first,
keep_checkpoints=args.keep_checkpoints,
math=args.math,
print_freq=args.print_freq,
cuda=args.cuda,
distributed=distributed,
intra_epoch_eval=args.intra_epoch_eval,
translator=translator)
trainer_options['model'] = model
trainer = trainers.Seq2SeqTrainer(**trainer_options)
if args.resume:
checkpoint_file = args.resume
if os.path.isdir(checkpoint_file):
checkpoint_file = os.path.join(
checkpoint_file, )
if os.path.isfile(checkpoint_file):
trainer.load(checkpoint_file)
else:
logging.error(f)
best_loss = float('inf')
break_training = False
test_bleu = None
gnmt_print(key=mlperf_log.TRAIN_LOOP, sync=True)
for epoch in range(args.start_epoch, args.epochs):
logging.info(f)
gnmt_print(key=mlperf_log.TRAIN_EPOCH,
value=epoch, sync=True)
train_loader.sampler.set_epoch(epoch)
trainer.epoch = epoch
train_loss, train_perf = trainer.optimize(train_loader)
if args.eval:
logging.info(f)
val_loss, val_perf = trainer.evaluate(val_loader)
gnmt_print(key=mlperf_log.TRAIN_CHECKPOINT, sync=False)
if args.rank == 0:
is_best = val_loss < best_loss
best_loss = min(val_loss, best_loss)
trainer.save(save_all=args.save_all, is_best=is_best)
if args.eval:
gnmt_print(key=mlperf_log.EVAL_START, value=epoch, sync=True)
test_bleu, break_training = translator.run(calc_bleu=True,
epoch=epoch)
gnmt_print(key=mlperf_log.EVAL_ACCURACY,
value={"epoch": epoch, "value": round(test_bleu, 2)},
sync=False)
gnmt_print(key=mlperf_log.EVAL_TARGET,
value=args.target_bleu, sync=False)
gnmt_print(key=mlperf_log.EVAL_STOP, sync=True)
acc_log = []
acc_log += [f]
acc_log += [f]
if args.eval:
acc_log += [f]
acc_log += [f]
perf_log = []
perf_log += [f]
perf_log += [f]
if args.eval:
perf_log += [f]
if args.rank == 0:
logging.info(.join(acc_log))
logging.info(.join(perf_log))
logging.info(f)
if break_training:
break
gnmt_print(key=mlperf_log.RUN_STOP,
value={"success": bool(break_training)}, sync=True)
gnmt_print(key=mlperf_log.RUN_FINAL, sync=False) | Launches data-parallel multi-gpu training. |
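The `train_global_batch_size` check above is plain gradient-accumulation arithmetic; a sketch with hypothetical values:

```python
# Hypothetical values; the real ones come from parse_args().
train_global_batch_size = 1024
train_batch_size = 128   # per-GPU batch
world_size = 4           # number of workers

# The global batch must be divisible by (per-GPU batch * workers); whatever
# factor remains is made up with gradient-accumulation steps (train_iter_size).
assert train_global_batch_size % (train_batch_size * world_size) == 0
train_iter_size = train_global_batch_size // (train_batch_size * world_size)
print(train_iter_size)  # 2
```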
23,203 | def minimize_memory(self):
if self.__samples is None:
self.log(u"Not initialized, returning")
else:
self.log(u"Initialized, minimizing memory...")
self.preallocate_memory(self.__samples_length)
self.log(u"Initialized, minimizing memory... done") | Reduce the allocated memory to the minimum
required to store the current audio samples.
This function is meant to be called
when building a wave incrementally,
after the last append operation.
.. versionadded:: 1.5.0 |
23,204 | def _slice_take_blocks_ax0(self, slice_or_indexer, fill_tuple=None):
allow_fill = fill_tuple is not None
sl_type, slobj, sllen = _preprocess_slice_or_indexer(
slice_or_indexer, self.shape[0], allow_fill=allow_fill)
if self._is_single_block:
blk = self.blocks[0]
if sl_type in ('slice', 'mask'):
return [blk.getitem_block(slobj, new_mgr_locs=slice(0, sllen))]
elif not allow_fill or self.ndim == 1:
if allow_fill and fill_tuple[0] is None:
_, fill_value = maybe_promote(blk.dtype)
fill_tuple = (fill_value, )
return [blk.take_nd(slobj, axis=0,
new_mgr_locs=slice(0, sllen),
fill_tuple=fill_tuple)]
if sl_type in ('slice', 'mask'):
blknos = self._blknos[slobj]
blklocs = self._blklocs[slobj]
else:
blknos = algos.take_1d(self._blknos, slobj, fill_value=-1,
allow_fill=allow_fill)
blklocs = algos.take_1d(self._blklocs, slobj, fill_value=-1,
allow_fill=allow_fill)
blocks = []
for blkno, mgr_locs in libinternals.get_blkno_placements(blknos,
self.nblocks,
group=True):
if blkno == -1:
for mgr_loc in mgr_locs:
newblk = blk.copy(deep=True)
newblk.mgr_locs = slice(mgr_loc, mgr_loc + 1)
blocks.append(newblk)
else:
blocks.append(blk.take_nd(blklocs[mgr_locs.indexer],
axis=0, new_mgr_locs=mgr_locs,
fill_tuple=None))
return blocks | Slice/take blocks along axis=0.
Overloaded for SingleBlock
Returns
-------
new_blocks : list of Block |
23,205 | def serialize(self, value, greedy=True):
if greedy and not isinstance(value, Column):
value = self.normalize(value)
if isinstance(value, Column):
return value.id
else:
return value | Greedy serialization requires the value to either be a column
or convertible to a column, whereas non-greedy serialization
will pass through any string as-is and will only serialize
Column objects.
Non-greedy serialization is useful when preparing queries with
custom filters or segments. |
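A sketch of the greedy vs. non-greedy dispatch described above; `Column` and `normalize` here are simplified stand-ins, not the library's real classes:

```python
class Column:
    def __init__(self, id):
        self.id = id

def normalize(value):
    # Stand-in for the real lookup that resolves a name to a Column.
    return Column('ga:' + value)

def serialize(value, greedy=True):
    if greedy and not isinstance(value, Column):
        value = normalize(value)
    if isinstance(value, Column):
        return value.id
    return value

print(serialize('sessions'))                        # greedy: 'ga:sessions'
print(serialize('sessions', greedy=False))          # non-greedy: passes through as 'sessions'
print(serialize(Column('ga:users'), greedy=False))  # Columns always serialize: 'ga:users'
```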
23,206 | async def verifier_verify_proof(proof_request_json: str,
proof_json: str,
schemas_json: str,
credential_defs_json: str,
rev_reg_defs_json: str,
rev_regs_json: str) -> bool:
logger = logging.getLogger(__name__)
logger.debug("verifier_verify_proof: >>> proof_request_json: %r, proof_json: %r, schemas_json: %r, "
"credential_defs_jsons: %r, rev_reg_defs_json: %r, rev_regs_json: %r",
proof_request_json,
proof_json,
schemas_json,
credential_defs_json,
rev_reg_defs_json,
rev_regs_json)
if not hasattr(verifier_verify_proof, "cb"):
logger.debug("verifier_verify_proof: Creating callback")
verifier_verify_proof.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_bool))
c_proof_request_json = c_char_p(proof_request_json.encode())
c_proof_json = c_char_p(proof_json.encode())
c_schemas_json = c_char_p(schemas_json.encode())
c_credential_defs_json = c_char_p(credential_defs_json.encode())
c_rev_reg_defs_json = c_char_p(rev_reg_defs_json.encode())
c_rev_regs_json = c_char_p(rev_regs_json.encode())
res = await do_call('indy_verifier_verify_proof',
c_proof_request_json,
c_proof_json,
c_schemas_json,
c_credential_defs_json,
c_rev_reg_defs_json,
c_rev_regs_json,
verifier_verify_proof.cb)
logger.debug("verifier_verify_proof: <<< res: %r", res)
return res | Verifies a proof (of multiple credential).
All required schemas, public keys and revocation registries must be provided.
:param proof_request_json:
{
"name": string,
"version": string,
"nonce": string,
"requested_attributes": { // set of requested attributes
"<attr_referent>": <attr_info>, // see below
...,
},
"requested_predicates": { // set of requested predicates
"<predicate_referent>": <predicate_info>, // see below
...,
},
"non_revoked": Optional<<non_revoc_interval>>, // see below,
// If specified prover must proof non-revocation
// for date in this interval for each attribute
// (can be overridden on attribute level)
}
:param proof_json: created for request proof json
{
"requested_proof": {
"revealed_attrs": {
"requested_attr1_id": {sub_proof_index: number, raw: string, encoded: string},
"requested_attr4_id": {sub_proof_index: number: string, encoded: string},
},
"unrevealed_attrs": {
"requested_attr3_id": {sub_proof_index: number}
},
"self_attested_attrs": {
"requested_attr2_id": self_attested_value,
},
"requested_predicates": {
"requested_predicate_1_referent": {sub_proof_index: int},
"requested_predicate_2_referent": {sub_proof_index: int},
}
}
"proof": {
"proofs": [ <credential_proof>, <credential_proof>, <credential_proof> ],
"aggregated_proof": <aggregated_proof>
}
"identifiers": [{schema_id, cred_def_id, Optional<rev_reg_id>, Optional<timestamp>}]
}
:param schemas_json: all schema jsons participating in the proof
{
<schema1_id>: <schema1_json>,
<schema2_id>: <schema2_json>,
<schema3_id>: <schema3_json>,
}
:param credential_defs_json: all credential definitions json participating in the proof
{
"cred_def1_id": <credential_def1_json>,
"cred_def2_id": <credential_def2_json>,
"cred_def3_id": <credential_def3_json>,
}
:param rev_reg_defs_json: all revocation registry definitions json participating in the proof
{
"rev_reg_def1_id": <rev_reg_def1_json>,
"rev_reg_def2_id": <rev_reg_def2_json>,
"rev_reg_def3_id": <rev_reg_def3_json>,
}
:param rev_regs_json: all revocation registries json participating in the proof
{
"rev_reg_def1_id": {
"timestamp1": <rev_reg1>,
"timestamp2": <rev_reg2>,
},
"rev_reg_def2_id": {
"timestamp3": <rev_reg3>
},
"rev_reg_def3_id": {
"timestamp4": <rev_reg4>
},
}
:return: valid: true - if signature is valid, false - otherwise |
23,207 | def _gen_from_dircmp(dc, lpath, rpath):
left_only = dc.left_only
left_only.sort()
for f in left_only:
fp = join(dc.left, f)
if isdir(fp):
for r, _ds, fs in walk(fp):
r = relpath(r, lpath)
for f in fs:
yield(LEFT, join(r, f))
else:
yield (LEFT, relpath(fp, lpath))
right_only = dc.right_only
right_only.sort()
for f in right_only:
fp = join(dc.right, f)
if isdir(fp):
for r, _ds, fs in walk(fp):
r = relpath(r, rpath)
for f in fs:
yield(RIGHT, join(r, f))
else:
yield (RIGHT, relpath(fp, rpath))
diff_files = dc.diff_files
diff_files.sort()
for f in diff_files:
yield (DIFF, join(relpath(dc.right, rpath), f))
same_files = dc.same_files
same_files.sort()
for f in same_files:
yield (BOTH, join(relpath(dc.left, lpath), f))
subdirs = dc.subdirs.values()
subdirs = sorted(subdirs)
for sub in subdirs:
for event in _gen_from_dircmp(sub, lpath, rpath):
yield event | do the work of comparing the dircmp |
23,208 | def snake2ucamel(value):
UNDER, LETTER, OTHER = object(), object(), object()
def group_key_function(char):
if char == "_":
return UNDER
if char in string.ascii_letters:
return LETTER
return OTHER
def process_group(idx, key, chars):
if key is LETTER:
return "".join([chars[0].upper()] + chars[1:])
if key is OTHER \
or len(chars) != 1 \
or idx in [0, last] \
or LETTER not in (groups[idx-1][1], groups[idx+1][1]):
return "".join(chars)
return ""
raw_groups_gen = itertools.groupby(value, key=group_key_function)
groups = [(idx, key, list(group_gen))
for idx, (key, group_gen) in enumerate(raw_groups_gen)]
last = len(groups) - 1
return "".join(itertools.starmap(process_group, groups)) | Casts a snake_case string to an UpperCamelCase string. |
23,209 | def in_base(self, unit_system=None):
us = _sanitize_unit_system(unit_system, self)
try:
conv_data = _check_em_conversion(
self.units, unit_system=us, registry=self.units.registry
)
except MKSCGSConversionError:
raise UnitsNotReducible(self.units, us)
if any(conv_data):
to_units, (conv, offset) = _em_conversion(
self.units, conv_data, unit_system=us
)
else:
to_units = self.units.get_base_equivalent(unit_system)
conv, offset = self.units.get_conversion_factor(to_units, self.dtype)
new_dtype = np.dtype("f" + str(self.dtype.itemsize))
conv = new_dtype.type(conv)
ret = self.v * conv
if offset:
ret = ret - offset
return type(self)(ret, to_units) | Creates a copy of this array with the data in the specified unit
system, and returns it in that system's base units.
Parameters
----------
unit_system : string, optional
The unit system to be used in the conversion. If not specified,
the configured default base units are used (defaults to MKS).
Examples
--------
>>> from unyt import erg, s
>>> E = 2.5*erg/s
>>> print(E.in_base("mks"))
2.5e-07 W |
23,210 | def list_container_processes(container):
cmd = [, , , .join(PsRow.columns())]
ps_lines = output_lines(container.exec_run(cmd))
header = ps_lines.pop(0)
ps_rows = [row for row in ps_rows if row.args != cmd_string]
return ps_rows | List the processes running inside a container.
We use an exec rather than `container.top()` because we want to run 'ps'
inside the container. This is because we want to get PIDs and usernames in
the container's namespaces. `container.top()` uses 'ps' from outside the
container in the host's namespaces. Note that this requires the container
to have a 'ps' that responds to the arguments we give it-- we use BusyBox's
(Alpine's) 'ps' as a baseline for available functionality.
:param container: the container to query
:return: a list of PsRow objects |
23,211 | def output_extras(self, output_file):
output_directory = dirname(output_file)
def local_path(name):
return join(output_directory, self.path_helper.local_name(name))
files_directory = "%s_files%s" % (basename(output_file)[0:-len(".dat")], self.path_helper.separator)
names = filter(lambda o: o.startswith(files_directory), self.output_directory_contents)
return dict(map(lambda name: (local_path(name), name), names)) | Returns dict mapping local path to remote name. |
23,212 | def parse_value(parser, event, node):
value = ''
(next_event, next_node) = six.next(parser)
if next_event == pulldom.CHARACTERS:
value = next_node.nodeValue
(next_event, next_node) = six.next(parser)
if not _is_end(next_event, next_node, 'VALUE'):
raise ParseError()
return value | Parse CIM/XML VALUE element and return the value |
23,213 | def _apply_backwards_compatibility(df):
df.row_count = types.MethodType(lambda self: len(self.index), df)
df.col_count = types.MethodType(lambda self: len(self.columns), df)
df.dataframe = df | Attach properties to the Dataframe to make it backwards compatible with older versions of this library
:param df: The dataframe to be modified |
23,214 | def selectrangeopenleft(table, field, minv, maxv, complement=False):
minv = Comparable(minv)
maxv = Comparable(maxv)
return select(table, field, lambda v: minv <= v < maxv,
complement=complement) | Select rows where the given field is greater than or equal to `minv` and
less than `maxv`. |
23,215 | def pull_screenrecord(self, bit_rate: int = 5000000, time_limit: int = 180, remote: _PATH = , local: _PATH = ) -> None:
self.screenrecord(bit_rate, time_limit, filename=remote)
self.pull(remote, local) | Recording the display of devices running Android 4.4 (API level 19) and higher. Then copy it to your computer.
Args:
bit_rate: You can increase the bit rate to improve video quality, but doing so results in larger movie files.
time_limit: Sets the maximum recording time, in seconds, and the maximum value is 180 (3 minutes). |
23,216 | def from_dict(data, ctx):
data = data.copy()
if data.get('bid') is not None:
data['bid'] = \
ctx.instrument.CandlestickData.from_dict(
data['bid'], ctx
)
if data.get('mid') is not None:
data['mid'] = \
ctx.instrument.CandlestickData.from_dict(
data['mid'], ctx
)
if data.get('ask') is not None:
data['ask'] = \
ctx.instrument.CandlestickData.from_dict(
data['ask'], ctx
)
return Candlestick(**data) | Instantiate a new Candlestick from a dict (generally from loading a
JSON response). The data used to instantiate the Candlestick is a
shallow copy of the dict passed in, with any complex child types
instantiated appropriately. |
23,217 | def connect(self, db_uri, debug=False):
kwargs = {'echo': debug, 'convert_unicode': True}
if 'mysql' in db_uri:
kwargs['pool_recycle'] = 3600
elif '://' not in db_uri:
logger.debug("detected sqlite path URI: {}".format(db_uri))
db_path = os.path.abspath(os.path.expanduser(db_uri))
db_uri = "sqlite:///{}".format(db_path)
self.engine = create_engine(db_uri, **kwargs)
logger.debug()
BASE.metadata.bind = self.engine
self.session = scoped_session(sessionmaker(bind=self.engine))
self.query = self.session.query
return self | Configure connection to a SQL database.
Args:
db_uri (str): path/URI to the database to connect to
debug (Optional[bool]): whether to output logging information |
23,218 | def add(self, schema, data):
binding = self.get_binding(schema, data)
uri, triples = triplify(binding)
for triple in triples:
self.graph.add(triple)
return uri | Stage ``data`` as a set of statements, based on the given
``schema`` definition. |
23,219 | def _AddVariable(self, variable):
if isinstance(variable, Signal):
if not variable in self.signals:
self.signals.append(variable)
elif isinstance(variable, Variable):
if not variable in self.variables:
self.variables.append(variable)
else:
raise TypeError
self._utd_graph = False | Add a variable to the model. Should not be used by end-user |
23,220 | def set_position(self, decl_pos):
if decl_pos["typehint"] == "LineSourcePosition":
self.editor.set_cursor(decl_pos['line'], 0)
else:
point = decl_pos["offset"]
row, col = self.editor.point2pos(point + 1)
self.editor.set_cursor(row, col) | Set editor position from ENSIME declPos data. |
23,221 | def add(self, item_numid, collect_type, shared, session):
request = TOPRequest('taobao.favorite.add')
request['item_numid'] = item_numid
request['collect_type'] = collect_type
request['shared'] = shared
self.create(self.execute(request, session))
return self.result | taobao.favorite.add (add to favorites)
Given the user's nickname, the numeric id of the target to collect, and the target type, perform the favorite action. |
23,222 | def iter_create_panes(self, w, wconf):
assert isinstance(w, Window)
pane_base_index = int(w.show_window_option('pane-base-index', g=True))
p = None
for pindex, pconf in enumerate(wconf['panes'], start=pane_base_index):
if pindex == int(pane_base_index):
p = w.attached_pane
else:
def get_pane_start_directory():
if 'start_directory' in pconf:
return pconf['start_directory']
elif 'start_directory' in wconf:
return wconf['start_directory']
else:
return None
p = w.split_window(
attach=True, start_directory=get_pane_start_directory(), target=p.id
)
assert isinstance(p, Pane)
if 'layout' in wconf:
w.select_layout(wconf['layout'])
if 'suppress_history' in pconf:
suppress = pconf['suppress_history']
elif 'suppress_history' in wconf:
suppress = wconf['suppress_history']
else:
suppress = True
for cmd in pconf['shell_command']:
p.send_keys(cmd, suppress_history=suppress)
if 'focus' in pconf and pconf['focus']:
w.select_pane(p['pane_id'])
w.server._update_panes()
yield p, pconf | Return :class:`libtmux.Pane` iterating through window config dict.
Run ``shell_command`` with ``$ tmux send-keys``.
Parameters
----------
w : :class:`libtmux.Window`
window to create panes for
wconf : dict
config section for window
Returns
-------
tuple of (:class:`libtmux.Pane`, ``pconf``)
Newly created pane, and the section from the tmuxp configuration
that was used to create the pane. |
23,223 | def _set_ldp_fec_vcs(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=ldp_fec_vcs.ldp_fec_vcs, is_container=, presence=False, yang_name="ldp-fec-vcs", rest_name="ldp-fec-vcs", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u: {u: u, u: None}}, namespace=, defining_module=, yang_type=, is_config=False)
except (TypeError, ValueError):
raise ValueError({
: ,
: "container",
: ,
})
self.__ldp_fec_vcs = t
if hasattr(self, ):
self._set() | Setter method for ldp_fec_vcs, mapped from YANG variable /mpls_state/ldp/fec/ldp_fec_vcs (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_ldp_fec_vcs is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ldp_fec_vcs() directly. |
23,224 | def get_unused_port(port=None):
if port is None or port < 1024 or port > 65535:
port = random.randint(1024, 65535)
assert(1024 <= port <= 65535)
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.bind(('', port))
except socket.error as e:
if e.errno in (98, 10048):
return get_unused_port(None)
raise e
s.close()
return port | Checks if port is already in use. |
23,225 | async def process_graph_input(graph, stream, value, rpc_executor):
graph.sensor_log.push(stream, value)
if stream.important:
associated_output = stream.associated_stream()
graph.sensor_log.push(associated_output, value)
to_check = deque([x for x in graph.roots])
while len(to_check) > 0:
node = to_check.popleft()
if node.triggered():
try:
results = node.process(rpc_executor, graph.mark_streamer)
for result in results:
if inspect.iscoroutine(result.value):
result.value = await asyncio.ensure_future(result.value)
result.raw_time = value.raw_time
graph.sensor_log.push(node.stream, result)
except:
logging.getLogger(__name__).exception("Unhandled exception in graph node processing function for node %s", str(node))
if len(results) > 0:
to_check.extend(node.outputs) | Process an input through this sensor graph.
The tick information in value should be correct and is transfered
to all results produced by nodes acting on this tick. This coroutine
is an asyncio compatible version of SensorGraph.process_input()
Args:
stream (DataStream): The stream the input is part of
value (IOTileReading): The value to process
rpc_executor (RPCExecutor): An object capable of executing RPCs
in case we need to do that. |
23,226 | def get_value(self, name):
factory = self._registered.get(name)
if not factory:
raise KeyError()
if factory._giveme_singleton:
if name in self._singletons:
return self._singletons[name]
self._singletons[name] = factory()
return self._singletons[name]
elif factory._giveme_threadlocal:
if hasattr(self._threadlocals, name):
return getattr(self._threadlocals, name)
setattr(self._threadlocals, name, factory())
return getattr(self._threadlocals, name)
return factory() | Get return value of a dependency factory or
a live singleton instance. |
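The caching rule in miniature (a toy registry, not the library's API): a singleton factory is invoked once and its result reused, while an ordinary factory is invoked on every lookup:

```python
_singletons = {}

def get_value(name, factory, singleton=False):
    if singleton:
        if name not in _singletons:
            _singletons[name] = factory()   # first lookup builds and caches
        return _singletons[name]
    return factory()                        # non-singletons rebuild every time

counter = iter(range(100))
make = lambda: next(counter)
print(get_value('a', make, singleton=True), get_value('a', make, singleton=True))  # 0 0
print(get_value('b', make), get_value('b', make))                                  # 1 2
```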
23,227 | def is_date(self):
dt = DATA_TYPES[]
if type(self.data) is dt[] and in str(self.data) and str(self.data).count() == 2:
date_split = str(self.data).split()
y, m, d = date_split[0], date_split[1], date_split[2]
valid_year, valid_months, valid_days = int(y) in YEARS, int(m) in MONTHS, int(d) in DAYS
if all(i is True for i in (valid_year, valid_months, valid_days)):
self.type = .upper()
self.len = None
return True | Determine if a data record is of type DATE. |
23,228 | def get_environments():
environments = set()
cwd = os.getcwd()
for d in os.listdir(cwd):
if d == :
environments.add(VirtualEnvironment(cwd))
continue
path = unipath(cwd, d)
if utils.is_environment(path):
environments.add(VirtualEnvironment(path))
home = get_home_path()
for d in os.listdir(home):
path = unipath(home, d)
if utils.is_environment(path):
environments.add(VirtualEnvironment(path))
for env in EnvironmentCache:
environments.add(env)
return sorted(list(environments), key=lambda x: x.name) | Returns a list of all known virtual environments as
:class:`VirtualEnvironment` instances. This includes those in CPENV_HOME
and any others that are cached (created by the current user or activated
once by full path). |
23,229 | def add_nodes_to_axes(self):
if all([i == "" for i in self.node_labels]):
return
marks = []
for nidx in self.ttree.get_node_values('idx', 1, 1):
nlabel = self.node_labels[nidx]
nsize = self.node_sizes[nidx]
nmarker = self.node_markers[nidx]
nstyle = deepcopy(self.style.node_style)
nlstyle = deepcopy(self.style.node_labels_style)
nstyle["fill"] = self.node_colors[nidx]
if (nlabel or nsize):
mark = toyplot.marker.create(
shape=nmarker,
label=str(nlabel),
size=nsize,
mstyle=nstyle,
lstyle=nlstyle,
)
else:
mark = ""
marks.append(mark)
if self.style.node_hover is True:
title = self.get_hover()
elif isinstance(self.style.node_hover, list):
title = self.style.node_hover
else:
title = None
self.axes.scatterplot(
self.coords.verts[:, 0],
self.coords.verts[:, 1],
marker=marks,
title=title,
) | Creates a new marker for every node from idx indexes and lists of
node_values, node_colors, node_sizes, node_style, node_labels_style.
Pulls from node_color and adds to a copy of the style dict for each
node to create marker.
Node_colors has priority to overwrite node_style['fill'] |
23,230 | def majmin(reference_labels, estimated_labels):
validate(reference_labels, estimated_labels)
maj_semitones = np.array(QUALITIES['maj'][:8])
min_semitones = np.array(QUALITIES['min'][:8])
ref_roots, ref_semitones, _ = encode_many(reference_labels, False)
est_roots, est_semitones, _ = encode_many(estimated_labels, False)
eq_root = ref_roots == est_roots
eq_quality = np.all(np.equal(ref_semitones[:, :8],
est_semitones[:, :8]), axis=1)
comparison_scores = (eq_root * eq_quality).astype(np.float)
is_maj = np.all(np.equal(ref_semitones[:, :8], maj_semitones), axis=1)
is_min = np.all(np.equal(ref_semitones[:, :8], min_semitones), axis=1)
is_none = np.logical_and(ref_roots < 0, np.all(ref_semitones == 0, axis=1))
comparison_scores[(is_maj + is_min + is_none) == 0] = -1
return comparison_scores | Compare chords along major-minor rules. Chords with qualities outside
Major/minor/no-chord are ignored.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.majmin(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
Returns
-------
comparison_scores : np.ndarray, shape=(n,), dtype=float
Comparison scores, in [0.0, 1.0], or -1 if the comparison is out of
gamut. |
23,231 | def paddingsize(self, namedstruct):
if self.base is not None:
return self.base.paddingsize(namedstruct)
realsize = namedstruct._realsize()
return (realsize + self.padding - 1) // self.padding * self.padding | Return the size of the padded struct (including the "real" size and the padding bytes)
:param namedstruct: a NamedStruct object of this type.
:returns: size including both data and padding. |
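The padding arithmetic is the usual integer round-up-to-a-multiple trick; a standalone check:

```python
def round_up(realsize: int, padding: int) -> int:
    # Same expression as paddingsize above: round realsize up to the
    # next multiple of `padding`.
    return (realsize + padding - 1) // padding * padding

print(round_up(13, 8))  # 16
print(round_up(16, 8))  # 16 (already aligned)
print(round_up(1, 4))   # 4
```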
23,232 | def read(self, sync_map_format, input_file_path, parameters=None):
if sync_map_format is None:
self.log_exc(u"Sync map format is None", None, True, ValueError)
if sync_map_format not in SyncMapFormat.CODE_TO_CLASS:
self.log_exc(u"Sync map format is not allowed" % (sync_map_format), None, True, ValueError)
if not gf.file_can_be_read(input_file_path):
self.log_exc(u"Cannot read sync map file . Wrong permissions?" % (input_file_path), None, True, OSError)
self.log([u"Input format: ", sync_map_format])
self.log([u"Input path: ", input_file_path])
self.log([u"Input parameters: ", parameters])
reader = (SyncMapFormat.CODE_TO_CLASS[sync_map_format])(
variant=sync_map_format,
parameters=parameters,
rconf=self.rconf,
logger=self.logger
)
self.log(u"Reading input file...")
with io.open(input_file_path, "r", encoding="utf-8") as input_file:
input_text = input_file.read()
reader.parse(input_text=input_text, syncmap=self)
self.log(u"Reading input file... done")
language = gf.safe_get(parameters, gc.PPN_SYNCMAP_LANGUAGE, None)
if language is not None:
self.log([u"Overwriting language to ", language])
for fragment in self.fragments:
fragment.text_fragment.language = language | Read sync map fragments from the given file in the specified format,
and add them the current (this) sync map.
Return ``True`` if the call succeeded,
``False`` if an error occurred.
:param sync_map_format: the format of the sync map
:type sync_map_format: :class:`~aeneas.syncmap.SyncMapFormat`
:param string input_file_path: the path to the input file to read
:param dict parameters: additional parameters (e.g., for ``SMIL`` input)
:raises: ValueError: if ``sync_map_format`` is ``None`` or it is not an allowed value
:raises: OSError: if ``input_file_path`` does not exist |
23,233 | def is_unicode_string(string):
if string is None:
return None
if PY2:
return isinstance(string, unicode)
return isinstance(string, str) | Return ``True`` if the given string is a Unicode string,
that is, of type ``unicode`` in Python 2 or ``str`` in Python 3.
Return ``None`` if ``string`` is ``None``.
:param str string: the string to be checked
:rtype: bool |
23,234 | def graph(self):
if not hasattr(self, '_graph') or self._graph is None:
self._graph = ConjunctiveGraph(store=self.store,
identifier=self.base_uri)
return self._graph | A conjunctive graph of all statements in the current instance. |
23,235 | def stream_download(url, target_path, verbose=False):
response = requests.get(url, stream=True)
handle = open(target_path, "wb")
if verbose:
print("Beginning streaming download of %s" % url)
start = datetime.now()
try:
content_length = int(response.headers['content-length'])
content_MB = content_length/1048576.0
print("Total file size: %.2f MB" % content_MB)
except KeyError:
pass
for chunk in response.iter_content(chunk_size=512):
if chunk:
handle.write(chunk)
if verbose:
print(
"Download completed to %s in %s" %
(target_path, datetime.now() - start)) | Download a large file without loading it into memory. |
23,236 | def gevent_worker(self):
while not self.task_queue.empty():
task_kwargs = self.task_queue.get()
handler_type = task_kwargs.pop()
if handler_type == :
super(Command, self).link_file(**task_kwargs)
else:
super(Command, self).copy_file(**task_kwargs) | Process one task after another by calling the handler (`copy_file` or `copy_link`) method of the super class. |
23,237 | def _set_attributes(self):
for parameter, data in self._data.items():
if isinstance(data, dict) or isinstance(data, OrderedDict):
field_names, field_values = zip(*data.items())
sorted_indices = np.argsort(field_names)
attr = namedtuple(
parameter, [field_names[i] for i in sorted_indices]
)
setattr(
self, parameter,
attr(*[field_values[i] for i in sorted_indices])
)
else:
setattr(self, parameter, data) | Traverse the internal dictionary and set the getters |
23,238 | def register_plugin(self):
self.focus_changed.connect(self.main.plugin_focus_changed)
self.main.add_dockwidget(self)
self.main.console.set_help(self)
self.internal_shell = self.main.console.shell
self.console = self.main.console | Register plugin in Spyder's main window |
23,239 | def est_entropy(self):
nz_weights = self.particle_weights[self.particle_weights > 0]
return -np.sum(np.log(nz_weights) * nz_weights) | Estimates the entropy of the current particle distribution
as :math:`-\sum_i w_i \log w_i` where :math:`\{w_i\}`
is the set of particles with nonzero weight. |
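A quick numeric check of the estimator on a hypothetical weight vector: for N equal nonzero weights the entropy equals log(N):

```python
import numpy as np

w = np.full(8, 1 / 8)            # uniform weights over 8 particles
nz = w[w > 0]
print(-np.sum(np.log(nz) * nz))  # ~2.0794
print(np.log(8))                 # ~2.0794
```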
23,240 | def load_x509_certificates(buf):
if not isinstance(buf, basestring):
raise ValueError( % type(buf))
for pem in re.findall(, buf):
yield load_certificate(crypto.FILETYPE_PEM, pem[0]) | Load one or multiple X.509 certificates from a buffer.
:param str buf: A buffer is an instance of `basestring` and can contain multiple
certificates.
:return: An iterator that iterates over certificates in a buffer.
:rtype: list[:class:`OpenSSL.crypto.X509`] |
23,241 | def prompt_gui(path):
import subprocess
filepath, extension = os.path.splitext(path)
basename = os.path.basename(filepath)
dirname = os.path.dirname(filepath)
retry_text =
icon =
if have():
args = [,
,
,
,
+ basename,
,
+ icon]
retry_args = args + [ + retry_text + ,
]
elif have():
base = [,
,
+ basename,
,
]
args = base + []
retry_args = base + [ + retry_text]
else:
fatal()
try:
new_basename = subprocess.check_output(
args, universal_newlines=True).strip()
except subprocess.CalledProcessError:
sys.exit(1)
while os.path.exists(os.path.join(dirname, new_basename + extension)) and \
new_basename != basename:
try:
new_basename = subprocess.check_output(
retry_args, universal_newlines=True).strip()
except subprocess.CalledProcessError:
sys.exit(1)
if new_basename == :
new_basename = basename
return os.path.join(dirname, new_basename + extension) | Prompt for a new filename via GUI. |
23,242 | def strtimezone ():
if time.daylight:
zone = time.altzone
else:
zone = time.timezone
return "%+04d" % (-zone//SECONDS_PER_HOUR) | Return timezone info, %z on some platforms, but not supported on all. |
23,243 | def routingAreaUpdateComplete(ReceiveNpduNumbersList_presence=0):
a = TpPd(pd=0x3)
b = MessageType(mesType=0xa)
packet = a / b
if ReceiveNpduNumbersList_presence is 1:
c = ReceiveNpduNumbersList(ieiRNNL=0x26)
packet = packet / c
return packet | ROUTING AREA UPDATE COMPLETE Section 9.4.16 |
23,244 | def __writeDocstring(self):
while True:
firstLineNum, lastLineNum, lines = (yield)
newDocstringLen = lastLineNum - firstLineNum + 1
while len(lines) < newDocstringLen:
lines.append('')
self.docLines[firstLineNum: lastLineNum + 1] = lines | Runs eternally, dumping out docstring line batches as they get fed in.
Replaces original batches of docstring lines with modified versions
fed in via send. |
23,245 | def version(ctx, version=None, force=False):
updates = {
ctx.directory.joinpath("setup.cfg"): [
(r"^(version\s?=\s?)(.*)", "\\g<1>{version}")
],
ctx.package.directory.joinpath("__version__.py"): [
(r"(__version__\s?=\s?)(.*)", )
],
}
previous_version = get_previous_version(ctx)
if isinstance(version, str):
version = parver.Version.parse(version)
if not force and version <= previous_version:
error_message = (
f"version {version!s} is <= to previous version {previous_version!s}"
)
report.error(ctx, "package.version", error_message)
raise ValueError(error_message)
else:
version = previous_version.bump_release(index=len(previous_version.release) - 1)
report.info(ctx, "package.version", f"updating version to {version!s}")
for (path, replacements) in updates.items():
if path.is_file():
content = path.read_text()
for (pattern, sub) in replacements:
report.debug(
ctx,
"package.version",
f"applying replacement ({pattern!r}, {sub!r}) to {path!s}",
)
content = re.sub(pattern, sub.format(version=version), content, flags=re.M)
path.write_text(content) | Specify a new version for the package.
.. important:: If no version is specified, will take the most recent parsable git
tag and bump the patch number.
:param str version: The new version of the package.
:param bool force: If True, skips version check |
23,246 | def resp_set_lightpower(self, resp, power_level=None):
if power_level is not None:
self.power_level=power_level
elif resp:
self.power_level=resp.power_level | Default callback for set_power |
23,247 | def edge(self, from_node, to_node, edge_type="", **args):
self._stream.write(
% (self._indent, edge_type, from_node, to_node)
)
self._write_attributes(EDGE_ATTRS, **args)
self._stream.write("}\n") | draw an edge from a node to another. |
23,248 | def get_acronyms(fulltext):
acronyms = {}
for m in ACRONYM_BRACKETS_REGEX.finditer(fulltext):
acronym = DOTS_REGEX.sub("", m.group(1))
potential_expansion = fulltext[m.start() - 80:m.start()].replace("\n",
" ")
potential_expansion = re.sub("(\W).(\W)", "\1\2", potential_expansion)
potential_expansion = re.sub("(\w)\(s\)\W", "\1", potential_expansion)
potential_expansion = re.sub(, "", potential_expansion)
potential_expansion = re.sub("[[(].+[\])]", "", potential_expansion)
potential_expansion = re.sub(" {2,}", " ", potential_expansion)
match = re.search(, potential_expansion)
if match is None:
match = re.search(, potential_expansion)
if match is not None:
if acronym in match.group(1):
continue
pattern = ""
for char in acronym[:-1]:
pattern += "%s\w+\W*" % char
pattern += "%s\w+" % acronym[-1]
if re.search(pattern, match.group(1), re.I) is not None:
_add_expansion_to_acronym_dict(acronym, match.group(1), 0,
acronyms)
continue
pattern = "\W("
for char in acronym[:-1]:
pattern += "%s\w+\W+" % char
pattern += "%s\w+)$" % acronym[-1]
match = re.search(pattern, potential_expansion)
if match is not None:
_add_expansion_to_acronym_dict(
acronym, match.group(1), 1, acronyms)
continue
match = re.search(pattern, potential_expansion, re.I)
if match is not None:
_add_expansion_to_acronym_dict(
acronym, match.group(1), 2, acronyms)
continue
potential_expansion_stripped = " ".join([word for word in
_words(potential_expansion) if
word not in STOPLIST])
match = re.search(pattern, potential_expansion_stripped, re.I)
if match is not None:
first_expansion_word = re.search("\w+", match.group(1)).group()
start = potential_expansion.lower().rfind(first_expansion_word)
_add_expansion_to_acronym_dict(
acronym, potential_expansion[start:],
3, acronyms
)
continue
reversed_words = _words(potential_expansion_stripped)
reversed_words.reverse()
reversed_acronym = list(acronym.lower())
reversed_acronym.reverse()
index0 = 0
index1 = 0
word = ""
try:
while index0 < len(reversed_acronym) and index1 < len(
reversed_words):
word = reversed_words[index1]
if index0 + 1 < len(reversed_words):
next_word = reversed_words[index0 + 1]
else:
next_word = "_"
char = reversed_acronym[index0]
if index0 + 1 < len(reversed_acronym):
next_char = reversed_acronym[index0 + 1]
else:
next_char = "_"
if char == next_char and \
word.startswith(char) and \
word.count(char) > 1 and \
not next_word.startswith(char):
index0 += 2
index1 += 1
if word.startswith(char):
index0 += 1
index1 += 1
elif char in word and \
not word.endswith(char) and \
word.startswith(next_char):
index0 += 2
index1 += 1
else:
word = ""
break
except IndexError:
word = ""
if not word.startswith(char):
word = ""
if word:
start = potential_expansion.lower().rfind(word)
_add_expansion_to_acronym_dict(acronym,
potential_expansion[start:], 4,
acronyms)
continue
reversed_words = _words(potential_expansion.lower())
reversed_words.reverse()
reversed_acronym = list(acronym.lower())
reversed_acronym.reverse()
index0 = 0
index1 = 0
word = ""
try:
while index0 < len(reversed_acronym) and index1 < len(
reversed_words):
word = reversed_words[index1]
if index0 + 1 < len(reversed_words):
next_word = reversed_words[index0 + 1]
else:
next_word = ""
char = reversed_acronym[index0]
if index0 + 1 < len(reversed_acronym):
next_char = reversed_acronym[index0 + 1]
else:
next_char = ""
if char == next_char and word.startswith(char) and \
word.count(char) > 1 and \
not next_word.startswith(char):
index0 += 2
index1 += 1
if word.startswith(char):
index0 += 1
index1 += 1
elif char in word and \
not word.endswith(char) and \
word.startswith(next_char):
index0 += 2
index1 += 1
else:
word = ""
break
except IndexError:
word = ""
if not word.startswith(char):
word = ""
if word:
start = potential_expansion.lower().rfind(word)
_add_expansion_to_acronym_dict(acronym,
potential_expansion[start:], 5,
acronyms)
continue
return acronyms | Find acronyms and expansions from the fulltext.
If needed, acronyms can already contain a dictionary of previously found
acronyms that will be merged with the current results. |
23,249 | def find(objs, selector, context=None):
return (obj for obj in objs if match(obj, selector, context)) | Query a collection of Bokeh models and yield any that match the
a selector.
Args:
obj (Model) : object to test
selector (JSON-like) : query selector
context (dict) : kwargs to supply callable query attributes
Yields:
Model : objects that match the query
Queries are specified as selectors similar to MongoDB style query
selectors, as described for :func:`~bokeh.core.query.match`.
Examples:
.. code-block:: python
# find all objects with type Grid
find(p.references(), {'type': Grid})
# find all objects with type Grid or Axis
find(p.references(), {OR: [
{'type': Grid}, {'type': Axis}
]})
# same query, using IN operator
find(p.references(), {'type': {IN: [Grid, Axis]}})
# find all plot objects on the 'left' layout of the Plot
# here layout is a method that takes a plot as context
find(p.references(), {'layout': 'left'}, {'plot': p}) |
23,250 | def get_counter(self, name, start=0):
item = self.get_item(hash_key=name, start=start)
counter = Counter(dynamo_item=item, pool=self)
return counter | Gets the DynamoDB item behind a counter and ties it to a Counter
instance. |
23,251 | def rnn(name, input, state, kernel, bias, new_state, number_of_gates = 2):
nn = Build(name)
nn.tanh(
nn.mad(kernel=kernel, bias=bias,
x=nn.concat(input, state)),
out=new_state);
return nn.layers; | - Ht = f(Xt*Wi + Ht_1*Ri + Wbi + Rbi) |
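The recurrence in the docstring, written directly in NumPy with made-up sizes; this mirrors the concat-then-mad-then-tanh structure of the builder above but is not its API:

```python
import numpy as np

rng = np.random.default_rng(0)
x_dim, h_dim = 3, 4
W = rng.normal(size=(x_dim + h_dim, h_dim))   # fused [Wi; Ri] kernel
b = np.zeros(h_dim)                           # fused bias (Wbi + Rbi)

h = np.zeros(h_dim)
for t in range(5):
    x_t = rng.normal(size=x_dim)
    # Ht = tanh([Xt, Ht-1] @ W + b)
    h = np.tanh(np.concatenate([x_t, h]) @ W + b)
print(h.shape)  # (4,)
```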
23,252 | def upload_directory_contents(input_dict, environment_dict):
if environment_dict["currentkeyname"] is None:
raise seash_exceptions.UserError()
if environment_dict["currenttarget"] is None:
raise seash_exceptions.UserError()
file_list = os.listdir(source_directory)
for filename in file_list:
path_and_filename = source_directory + os.sep + filename
if not os.path.isdir(path_and_filename):
print "Uploading ..."
| This function serves to upload every file in a user-supplied
source directory to all of the vessels in the current target group.
It essentially calls seash's `upload` function repeatedly, each
time with a file name taken from the source directory.
A note on the input_dict argument:
`input_dict` contains our own `command_dict` (see below), with
the `"[ARGUMENT]"` sub-key of `children` renamed to what
argument the user provided. In our case, this will be the source
dir to read from. (If not, this is an error!) |
23,253 | def V_vertical_torispherical_concave(D, f, k, h):
r
alpha = asin((1-2*k)/(2.*(f-k)))
a1 = f*D*(1-cos(alpha))
a2 = k*D*cos(alpha)
D1 = 2*f*D*sin(alpha)
s = (k*D*sin(alpha))**2
t = 2*a2
def V1(h):
u = h-f*D*(1-cos(alpha))
v1 = pi/4*(2*a1**3/3. + a1*D1**2/2.) + pi*u*((D/2.-k*D)**2 +s)
v1 += pi*t*u**2/2. - pi*u**3/3.
v1 += pi*D*(1-2*k)*((2*u-t)/4.*(s+t*u-u**2)**0.5 + t*s**0.5/4.
+ k**2*D**2/2.*(acos((t-2*u)/(2*k*D)) -alpha))
return v1
def V2(h):
v2 = pi*h**2/4.*(2*a1 + D1**2/(2.*a1) - 4*h/3.)
return v2
if 0 <= h < a2:
Vf = pi*D**2*h/4 - V1(a1+a2) + V1(a1+a2-h)
elif a2 <= h < a1 + a2:
Vf = pi*D**2*h/4 - V1(a1+a2) + V2(a1+a2-h)
else:
Vf = pi*D**2*h/4 - V1(a1+a2)
return Vf | Calculates volume of a vertical tank with a concave torispherical bottom,
according to [1]_. No provision for the top of the tank is made here.
.. math::
V = \frac{\pi D^2 h}{4} - v_1(h=a_1+a_2) + v_1(h=a_1 + a_2 -h),\; 0 \le h < a_2
.. math::
V = \frac{\pi D^2 h}{4} - v_1(h=a_1+a_2) + v_2(h=a_1 + a_2 -h),\; a_2 \le h < a_1 + a_2
.. math::
V = \frac{\pi D^2 h}{4} - v_1(h=a_1+a_2) + 0,\; h \ge a_1 + a_2
.. math::
v_1 = \frac{\pi}{4}\left(\frac{2a_1^3}{3} + \frac{a_1D_1^2}{2}\right)
+\pi u\left[\left(\frac{D}{2}-kD\right)^2 +s\right]
+ \frac{\pi tu^2}{2} - \frac{\pi u^3}{3} + \pi D(1-2k)\left[
\frac{2u-t}{4}\sqrt{s+tu-u^2} + \frac{t\sqrt{s}}{4}
+ \frac{k^2D^2}{2}\left(\cos^{-1}\frac{t-2u}{2kD}-\alpha\right)\right]
.. math::
v_2 = \frac{\pi h^2}{4}\left(2a_1 + \frac{D_1^2}{2a_1} - \frac{4h}{3}\right)
.. math::
\alpha = \sin^{-1}\frac{1-2k}{2(f-k)}
.. math::
a_1 = fD(1-\cos\alpha)
.. math::
a_2 = kD\cos\alpha
.. math::
D_1 = 2fD\sin\alpha
.. math::
s = (kD\sin\alpha)^2
.. math::
t = 2a_2
.. math::
u = h - fD(1-\cos\alpha)
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
f : float
Dish-radius parameter; fD = dish radius [1/m]
k : float
knuckle-radius parameter ; kD = knuckle radius [1/m]
h : float
Height, as measured up to where the fluid ends, [m]
Returns
-------
V : float
Volume [m^3]
Examples
--------
Matching example from [1]_, with inputs in inches and volume in gallons.
>>> V_vertical_torispherical_concave(D=113., f=0.71, k=0.081, h=15)/231
103.88569287163769
References
----------
.. [1] Jones, D. "Compute Fluid Volumes in Vertical Tanks." Chemical
Processing. December 18, 2003.
http://www.chemicalprocessing.com/articles/2003/193/ |
23,254 | def _get_isolated(self, hostport):
assert hostport, "hostport is required"
if hostport not in self._peers:
peer = self.peer_class(
tchannel=self.tchannel,
hostport=hostport,
)
self._peers[peer.hostport] = peer
return self._peers[hostport] | Get a Peer for the given destination for a request.
A new Peer is added and returned if one does not already exist for the
given host-port. Otherwise, the existing Peer is returned.
**NOTE** new peers will not be added to the peer heap. |
23,255 | def get_processing_block_ids(self):
_processing_block_ids = []
pattern =
block_ids = self._db.get_ids(pattern)
for block_id in block_ids:
id_split = block_id.split()[-1]
_processing_block_ids.append(id_split)
return sorted(_processing_block_ids) | Get list of processing block ids using the processing block id |
23,256 | def MultiOpenOrdered(self, urns, **kwargs):
precondition.AssertIterableType(urns, rdfvalue.RDFURN)
urn_filedescs = {}
for filedesc in self.MultiOpen(urns, **kwargs):
urn_filedescs[filedesc.urn] = filedesc
filedescs = []
for urn in urns:
try:
filedescs.append(urn_filedescs[urn])
except KeyError:
raise IOError("No associated AFF4 object for `%s`" % urn)
return filedescs | Opens many URNs and returns handles in the same order.
`MultiOpen` can return file handles in arbitrary order. This makes it more
efficient and in most cases the order does not matter. However, there are
cases where order is important and this function should be used instead.
Args:
urns: A list of URNs to open.
**kwargs: Same keyword arguments as in `MultiOpen`.
Returns:
A list of file-like objects corresponding to the specified URNs.
Raises:
IOError: If one of the specified URNs does not correspond to the AFF4
object. |
23,257 | def load(self):
def on_list_task(info, error=None):
self.loading_spin.stop()
self.loading_spin.hide()
if not info:
self.app.toast(_())
if error or not info:
logger.error( % (info, error))
return
tasks = info[]
for task in tasks:
self.liststore.append([
task[],
task[],
task[],
task[],
0,
0,
int(task[]),
0,
,
gutil.escape(task[])
])
self.scan_tasks()
nonlocal start
start = start + len(tasks)
if info[] > start:
gutil.async_call(pcs.cloud_list_task, self.app.cookie,
self.app.tokens, start, callback=on_list_task)
self.loading_spin.start()
self.loading_spin.show_all()
start = 0
gutil.async_call(pcs.cloud_list_task, self.app.cookie, self.app.tokens,
start, callback=on_list_task) | Fetch the current list of offline (cloud) download tasks. |
23,258 | def make_job_graph(infiles, fragfiles, blastcmds):
joblist = []
dbjobdict = build_db_jobs(infiles, blastcmds)
jobnum = len(dbjobdict)
for idx, fname1 in enumerate(fragfiles[:-1]):
for fname2 in fragfiles[idx + 1 :]:
jobnum += 1
jobs = [
pyani_jobs.Job(
"%s_exe_%06d_a" % (blastcmds.prefix, jobnum),
blastcmds.build_blast_cmd(fname1, fname2.replace("-fragments", "")),
),
pyani_jobs.Job(
"%s_exe_%06d_b" % (blastcmds.prefix, jobnum),
blastcmds.build_blast_cmd(fname2, fname1.replace("-fragments", "")),
),
]
jobs[0].add_dependency(dbjobdict[fname1.replace("-fragments", "")])
jobs[1].add_dependency(dbjobdict[fname2.replace("-fragments", "")])
joblist.extend(jobs)
return joblist | Return a job dependency graph, based on the passed input sequence files.
- infiles - a list of paths to input FASTA files
- fragfiles - a list of paths to fragmented input FASTA files
By default, will run ANIb - it *is* possible to make a mess of passing the
wrong executable for the mode you're using.
All items in the returned graph list are BLAST executable jobs that must
be run *after* the corresponding database creation. The Job objects
corresponding to the database creation are contained as dependencies.
How those jobs are scheduled depends on the scheduler (see
run_multiprocessing.py, run_sge.py) |
23,259 | def _run_in_reactor(self, function, _, args, kwargs):
def runs_in_reactor(result, args, kwargs):
d = maybeDeferred(function, *args, **kwargs)
result._connect_deferred(d)
result = EventualResult(None, self._reactor)
self._registry.register(result)
self._reactor.callFromThread(runs_in_reactor, result, args, kwargs)
return result | Implementation: A decorator that ensures the wrapped function runs in
the reactor thread.
When the wrapped function is called, an EventualResult is returned. |
23,260 | def uninstall(self,
name: str,
force: bool = False,
noprune: bool = False
) -> None:
try:
self.__docker.images.remove(image=name,
force=force,
noprune=noprune)
except docker.errors.ImageNotFound as e:
if force:
return
raise e | Attempts to uninstall a given Docker image.
Parameters:
name: the name of the Docker image.
force: a flag indicating whether or not an exception should be
thrown if the image associated with the given build
instructions is not installed. If `True`, no exception
will be thrown; if `False`, exception will be thrown.
noprune: a flag indicating whether or not dangling image layers
should also be removed.
Raises:
docker.errors.ImageNotFound: if the image associated with the given
instructions can't be found. |
23,261 | def fractal_dimension(image):
pixels = []
for i in range(image.shape[0]):
for j in range(image.shape[1]):
if image[i, j] > 0:
pixels.append((i, j))
lx = image.shape[1]
ly = image.shape[0]
pixels = np.array(pixels)
if len(pixels) < 2:
return 0
scales = np.logspace(1, 4, num=20, endpoint=False, base=2)
Ns = []
for scale in scales:
H, edges = np.histogramdd(pixels,
bins=(np.arange(0, lx, scale),
np.arange(0, ly, scale)))
H_sum = np.sum(H > 0)
if H_sum == 0:
H_sum = 1
Ns.append(H_sum)
coeffs = np.polyfit(np.log(scales), np.log(Ns), 1)
hausdorff_dim = -coeffs[0]
return hausdorff_dim | Estimates the fractal dimension of an image with box counting.
Counts pixels with value 0 as empty and everything else as non-empty.
Input image has to be grayscale.
See, e.g `Wikipedia <https://en.wikipedia.org/wiki/Fractal_dimension>`_.
:param image: numpy.ndarray
:returns: estimation of fractal dimension
:rtype: float |
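A self-contained miniature of the box-counting idea (not a call to the function above): count occupied boxes at several scales and fit the log-log slope; a filled square should come out near 2:

```python
import numpy as np

image = np.zeros((256, 256))
image[64:192, 64:192] = 1                 # filled square
ys, xs = np.nonzero(image)
pixels = np.column_stack([ys, xs])

scales = [2, 4, 8, 16, 32]
counts = []
for s in scales:
    H, _ = np.histogramdd(pixels, bins=(np.arange(0, 256 + s, s),
                                        np.arange(0, 256 + s, s)))
    counts.append((H > 0).sum())

slope = np.polyfit(np.log(scales), np.log(counts), 1)[0]
print(-slope)  # ~2 for a filled square
```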
23,262 | async def jsk_load(self, ctx: commands.Context, *extensions: ExtensionConverter):
paginator = commands.Paginator(prefix='', suffix='')
for extension in itertools.chain(*extensions):
method, icon = (
(self.bot.reload_extension, "\N{CLOCKWISE RIGHTWARDS AND LEFTWARDS OPEN CIRCLE ARROWS}")
if extension in self.bot.extensions else
(self.bot.load_extension, "\N{INBOX TRAY}")
)
try:
method(extension)
except Exception as exc:
traceback_data = ''.join(traceback.format_exception(type(exc), exc, exc.__traceback__, 1))
paginator.add_line(
f"{icon}\N{WARNING SIGN} `{extension}`\n```py\n{traceback_data}\n```",
empty=True
)
else:
paginator.add_line(f"{icon} `{extension}`", empty=True)
for page in paginator.pages:
await ctx.send(page) | Loads or reloads the given extension names.
Reports any extensions that failed to load. |
23,263 | def is_free(self):
raise NotImplementedError("%s not implemented for %s" % (self.is_free.__func__.__name__,
self.__class__.__name__)) | Returns a concrete determination as to whether the chunk is free. |
23,264 | def pad_line_to_ontonotes(line, domain) -> List[str]:
word_ind, word = line[ : 2]
pos =
oie_tags = line[2 : ]
line_num = 0
parse = "-"
lemma = "-"
return [domain, line_num, word_ind, word, pos, parse, lemma, ,\
, , ] + list(oie_tags) + [, ] | Pad line to conform to ontonotes representation. |
23,265 | def get_fs(path):
scheme = ''
if '://' in path:
scheme = path.partition('://')[0]
for schemes, fs_class in FILE_EXTENSIONS:
if scheme in schemes:
return fs_class
return FileSystem | Find the file system implementation for this path. |
23,266 | def method(value, arg):
if hasattr(value, str(arg)):
return getattr(value, str(arg))
return "[%s has no method %s]" % (value, arg) | Method attempts to see if the value has a specified method.
Usage:
{% load custom_filters %}
{% if foo|method:"has_access" %} |
23,267 | def threshold_monitor_hidden_threshold_monitor_sfp_policy_area_alert_above_above_highthresh_action(self, **kwargs):
config = ET.Element("config")
threshold_monitor_hidden = ET.SubElement(config, "threshold-monitor-hidden", xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor")
threshold_monitor = ET.SubElement(threshold_monitor_hidden, "threshold-monitor")
sfp = ET.SubElement(threshold_monitor, "sfp")
policy = ET.SubElement(sfp, "policy")
policy_name_key = ET.SubElement(policy, "policy_name")
policy_name_key.text = kwargs.pop('policy_name')
area = ET.SubElement(policy, "area")
type_key = ET.SubElement(area, "type")
type_key.text = kwargs.pop('type')
area_value_key = ET.SubElement(area, "area_value")
area_value_key.text = kwargs.pop('area_value')
alert = ET.SubElement(area, "alert")
above = ET.SubElement(alert, "above")
above_highthresh_action = ET.SubElement(above, "above-highthresh-action")
above_highthresh_action.text = kwargs.pop('above_highthresh_action')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code |
23,268 | def set_instances(name, instances, test=False, region=None, key=None, keyid=None,
profile=None):
ret = True
current = set([i['instance_id'] for i in get_instance_health(name, region, key, keyid, profile)])
desired = set(instances)
add = desired - current
remove = current - desired
if test:
return bool(add or remove)
if remove:
if deregister_instances(name, list(remove), region, key, keyid, profile) is False:
ret = False
if add:
if register_instances(name, list(add), region, key, keyid, profile) is False:
ret = False
return ret | Set the instances assigned to an ELB to exactly the list given
CLI example:
.. code-block:: bash
salt myminion boto_elb.set_instances myelb region=us-east-1 instances="[instance_id,instance_id]" |
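The core of the function is plain set arithmetic between the desired and currently registered instances; a toy illustration with made-up instance ids:

.. code-block:: python

    current = {"i-0aaa", "i-0bbb", "i-0ccc"}   # from get_instance_health()
    desired = {"i-0bbb", "i-0ccc", "i-0ddd"}   # the instances argument
    add = desired - current                    # {'i-0ddd'} -> register_instances
    remove = current - desired                 # {'i-0aaa'} -> deregister_instances
    print(sorted(add), sorted(remove))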
23,269 | def _combine_season_stats(self, table_rows, career_stats, all_stats_dict):
most_recent_season = self._most_recent_season
if not table_rows:
table_rows = []
for row in table_rows:
season = self._parse_season(row)
try:
all_stats_dict[season]['data'] += str(row)
except KeyError:
all_stats_dict[season] = {'data': str(row)}
most_recent_season = season
self._most_recent_season = most_recent_season
if not career_stats:
return all_stats_dict
try:
all_stats_dict['Career']['data'] += str(next(career_stats))
except KeyError:
try:
all_stats_dict['Career'] = {'data': str(next(career_stats))}
except StopIteration:
return all_stats_dict
return all_stats_dict | Combine all stats for each season.
Since all of the stats are spread across multiple tables, they should
be combined into a single field which can be used to easily query stats
at once.
Parameters
----------
table_rows : generator
A generator where each element is a row in a stats table.
career_stats : generator
A generator where each element is a row in the footer of a stats
table. Career stats are kept in the footer, hence the usage.
all_stats_dict : dictionary
A dictionary of all stats separated by season where each key is the
season ``string``, such as '2017', and the value is a
``dictionary`` with a ``string`` of 'data' and ``string``
containing all of the data.
Returns
-------
dictionary
Returns an updated version of the passed all_stats_dict which
includes more metrics from the provided table. |
23,270 | def copy_pkg(self, filename, id_=-1):
for repo in self._children:
repo.copy_pkg(filename, id_) | Copy a pkg, dmg, or zip to all repositories.
Args:
filename: String path to the local file to copy.
id_: Integer ID you wish to associate package with for a JDS
or CDP only. Default is -1, which is used for creating
a new package object in the database. |
23,271 | def get_hosted_zone_by_name(client, zone_name):
p = client.get_paginator("list_hosted_zones")
for i in p.paginate():
for zone in i["HostedZones"]:
if zone["Name"] == zone_name:
return parse_zone_id(zone["Id"])
return None | Get the zone id of an existing zone by name.
Args:
client (:class:`botocore.client.Route53`): The connection used to
interact with Route53's API.
zone_name (string): The name of the DNS hosted zone to create.
Returns:
string: The Id of the Hosted Zone. |
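A hedged usage sketch, assuming the function above is importable alongside ``parse_zone_id``. Route53 reports zone names with a trailing dot, so the lookup name should include it:

.. code-block:: python

    import boto3

    client = boto3.client("route53")
    zone_id = get_hosted_zone_by_name(client, "example.com.")  # note the trailing dot
    print(zone_id or "zone not found")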
23,272 | def prune_missing(table):
try:
for item in table.select():
if not os.path.isfile(item.file_path):
logger.info("File disappeared: %s", item.file_path)
item.delete()
except:
logger.exception("Error pruning %s", table) | Prune any files which are missing from the specified table |
23,273 | def predict(self,param_dict):
encoder_dict = self._designmatrix_object.encoder
X, col_names = self._designmatrix_object.run_encoder(param_dict, encoder_dict)
Y_pred = self._compute_prediction(X)
return Y_pred | predict new waveforms using multivar fit |
23,274 | def _xml_namespace_strip(root):
if '}' not in root.tag:
return
for element in root.iter():
if '}' in element.tag:
element.tag = element.tag.split('}')[1]
else:
pass | Strip the XML namespace prefix from all element tags under the given root Element. |
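A quick ElementTree demonstration of what the helper strips: parsed tags carry a ``{namespace-uri}`` prefix, and splitting on the closing brace leaves the bare tag name.

.. code-block:: python

    import xml.etree.ElementTree as ET

    root = ET.fromstring('<a xmlns="http://example.com/ns"><b/></a>')
    print([el.tag for el in root.iter()])
    # ['{http://example.com/ns}a', '{http://example.com/ns}b']
    for el in root.iter():
        if '}' in el.tag:
            el.tag = el.tag.split('}')[1]
    print([el.tag for el in root.iter()])  # ['a', 'b']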
23,275 | def desc(self):
doc = inspect.getdoc(self.controller_class)
if not doc: doc = ''
return doc | return the description of this endpoint |
23,276 | def errint(marker, number):
marker = stypes.stringToCharP(marker)
number = ctypes.c_int(number)
libspice.errint_c(marker, number) | Substitute an integer for the first occurrence of a marker found
in the current long error message.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/errint_c.html
:param marker: A substring of the error message to be replaced.
:type marker: str
:param number: The integer to substitute for marker.
:type number: int |
23,277 | def list_settings(self):
result = super().list_settings()
result.append((self.SETTING_FLAG_HEADER, True))
result.append((self.SETTING_HEADER_CONTENT, ''))  # default content literal assumed; original value lost
result.append((self.SETTING_HEADER_FORMATING, {}))  # original formatting-rule literal lost; empty dict assumed
result.append((self.SETTING_FLAG_ENUMERATE, False))
result.append((self.SETTING_COLUMNS, None))
result.append((self.SETTING_ROW_HIGHLIGHT, None))
return result | Get list of all appropriate settings and their default values. |
23,278 | def load_creditscoring1(cost_mat_parameters=None):
module_path = dirname(__file__)
# file and column names below follow the Kaggle "Give Me Some Credit" data; the exact literals were lost and are assumed
raw_data = pd.read_csv(join(module_path, 'data', 'creditscoring1.csv.gz'), delimiter=',', compression='gzip')
descr = open(join(module_path, 'descr', 'creditscoring1.rst')).read()
raw_data = raw_data.dropna()
raw_data = raw_data.loc[(raw_data['MonthlyIncome'] > 0)]
raw_data = raw_data.loc[(raw_data['DebtRatio'] < 1)]
target = raw_data['SeriousDlqin2yrs'].values.astype(np.int)
data = raw_data.drop(['SeriousDlqin2yrs', 'id'], 1)
if cost_mat_parameters is None:
cost_mat_parameters = {'int_r': 0.0479 / 12,
'int_cf': 0.0294 / 12,
'cl_max': 25000,
'n_term': 24,
'k': 3,
'lgd': .75}
pi_1 = target.mean()
cost_mat = _creditscoring_costmat(data['MonthlyIncome'].values, data['DebtRatio'].values, pi_1, cost_mat_parameters)
return Bunch(data=data.values, target=target, cost_mat=cost_mat,
target_names=['no_default', 'default'], DESCR=descr,
feature_names=data.columns.values, name='CreditScoring_Kaggle2011') | Load and return the credit scoring Kaggle Credit competition dataset (classification).
The credit scoring is an easily transformable example-dependent cost-sensitive classification dataset.
Parameters
----------
cost_mat_parameters : Dictionary-like object, optional (default=None)
If not None, must include 'int_r', 'int_cf', 'cl_max', 'n_term', 'k','lgd'
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'cost_mat', the cost matrix of each example,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the full description of the dataset.
References
----------
.. [1] A. Correa Bahnsen, D.Aouada, B, Ottersten,
"Example-Dependent Cost-Sensitive Logistic Regression for Credit Scoring",
in Proceedings of the International Conference on Machine Learning and Applications,
, 2014.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50
>>> from costcla.datasets import load_creditscoring1
>>> data = load_creditscoring1()
>>> data.target[[10, 17, 400]]
array([0, 1, 0])
>>> data.cost_mat[[10, 17, 400]]
array([[ 1023.73054104, 18750. , 0. , 0. ],
[ 717.25781516, 6749.25 , 0. , 0. ],
[ 1004.32819923, 17990.25 , 0. , 0. ]]) |
23,279 | def _parse_node(graph, text):
match = _NODEPAT.match(text)
if match is not None:
node = match.group(1)
graph.node(node, label=match.group(2), shape='circle')
return node
match = _LEAFPAT.match(text)
if match is not None:
node = match.group(1)
graph.node(node, label=match.group(2), shape='box')
return node
raise ValueError('Unable to parse node: {0}'.format(text)) | parse dumped node |
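``_NODEPAT`` and ``_LEAFPAT`` are defined elsewhere in the module; patterns along these lines would match xgboost's text dump (split nodes like ``0:[f0<0.5] yes=1,no=2,missing=1``, leaves like ``3:leaf=0.42``), but treat them as an assumption rather than the library's exact definitions.

.. code-block:: python

    import re

    _NODEPAT = re.compile(r'(\d+):\[(.+)\]')
    _LEAFPAT = re.compile(r'(\d+):(leaf=.+)')

    print(_NODEPAT.match('0:[f0<0.5] yes=1,no=2,missing=1').groups())  # ('0', 'f0<0.5')
    print(_LEAFPAT.match('3:leaf=0.42').groups())                      # ('3', 'leaf=0.42')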
23,280 | def arch_prefixes(self):
archs = self.archs(as_list=True)
prefixes = []
for i in range(1, len(archs)+1):
prefixes.append(archs[:i])
return prefixes | Return the initial 1, 2, ..., N architectures as a prefix list
For arch1/arch2/arch3, this returns
[arch1],[arch1/arch2],[arch1/arch2/arch3] |
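A small illustration of the prefix expansion for a hypothetical ``archs()`` result:

.. code-block:: python

    archs = ['arch1', 'arch2', 'arch3']
    prefixes = [archs[:i] for i in range(1, len(archs) + 1)]
    print(prefixes)
    # [['arch1'], ['arch1', 'arch2'], ['arch1', 'arch2', 'arch3']]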
23,281 | def get_innerclasses(self):
buff = self.get_attribute("InnerClasses")
if buff is None:
return tuple()
with unpack(buff) as up:
return tuple(up.unpack_objects(JavaInnerClassInfo, self.cpool)) | sequence of JavaInnerClassInfo instances describing the inner
classes of this class definition
reference: http://docs.oracle.com/javase/specs/jvms/se7/html/jvms-4.html#jvms-4.7.6 |
23,282 | def save_intermediate_array(self, array, name):
if self.intermediate_results:
fits.writeto(name, array, overwrite=True) | Save intermediate array object as FITS. |
23,283 | def export_path(self, relname, export_dir=None):
name, ext = relname.replace('/', '-').rsplit('.', 1)  # separator literals assumed; originals lost
newname = '%s_%d.%s' % (name, self.calc_id, ext)
if export_dir is None:
export_dir = self.export_dir
return os.path.join(export_dir, newname) | Return the path of the exported file by adding the export_dir in
front, the calculation ID at the end.
:param relname: relative file name
:param export_dir: export directory (if None use .export_dir) |
23,284 | def replacebranch1(idf, loop, branchname, listofcomponents_tuples, fluid=None,
debugsave=False):
if fluid is None:
fluid = ''
listofcomponents_tuples = _clean_listofcomponents_tuples(listofcomponents_tuples)
branch = idf.getobject('BRANCH', branchname)
listofcomponents = []
for comp_type, comp_name, compnode in listofcomponents_tuples:
comp = getmakeidfobject(idf, comp_type.upper(), comp_name)
listofcomponents.append((comp, compnode))
newbr = replacebranch(idf, loop, branch, listofcomponents,
debugsave=debugsave, fluid=fluid)
return newbr | do I even use this ? .... yup! I do |
23,285 | def get_backup_end_segment_and_time(self, db_conn, backup_mode):
cursor = db_conn.cursor()
cursor.execute("SELECT now(), pg_is_in_recovery()")
backup_end_time, in_recovery = cursor.fetchone()
if in_recovery:
db_conn.commit()
return None, backup_end_time
if self.pg_version_server >= 100000:
cursor.execute("SELECT pg_walfile_name(pg_current_wal_lsn()), txid_current()")
else:
cursor.execute("SELECT pg_xlogfile_name(pg_current_xlog_location()), txid_current()")
backup_end_wal_segment, _ = cursor.fetchone()
db_conn.commit()
backup_end_name = "pghoard_end_of_backup"
if backup_mode == "non-exclusive":
cursor.execute("SELECT pg_start_backup(%s, true, false)", [backup_end_name])
cursor.execute("SELECT pg_stop_backup(false)")
elif backup_mode == "pgespresso":
cursor.execute("SELECT pgespresso_start_backup(%s, false)", [backup_end_name])
backup_label = cursor.fetchone()[0]
cursor.execute("SELECT pgespresso_stop_backup(%s)", [backup_label])
else:
cursor.execute("SELECT pg_start_backup(%s)", [backup_end_name])
cursor.execute("SELECT pg_stop_backup()")
db_conn.commit()
return backup_end_wal_segment, backup_end_time | Grab a timestamp and WAL segment name after the end of the backup: this is a point in time to which
we must be able to recover, and the last WAL segment that is required for the backup to be
consistent.
Note that pg_switch_xlog()/pg_switch_wal() is a superuser-only function, but since pg_start_backup() and
pg_stop_backup() cause a WAL switch, we'll call them instead. The downside is an unnecessary
checkpoint. |
23,286 | def _sliced_list(self, selector):
if self.skipmissing:
return self.obj[selector]
keys = xrange(selector.start or 0,
selector.stop or sys.maxint,
selector.step or 1)
res = []
for key in keys:
self._append(self.obj, key, res, skipmissing=False)
return res | For slice selectors operating on lists, we need to handle them
differently, depending on ``skipmissing``. In explicit mode, we may have
to expand the list with ``default`` values. |
23,287 | def jieba_tokenize(text, external_wordlist=False):
global jieba_tokenizer, jieba_orig_tokenizer
if external_wordlist:
if jieba_orig_tokenizer is None:
jieba_orig_tokenizer = jieba.Tokenizer(dictionary=ORIG_DICT_FILENAME)
return jieba_orig_tokenizer.lcut(text)
else:
if jieba_tokenizer is None:
jieba_tokenizer = jieba.Tokenizer(dictionary=DICT_FILENAME)
tokens = []
for _token, start, end in jieba_tokenizer.tokenize(simplify_chinese(text), HMM=False):
tokens.append(text[start:end])
return tokens | Tokenize the given text into tokens whose word frequencies can probably
be looked up. This uses Jieba, a word-frequency-based tokenizer.
If `external_wordlist` is False, we tell Jieba to default to using
wordfreq's own Chinese wordlist, and not to infer unknown words using a
hidden Markov model. This ensures that the multi-character tokens that it
outputs will be ones whose word frequencies we can look up.
If `external_wordlist` is True, this will use the largest version of
Jieba's original dictionary, with HMM enabled, so its results will be
independent of the data in wordfreq. These results will be better optimized
for purposes that aren't looking up word frequencies, such as general-
purpose tokenization, or collecting word frequencies in the first place. |
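The wrapper drives Jieba's ``Tokenizer`` directly; with plain Jieba and its bundled dictionary (rather than wordfreq's wordlist) the equivalent calls look roughly like this, assuming ``pip install jieba``:

.. code-block:: python

    import jieba

    dt = jieba.Tokenizer()  # default dictionary, comparable to the external_wordlist path
    text = "我喜欢自然语言处理"
    tokens = [text[start:end] for _token, start, end in dt.tokenize(text, HMM=False)]
    print(tokens)
    print(jieba.lcut(text))  # convenience API, HMM enabled by default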
23,288 | def get_relationship(self, relationship_id=None):
if relationship_id is None:
raise NullArgument()
url_path = ('/handcar/services/relationship/banks/' +  # URL segments assumed; original literals lost
self._catalog_idstr +
'/relationships/' + str(relationship_id))
return objects.Relationship(self._get_request(url_path)) | Gets the ``Relationship`` specified by its ``Id``.
arg: relationship_id (osid.id.Id): the ``Id`` of the
``Relationship`` to retrieve
return: (osid.relationship.Relationship) - the returned
``Relationship``
raise: NotFound - no ``Relationship`` found with the given
``Id``
raise: NullArgument - ``relationship_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* |
23,289 | def data(self):
data=np.empty((self.nRows,self.nCols),dtype=np.float)
data[:]=np.nan
for colNum,colData in enumerate(self.colData):
validIs=np.where([np.isreal(v) for v in colData])[0]
validData=np.ones(len(colData))*np.nan
validData[validIs]=np.array(colData)[validIs]
data[:len(colData),colNum]=validData
return data | return all of colData as a 2D numpy array. |
23,290 | def _UpdateAuthorizedKeys(self, user, ssh_keys):
pw_entry = self._GetUser(user)
if not pw_entry:
return
uid = pw_entry.pw_uid
gid = pw_entry.pw_gid
home_dir = pw_entry.pw_dir
ssh_dir = os.path.join(home_dir, '.ssh')
# lines, google_lines, updated_keys and authorized_keys_file come from setup
# code that is not included in this excerpt.
for i, line in enumerate(lines):
if i not in google_lines and line:
line += '\n' if not line.endswith('\n') else ''
updated_keys.write(line)
for ssh_key in ssh_keys:
ssh_key += '\n' if not ssh_key.endswith('\n') else ''
updated_keys.write('%s\n' % self.google_comment)
updated_keys.write(ssh_key)
updated_keys.flush()
shutil.copy(updated_keys_file, authorized_keys_file)
file_utils.SetPermissions(
authorized_keys_file, mode=0o600, uid=uid, gid=gid) | Update the authorized keys file for a Linux user with a list of SSH keys.
Args:
user: string, the name of the Linux user account.
ssh_keys: list, the SSH key strings associated with the user.
Raises:
IOError, raised when there is an exception updating a file.
OSError, raised when setting permissions or writing to a read-only
file system. |
23,291 | def connect(self, server, port=6667):
self.socket.connect((server, port))
self.lines = self._read_lines()
for event_handler in list(self.on_connect):
event_handler(self) | Connects to a given IRC server. After the connection is established, it calls
the on_connect event handler. |
23,292 | def serialize(self, data, fmt='%10.7E'):  # default format string assumed
gmf_set_nodes = []
for gmf_set in data:
# node and attribute names below follow NRML conventions; exact literals assumed
gmf_set_node = Node('gmfSet')
if gmf_set.investigation_time:
gmf_set_node['investigationTime'] = str(
gmf_set.investigation_time)
gmf_set_node['stochasticEventSetId'] = str(
gmf_set.stochastic_event_set_id)
gmf_set_node.nodes = gen_gmfs(gmf_set)
gmf_set_nodes.append(gmf_set_node)
gmf_container = Node('gmfCollection')
gmf_container[SM_TREE_PATH] = self.sm_lt_path
gmf_container[GSIM_TREE_PATH] = self.gsim_lt_path
gmf_container.nodes = gmf_set_nodes
with open(self.dest, 'wb') as dest:
nrml.write([gmf_container], dest, fmt) | Serialize a collection of ground motion fields to XML.
:param data:
An iterable of "GMF set" objects.
Each "GMF set" object should:
* have an `investigation_time` attribute
* have an `stochastic_event_set_id` attribute
* be iterable, yielding a sequence of "GMF" objects
Each "GMF" object should:
* have an `imt` attribute
* have an `sa_period` attribute (only if `imt` is 'SA')
* have an `sa_damping` attribute (only if `imt` is 'SA')
* have a `event_id` attribute (to indicate which rupture
contributed to this gmf)
* be iterable, yielding a sequence of "GMF node" objects
Each "GMF node" object should have:
* a `gmv` attribute (to indicate the ground motion value
* `lon` and `lat` attributes (to indicate the geographical location
of the ground motion field) |
23,293 | def _read(cls, filepath_or_buffer, **kwargs):
try:
args, _, _, defaults, _, _, _ = inspect.getfullargspec(cls.read_csv)
defaults = dict(zip(args[2:], defaults))
filtered_kwargs = {
kw: kwargs[kw]
for kw in kwargs
if kw in defaults
and not isinstance(kwargs[kw], type(defaults[kw]))
or kwargs[kw] != defaults[kw]
}
except AttributeError:
filtered_kwargs = kwargs
if isinstance(filepath_or_buffer, str):
if not file_exists(filepath_or_buffer):
ErrorMessage.default_to_pandas("File path could not be resolved")
return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs)
elif not isinstance(filepath_or_buffer, py.path.local):
read_from_pandas = True
try:
import pathlib
if isinstance(filepath_or_buffer, pathlib.Path):
read_from_pandas = False
except ImportError:
pass
if read_from_pandas:
ErrorMessage.default_to_pandas("Reading from buffer.")
return cls._read_csv_from_pandas(filepath_or_buffer, kwargs)
if (
_infer_compression(filepath_or_buffer, kwargs.get("compression"))
is not None
):
ErrorMessage.default_to_pandas("Compression detected.")
return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs)
chunksize = kwargs.get("chunksize")
if chunksize is not None:
ErrorMessage.default_to_pandas("Reading chunks from a file.")
return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs)
skiprows = kwargs.get("skiprows")
if skiprows is not None and not isinstance(skiprows, int):
ErrorMessage.default_to_pandas("skiprows parameter not optimized yet.")
return cls._read_csv_from_pandas(filepath_or_buffer, kwargs)
if kwargs.get("nrows") is not None:
ErrorMessage.default_to_pandas("`read_csv` with `nrows`")
return cls._read_csv_from_pandas(filepath_or_buffer, filtered_kwargs)
else:
return cls._read_csv_from_file_pandas_on_ray(
filepath_or_buffer, filtered_kwargs
) | Read csv file from local disk.
Args:
filepath_or_buffer:
The filepath of the csv file.
We only support local files for now.
kwargs: Keyword arguments in pandas.read_csv |
23,294 | def cross(environment, book, row, sheet_source, column_source, column_key):
a = book.sheets[sheet_source]
return environment.copy(a.get(**{column_key: row[column_key]})[column_source]) | Returns a single value from a column from a different dataset, matching by the key. |
23,295 | def _read_columns_file(f):
try:
columns = json.loads(open(f, 'r').read(),
object_pairs_hook=collections.OrderedDict)
except Exception as err:
raise InvalidColumnsFileError(
"There was an error while reading {0}: {1}".format(f, err))
if '__comment__' in columns:  # comment key name assumed; original literal lost
del columns['__comment__']
return columns | Return the list of column queries read from the given JSON file.
:param f: path to the file to read
:type f: string
:rtype: list of dicts |
23,296 | def flatten_(structure):
if isinstance(structure, dict):
if structure:
structure = zip(*sorted(structure.items(), key=lambda x: x[0]))[1]
else:
structure = ()
if isinstance(structure, (tuple, list)):
result = []
for element in structure:
result += flatten_(element)
return tuple(result)
return (structure,) | Combine all leaves of a nested structure into a tuple.
The nested structure can consist of any combination of tuples, lists, and
dicts. Dictionary keys will be discarded but values will ordered by the
sorting of the keys.
Args:
structure: Nested structure.
Returns:
Flat tuple. |
23,297 | def is_os(name, version_id=None):
result = False
os_release_infos = _fetch_os_release_infos()
if name == os_release_infos.get('name', None):
if version_id is None:
result = True
elif version_id == os_release_infos.get('version_id', None):
result = True
return result | Return True if OS name in /etc/lsb-release of host given by fabric param
`-H` is the same as given by argument, False otherwise.
If arg version_id is not None only return True if it is the same as in
/etc/lsb-release, too.
Args:
name: 'Debian GNU/Linux', 'Ubuntu'
version_id(None or str): None,
'14.04', (Ubuntu)
'16.04', (Ubuntu)
'8', (Debian) |
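``_fetch_os_release_infos`` is referenced but not shown; a plausible local sketch would parse an os-release style file into the lowercase-keyed dict the check above expects. The key names and the local-file reading are assumptions, since the real helper reads from the host selected with fabric's ``-H``:

.. code-block:: python

    def _fetch_os_release_infos(path='/etc/os-release'):
        infos = {}
        with open(path) as fh:
            for line in fh:
                line = line.strip()
                if not line or '=' not in line:
                    continue
                key, _, value = line.partition('=')
                infos[key.lower()] = value.strip('"')
        return infos

    # e.g. {'name': 'Ubuntu', 'version_id': '16.04', ...}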
23,298 | def set_chebyshev_approximators(self, deg_forward=50, deg_backwards=200):
from fluids.optional.pychebfun import Chebfun
to_fit = lambda h: self.V_from_h(h, 'full')  # method name assumed
self.c_backward = Chebfun.from_function(np.vectorize(to_fit), [0.0, self.V_total], N=deg_backwards).coefficients().tolist()
self.h_from_V_cheb = lambda x : chebval((2.0*x-self.V_total)/(self.V_total), self.c_backward)
self.chebyshev = True | r'''Method to derive and set coefficients for chebyshev polynomial
function approximation of the height-volume and volume-height
relationship.
A single set of chebyshev coefficients is used for the entire height-
volume and volume-height relationships respectively.
The forward relationship, `V_from_h`, requires
far fewer coefficients in its fit than the reverse to obtain the same
relative accuracy.
Optionally, deg_forward or deg_backwards can be set to None to try to
automatically fit the series to machine precision.
Parameters
----------
deg_forward : int, optional
The degree of the chebyshev polynomial to be created for the
`V_from_h` curve, [-]
deg_backwards : int, optional
The degree of the chebyshev polynomial to be created for the
`h_from_V` curve, [-] |
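A standalone numpy-only illustration of the same idea: fit a Chebyshev series to the inverse of a monotonic h-V curve on [0, V_total] and evaluate it the way ``h_from_V_cheb`` does, by mapping V onto [-1, 1] before ``chebval``. The toy V(h) relationship is an assumption, not a tank geometry.

.. code-block:: python

    import numpy as np
    from numpy.polynomial.chebyshev import chebfit, chebval

    h_max, V_total = 2.0, 10.0
    V_from_h = lambda h: V_total * (h / h_max) ** 1.5     # toy monotonic tank curve

    h_samples = np.linspace(0.0, h_max, 400)
    V_samples = V_from_h(h_samples)                       # forward curve samples
    x = (2.0 * V_samples - V_total) / V_total             # map [0, V_total] -> [-1, 1]
    coeffs = chebfit(x, h_samples, 50)                    # backward (h from V) fit

    h_from_V = lambda V: chebval((2.0 * V - V_total) / V_total, coeffs)
    print(h_from_V(5.0), h_max * (5.0 / V_total) ** (2.0 / 3.0))  # approximation vs exact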
23,299 | def _connected(self, link_uri):
print('Connected to %s' % link_uri)
p_toc = self._cf.param.toc.toc
for group in sorted(p_toc.keys()):
print('{}'.format(group))
for param in sorted(p_toc[group].keys()):
print('\t{}'.format(param))
self._param_check_list.append('{0}.{1}'.format(group, param))
self._param_groups.append('{}'.format(group))
self._cf.param.add_update_callback(group=group, name=None,
cb=self._param_callback)
self._cf.param.add_update_callback(group='cpu', name='flash',  # parameter group/name assumed
cb=self._cpu_flash_callback)
print() | This callback is called from the Crazyflie API when a Crazyflie
has been connected and the TOCs have been downloaded. |