Unnamed: 0 (int64, 0–389k) | code (string, lengths 26–79.6k) | docstring (string, lengths 1–46.9k) |
---|---|---|
26,300 |
def dihedral(x, dih):
x = np.rot90(x, dih%4)
return x if dih<4 else np.fliplr(x)
|
Perform any of the 8 dihedral permutations (90-degree rotations and flips) of image x.
|
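A minimal runnable sketch of the function above, enumerating all 8 dihedral variants of a small NumPy array; the sample array and prints are illustrative additions.
import numpy as np

def dihedral(x, dih):
    x = np.rot90(x, dih % 4)
    return x if dih < 4 else np.fliplr(x)

img = np.arange(6).reshape(2, 3)
variants = [dihedral(img, d) for d in range(8)]   # 4 rotations + 4 flipped rotations
for d, v in enumerate(variants):
    print(d, v.shape)   # shapes alternate between (2, 3) and (3, 2)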
26,301 |
def get_bounding_box(self):
pointlist = self.get_pointlist()
minx, maxx = pointlist[0][0]["x"], pointlist[0][0]["x"]
miny, maxy = pointlist[0][0]["y"], pointlist[0][0]["y"]
mint, maxt = pointlist[0][0]["time"], pointlist[0][0]["time"]
for stroke in pointlist:
for p in stroke:
minx, maxx = min(minx, p["x"]), max(maxx, p["x"])
miny, maxy = min(miny, p["y"]), max(maxy, p["y"])
mint, maxt = min(mint, p["time"]), max(maxt, p["time"])
return {"minx": minx, "maxx": maxx, "miny": miny, "maxy": maxy,
"mint": mint, "maxt": maxt}
|
Get the bounding box of a pointlist.
|
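A standalone sketch of the same min/max scan, using a hypothetical pointlist of strokes of {"x", "y", "time"} dicts (the input data is an assumption for illustration).
def bounding_box(pointlist):
    first = pointlist[0][0]
    box = {"minx": first["x"], "maxx": first["x"],
           "miny": first["y"], "maxy": first["y"],
           "mint": first["time"], "maxt": first["time"]}
    for stroke in pointlist:
        for p in stroke:
            box["minx"], box["maxx"] = min(box["minx"], p["x"]), max(box["maxx"], p["x"])
            box["miny"], box["maxy"] = min(box["miny"], p["y"]), max(box["maxy"], p["y"])
            box["mint"], box["maxt"] = min(box["mint"], p["time"]), max(box["maxt"], p["time"])
    return box

print(bounding_box([[{"x": 1, "y": 2, "time": 0}, {"x": 4, "y": -1, "time": 3}]]))
# {'minx': 1, 'maxx': 4, 'miny': -1, 'maxy': 2, 'mint': 0, 'maxt': 3}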
26,302 |
def box_model_domain(num_points=2, **kwargs):
ax = Axis(axis_type='abstract', num_points=num_points)
boxes = _Domain(axes=ax, **kwargs)
boxes.domain_type = 'box'
return boxes
|
Creates a box model domain (a single abstract axis).
:param int num_points: number of boxes [default: 2]
:returns: Domain with single axis of type ``'abstract'``
and ``self.domain_type = 'box'``
:rtype: :class:`_Domain`
:Example:
::
>>> from climlab import domain
>>> box = domain.box_model_domain(num_points=2)
>>> print box
climlab Domain object with domain_type=box and shape=(2,)
|
26,303 |
def get_key_from_envs(envs, key):
# treat a single dict as a one-element list of dicts
if hasattr(envs, 'keys'):
envs = [envs]
for env in envs:
if key in env:
return env[key]
return NO_VALUE
|
Return the value of a key from the given dict respecting namespaces.
Data can also be a list of data dicts.
|
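A self-contained sketch of the lookup above; the 'keys' attribute test used to tell a single dict from a list of dicts is an inferred choice (any dict-only attribute would do).
NO_VALUE = object()

def get_key_from_envs(envs, key):
    # Accept either one dict or a list of dicts; the first hit wins.
    if hasattr(envs, 'keys'):
        envs = [envs]
    for env in envs:
        if key in env:
            return env[key]
    return NO_VALUE

print(get_key_from_envs([{'a': 1}, {'a': 2, 'b': 3}], 'b'))  # 3
print(get_key_from_envs({'a': 1}, 'missing') is NO_VALUE)    # True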
26,304 |
def vcenter_discovery_ignore_delete_all_response_ignore_value(self, **kwargs):
config = ET.Element("config")
vcenter = ET.SubElement(config, "vcenter", xmlns="urn:brocade.com:mgmt:brocade-vswitch")
id_key = ET.SubElement(vcenter, "id")
id_key.text = kwargs.pop('id')
discovery = ET.SubElement(vcenter, "discovery")
ignore_delete_all_response = ET.SubElement(discovery, "ignore-delete-all-response")
ignore_value = ET.SubElement(ignore_delete_all_response, "ignore-value")
ignore_value.text = kwargs.pop('ignore_value')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
26,305 |
def htmlTable(tableData, reads1, reads2, square, matchAmbiguous, colors,
concise=False, showLengths=False, showGaps=False, showNs=False,
footer=False, div=False, gapChars='-'):
readLengths1 = getReadLengths(reads1.values(), gapChars)
readLengths2 = getReadLengths(reads2.values(), gapChars)
result = []
append = result.append
def writeHeader():
append()
append()
for read2 in reads2.values():
append( %
read2.id)
if showLengths and not square:
append( % readLengths2[read2.id])
if showGaps and not square:
append( % (len(read2) - readLengths2[read2.id]))
if showNs and not square:
append( % read2.sequence.count())
append()
append()
if div:
append()
else:
append()
append()
append()
append()
append()
append()
append()
append()
for threshold, color in colors:
append( % (
thresholdToCssName(threshold), color))
append()
if not div:
append(explanation(
matchAmbiguous, concise, showLengths, showGaps, showNs))
append()
append()
append()
bestIdentityForId = {}
for id1, read1 in reads1.items():
read1Len = readLengths1[id1]
bestIdentity = -1.0
for id2, read2 in reads2.items():
if id1 != id2 or not square:
stats = tableData[id1][id2]
identity = (
stats[] +
(stats[] if matchAmbiguous else 0)
) / read1Len
if identity > bestIdentity:
bestIdentity = identity
bestIdentityForId[id1] = bestIdentity
writeHeader()
for id1, read1 in reads1.items():
read1Len = readLengths1[id1]
append()
append( % id1)
if showLengths:
append( % read1Len)
if showGaps:
append( % (len(read1) - read1Len))
if showNs:
append( % read1.sequence.count())
append()
for id2, read2 in reads2.items():
if id1 == id2 and square:
append()
continue
stats = tableData[id1][id2]
identity = (
stats[] +
(stats[] if matchAmbiguous else 0)
) / read1Len
append( % thresholdToCssName(
thresholdForIdentity(identity, colors)))
if identity == bestIdentityForId[id1]:
scoreStyle =
else:
scoreStyle =
append( % (scoreStyle, identity))
if not concise:
append( % stats[])
if matchAmbiguous:
append( % stats[])
append(
%
(stats[],
stats[],
stats[]))
append()
append()
if footer:
writeHeader()
append()
append()
append()
if div:
append()
else:
append()
append()
return .join(result)
|
Make an HTML table showing inter-sequence distances.
@param tableData: A C{defaultdict(dict)} keyed by read ids, whose values
are the dictionaries returned by compareDNAReads.
@param reads1: An C{OrderedDict} of C{str} read ids whose values are
C{Read} instances. These will be the rows of the table.
@param reads2: An C{OrderedDict} of C{str} read ids whose values are
C{Read} instances. These will be the columns of the table.
@param square: If C{True} we are making a square table of a set of
sequences against themselves (in which case we show nothing on the
diagonal).
@param matchAmbiguous: If C{True}, count ambiguous nucleotides that are
possibly correct as actually being correct. Otherwise, we are strict
and insist that only non-ambiguous nucleotides can contribute to the
matching nucleotide count.
@param colors: A C{list} of (threshold, color) tuples, where threshold is a
C{float} and color is a C{str} to be used as a cell background. This
is as returned by C{parseColors}.
@param concise: If C{True}, do not show match details.
@param showLengths: If C{True}, include the lengths of sequences.
@param showGaps: If C{True}, include the number of gaps in sequences.
@param showNs: If C{True}, include the number of N characters in
sequences.
@param footer: If C{True}, include a footer row giving the same information
as found in the table header.
@param div: If C{True}, return an HTML <div> fragment only, not a full HTML
document.
@param gapChars: A C{str} of sequence characters considered to be gaps.
@return: An HTML C{str} showing inter-sequence distances.
|
26,306 |
def output_dict(self):
if self._output_dict is None:
self._output_dict = Executor._get_dict(
self._symbol.list_outputs(), self.outputs)
return self._output_dict
|
Get dictionary representation of output arrays.
Returns
-------
output_dict : dict of str to NDArray
The dictionary that maps output names to NDArrays.
Raises
------
ValueError : if there are duplicated names in the outputs.
|
26,307 |
def monitor(self):
time.sleep(self._settings.max_latency)
_LOGGER.debug("Monitor is waking up")
return self._commit()
|
Commit this batch after sufficient time has elapsed.
This simply sleeps for ``self._settings.max_latency`` seconds,
and then calls commit unless the batch has already been committed.
|
26,308 |
def get_file_range(ase, offsets, timeout=None):
dir, fpath, _ = parse_file_path(ase.name)
return ase.client._get_file(
share_name=ase.container,
directory_name=dir,
file_name=fpath,
start_range=offsets.range_start,
end_range=offsets.range_end,
validate_content=False,
timeout=timeout,
snapshot=ase.snapshot,
).content
|
Retrieve file range
:param blobxfer.models.azure.StorageEntity ase: Azure StorageEntity
:param blobxfer.models.download.Offsets offsets: download offsets
:param int timeout: timeout
:rtype: bytes
:return: content for file range
|
26,309 |
def from_image(cls, image, partname):
return ImagePart(partname, image.content_type, image.blob, image)
|
Return an |ImagePart| instance newly created from *image* and
assigned *partname*.
|
26,310 |
def checkPidFile(pidfile):
if os.path.exists(pidfile):
try:
with open(pidfile) as f:
pid = int(f.read())
except ValueError:
raise ValueError("Pidfile {} contains a non-numeric value".format(pidfile))
try:
os.kill(pid, 0)
except OSError as why:
if why.errno == errno.ESRCH:
# The pid in the pidfile does not correspond to a running process.
raise ValueError("pid {} from pidfile {} is not running: {}".format(
pid, pidfile, why))
else:
raise BusyError("'{}' exists - is this master still running?".format(pidfile))
|
mostly comes from _twistd_unix.py which is not twisted public API :-/
except it returns an exception instead of exiting
|
26,311 |
def privmsg(self, target, message, nowait=False):
if message:
messages = utils.split_message(message, self.config.max_length)
if isinstance(target, DCCChat):
for message in messages:
target.send_line(message)
elif target:
f = None
for message in messages:
f = self.send_line('PRIVMSG %s :%s' % (target, message),
nowait=nowait)
return f
|
send a privmsg to target
|
26,312 |
def get_template_setting(template_key, default=None):
templates_var = getattr(settings, 'TEMPLATES', None)
if templates_var:
for tdict in templates_var:
if template_key in tdict:
return tdict[template_key]
return default
|
Read template settings
|
26,313 |
def export(self):
minor_status = ffi.new('OM_uint32[1]')
output_buffer = ffi.new('gss_buffer_desc[1]')
retval = C.gss_export_name(
minor_status,
self._name[0],
output_buffer
)
try:
if GSS_ERROR(retval):
if minor_status[0] and self._mech_type:
raise _exception_for_status(retval, minor_status[0], self._mech_type)
else:
raise _exception_for_status(retval, minor_status[0])
return _buf_to_str(output_buffer[0])
finally:
if output_buffer[0].length != 0:
C.gss_release_buffer(minor_status, output_buffer)
|
Returns a representation of the Mechanism Name which is suitable for direct string
comparison against other exported Mechanism Names. Its form is defined in the GSSAPI
specification (RFC 2743). It can also be re-imported by constructing a :class:`Name` with
the `name_type` param set to :const:`gssapi.C_NT_EXPORT_NAME`.
:returns: an exported bytestring representation of this mechanism name
:rtype: bytes
|
26,314 |
def check_marginal_likelihoods(tree, feature):
lh_feature = get_personalized_feature_name(feature, LH)
lh_sf_feature = get_personalized_feature_name(feature, LH_SF)
for node in tree.traverse():
if not node.is_root() and not (node.is_leaf() and node.dist == 0):
node_loglh = np.log10(getattr(node, lh_feature).sum()) - getattr(node, lh_sf_feature)
parent_loglh = np.log10(getattr(node.up, lh_feature).sum()) - getattr(node.up, lh_sf_feature)
assert (round(node_loglh, 2) == round(parent_loglh, 2))
|
Sanity check: combined bottom-up and top-down likelihood of each node of the tree must be the same.
:param tree: ete3.Tree, the tree of interest
:param feature: str, character for which the likelihood is calculated
:return: void, checks the node marginal likelihoods stored in the get_personalized_feature_name(feature, LH) feature.
|
26,315 |
def play_beat(
self,
frequencys,
play_time,
sample_rate=44100,
volume=0.01
):
audio = pyaudio.PyAudio()
stream = audio.open(
format=pyaudio.paFloat32,
channels=2,
rate=sample_rate,
output=1
)
left_frequency, right_frequency = frequencys
left_chunk = self.__create_chunk(left_frequency, play_time, sample_rate)
right_chunk = self.__create_chunk(right_frequency, play_time, sample_rate)
self.write_stream(stream, left_chunk, right_chunk, volume)
stream.stop_stream()
stream.close()
audio.terminate()
|
Play a beat under the conditions given by the arguments.
Args:
frequencys: tuple of (left frequency in Hz, right frequency in Hz)
play_time: playback time in seconds
sample_rate: sample rate
volume: volume
Returns:
void
|
26,316 |
def combination_step(self):
tprv = self.t
self.t = 0.5 * float(1. + np.sqrt(1. + 4. * tprv**2))
if not self.opt['FastSolve']:
self.Yfprv = self.Yf.copy()
self.Yf = self.Xf + ((tprv - 1.) / self.t) * (self.Xf - self.Xfprv)
|
Update auxiliary state by a smart combination of previous
updates in the frequency domain (standard FISTA
:cite:`beck-2009-fast`).
|
26,317 |
def agg_grid(grid, agg=None):
grid = deepcopy(grid)
if agg is None:
if type(grid[0][0]) is list and type(grid[0][0][0]) is str:
agg = string_avg
else:
agg = mode
for i in range(len(grid)):
for j in range(len(grid[i])):
grid[i][j] = agg(grid[i][j])
return grid
|
Many functions return a 2d list with a complex data type in each cell.
For instance, grids representing environments have a set of resources,
while reading in multiple data files at once will yield a list
containing the values for that cell from each file. In order to visualize
these data types it is helpful to summarize the more complex data types
with a single number. For instance, you might want to take the length
of a resource set to see how many resource types are present. Alternately,
you might want to take the mode of a list to see the most common phenotype
in a cell.
This function facilitates this analysis by calling the given aggregation
function (agg) on each cell of the given grid and returning the result.
agg - A function indicating how to summarize grid contents. Default: mode (string_avg for grids of strings).
|
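A standalone usage sketch of the aggregation idea above; since string_avg and mode are not shown here, the default is pinned to len purely for illustration (e.g. counting resource types per cell).
from copy import deepcopy

def agg_grid(grid, agg=len):
    grid = deepcopy(grid)
    for i in range(len(grid)):
        for j in range(len(grid[i])):
            grid[i][j] = agg(grid[i][j])
    return grid

grid = [[{"res_a"}, {"res_a", "res_b"}],
        [set(),     {"res_c"}]]
print(agg_grid(grid))  # [[1, 2], [0, 1]]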
26,318 |
def decompose_code(code):
code = "%-6s" % code
ind1 = code[3:4]
if ind1 == " ": ind1 = "_"
ind2 = code[4:5]
if ind2 == " ": ind2 = "_"
subcode = code[5:6]
if subcode == " ": subcode = None
return (code[0:3], ind1, ind2, subcode)
|
Decomposes a MARC "code" into tag, ind1, ind2, subcode
|
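A worked example of the decomposition above (function reproduced verbatim): the code is padded to six characters, blank indicators become "_", and a blank subfield code becomes None.
def decompose_code(code):
    code = "%-6s" % code
    ind1 = code[3:4]
    if ind1 == " ":
        ind1 = "_"
    ind2 = code[4:5]
    if ind2 == " ":
        ind2 = "_"
    subcode = code[5:6]
    if subcode == " ":
        subcode = None
    return (code[0:3], ind1, ind2, subcode)

print(decompose_code("245 1a"))  # ('245', '_', '1', 'a')
print(decompose_code("100"))     # ('100', '_', '_', None)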
26,319 |
def get_mapping(self, meta_fields=True):
return {'properties': dict((name, field.json()) for name, field in iteritems(self.fields) if meta_fields or name not in AbstractField.meta_fields)}
|
Returns the mapping for the index as a dictionary.
:param meta_fields: Also include elasticsearch meta fields in the dictionary.
:return: a dictionary which can be used to generate the elasticsearch index mapping for this doctype.
|
26,320 |
def normalize_variable_name(node, reachability_tester):
node_type = NodeType(node['type'])
if not is_analyzable_identifier(node):
return None
if node_type is NodeType.IDENTIFIER:
return _normalize_identifier_value(node, reachability_tester)
if node_type in IdentifierLikeNodeTypes:
return node['value']
|
Returns normalized variable name.
Normalizing means that variable names get explicit visibility by
visibility prefix such as: "g:", "s:", ...
Returns None if the specified node is unanalyzable.
A node is unanalyzable if:
- the node is not identifier-like
- the node is named dynamically
|
26,321 |
def _get_container(self, path):
cont = self.native_conn.get_container(path)
return self.cont_cls(self,
cont.name,
cont.object_count,
cont.size_used)
|
Return single container.
|
26,322 |
def execute(self):
with SMCSocketProtocol(self, **self.sockopt) as protocol:
for result in protocol.receive():
yield result
|
Execute the query with optional timeout. The response to the execute
query is the raw payload received from the websocket and will contain
multiple dict keys and values. It is more common to call query.fetch_XXX
which will filter the return result based on the method. Each result set
will have a max batch size of 200 records. This method will also
continuously return results until terminated. To make a single bounded
fetch, call :meth:`.fetch_batch` or :meth:`.fetch_raw`.
:param int sock_timeout: event loop interval
:return: raw dict returned from query
:rtype: dict(list)
|
26,323 |
def http_basic_auth_group_member_required(groups):
if isinstance(groups, six.string_types):
return auth.set_authentication_predicate(http_basic_auth_check_user, [auth.user_in_group, groups])
else:
return auth.set_authentication_predicate(http_basic_auth_check_user, [auth.user_in_any_group, groups])
|
Decorator. Use it to specify that an RPC method is available only to logged-in users belonging to the given group(s)
|
26,324 |
def get_neighbor_out_filter(neigh_ip_address):
core = CORE_MANAGER.get_core_service()
ret = core.peer_manager.get_by_addr(neigh_ip_address).out_filters
return ret
|
Returns a neighbor out_filter for given ip address if exists.
|
26,325 |
def multi_index(idx, dim):
def _rec(idx, dim):
idxn = idxm = 0
if not dim:
return ()
if idx == 0:
return (0, )*dim
while terms(idxn, dim) <= idx:
idxn += 1
idx -= terms(idxn-1, dim)
if idx == 0:
return (idxn,) + (0,)*(dim-1)
while terms(idxm, dim-1) <= idx:
idxm += 1
return (int(idxn-idxm),) + _rec(idx, dim-1)
return _rec(idx, dim)
|
Single to multi-index using graded reverse lexicographical notation.
Parameters
----------
idx : int
Index in integer notation
dim : int
The number of dimensions in the multi-index notation
Returns
-------
out : tuple
Multi-index of `idx` with `len(out)=dim`
Examples
--------
>>> for idx in range(5):
... print(chaospy.bertran.multi_index(idx, 3))
(0, 0, 0)
(1, 0, 0)
(0, 1, 0)
(0, 0, 1)
(2, 0, 0)
See Also
--------
single_index
|
26,326 |
def update_event(self, event, action, flags):
if action == PYCBC_EVACTION_UNWATCH:
if event.flags & LCB_READ_EVENT:
self.reactor.removeReader(event)
if event.flags & LCB_WRITE_EVENT:
self.reactor.removeWriter(event)
elif action == PYCBC_EVACTION_WATCH:
if flags & LCB_READ_EVENT:
self.reactor.addReader(event)
if flags & LCB_WRITE_EVENT:
self.reactor.addWriter(event)
if flags & LCB_READ_EVENT == 0:
self.reactor.removeReader(event)
if flags & LCB_WRITE_EVENT == 0:
self.reactor.removeWriter(event)
|
Called by libcouchbase to add/remove event watchers
|
26,327 |
def plot_reliability_diagram(confidence, labels, filepath):
assert len(confidence.shape) == 2
assert len(labels.shape) == 1
assert confidence.shape[0] == labels.shape[0]
print( + str(filepath))
if confidence.max() <= 1.:
bins_start = [b / 10. for b in xrange(0, 10)]
bins_end = [b / 10. for b in xrange(1, 11)]
bins_center = [(b + .5) / 10. for b in xrange(0, 10)]
preds_conf = np.max(confidence, axis=1)
preds_l = np.argmax(confidence, axis=1)
else:
raise ValueError()
print(preds_conf.shape, preds_l.shape)
reliability_diag = []
num_points = []
for bin_start, bin_end in zip(bins_start, bins_end):
above = preds_conf >= bin_start
if bin_end == 1.:
below = preds_conf <= bin_end
else:
below = preds_conf < bin_end
mask = np.multiply(above, below)
num_points.append(np.sum(mask))
bin_mean_acc = max(0, np.mean(preds_l[mask] == labels[mask]))
reliability_diag.append(bin_mean_acc)
assert len(reliability_diag) == len(bins_center)
print(reliability_diag)
print(bins_center)
print(num_points)
fig, ax1 = plt.subplots()
_ = ax1.bar(bins_center, reliability_diag, width=.1, alpha=0.8)
plt.xlim([0, 1.])
ax1.set_ylim([0, 1.])
ax2 = ax1.twinx()
print(sum(num_points))
ax2.plot(bins_center, num_points, color=, linestyle=, linewidth=7.0)
ax2.set_ylabel(, fontsize=16, color=)
if len(np.argwhere(confidence[0] != 0.)) == 1:
ax1.set_xlabel(, fontsize=16)
else:
ax1.set_xlabel(, fontsize=16)
ax1.set_ylabel(, fontsize=16)
ax1.tick_params(axis=, labelsize=14)
ax2.tick_params(axis=, labelsize=14, colors=)
fig.tight_layout()
plt.savefig(filepath, bbox_inches=)
|
Takes in confidence values for predictions and correct
labels for the data, plots a reliability diagram.
:param confidence: nb_samples x nb_classes (e.g., output of softmax)
:param labels: vector of nb_samples
:param filepath: where to save the diagram
:return:
|
26,328 |
def password_credentials(self, username, password, **kwargs):
return self._token_request(grant_type='password',
username=username, password=password,
**kwargs)
|
Retrieve access token by 'password credentials' grant.
https://tools.ietf.org/html/rfc6749#section-4.3
:param str username: The user name to obtain an access token for
:param str password: The user's password
:rtype: dict
:return: Access token response
|
26,329 |
def tile_2d(physical_shape, tile_shape,
outer_name="outer",
inner_name="inner",
cores_name=None):
logical_to_physical = []
p0, p1, p2 = physical_shape
t0, t1 = tile_shape
tile_ring = _ring_2d(t0, t1)
tiles_ring = _ring_2d(p0 // t0, p1 // t1)
for logical_pnum in range(p0 * p1 * p2):
core_on_chip = logical_pnum % p2
logical_chip_num = logical_pnum // p2
logical_pos_in_tile = logical_chip_num % (t0 * t1)
logical_tile_num = logical_chip_num // (t0 * t1)
tile_i, tile_j = tile_ring[logical_pos_in_tile]
tiles_i, tiles_j = tiles_ring[logical_tile_num]
physical_pnum = core_on_chip + p2 * (
tile_i * p1 + tile_j +
tiles_i * p1 * t0 + tiles_j * t1)
logical_to_physical.append(physical_pnum)
assert sorted(logical_to_physical) == list(range(p0 * p1 * p2))
tile_size = t0 * t1 * p2
num_tiles = p0 * p1 // (t0 * t1)
if cores_name:
mesh_shape = mtf.Shape(
[mtf.Dimension(outer_name, int(num_tiles)),
mtf.Dimension(inner_name, int(t0 * t1)),
mtf.Dimension(cores_name, int(p2))])
else:
mesh_shape = mtf.Shape(
[mtf.Dimension(outer_name, int(num_tiles)),
mtf.Dimension(inner_name, int(tile_size))])
return mesh_shape, logical_to_physical
|
2D tiling of a 3d physical mesh.
The "outer" mesh dimension corresponds to which tile.
The "inner" mesh dimension corresponds to the position within a tile
of processors.
Optionally, if cores_name is specified, then a 3 dimensional logical mesh
is returned, with the third dimension representing the two different
cores within a chip. If cores_name is not specified, then the
cores-in-a-chip dimension is folded into the inner dimension.
TODO(noam): explain this better.
Example:
tile_2d(physical_shape=[8, 16, 2], tile_shape=[4, 4])
The "inner" dimension has size 4x4x2=32 and corresponds to the position
within a 4x4 tile of processors.
The "outer" dimension has size 8/4 * 16/4 = 8, and corresponds to the 8
tiles in the mesh.
Args:
physical_shape: a triple of integers [X, Y, cores]
tile_shape: a pair
outer_name: a string
inner_name: a string
cores_name: an optional string
Returns:
mesh_shape: a mtf.Shape
logical_to_physical: a list
|
26,330 |
def _Members(self, group):
group.members = set(group.members).union(self.gids.get(group.gid, []))
return group
|
Unify members of a group and accounts with the group as primary gid.
|
26,331 |
def _bfgs_inv_hessian_update(grad_delta, position_delta, normalization_factor,
inv_hessian_estimate):
conditioned_grad_delta = _mul_right(inv_hessian_estimate, grad_delta)
conditioned_grad_delta_norm = tf.reduce_sum(
input_tensor=conditioned_grad_delta * grad_delta, axis=-1)
cross_term = _tensor_product(position_delta, conditioned_grad_delta)
def _expand_scalar(s):
return s[..., tf.newaxis, tf.newaxis]
cross_term += _tensor_product(conditioned_grad_delta, position_delta)
position_term = _tensor_product(position_delta, position_delta)
with tf.control_dependencies([position_term]):
position_term *= _expand_scalar(
1 + conditioned_grad_delta_norm / normalization_factor)
return (inv_hessian_estimate +
(position_term - cross_term) / _expand_scalar(normalization_factor))
|
Applies the BFGS update to the inverse Hessian estimate.
The BFGS update rule is (note A^T denotes the transpose of a vector/matrix A).
```None
rho = 1/(grad_delta^T * position_delta)
U = (I - rho * position_delta * grad_delta^T)
H_1 = U * H_0 * U^T + rho * position_delta * position_delta^T
```
Here, `H_0` is the inverse Hessian estimate at the previous iteration and
`H_1` is the next estimate. Note that `*` should be interpreted as the
matrix multiplication (with the understanding that matrix multiplication for
scalars is usual multiplication and for matrix with vector is the action of
the matrix on the vector.).
The implementation below utilizes an expanded version of the above formula
to avoid the matrix multiplications that would be needed otherwise. By
expansion it is easy to see that one only needs matrix-vector or
vector-vector operations. The expanded version is:
```None
f = 1 + rho * (grad_delta^T * H_0 * grad_delta)
H_1 - H_0 = - rho * [position_delta * (H_0 * grad_delta)^T +
(H_0 * grad_delta) * position_delta^T] +
rho * f * [position_delta * position_delta^T]
```
All the terms in square brackets are matrices and are constructed using
vector outer products. All the other terms on the right hand side are scalars.
Also worth noting that the first and second lines are both rank 1 updates
applied to the current inverse Hessian estimate.
Args:
grad_delta: Real `Tensor` of shape `[..., n]`. The difference between the
gradient at the new position and the old position.
position_delta: Real `Tensor` of shape `[..., n]`. The change in position
from the previous iteration to the current one.
normalization_factor: Real `Tensor` of shape `[...]`. Should be equal to
`grad_delta^T * position_delta`, i.e. `1/rho` as defined above.
inv_hessian_estimate: Real `Tensor` of shape `[..., n, n]`. The previous
estimate of the inverse Hessian. Should be positive definite and
symmetric.
Returns:
A tuple containing the following fields
is_valid: A Boolean `Tensor` of shape `[...]` indicating batch members
where the update succeeded. The update can fail if the position change
becomes orthogonal to the gradient change.
next_inv_hessian_estimate: A `Tensor` of shape `[..., n, n]`. The next
Hessian estimate updated using the BFGS update scheme. If the
`inv_hessian_estimate` is symmetric and positive definite, the
`next_inv_hessian_estimate` is guaranteed to satisfy the same
conditions.
|
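A small NumPy sketch of the expanded single-instance update described above (not the batched TensorFlow implementation); the random test vectors and identity H_0 are assumptions, and the result is checked against the textbook form H_1 = U H_0 U^T + rho s s^T.
import numpy as np

def bfgs_inv_hessian_update(grad_delta, position_delta, normalization_factor, h0):
    # Expanded form: only matrix-vector products and outer products are needed.
    rho = 1.0 / normalization_factor                      # 1 / (grad_delta^T position_delta)
    hy = h0 @ grad_delta                                  # H_0 * grad_delta
    f = 1.0 + rho * (grad_delta @ hy)
    cross = np.outer(position_delta, hy) + np.outer(hy, position_delta)
    return h0 + rho * (f * np.outer(position_delta, position_delta) - cross)

rng = np.random.default_rng(0)
n = 4
s = rng.normal(size=n)          # position_delta
y = rng.normal(size=n)          # grad_delta
h0 = np.eye(n)
rho = 1.0 / (y @ s)
u = np.eye(n) - rho * np.outer(s, y)
reference = u @ h0 @ u.T + rho * np.outer(s, s)           # textbook BFGS update
assert np.allclose(bfgs_inv_hessian_update(y, s, y @ s, h0), reference)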
26,332 |
def from_json(cls, json):
result = super(_ReducerReader, cls).from_json(json)
result.current_key = _ReducerReader.decode_data(json["current_key"])
result.current_values = _ReducerReader.decode_data(json["current_values"])
return result
|
Creates an instance of the InputReader for the given input shard state.
Args:
json: The InputReader state as a dict-like object.
Returns:
An instance of the InputReader configured using the values of json.
|
26,333 |
def print_progress_bar(self, task):
str_format = "{0:." + str(task.decimals) + "f}"
percents = str_format.format(100 * (task.progress / float(task.total)))
filled_length = int(round(task.bar_length * task.progress / float(task.total)))
bar = * filled_length + * (task.bar_length - filled_length)
elapsed_time = None
if task.display_time:
if task.end_time:
if not task.elapsed_time_at_end:
task.elapsed_time_at_end = self.millis_to_human_readable(task.end_time - task.begin_time)
elapsed_time = task.elapsed_time_at_end
else:
if not task.begin_time:
task.begin_time = millis()
elapsed_time = self.millis_to_human_readable(millis() - task.begin_time)
prefix_pattern = .format(self.longest_bar_prefix_size)
time_container_pattern = if task.display_time and not task.end_time else
if len(task.suffix) > 0 and task.display_time:
sys.stdout.write(.format(prefix_pattern, time_container_pattern)
% (task.prefix, bar, percents, elapsed_time, task.suffix))
elif len(task.suffix) > 0 and not task.display_time:
sys.stdout.write(.format(prefix_pattern)
% (task.prefix, bar, percents, task.suffix))
elif task.display_time and not len(task.suffix) > 0:
sys.stdout.write(.format(prefix_pattern, time_container_pattern)
% (task.prefix, bar, percents, elapsed_time))
else:
sys.stdout.write(.format(prefix_pattern)
% (task.prefix, bar, percents))
sys.stdout.write()
sys.stdout.flush()
|
Draws a progress bar on screen based on the given information using standard output (stdout).
:param task: TaskProgress object containing all required information to draw a progress bar at the given state.
|
26,334 |
def _waitFor(self, timeout, notification, **kwargs):
callback = self._matchOther
retelem = None
callbackArgs = None
callbackKwargs = None
if 'callback' in kwargs:
callback = kwargs['callback']
del kwargs['callback']
if 'args' in kwargs:
if not isinstance(kwargs['args'], tuple):
errStr = 'callback args must be given as a tuple'
raise TypeError(errStr)
callbackArgs = kwargs['args']
del kwargs['args']
if 'kwargs' in kwargs:
if not isinstance(kwargs['kwargs'], dict):
errStr = 'callback kwargs must be given as a dict'
raise TypeError(errStr)
callbackKwargs = kwargs['kwargs']
del kwargs['kwargs']
if kwargs:
if callbackKwargs:
callbackKwargs.update(kwargs)
else:
callbackKwargs = kwargs
else:
callbackArgs = (retelem,)
callbackKwargs = kwargs
return self._setNotification(timeout, notification, callback,
callbackArgs,
callbackKwargs)
|
Wait for a particular UI event to occur; this can be built
upon in NativeUIElement for specific convenience methods.
|
26,335 |
def _get(relative_path, genome=None):
chrom = None
if genome:
if in genome:
genome, chrom = genome.split()
check_genome(genome)
relative_path = relative_path.format(genome=genome)
path = abspath(join(dirname(__file__), relative_path))
if not isfile(path) and isfile(path + ):
path +=
if path.endswith() or path.endswith():
if path.endswith():
bedtools = which()
if not bedtools:
critical( + str(os.environ[]))
debug()
bed = BedTool(path)
else:
debug()
bed = BedTool(path)
if chrom:
debug( + chrom)
bed = bed.filter(lambda r: r.chrom == chrom)
return bed
else:
return path
|
:param relative_path: relative path of the file inside the repository
:param genome: genome name. Can contain chromosome name after comma, like hg19-chr20,
in case of BED, the returning BedTool will be with added filter.
:return: BedTools object if it's a BED file, or filepath
|
26,336 |
def get_select_items(items):
option_items = list()
for item in items:
if isinstance(item, dict) and defs.VALUE in item and defs.LABEL in item:
option_items.append(item[defs.VALUE])
else:
raise exceptions.ParametersFieldError(item, "a dictionary with {} and {}"
.format(defs.LABEL, defs.VALUE))
return option_items
|
Return list of possible select items.
|
26,337 |
def _create_delete_one_query(self, row_id, ctx):
assert isinstance(ctx, ResourceQueryContext)
return self._orm.query(self.model_cls).filter(self._model_pk == row_id)
|
Delete row by id query creation.
:param int row_id: Identifier of the deleted row.
:param ResourceQueryContext ctx: The context of this delete query.
|
26,338 |
def transfer_to(self, cloudpath, bbox, block_size=None, compress=True):
if type(bbox) is Bbox:
requested_bbox = bbox
else:
(requested_bbox, _, _) = self.__interpret_slices(bbox)
realized_bbox = self.__realized_bbox(requested_bbox)
if requested_bbox != realized_bbox:
raise exceptions.AlignmentError(
"Unable to transfer non-chunk aligned bounding boxes. Requested: {}, Realized: {}".format(
requested_bbox, realized_bbox
))
default_block_size_MB = 50
chunk_MB = self.underlying.rectVolume() * np.dtype(self.dtype).itemsize * self.num_channels
if self.layer_type == :
chunk_MB /= 1.3
else:
chunk_MB /= 100.0
chunk_MB /= 1024.0 * 1024.0
if block_size:
step = block_size
else:
step = int(default_block_size_MB // chunk_MB) + 1
try:
destvol = CloudVolume(cloudpath, mip=self.mip)
except exceptions.InfoUnavailableError:
destvol = CloudVolume(cloudpath, mip=self.mip, info=self.info, provenance=self.provenance.serialize())
destvol.commit_info()
destvol.commit_provenance()
except exceptions.ScaleUnavailableError:
destvol = CloudVolume(cloudpath)
for i in range(len(destvol.scales) + 1, len(self.scales)):
destvol.scales.append(
self.scales[i]
)
destvol.commit_info()
destvol.commit_provenance()
num_blocks = np.ceil(self.bounds.volume() / self.underlying.rectVolume()) / step
num_blocks = int(np.ceil(num_blocks))
cloudpaths = txrx.chunknames(realized_bbox, self.bounds, self.key, self.underlying)
pbar = tqdm(
desc=.format(step),
unit=,
disable=(not self.progress),
total=num_blocks,
)
with pbar:
with Storage(self.layer_cloudpath) as src_stor:
with Storage(cloudpath) as dest_stor:
for _ in range(num_blocks, 0, -1):
srcpaths = list(itertools.islice(cloudpaths, step))
files = src_stor.get_files(srcpaths)
files = [ (f[], f[]) for f in files ]
dest_stor.put_files(
files=files,
compress=compress,
content_type=txrx.content_type(destvol),
)
pbar.update()
|
Transfer files from one storage location to another, bypassing
volume painting. This enables using a single CloudVolume instance
to transfer big volumes. In some cases, gsutil or aws s3 cli tools
may be more appropriate. This method is provided for convenience. It
may be optimized for better performance over time as demand requires.
cloudpath (str): path to storage layer
bbox (Bbox object): ROI to transfer
block_size (int): number of file chunks to transfer per I/O batch.
compress (bool): Set to False to upload as uncompressed
|
26,339 |
def success(item):
try:
trg_queue = item.queue
os.rename(fsq_path.item(trg_queue, item.id, host=item.host),
os.path.join(fsq_path.done(trg_queue, host=item.host),
item.id))
except AttributeError as e:
# item must provide queue, id and host attributes
raise TypeError(e)
|
Successful finish
|
26,340 |
def append_to_table(self, table):
table.add_row(dict(path=self.path,
key=self.key,
creator=self.creator,
timestamp=self.timestamp,
status=self.status,
flags=self.flags))
|
Add this instance as a row on a `astropy.table.Table`
|
26,341 |
def _get_font_size(document, style):
font_size = style.get_font_size()
if font_size == -1:
if style.based_on:
based_on = document.styles.get_by_id(style.based_on)
if based_on:
return _get_font_size(document, based_on)
return font_size
|
Get font size defined for this style.
It will try to get the font size from its parent style if it is not defined by the original style.
:Args:
- document (:class:`ooxml.doc.Document`): Document object
- style (:class:`ooxml.doc.Style`): Style object
:Returns:
Returns font size as a number. -1 if it can not get font size.
|
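A standalone sketch of the parent-chasing logic above; the toy Style class and styles registry are stand-ins for the ooxml objects, not the library's API.
class Style:
    def __init__(self, font_size=-1, based_on=None):
        self.font_size, self.based_on = font_size, based_on
    def get_font_size(self):
        return self.font_size

styles = {"Base": Style(font_size=11), "Heading": Style(based_on="Base")}

def get_font_size(style):
    size = style.get_font_size()
    if size == -1 and style.based_on:
        parent = styles.get(style.based_on)
        if parent:
            return get_font_size(parent)   # fall back to the parent style
    return size

print(get_font_size(styles["Heading"]))  # 11, inherited from "Base"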
26,342 |
def connect(self, ctrl):
if self.prompt:
self.prompt_re = self.driver.make_dynamic_prompt(self.prompt)
else:
self.prompt_re = self.driver.prompt_re
self.ctrl = ctrl
if self.protocol.connect(self.driver):
if self.protocol.authenticate(self.driver):
self.ctrl.try_read_prompt(1)
if not self.prompt:
self.prompt = self.ctrl.detect_prompt()
if self.is_target:
self.update_config_mode()
if self.mode is not None and self.mode != 'global':
self.last_error_msg = "Device is not in global mode. Disconnected."
self.chain.disconnect()
return False
self.prompt_re = self.driver.make_dynamic_prompt(self.prompt)
self.connected = True
if self.is_target is False:
if self.os_version is None:
self.update_os_version()
self.update_hostname()
else:
self._connected_to_target()
return True
else:
self.connected = False
return False
|
Connect to the device.
|
26,343 |
def _get_config_int(self, section: str, key: str, fallback: int=object()) -> int:
return self._config.getint(section, key, fallback=fallback)
|
Gets an int config value
:param section: Section
:param key: Key
:param fallback: Optional fallback value
|
26,344 |
def init_batch(raw_constraints: List[Optional[RawConstraintList]],
beam_size: int,
start_id: int,
eos_id: int) -> List[Optional[ConstrainedHypothesis]]:
constraints = [None] * (len(raw_constraints) * beam_size)
if any(raw_constraints):
for i, raw_list in enumerate(raw_constraints):
num_constraints = sum([len(phrase) for phrase in raw_list]) if raw_list is not None else 0
if num_constraints > 0:
hyp = ConstrainedHypothesis(raw_list, eos_id)
idx = i * beam_size
constraints[idx:idx + beam_size] = [hyp.advance(start_id) for x in range(beam_size)]
return constraints
|
:param raw_constraints: The list of raw constraints (list of list of IDs).
:param beam_size: The beam size.
:param start_id: The target-language vocabulary ID of the SOS symbol.
:param eos_id: The target-language vocabulary ID of the EOS symbol.
:return: A list of ConstrainedHypothesis objects (shape: (batch_size * beam_size,)).
|
26,345 |
def h3(data, *args, **kwargs):
import numpy as np
if data is not None and isinstance(data, (list, tuple)) and not np.isscalar(data[0]):
if "axis_names" not in kwargs:
kwargs["axis_names"] = [(column.name if hasattr(column, "name") else None) for column in data]
data = np.concatenate([item[:, np.newaxis] for item in data], axis=1)
else:
kwargs["dim"] = 3
return histogramdd(data, *args, **kwargs)
|
Facade function to create 3D histograms.
Parameters
----------
data : array_like or list[array_like] or tuple[array_like]
Can be a single array (with three columns) or three different arrays
(for each component)
Returns
-------
physt.histogram_nd.HistogramND
|
26,346 |
def report_saved(report_stats):
if Settings.verbose:
report =
truncated_filename = truncate_cwd(report_stats.final_filename)
report += .format(truncated_filename)
total = new_percent_saved(report_stats)
if total:
report += total
else:
report +=
if Settings.test:
report +=
if Settings.verbose > 1:
tools_report = .join(report_stats.report_list)
if tools_report:
report += + tools_report
print(report)
|
Record the percent saved & print it.
|
26,347 |
def _get_value(context, key):
if isinstance(context, dict):
if key in context:
return context[key]
elif type(context).__module__ != _BUILTIN_MODULE:
try:
attr = getattr(context, key)
except AttributeError:
pass
else:
if callable(attr):
return attr()
return attr
return _NOT_FOUND
|
Retrieve a key's value from a context item.
Returns _NOT_FOUND if the key does not exist.
The ContextStack.get() docstring documents this function's intended behavior.
|
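A simplified standalone sketch of the lookup rules above (it drops the builtin-module guard); the User class and sentinel are illustrative assumptions.
_NOT_FOUND = object()

class User:
    name = "ada"
    def greeting(self):
        return "hi"

def get_value(context, key):
    # Dict keys win; otherwise probe for an attribute and call it if callable.
    if isinstance(context, dict):
        return context.get(key, _NOT_FOUND)
    attr = getattr(context, key, _NOT_FOUND)
    if callable(attr):
        return attr()
    return attr

print(get_value({"name": "grace"}, "name"))        # 'grace'
print(get_value(User(), "greeting"))               # 'hi' (method is called)
print(get_value(User(), "missing") is _NOT_FOUND)  # True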
26,348 |
def _find_files(root, includes, excludes, follow_symlinks):
root = os.path.abspath(root)
file_set = formic.FileSet(
directory=root, include=includes,
exclude=excludes, symlinks=follow_symlinks,
)
for filename in file_set.qualified_files(absolute=False):
yield filename
|
List files inside a directory based on include and exclude rules.
This is a more advanced version of `glob.glob`, that accepts multiple
complex patterns.
Args:
root (str): base directory to list files from.
includes (list[str]): inclusion patterns. Only files matching those
patterns will be included in the result.
excludes (list[str]): exclusion patterns. Files matching those
patterns will be excluded from the result. Exclusions take
precedence over inclusions.
follow_symlinks (bool): If true, symlinks will be included in the
resulting zip file
Yields:
str: a file name relative to the root.
Note:
Documentation for the patterns can be found at
http://www.aviser.asia/formic/doc/index.html
|
26,349 |
def convert_objects(self, convert_dates=True, convert_numeric=False,
convert_timedeltas=True, copy=True):
msg = ("convert_objects is deprecated. To re-infer data dtypes for "
"object columns, use {klass}.infer_objects()\nFor all "
"other conversions use the data-type specific converters "
"pd.to_datetime, pd.to_timedelta and pd.to_numeric."
).format(klass=self.__class__.__name__)
warnings.warn(msg, FutureWarning, stacklevel=2)
return self._constructor(
self._data.convert(convert_dates=convert_dates,
convert_numeric=convert_numeric,
convert_timedeltas=convert_timedeltas,
copy=copy)).__finalize__(self)
|
Attempt to infer better dtype for object columns.
.. deprecated:: 0.21.0
Parameters
----------
convert_dates : boolean, default True
If True, convert to date where possible. If 'coerce', force
conversion, with unconvertible values becoming NaT.
convert_numeric : boolean, default False
If True, attempt to coerce to numbers (including strings), with
unconvertible values becoming NaN.
convert_timedeltas : boolean, default True
If True, convert to timedelta where possible. If 'coerce', force
conversion, with unconvertible values becoming NaT.
copy : boolean, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
Returns
-------
converted : same as input object
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type.
|
26,350 |
def parse_headers(
self,
lines: List[bytes]
) -> Tuple['CIMultiDictProxy[str]',
RawHeaders,
Optional[bool],
Optional[str],
bool,
bool]:
headers, raw_headers = self._headers_parser.parse_headers(lines)
close_conn = None
encoding = None
upgrade = False
chunked = False
conn = headers.get(hdrs.CONNECTION)
if conn:
v = conn.lower()
if v == 'close':
close_conn = True
elif v == 'keep-alive':
close_conn = False
elif v == 'upgrade':
upgrade = True
enc = headers.get(hdrs.CONTENT_ENCODING)
if enc:
enc = enc.lower()
if enc in ('gzip', 'deflate', 'br'):
encoding = enc
te = headers.get(hdrs.TRANSFER_ENCODING)
if te and 'chunked' in te.lower():
chunked = True
return (headers, raw_headers, close_conn, encoding, upgrade, chunked)
|
Parses RFC 5322 headers from a stream.
Line continuations are supported. Returns list of header name
and value pairs. Header name is in upper case.
|
26,351 |
def _filter_xpath_grouping(xpath):
xpath = xpath[1:]
index = xpath.rfind(')')
if index == -1:
raise XpathException("Invalid or unsupported Xpath: %s" % xpath)
xpath = xpath[:index] + xpath[index + 1:]
return xpath
|
This method removes the outer parentheses for xpath grouping.
The xpath converter will break otherwise.
Example:
"(//button[@type='submit'])[1]" becomes "//button[@type='submit'][1]"
|
26,352 |
def _get_db():
with cd(env.remote_path):
file_path = + _sql_paths(, str(base64.urlsafe_b64encode(uuid.uuid4().bytes)).replace(, ))
run(env.python + + file_path)
local_file_path = + _sql_paths(, datetime.now())
get(file_path, local_file_path)
run( + file_path)
return local_file_path
|
Get database from server
|
26,353 |
def get_io_write_task(self, fileobj, data, offset):
return IOWriteTask(
self._transfer_coordinator,
main_kwargs={
'fileobj': fileobj,
'data': data,
'offset': offset,
}
)
|
Get an IO write task for the requested set of data
This task can be ran immediately or be submitted to the IO executor
for it to run.
:type fileobj: file-like object
:param fileobj: The file-like object to write to
:type data: bytes
:param data: The data to write out
:type offset: integer
:param offset: The offset to write the data to in the file-like object
:returns: An IO task to be used to write data to a file-like object
|
26,354 |
def critical(self, event=None, *args, **kw):
if not self._logger.isEnabledFor(logging.CRITICAL):
return
kw = self._add_base_info(kw)
kw['level'] = "critical"
return self._proxy_to_logger('critical', event, *args, **kw)
|
Process event and call :meth:`logging.Logger.critical` with the result.
|
26,355 |
def mass_fractions(self):
if self.__mass_fractions:
return self.__mass_fractions
else:
self.__mass_fractions = mass_fractions(self.atoms, self.MW)
return self.__mass_fractions
|
Dictionary of atom:mass-weighted fractional occurrence of elements.
Useful when performing mass balances. For atom-fraction occurrences, see
:obj:`atom_fractions`.
Examples
--------
>>> Chemical('water').mass_fractions
{'H': 0.11189834407236524, 'O': 0.8881016559276347}
|
26,356 |
def _summarize_peaks(peaks):
previous = peaks[0]
new_peaks = [previous]
for pos in peaks:
if pos > previous + 10:
new_peaks.append(pos)
previous = pos
return new_peaks
|
Merge peak positions if they are closer than 10 apart.
|
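A worked example of the (fixed) merging logic above; the peak positions are illustrative: any position within 10 of the previously kept peak is merged away.
def summarize_peaks(peaks):
    previous = peaks[0]
    new_peaks = [previous]
    for pos in peaks:
        if pos > previous + 10:
            new_peaks.append(pos)
            previous = pos
    return new_peaks

print(summarize_peaks([100, 103, 109, 125, 126, 200]))  # [100, 125, 200]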
26,357 |
def log_run(self):
version = get_system_spec()[]
cursor = self.conn.cursor()
cursor.execute(, [version])
self.maybe_commit()
|
Log timestamp and raiden version to help with debugging
|
26,358 |
def GetDetectorData(detectorName):
detPosVector = np.zeros(3)
nx = np.zeros(3)
ny = np.zeros(3)
if detectorName == 'H1':
nx[0] = -0.22389266154
nx[1] = 0.79983062746
nx[2] = 0.55690487831
ny[0] = -0.91397818574
ny[1] = 0.02609403989
ny[2] = -0.40492342125
detPosVector[0] = -2.16141492636e6
detPosVector[1] = -3.83469517889e6
detPosVector[2] = 4.60035022664e6
elif detectorName == 'L1':
nx[0] = -0.95457412153
nx[1] = -0.14158077340
nx[2] = -0.26218911324
ny[0] = 0.29774156894
ny[1] = -0.48791033647
ny[2] = -0.82054461286
detPosVector[0] = -7.42760447238e4
detPosVector[1] = -5.49628371971e6
detPosVector[2] = 3.22425701744e6
elif detectorName == 'V1':
nx[0] = -0.70045821479
nx[1] = 0.20848948619
nx[2] = 0.68256166277
ny[0] = -0.05379255368
ny[1] = -0.96908180549
ny[2] = 0.24080451708
detPosVector[0] = 4.54637409900e6
detPosVector[1] = 8.42989697626e5
detPosVector[2] = 4.37857696241e6
elif detectorName == 'I1':
vxLat = (14. + 14./60.)*(np.pi/180.)
vxLon = (76. + 26./60.)*(np.pi/180.)
vxElev = 0.
xAlt = 0.
yAlt = 0.
xAz = np.pi/2.
yAz = 0.
cosLat = np.cos(vxLat)
sinLat = np.sin(vxLat)
cosLon = np.cos(vxLon)
sinLon = np.sin(vxLon)
ellDenom = np.sqrt(AWGS84*AWGS84*cosLat*cosLat + \
BWGS84*BWGS84*sinLat*sinLat)
locRho = cosLat*(AWGS84*AWGS84/ellDenom + vxElev)
detPosVector[0] = locRho*cosLon
detPosVector[1] = locRho*sinLon
detPosVector[2] = sinLat*(BWGS84*BWGS84/ellDenom + vxElev)
cosxAlt = np.cos(xAlt)
sinxAlt = np.sin(xAlt)
cosxAz = np.cos(xAz)
sinxAz = np.sin(xAz)
uxNorth = cosxAlt*cosxAz
uxEast = cosxAlt*sinxAz
uxRho = -sinLat*uxNorth + cosLat*sinxAlt
nx[0] = cosLon*uxRho - sinLon*uxEast
nx[1] = sinLon*uxRho + cosLon*uxEast
nx[2] = cosLat*uxNorth + sinLat*sinxAlt
cosyAlt = np.cos(yAlt)
sinyAlt = np.sin(yAlt)
cosyAz = np.cos(yAz)
sinyAz = np.sin(yAz)
uyNorth = cosyAlt*cosyAz
uyEast = cosyAlt*sinyAz
uyRho = -sinLat*uyNorth + cosLat*sinyAlt
ny[0] = cosLon*uyRho - sinLon*uyEast
ny[1] = sinLon*uyRho + cosLon*uyEast
ny[2] = cosLat*uyNorth + sinLat*sinyAlt
elif detectorName == 'K1':
vxLat = (36.25)*(np.pi/180.)
vxLon = (137.18)*(np.pi/180.)
vxElev = 0.
xAlt = 0.
yAlt = 0.
xMid = 1500.
yMid = 1500.
xAz = 19.*(np.pi/180.) + np.pi/2.
yAz = 19.*(np.pi/180.)
cosLat = np.cos(vxLat)
sinLat = np.sin(vxLat)
cosLon = np.cos(vxLon)
sinLon = np.sin(vxLon)
ellDenom = np.sqrt(AWGS84*AWGS84*cosLat*cosLat + \
BWGS84*BWGS84*sinLat*sinLat)
locRho = cosLat*(AWGS84*AWGS84/ellDenom + vxElev)
detPosVector[0] = locRho*cosLon
detPosVector[1] = locRho*sinLon
detPosVector[2] = sinLat*(BWGS84*BWGS84/ellDenom + vxElev)
cosxAlt = np.cos(xAlt)
sinxAlt = np.sin(xAlt)
cosxAz = np.cos(xAz)
sinxAz = np.sin(xAz)
uxNorth = cosxAlt*cosxAz
uxEast = cosxAlt*sinxAz
uxRho = -sinLat*uxNorth + cosLat*sinxAlt
nx[0] = cosLon*uxRho - sinLon*uxEast
nx[1] = sinLon*uxRho + cosLon*uxEast
nx[2] = cosLat*uxNorth + sinLat*sinxAlt
cosyAlt = np.cos(yAlt)
sinyAlt = np.sin(yAlt)
cosyAz = np.cos(yAz)
sinyAz = np.sin(yAz)
uyNorth = cosyAlt*cosyAz
uyEast = cosyAlt*sinyAz
uyRho = -sinLat*uyNorth + cosLat*sinyAlt
ny[0] = cosLon*uyRho - sinLon*uyEast
ny[1] = sinLon*uyRho + cosLon*uyEast
ny[2] = cosLat*uyNorth + sinLat*sinyAlt
detRespTensor = np.zeros((3,3),float)
for i in range(0,3):
for j in range(0,3):
detRespTensor[i,j] = (nx[i]*nx[j] - ny[i]*ny[j])*0.5
return detRespTensor,detPosVector
|
GetDetectorData - function to return the locations and detector response
tensor of the detector described by detectorName.
detectorName - Name of required GW IFO. Can be 'H1', 'L1', 'V1', 'I1'
or 'K1'.
Returns detRespTensor - Detector response tensor of the IFO.
detPosVector - Position vector of the IFO.
Sarah Gossan 2012. Last updated 02/18/14.
|
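An illustrative sketch of the response-tensor construction used in every branch above, D = (nx nx^T - ny ny^T) / 2; the toy arm direction vectors are assumptions.
import numpy as np

def detector_response_tensor(nx, ny):
    # Response tensor from two (unit) arm direction vectors.
    nx, ny = np.asarray(nx, float), np.asarray(ny, float)
    return 0.5 * (np.outer(nx, nx) - np.outer(ny, ny))

# Toy arms along the x and y axes: the tensor is diag(0.5, -0.5, 0) and traceless.
d = detector_response_tensor([1.0, 0.0, 0.0], [0.0, 1.0, 0.0])
print(d)
print(np.isclose(np.trace(d), 0.0))  # True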
26,359 |
def set_jenkins_rename_file(self, nodes_rename_file):
self.nodes_rename_file = nodes_rename_file
self.__load_node_renames()
logger.info("Jenkis node rename file active: %s", nodes_rename_file)
|
File with nodes renaming mapping:
Node,Comment
arm-build1,remove
arm-build2,keep
ericsson-build3,merge into ericsson-build1
....
Once set in the next enrichment the rename will be done
|
26,360 |
def reset(self):
self.L4.reset()
for module in self.L6aModules:
module.reset()
|
Clear all cell activity.
|
26,361 |
def get_comment_lookup_session_for_book(self, book_id, proxy):
if not self.supports_comment_lookup():
raise errors.Unimplemented()
return sessions.CommentLookupSession(book_id, proxy, self._runtime)
|
Gets the ``OsidSession`` associated with the comment lookup service for the given book.
arg: book_id (osid.id.Id): the ``Id`` of the ``Book``
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.commenting.CommentLookupSession) - a
``CommentLookupSession``
raise: NotFound - no ``Book`` found by the given ``Id``
raise: NullArgument - ``book_id`` or ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_comment_lookup()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_comment_lookup()`` and
``supports_visible_federation()`` are ``true``*
|
26,362 |
def add_derived_identity(self, id_stmt):
p = self.add_namespace(id_stmt.main_module())
if id_stmt not in self.identities:
self.identities[id_stmt] = SchemaNode.define("__%s_%s" %
(p, id_stmt.arg))
parent = self.identities[id_stmt]
if id_stmt in self.identity_deps:
parent = SchemaNode.choice(parent, occur=2)
for i in self.identity_deps[id_stmt]:
parent.subnode(self.add_derived_identity(i))
idval = SchemaNode("value", parent, p+":"+id_stmt.arg)
idval.attr["type"] = "QName"
res = SchemaNode("ref")
res.attr["name"] = self.identities[id_stmt].attr["name"]
return res
|
Add pattern def for `id_stmt` and all derived identities.
The corresponding "ref" pattern is returned.
|
26,363 |
def call_use_cached_files(tup):
try:
cache, key, results_dir = tup
res = cache.use_cached_files(key, results_dir)
if res:
sys.stderr.write()
else:
sys.stderr.write()
sys.stderr.flush()
return res
except NonfatalArtifactCacheError as e:
logger.warn(.format(e))
return False
|
Importable helper for multi-proc calling of ArtifactCache.use_cached_files on a cache instance.
Multiprocessing map/apply/etc require functions which can be imported, not bound methods.
To call a bound method, instead call a helper like this and pass tuple of the instance and args.
The helper can then call the original method on the deserialized instance.
:param tup: A tuple of an ArtifactCache and args (eg CacheKey) for ArtifactCache.use_cached_files.
|
26,364 |
def trans_history(
self, from_=None, count=None, from_id=None, end_id=None,
order=None, since=None, end=None
):
return self._trade_api_call(
'TransHistory', from_=from_, count=count, from_id=from_id, end_id=end_id,
order=order, since=since, end=end
)
|
Returns the history of transactions.
To use this method you need a privilege of the info key.
:param int or None from_: transaction ID, from which the display starts (default 0)
:param int or None count: number of transaction to be displayed (default 1000)
:param int or None from_id: transaction ID, from which the display starts (default 0)
:param int or None end_id: transaction ID on which the display ends (default inf.)
:param str or None order: sorting (default 'DESC')
:param int or None since: the time to start the display (default 0)
:param int or None end: the time to end the display (default inf.)
|
26,365 |
def find_checkpoints(model_path: str, size=4, strategy="best", metric: str = C.PERPLEXITY) -> List[str]:
maximize = C.METRIC_MAXIMIZE[metric]
points = utils.get_validation_metric_points(model_path=model_path, metric=metric)
param_path = os.path.join(model_path, C.PARAMS_NAME)
points = [(value, checkpoint) for value, checkpoint in points if os.path.exists(param_path % checkpoint)]
if strategy == "best":
top_n = _strategy_best(points, size, maximize)
elif strategy == "last":
top_n = _strategy_last(points, size, maximize)
elif strategy == "lifespan":
top_n = _strategy_lifespan(points, size, maximize)
else:
raise RuntimeError("Unknown strategy, options: best last lifespan")
params_paths = [
os.path.join(model_path, C.PARAMS_NAME % point[-1]) for point in top_n
]
logger.info("Found: " + ", ".join(str(point) for point in top_n))
return params_paths
|
Finds N best points from .metrics file according to strategy.
:param model_path: Path to model.
:param size: Number of checkpoints to combine.
:param strategy: Combination strategy.
:param metric: Metric according to which checkpoints are selected. Corresponds to columns in model/metrics file.
:return: List of paths corresponding to chosen checkpoints.
|
26,366 |
def add_permission_role(self, role, perm_view):
if perm_view not in role.permissions:
try:
role.permissions.append(perm_view)
self.get_session.merge(role)
self.get_session.commit()
log.info(
c.LOGMSG_INF_SEC_ADD_PERMROLE.format(str(perm_view), role.name)
)
except Exception as e:
log.error(c.LOGMSG_ERR_SEC_ADD_PERMROLE.format(str(e)))
self.get_session.rollback()
|
Add permission-ViewMenu object to Role
:param role:
The role object
:param perm_view:
The PermissionViewMenu object
|
26,367 |
def set_window_size_callback(window, cbfun):
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _window_size_callback_repository:
previous_callback = _window_size_callback_repository[window_addr]
else:
previous_callback = None
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWwindowsizefun(cbfun)
_window_size_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetWindowSizeCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0]
|
Sets the size callback for the specified window.
Wrapper for:
GLFWwindowsizefun glfwSetWindowSizeCallback(GLFWwindow* window, GLFWwindowsizefun cbfun);
|
26,368 |
def log_request(request: str, trim_log_values: bool = False, **kwargs: Any) -> None:
return log_(request, request_logger, logging.INFO, trim=trim_log_values, **kwargs)
|
Log a request
|
26,369 |
def fix_config(self, options):
opt = "db_url"
if opt not in options:
options[opt] = "jdbc:mysql://somehost:3306/somedatabase"
if opt not in self.help:
self.help[opt] = "The JDBC database URL to connect to (str)."
opt = "user"
if opt not in options:
options[opt] = "user"
if opt not in self.help:
self.help[opt] = "The database user to use for connecting (str)."
opt = "password"
if opt not in options:
options[opt] = "secret"
if opt not in self.help:
self.help[opt] = "The password for the database user (str)."
opt = "query"
if opt not in options:
options[opt] = "SELECT * FROM table"
if opt not in self.help:
self.help[opt] = "The SQL query for generating the dataset (str)."
opt = "sparse"
if opt not in options:
options[opt] = False
if opt not in self.help:
self.help[opt] = "Whether to return the data in sparse format (bool)."
opt = "custom_props"
if opt not in options:
options[opt] = ""
if opt not in self.help:
self.help[opt] = "Custom properties filename (str)."
return super(LoadDatabase, self).fix_config(options)
|
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
|
26,370 |
def create_raw(self, key, value):
data = None
if key is not None and value is not None:
data = self.db.create(key.strip(), value)
else:
self.tcex.log.warning(u)
return data
|
Create method of CRUD operation for raw data.
Args:
key (string): The variable to write to the DB.
value (any): The data to write to the DB.
Returns:
(string): Result of DB write.
|
26,371 |
def call():
args = get_arguments()
if args.debug:
log_level = logging.DEBUG
elif args.quiet:
log_level = logging.WARN
else:
log_level = logging.INFO
setup_logging(log_level)
abode = None
if not args.cache:
if not args.username or not args.password:
raise Exception("Please supply a cache or username and password.")
try:
if args.cache and args.username and args.password:
abode = abodepy.Abode(username=args.username,
password=args.password,
get_devices=True,
cache_path=args.cache)
elif args.cache and not (not args.username or not args.password):
abode = abodepy.Abode(get_devices=True,
cache_path=args.cache)
else:
abode = abodepy.Abode(username=args.username,
password=args.password,
get_devices=True)
if args.mode:
_LOGGER.info("Current alarm mode: %s", abode.get_alarm().mode)
if args.arm:
if abode.get_alarm().set_mode(args.arm):
_LOGGER.info("Alarm mode changed to: %s", args.arm)
else:
_LOGGER.warning("Failed to change alarm mode to: %s", args.arm)
for setting in args.set or []:
keyval = setting.split("=")
if abode.set_setting(keyval[0], keyval[1]):
_LOGGER.info("Setting %s changed to %s", keyval[0], keyval[1])
for device_id in args.on or []:
device = abode.get_device(device_id)
if device:
if device.switch_on():
_LOGGER.info("Switched on device with id: %s", device_id)
else:
_LOGGER.warning("Could not find device with id: %s", device_id)
for device_id in args.off or []:
device = abode.get_device(device_id)
if device:
if device.switch_off():
_LOGGER.info("Switched off device with id: %s", device_id)
else:
_LOGGER.warning("Could not find device with id: %s", device_id)
for device_id in args.lock or []:
device = abode.get_device(device_id)
if device:
if device.lock():
_LOGGER.info("Locked device with id: %s", device_id)
else:
_LOGGER.warning("Could not find device with id: %s", device_id)
for device_id in args.unlock or []:
device = abode.get_device(device_id)
if device:
if device.unlock():
_LOGGER.info("Unlocked device with id: %s", device_id)
else:
_LOGGER.warning("Could not find device with id: %s", device_id)
for device_id in args.json or []:
device = abode.get_device(device_id)
if device:
_LOGGER.info(device_id + " JSON:\n" +
json.dumps(device._json_state, sort_keys=True,
indent=4, separators=(',', ': ')))
else:
_LOGGER.warning("Could not find device with id: %s", device_id)
def _device_print(dev, append=''):
_LOGGER.info("%s%s",
dev.desc, append)
if args.automations:
for automation in abode.get_automations():
_device_print(automation)
for automation_id in args.activate or []:
automation = abode.get_automation(automation_id)
if automation:
if automation.set_active(True):
_LOGGER.info(
"Activated automation with id: %s", automation_id)
else:
_LOGGER.warning(
"Could not find automation with id: %s", automation_id)
for automation_id in args.deactivate or []:
automation = abode.get_automation(automation_id)
if automation:
if automation.set_active(False):
_LOGGER.info(
"Deactivated automation with id: %s", automation_id)
else:
_LOGGER.warning(
"Could not find automation with id: %s", automation_id)
for automation_id in args.trigger or []:
automation = abode.get_automation(automation_id)
if automation:
if automation.trigger():
_LOGGER.info(
"Triggered automation with id: %s", automation_id)
else:
_LOGGER.warning(
"Could not find automation with id: %s", automation_id)
for device_id in args.capture or []:
device = abode.get_device(device_id)
if device:
if device.capture():
_LOGGER.info(
"Image requested from device with id: %s", device_id)
else:
_LOGGER.warning(
"Failed to request image from device with id: %s",
device_id)
else:
_LOGGER.warning("Could not find device with id: %s", device_id)
for keyval in args.image or []:
devloc = keyval.split("=")
device = abode.get_device(devloc[0])
if device:
try:
if (device.refresh_image() and
device.image_to_file(devloc[1])):
_LOGGER.info(
"Saved image to %s for device id: %s", devloc[1],
devloc[0])
except AbodeException as exc:
_LOGGER.warning("Unable to save image: %s", exc)
else:
_LOGGER.warning(
"Could not find device with id: %s", devloc[0])
if args.devices:
for device in abode.get_devices():
_device_print(device)
def _device_callback(dev):
_device_print(dev, ", At: " + time.strftime("%Y-%m-%d %H:%M:%S"))
def _timeline_callback(tl_json):
    event_code = int(tl_json['event_code'])   # key names assumed; the original string literals were lost in extraction
    if 5100 <= event_code <= 5199:
        return
    _LOGGER.info("%s - %s at %s %s",
                 tl_json['event_name'], tl_json['event_type'],
                 tl_json['date'], tl_json['time'])
if args.device:
for device_id in args.device:
device = abode.get_device(device_id)
if device:
_device_print(device)
abode.events.add_device_callback(device_id,
_device_callback)
else:
_LOGGER.warning(
"Could not find device with id: %s", device_id)
if args.listen:
if args.device is None:
_LOGGER.info("Adding all devices to listener...")
for device in abode.get_devices():
abode.events.add_device_callback(device.device_id,
_device_callback)
abode.events.add_timeline_callback(TIMELINE.ALL,
_timeline_callback)
_LOGGER.info("Listening for device and timeline updates...")
abode.events.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
abode.events.stop()
_LOGGER.info("Device update listening stopped.")
except abodepy.AbodeException as exc:
_LOGGER.error(exc)
finally:
if abode:
abode.logout()
|
Execute command line helper.
|
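A hedged example of driving the same library programmatically instead of through the command-line helper above; the credentials are placeholders, and every call mirrors one already used in the helper.

import abodepy

# Placeholder credentials; the calls mirror the command-line helper above.
abode = abodepy.Abode(username='user@example.com', password='secret',
                      get_devices=True)
print('Current alarm mode:', abode.get_alarm().mode)
for device in abode.get_devices():
    print(device.desc)
abode.logout()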
26,372 |
def idle_all_workers(self):
    # NOTE: most of the original body was garbled during extraction; this is
    # a hedged reconstruction from the docstring below. ``self.IDLE`` and
    # ``num_pending()`` are named in the docstring; ``set_mode()`` and
    # ``work_spec_names()`` are placeholders for the original calls.
    self.set_mode(self.IDLE)
    while True:
        num_pending = dict((name, self.num_pending(name))
                           for name in self.work_spec_names())
        if sum(num_pending.values()) == 0:
            break
        logger.debug('waiting for pending work_units: %r', num_pending)
        time.sleep(1)
|
Set the global mode to :attr:`IDLE` and wait for workers to stop.
This can wait arbitrarily long before returning. The worst
case in "normal" usage involves waiting five minutes for a
"lost" job to expire; a well-behaved but very-long-running job
can extend its own lease further, and this function will not
return until that job finishes (if ever).
.. deprecated:: 0.4.5
There isn't an obvious use case for this function, and its
"maybe wait forever for something out of my control" nature
makes it hard to use in real code. Polling all of the work
specs and their :meth:`num_pending` in application code if
you really needed this operation would have the same
semantics and database load.
|
26,373 |
def sample(self, bqm, scalar=None, bias_range=1, quadratic_range=None,
ignored_variables=None, ignored_interactions=None,
ignore_offset=False, **parameters):
ignored_variables, ignored_interactions = _check_params(
ignored_variables, ignored_interactions)
child = self.child
bqm_copy = _scaled_bqm(bqm, scalar, bias_range, quadratic_range,
ignored_variables, ignored_interactions,
ignore_offset)
response = child.sample(bqm_copy, **parameters)
return _scale_back_response(bqm, response, bqm_copy.info['scalar'],
ignored_variables, ignored_interactions,
ignore_offset)
|
Scale and sample from the provided binary quadratic model.
If scalar is not given, the problem is scaled based on the bias and
quadratic ranges. See :meth:`.BinaryQuadraticModel.scale` and
:meth:`.BinaryQuadraticModel.normalize`.
Args:
bqm (:obj:`dimod.BinaryQuadraticModel`):
Binary quadratic model to be sampled from.
scalar (number):
Value by which to scale the energy range of the binary quadratic model.
bias_range (number/pair):
Value/range by which to normalize all the biases, or if
`quadratic_range` is provided, just the linear biases.
quadratic_range (number/pair):
Value/range by which to normalize the quadratic biases.
ignored_variables (iterable, optional):
Biases associated with these variables are not scaled.
ignored_interactions (iterable[tuple], optional):
As an iterable of 2-tuples. Biases associated with these interactions are not scaled.
ignore_offset (bool, default=False):
If True, the offset is not scaled.
**parameters:
Parameters for the sampling method, specified by the child sampler.
Returns:
:obj:`dimod.SampleSet`
|
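A minimal usage sketch, assuming the method above belongs to dimod's ScaleComposite; the wrapped solver, variables and bias values are illustrative.

import dimod

# Wrap an exact solver so problems are rescaled before sampling (illustrative values).
sampler = ScaleComposite(dimod.ExactSolver())
bqm = dimod.BinaryQuadraticModel({'a': -4.0, 'b': -4.0}, {('a', 'b'): 3.2},
                                 0.0, dimod.SPIN)
sampleset = sampler.sample(bqm, scalar=0.5)
print(sampleset.first.energy)  # energies are scaled back to the original problem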
26,374 |
def safe_search_detection(
self, image, max_results=None, retry=None, timeout=None, additional_properties=None
):
client = self.annotator_client
self.log.info("Detecting safe search")
if additional_properties is None:
additional_properties = {}
response = client.safe_search_detection(
image=image, max_results=max_results, retry=retry, timeout=timeout, **additional_properties
)
response = MessageToDict(response)
self._check_for_error(response)
self.log.info("Safe search detection finished")
return response
|
For the documentation see:
:py:class:`~airflow.contrib.operators.gcp_vision_operator.CloudVisionDetectImageSafeSearchOperator`
|
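A hedged sketch of calling the hook method above; the hook class name, the dict form of the Vision image payload, and the GCS URI are assumptions.

# CloudVisionHook and the payload shape are assumptions; the call mirrors the method above.
hook = CloudVisionHook()
image = {'source': {'image_uri': 'gs://my-bucket/photo.jpg'}}
result = hook.safe_search_detection(image=image, max_results=1)
print(result)  # dict produced by MessageToDict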
26,375 |
def salt_call():
import salt.cli.call
if '' in sys.path:
    sys.path.remove('')
client = salt.cli.call.SaltCall()
_install_signal_handlers(client)
client.run()
|
Directly call a salt command in the modules, does not require a running
salt minion to run.
|
26,376 |
def download_file_content(self, file_id, etag=None):
    # NOTE: this body was partly garbled during extraction (docstring text had
    # leaked into the code and several string literals were lost). The header
    # names, messages and endpoint path below are reconstructed from the
    # docstring's ETag semantics and should be treated as assumptions.
    if not is_valid_uuid(file_id):
        raise StorageArgumentException(
            'Invalid UUID for file_id: {0}'.format(file_id))
    headers = {'Accept': '*/*'}
    if etag:
        headers['If-None-Match'] = etag
    resp = self._authenticated_request \
        .to_endpoint('file/{}/content/'.format(file_id)) \
        .with_headers(headers) \
        .get()
    if resp.status_code == 304:
        return (None, None)
    if 'ETag' not in resp.headers:
        raise StorageException('No ETag found in the response headers')
    return (resp.headers['ETag'], resp.content)
|
Download file content.
Args:
file_id (str): The UUID of the file whose content is requested
etag (str): If the content is not changed since the provided ETag,
the content won't be downloaded. If the content is changed, it
will be downloaded and returned with its new ETag.
Note:
ETags should be enclosed in double quotes::
my_etag = '"71e1ed9ee52e565a56aec66bc648a32c"'
Returns:
A tuple of ETag and content (etag, content) if the content was
retrieved. If an etag was provided, and content didn't change
returns (None, None)::
('"71e1ed9ee52e565a56aec66bc648a32c"', 'Hello world!')
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
|
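A short usage sketch of the ETag flow described in the docstring; client and the UUID below are placeholders for an authenticated storage client and a real file id.

# 'client' and the UUID below are placeholders.
file_id = 'e2c25c1c-1234-5678-9abc-def012345678'
etag, content = client.download_file_content(file_id)
# Later: only re-download if the content changed since the ETag we hold.
new_etag, new_content = client.download_file_content(file_id, etag=etag)
if new_etag is None:
    print('Content unchanged; keep the cached copy')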
26,377 |
def unzip(seq, elem_len=None):
ret = tuple(zip(*_gen_unzip(map(tuple, seq), elem_len)))
if ret:
return ret
if elem_len is None:
raise ValueError("cannot unzip empty sequence without ")
return ((),) * elem_len
|
Unzip a length n sequence of length m sequences into m separate length
n sequences.
Parameters
----------
seq : iterable[iterable]
The sequence to unzip.
elem_len : int, optional
The expected length of each element of ``seq``. If not provided this
will be inferred from the length of the first element of ``seq``. This
can be used to ensure that code like: ``a, b = unzip(seq)`` does not
fail even when ``seq`` is empty.
Returns
-------
seqs : iterable[iterable]
The new sequences pulled out of the first iterable.
Raises
------
ValueError
Raised when ``seq`` is empty and ``elem_len`` is not provided.
Raised when elements of ``seq`` do not match the given ``elem_len`` or
the length of the first element of ``seq``.
Examples
--------
>>> seq = [('a', 1), ('b', 2), ('c', 3)]
>>> cs, ns = unzip(seq)
>>> cs
('a', 'b', 'c')
>>> ns
(1, 2, 3)
# checks that the elements are the same length
>>> seq = [('a', 1), ('b', 2), ('c', 3, 'extra')]
>>> cs, ns = unzip(seq)
Traceback (most recent call last):
...
ValueError: element at index 2 was length 3, expected 2
# allows an explicit element length instead of inferring
>>> seq = [('a', 1, 'extra'), ('b', 2), ('c', 3)]
>>> cs, ns = unzip(seq, 2)
Traceback (most recent call last):
...
ValueError: element at index 0 was length 3, expected 2
# handles empty sequences when a length is given
>>> cs, ns = unzip([], elem_len=2)
>>> cs == ns == ()
True
Notes
-----
This function will force ``seq`` to completion.
|
26,378 |
def encode(self, key):
salt = self.salt or os.urandom(32).encode('base64').rstrip()  # 'base64' assumed; the encoding argument was lost in extraction
return self.encode_w_salt(salt, key)
|
Encodes a user key into a particular format. The result of this method
will be used by swauth for storing user credentials.
If salt is not manually set in conf file, a random salt will be
generated and used.
:param key: User's secret key
:returns: A string representing user credentials
|
26,379 |
def with_timeout(
timeout: Union[float, datetime.timedelta],
future: _Yieldable,
quiet_exceptions: "Union[Type[Exception], Tuple[Type[Exception], ...]]" = (),
) -> Future:
future_converted = convert_yielded(future)
result = _create_future()
chain_future(future_converted, result)
io_loop = IOLoop.current()
def error_callback(future: Future) -> None:
try:
future.result()
except Exception as e:
if not isinstance(e, quiet_exceptions):
app_log.error(
"Exception in Future %r after timeout", future, exc_info=True
)
def timeout_callback() -> None:
if not result.done():
result.set_exception(TimeoutError("Timeout"))
future_add_done_callback(future_converted, error_callback)
timeout_handle = io_loop.add_timeout(timeout, timeout_callback)
if isinstance(future_converted, Future):
future_add_done_callback(
future_converted, lambda future: io_loop.remove_timeout(timeout_handle)
)
else:
io_loop.add_future(
future_converted, lambda future: io_loop.remove_timeout(timeout_handle)
)
return result
|
Wraps a `.Future` (or other yieldable object) in a timeout.
Raises `tornado.util.TimeoutError` if the input future does not
complete before ``timeout``, which may be specified in any form
allowed by `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or
an absolute time relative to `.IOLoop.time`)
If the wrapped `.Future` fails after it has timed out, the exception
will be logged unless it is of a type contained in ``quiet_exceptions``
(which may be an exception type or a sequence of types).
The wrapped `.Future` is not canceled when the timeout expires,
permitting it to be reused. `asyncio.wait_for` is similar to this
function but it does cancel the wrapped `.Future` on timeout.
.. versionadded:: 4.0
.. versionchanged:: 4.1
Added the ``quiet_exceptions`` argument and the logging of unhandled
exceptions.
.. versionchanged:: 4.4
Added support for yieldable objects other than `.Future`.
|
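A small example of wrapping a coroutine in the timeout described above; fetch_page is a hypothetical coroutine standing in for any yieldable.

from datetime import timedelta

from tornado import gen
from tornado.util import TimeoutError

async def fetch_with_deadline():
    try:
        # fetch_page() is hypothetical; any yieldable object works here.
        return await gen.with_timeout(timedelta(seconds=5), fetch_page())
    except TimeoutError:
        return None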
26,380 |
def dr( self, r1, r2, cutoff=None ):
delta_r_cartesian = ( r1 - r2 ).dot( self.matrix )
delta_r_squared = sum( delta_r_cartesian**2 )
if cutoff is not None:
cutoff_squared = cutoff ** 2
if delta_r_squared > cutoff_squared:
return None
return( math.sqrt( delta_r_squared ) )
|
Calculate the distance between two fractional coordinates in the cell.
Args:
r1 (np.array): fractional coordinates for position 1.
r2 (np.array): fractional coordinates for position 2.
cutoff (optional, float): If set, returns None for distances greater than the cutoff. Default None (unset).
Returns:
(float): the distance between r1 and r2.
|
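A hedged usage sketch; cell stands for whatever lattice object carries the 3x3 matrix this method dots against.

import numpy as np

# 'cell' is a placeholder for an object exposing a 3x3 lattice `matrix` and dr().
r1 = np.array([0.10, 0.25, 0.00])
r2 = np.array([0.90, 0.25, 0.00])
print(cell.dr(r1, r2))              # Cartesian distance between the fractional coordinates
print(cell.dr(r1, r2, cutoff=1.0))  # None if the distance exceeds the cutoff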
26,381 |
def _is_accepted_input(self, input_string):
ret = False
accept_filter = (self.volume_string, "http stream debug: ")
reject_filter = ()
for n in accept_filter:
if n in input_string:
ret = True
break
if ret:
for n in reject_filter:
if n in input_string:
ret = False
break
return ret
|
vlc input filtering
|
26,382 |
def main(print_cfg):
parser = argparse.ArgumentParser(description="mvmany.py -- MVtest helper script.")  # epilog text lost in extraction
parser.add_argument("-v", action="store_true", help="Print version number")
parser.add_argument("--mvpath", type=str, default="mvtest.py", help="The path to mvtest.py")
# NOTE: a run of add_argument() definitions was garbled during extraction.
# Based on the surviving code below, the lost flags include --no-sex,
# --no-parents, --no-fid, --no-pheno, a genetic-model choice
# (additive/dominant/recessive/genotype), --sex (use pedigree sex as a
# covariate), and the file/path options referenced later (--mach, --impute,
# --impute-fam, --covar, --pheno, --script-path, --res-path, --logpath,
# --snps-per-job and the PLINK-style inputs).
parser.add_argument("--missing-phenotype", type=float, help="Encoding for missing phenotypes")
parser.add_argument("--maf", type=float, help="Minimum MAF allowed for analysis")
parser.add_argument("--max-maf", type=float, help="MAX MAF allowed for analysis")
parser.add_argument("--geno", type=float, help="MAX per-SNP missing for analysis")
parser.add_argument("--mind", type=float, help="MAX per-person missing")
parser.set_defaults(all_pheno=False, sex=False)
args = parser.parse_args()
if args.v:
print >> sys.stderr, "%s: %s" % (os.path.basename(__file__), __version__)
sys.exit(0)
mkdir(args.script_path)
mkdir(args.res_path)
mkdir(args.logpath)
if args.mach:
args.mach = args.mach.name
if args.impute:
args.impute = args.impute.name
if args.impute_fam:
args.impute_fam = args.impute_fam.name
if args.covar:
args.covar = args.covar.name
if args.pheno:
args.pheno = args.pheno.name
general_arguments = []
for flag in ("exclude,keep,remove,file,ped,map,map3,no-sex,no-parents,no-fid,no-pheno,liability," + \
"bfile,bed,bim,fam,tfile,tped,tfam,compressed," + \
"impute,impute-fam,impute-uncompresed,impute-encoding,impute-info-ext,impute-gen-ext,impute-info-thresh," +\
"mach,mach-uncompressed,mach-info-ext,mach-dose-ext,mach-min-rsquared,mach-chunk-size," +\
"pheno,sample-pheno,pheno-names,mphenos,all-pheno," + \
"covar,sample-covar,covar-numbers,covar-names,sex,missing-phenotype,maf,max-maf,gen,mind").split(","):
check_and_append(args, flag, general_arguments)
args.template = get_template_file(args)
job_list = None
map_file = None
impute_file_list = None
if args.file:
map_file = "%s.map" % (args.file)
elif args.map:
map_file = args.map
elif args.bfile:
map_file = "%s.bim" % (args.bfile)
elif args.bim:
map_file = args.bim
elif args.tfile:
map_file = "%s.tped"
elif args.tped:
map_file = args.tped
elif args.impute:
impute_file_list = args.impute
max_snp_count = args.snps_per_job
if map_file is not None:
job_list = split_chrom_jobs(args, map_file)
elif args.impute is not None:
job_list = split_impute_jobs(args, args.impute)
elif args.mach is not None:
job_list = split_mach_jobs(args, args.mach)
if job_list is None or len(job_list) == 0:
parser.print_usage(sys.stderr)
print >> sys.stderr, "\nThere were not jobs created. Did you specify the necessary data?"
else:
generate_jobs(args, job_list, " ".join(general_arguments))
sys.exit(0)
|
Main function which parses user options and generates jobs accordingly
|
26,383 |
def from_spec(spec, kwargs=None):
distribution = util.get_object(
obj=spec,
predefined_objects=tensorforce.core.distributions.distributions,
kwargs=kwargs
)
assert isinstance(distribution, Distribution)
return distribution
|
Creates a distribution from a specification dict.
|
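A hedged sketch of spec-driven construction; the 'gaussian' type name and the kwargs are assumptions about what the predefined distribution registry accepts.

# The 'gaussian' type name and the shape kwarg are assumptions.
distribution = Distribution.from_spec(
    spec=dict(type='gaussian'),
    kwargs=dict(shape=(2,))
)
assert isinstance(distribution, Distribution)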
26,384 |
def sync(self, vault_client, opt):
active_mounts = []
for audit_log in self.logs():
audit_log.sync(vault_client)
not_policies = self.sync_policies(vault_client)
not_auth = self.sync_auth(vault_client, not_policies)
active_mounts, not_mounts = self.sync_mounts(active_mounts,
not_auth,
vault_client)
sorted_resources = sorted(not_mounts, key=childless_first)
for resource in sorted_resources:
resource.sync(vault_client)
for mount in self.mounts():
if not find_backend(mount.path, active_mounts):
mount.unmount(vault_client)
if opt.remove_unknown:
self.prune(vault_client)
|
Synchronizes the context to the Vault server. This
has the effect of updating every resource which is
in the context and has changes pending.
|
26,385 |
def is_supported(cls, file=None, request=None, response=None,
url_info=None):
tests = (
(response, cls.is_response),
(file, cls.is_file),
(request, cls.is_request),
(url_info, cls.is_url)
)
for instance, method in tests:
if instance:
try:
result = method(instance)
except NotImplementedError:
pass
else:
if result:
return True
elif result is VeryFalse:
return VeryFalse
|
Given the hints, return whether the document is supported.
Args:
file: A file object containing the document.
request (:class:`.http.request.Request`): An HTTP request.
response (:class:`.http.request.Response`): An HTTP response.
url_info (:class:`.url.URLInfo`): A URLInfo.
Returns:
bool: If True, the reader should be able to read it.
|
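A brief sketch of the hint-based check; HtmlReader and response are placeholders for a concrete reader class and a fetched HTTP response.

# HtmlReader and response are placeholders; any class implementing the
# is_response/is_file/is_request/is_url hooks can be queried this way.
if HtmlReader.is_supported(response=response):
    print('reader can handle this document')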
26,386 |
def read_sla(self,filename,params=None,force=False,timerange=None,datatype=None,**kwargs):
from time import time
from datetime import timedelta
self.message(2,.format(datatype))
self._filename = filename
try:
self._ncfile = ncfile(self._filename, "r")
except Exception,e:
self.warning(1, repr(e))
return {}
if os.path.basename(filename).count() > os.path.basename(filename).count(): delim=
else : delim =
splitted=os.path.basename(filename).split(delim)
if len(splitted) > 3 :
if (datatype == ) | (datatype == ) : sat_name = splitted[2] if splitted[0] == else splitted[3]
elif datatype == : sat_name =
else : sat_name =
else :
sat_name="N/A"
par_list=[i.encode() for i in self._ncfile.variables.keys()]
for i in [,,] : par_list.pop(par_list.index(i))
nparam=len(par_list)
self.message(2,+str(nparam)++str(par_list))
lon = self.load_ncVar(,**kwargs)
lon[] = recale(lon[], degrees=True, zero_2pi=True)
lat = self.load_ncVar(,**kwargs)
ind, flag = in_limits(lon[],lat[],limit=self.limit)
dim_lon = lon[]
lat = lat[].compress(flag)
lon = lon[].compress(flag)
sz=np.shape(lon)
ndims=np.size(sz)
stDate = self.load_ncVar(,**kwargs)[]
dumVar = self.load_ncVar(,**kwargs)
nbCyc = dumVar[]
Ncycs = dumVar[][]
Ntra = dumVar[][]
nbTra = self.load_ncVar(,**kwargs)[]
DeltaT = self._ncfile.variables[][:] / 86400.
npts = self.load_ncVar(,**kwargs)[]
dumind=np.cumsum(npts)
date = ()
cycles = ()
tracks = ()
indcopy=ind.copy()
npts[npts.mask]=0
dumind[dumind.mask]=0
nbTra_copy=nbTra.copy()
toto=npts.copy()
concat_npts = not( nbCyc.shape[-1] > 1)
for i in np.arange(1,Ncycs,1.0,dtype=int) :
nbTra=np.ma.concatenate((nbTra,nbTra_copy))
if concat_npts : npts=np.ma.concatenate((npts,tuple((~nbCyc.T[i].mask)*1*npts)))
if concat_npts: npts=npts.reshape(nbCyc.shape[::-1]).T
else : npts=nbCyc
nbTra=nbTra.reshape(nbCyc.shape[::-1]).T
nbTra.mask=nbCyc.mask
npts=npts.flatten()
nbTra=nbTra.flatten()
nbCyc_flatten=nbCyc.flatten()
nbTra_flatten=nbTra.flatten()
stDate_flatten=stDate.flatten()
outInd=[]
for i,nc in enumerate(nbCyc.data.flatten()):
N=npts[i]
Nprev=npts[i-Ncycs] if i >= (Ncycs) and np.remainder(float(i),Ncycs) == 0 else 0
indcopy-=Nprev
curInd=tuple(sorted(set(xrange(N) if N > 0 else []).intersection(indcopy)))
ncur=len(curInd)
outInd+=map(operator.sub, curInd,(( (curInd[0] if len(curInd) > 0 else 0) - (outInd[-1] +1 if len(outInd) > 0 else 0) - len(ind)*(np.remainder(float(i),Ncycs)),)*ncur))
curInd=tuple(map(operator.mul, curInd, (DeltaT,)*ncur))
date+=tuple(map(operator.add, curInd, (stDate_flatten[i],)*ncur))
cycles+=(nbCyc_flatten[i],)*ncur
tracks+=(nbTra_flatten[i],)*ncur
date=np.ma.masked_array(date,mask=False)
cycles=np.ma.masked_array(cycles,mask=False)
tracks=np.ma.masked_array(tracks,mask=False)
outInd=np.array(outInd,dtype=int)
nt=len(date)
date.mask=(False,)*nt
cycles.mask=date.mask
tracks.mask=date.mask
dimStr = dim_lon
dimStr.pop()
nrec=len(date)
dimStr.update({:nrec})
for i in [,,,,] : par_list.pop(par_list.index(i))
outStr={:dimStr,
:lon,
:lat,
:date,
:cycles,
:tracks}
for param in par_list :
a = time()
dumVar = self.load_ncVar(param,Data=ind,**kwargs)
runtime = time() - a
dimStr=dumVar[]
dimStr.pop()
dimStr.pop()
dimStr[]=nrec
dimStr[]=len(dimStr.keys())-1
curDim = [str(dimname) for dimname in dimStr.keys()[1:]]
curDimval = [dimStr[dim] for dim in curDim]
flag = [(np.array(dimname) == outStr[].keys()).sum() == 0 for dimname in curDim]
dimUpdate = np.array(curDim).compress(flag)
for enum in enumerate(dimUpdate) :
self.message(3, .format(enum[1],np.array(curDimval).compress(flag)[enum[0]]))
outStr[].update({enum[1]:np.array(curDimval).compress(flag)[enum[0]]})
outStr[][]+=1
dumStr = {param.lower() : dumVar[].flatten()[outInd]}
outStr.update(dumStr)
id=np.repeat(sat_name,outStr[][])
outStr.update({:id})
self._ncfile.close()
return outStr
|
Read AVISO Along-Track products
:return outStr: Output data structure containing all recorded parameters as specificied by NetCDF file PARAMETER list.
:author: Renaud Dussurget
|
26,387 |
def outputMode(self, outputMode):
if not outputMode or type(outputMode) != str or len(outputMode.strip()) == 0:
raise ValueError('The output mode must be a non-empty string. Got: %s' % outputMode)
self._jwrite = self._jwrite.outputMode(outputMode)
return self
|
Specifies how data of a streaming DataFrame/Dataset is written to a streaming sink.
Options include:
* `append`: Only the new rows in the streaming DataFrame/Dataset will be written to
  the sink
* `complete`: All the rows in the streaming DataFrame/Dataset will be written to the sink
  every time there are some updates
* `update`: Only the rows that were updated in the streaming DataFrame/Dataset will be
  written to the sink every time there are some updates. If the query doesn't contain
  aggregations, it will be equivalent to `append` mode.
.. note:: Evolving.
>>> writer = sdf.writeStream.outputMode('append')
|
26,388 |
def parse_url(cls, string):
match = cls.URL_RE.match(string)
if not match:
raise InvalidKeyError(cls, string)
return match.groupdict()
|
If it can be parsed as a version_guid with no preceding org + offering, returns a dict
with key 'version_guid' and the value,
If it can be parsed as a org + offering, returns a dict
with key 'id' and optional keys 'branch' and 'version_guid'.
Raises:
InvalidKeyError: if string cannot be parsed -or- string ends with a newline.
|
26,389 |
def color_msg(msg, color):
" Return colored message "
return .join((COLORS.get(color, COLORS[]), msg, COLORS[]))
|
Return colored message
|
26,390 |
def blast_records_to_object(blast_records):
blast_objects_list = []
for blast_record in blast_records:
br = BlastRecord(**{: blast_record.query,
: blast_record.version,
: blast_record.expect,
: blast_record.application,
: blast_record.reference})
for alignment in blast_record.alignments:
al = Alignment(**{
: alignment.hit_def,
: alignment.title,
: alignment.length,
})
for hsp in alignment.hsps:
h = Hsp(**{
: hsp.align_length,
: hsp.bits,
: hsp.expect,
: hsp.frame,
: hsp.gaps,
: hsp.identities,
: hsp.match,
: hsp.num_alignments,
: hsp.positives,
: hsp.query,
: hsp.query_end,
: hsp.query_start,
: hsp.sbjct,
: hsp.sbjct_end,
: hsp.sbjct_start,
: hsp.score,
: hsp.strand,
: str(hsp),
})
al.hsp_list.append(h)
br.alignments.append(al)
blast_objects_list.append(br)
return blast_objects_list
|
Transforms biopython's blast record into blast object defined in django-blastplus app.
|
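A usage sketch with Biopython's XML parser; the result file path is a placeholder.

from Bio.Blast import NCBIXML

# 'blast_results.xml' is a placeholder path to a BLAST XML output file.
with open('blast_results.xml') as handle:
    records = blast_records_to_object(NCBIXML.parse(handle))
for record in records:
    print(len(record.alignments), 'alignments')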
26,391 |
def htmlFromThing(thing, title):
    try:
        thing2 = copy.copy(thing)
    except:
        print("crashed copying the thing! I can't generate HTML from it.")
        return None
    # NOTE: the HTML-building body of this function was lost during
    # extraction; only the trailing version/timestamp footer survived, and
    # the format string used here is a reconstruction.
    html = ""
    dt = datetime.datetime.now()
    html += "<i>generated by version %s %s</i>" % (
        __version__, dt.strftime("at %I:%M %p on %B %d, %Y"))
    return html
|
create pretty formatted HTML from a things dictionary.
|
26,392 |
def rpc_get_oneline_docstring(self, filename, source, offset):
line, column = pos_to_linecol(source, offset)
definitions = run_with_debug(jedi, ,
source=source, line=line, column=column,
path=filename, encoding=)
assignments = run_with_debug(jedi, ,
source=source, line=line, column=column,
path=filename, encoding=)
if definitions:
definition = definitions[0]
else:
definition = None
if assignments:
assignment = assignments[0]
else:
assignment = None
if definition:
if definition.type in [, ]:
raw_name = definition.name
name = .format(raw_name)
doc = definition.docstring().split()
elif definition.type in []:
raw_name = definition.name
name = .format(raw_name, definition.type)
doc = definition.docstring().split()
elif (definition.type in []
and hasattr(assignment, "name")):
raw_name = assignment.name
name = raw_name
doc = assignment.docstring().split()
else:
return None
lines = []
call = "{}(".format(raw_name)
doc.append()
for i in range(len(doc)):
if doc[i] == and len(lines) != 0:
paragraph = " ".join(lines)
lines = []
if call != paragraph[0:len(call)]:
break
paragraph = ""
continue
lines.append(doc[i])
onelinedoc = paragraph.split(, 1)
if len(onelinedoc) == 2:
onelinedoc = onelinedoc[0] +
else:
onelinedoc = onelinedoc[0]
if onelinedoc == :
onelinedoc = "No documentation"
return {"name": name,
"doc": onelinedoc}
return None
|
Return a oneline docstring for the symbol at offset
|
26,393 |
def set_branching_model(self, project, repository, data):
url = .format(
project=project,
repository=repository)
return self.put(url, data=data)
|
Set branching model
:param project:
:param repository:
:param data:
:return:
|
26,394 |
def delete_items_from_dict(d, to_delete):
if not isinstance(to_delete, list):
to_delete = [to_delete]
if isinstance(d, dict):
for single_to_delete in set(to_delete):
if single_to_delete in d.values():
for k, v in d.copy().items():
if v == single_to_delete:
del d[k]
for k, v in d.items():
delete_items_from_dict(v, to_delete)
elif isinstance(d, list):
for i in d:
delete_items_from_dict(i, to_delete)
return remove_none(d)
|
Recursively deletes items from a dict,
if the item's value(s) is in ``to_delete``.
|
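A quick example of the recursive deletion; note the function also relies on a remove_none helper defined elsewhere in the module, so the exact cleanup of emptied containers depends on that helper.

data = {'keep': 2, 'drop': 'x', 'nested': {'a': 'x', 'b': 3}, 'items': [{'c': 'x'}]}
cleaned = delete_items_from_dict(data, 'x')
# Every key whose value equals 'x' is removed at any nesting depth,
# leaving roughly {'keep': 2, 'nested': {'b': 3}, 'items': [{}]}.
print(cleaned)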
26,395 |
def set_cookie(self, key, value='', max_age=None, path='/', domain=None,
               secure=False, httponly=False, expires=None):
    # NOTE: the Morsel attribute names and the 'Set-Cookie' header name below
    # were lost during extraction and have been restored from the parameter
    # names and the standard cookie attribute set.
    key, value = key.encode(), value.encode()
    cookie = SimpleCookie({key: value})
    m = cookie[key]
    if max_age is not None:
        if isinstance(max_age, timedelta):
            m['max-age'] = int(total_seconds(max_age))
        else:
            m['max-age'] = int(max_age)
    if path is not None: m['path'] = path.encode()
    if domain is not None: m['domain'] = domain.encode()
    if secure: m['secure'] = True
    if httponly: m['httponly'] = True
    if expires is not None:
        if isinstance(expires, datetime):
            expires = total_seconds(expires - datetime.utcnow())
        elif isinstance(expires, timedelta):
            expires = total_seconds(expires)
        m['max-age'] = int(expires)
    self.headers.add_header('Set-Cookie', m.OutputString())
|
Set a response cookie.
Parameters:
key
: The cookie name.
value
: The cookie value.
max_age
: The maximum age of the cookie in seconds, or as a
datetime.timedelta object.
path
: Restrict the cookie to this path (default: '/').
domain
: Restrict the cookie to this domain.
secure
: When True, instruct the client to only send the cookie over HTTPS.
httponly
: When True, instruct the client to disallow javascript access to
the cookie.
expires
: Another way of specifying the maximum age of the cookie. Accepts
the same values as max_age (number of seconds, datetime.timedelta).
Additionally accepts a datetime.datetime object.
Note: a value of type int or float is interpreted as a number of
seconds in the future, *not* as Unix timestamp.
|
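A short usage example of the response cookie helper above; response is a placeholder for the object exposing set_cookie().

from datetime import timedelta

# 'response' is a placeholder for the object exposing set_cookie().
response.set_cookie('session_id', 'abc123',
                    max_age=timedelta(days=7),
                    path='/', secure=True, httponly=True)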
26,396 |
def initialize_weights(self, save_file):
tf.train.Saver().restore(self.sess, save_file)
|
Initialize the weights from the given save_file.
Assumes that the graph has been constructed, and the
save_file contains weights that match the graph. Used
to set the weights to a different version of the player
without redefining the entire graph.
|
26,397 |
def ensure_dir_exists(func):
"wrap a function that returns a dir, making sure it exists"
@functools.wraps(func)
def make_if_not_present():
dir = func()
if not os.path.isdir(dir):
os.makedirs(dir)
return dir
return make_if_not_present
|
wrap a function that returns a dir, making sure it exists
|
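An example of the decorator in use; the cache path is a placeholder.

import os

@ensure_dir_exists
def cache_dir():
    # Placeholder location; the decorator creates it on first call if missing.
    return os.path.expanduser('~/.cache/example-app')

path = cache_dir()   # the directory now exists on disk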
26,398 |
def send_mail_worker(config, mail, event):
log = ""
try:
if config.mail_ssl:
server = SMTP_SSL(config.mail_server, port=config.mail_server_port, timeout=30)
else:
server = SMTP(config.mail_server, port=config.mail_server_port, timeout=30)
# NOTE: the log message strings in this block were lost during extraction;
# the wording below is a reconstruction and should be treated as an assumption.
if config.mail_tls:
    log += 'Establishing TLS connection\n'
    server.starttls()
if config.mail_username != '':
    log += 'Logging in as ' + str(config.mail_username) + "\n"
    server.login(config.mail_username, config.mail_password)
else:
    log += 'No username configured, skipping SMTP login\n'
log += 'Sending message\n'
response_send = server.send_message(mail)
server.quit()
except timeout as e:
    log += 'SMTP connection timed out: ' + str(e) + "\n"
    return False, log, event
log += 'Server response: ' + str(response_send)
return True, log, event
|
Worker task to send out an email, which blocks the process unless it is threaded
|
26,399 |
def delete(self, queue, virtual_host=):
virtual_host = quote(virtual_host, '')
return self.http_client.delete(API_QUEUE %
(
virtual_host,
queue
))
|
Delete a Queue.
:param str queue: Queue name
:param str virtual_host: Virtual host name
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
|
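A hedged usage sketch; api stands for the management client, and exposing this wrapper as api.queue is an assumption.

# 'api' and the '.queue' attribute are assumptions about how the wrapper is exposed.
result = api.queue.delete('my_queue', virtual_host='/')
print(result)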