| id (int64, values 11 – 59.9k) | original (string, lengths 33 – 150k) | modified (string, lengths 37 – 150k) |
|---|---|---|
6,333 |
def test_JobStateUpdateAndJobMonitoringMultuple(wmsClient: WMSClient):
"""Now, let's submit some jobs. Different sites, types, inputs"""
jobMonitor = JobMonitoringClient()
jobStateUpdate = JobStateUpdateClient()
jobIDs = []
lfnss = [["/vo/1.txt", "/vo/2.txt"], ["/vo/1.txt", "/vo/3.txt", "/vo/4.txt"], []]
types = ["User", "Test"]
for lfns in lfnss:
for jobType in types:
job = helloWorldJob()
job.setDestination("DIRAC.Jenkins.ch")
job.setInputData(lfns)
job.setType(jobType)
jobDescription = createFile(job)
res = wmsClient.submitJob(job._toJDL(xmlFile=jobDescription))
assert res["OK"] is True, res["Message"]
jobID = res["Value"]
jobIDs.append(jobID)
res = jobMonitor.getSites()
assert res["OK"] is True, res["Message"]
assert set(res["Value"]) <= {"ANY", "DIRAC.Jenkins.ch", "Site"}
res = jobMonitor.getJobTypes()
assert res["OK"] is True, res["Message"]
assert sorted(res["Value"]) == sorted(types)
res = jobMonitor.getApplicationStates()
assert res["OK"] is True, res["Message"]
assert res["Value"] == ["app status", "Unknown"]
res = jobMonitor.getOwners()
assert res["OK"] is True, res["Message"]
res = jobMonitor.getOwnerGroup()
assert res["OK"] is True, res["Message"]
res = jobMonitor.getJobGroups()
assert res["OK"] is True, res["Message"]
resJG_empty = res["Value"]
res = jobMonitor.getJobGroups(None, datetime.datetime.utcnow())
assert res["OK"] is True, res["Message"]
resJG_olderThanNow = res["Value"]
assert resJG_empty == resJG_olderThanNow
res = jobMonitor.getJobGroups(None, datetime.datetime.utcnow() - datetime.timedelta(days=365))
assert res["OK"] is True, res["Message"]
resJG_olderThanOneYear = res["Value"]
assert set(resJG_olderThanOneYear).issubset(set(resJG_olderThanNow))
res = jobMonitor.getStates()
assert res["OK"] is True, res["Message"]
assert sorted(res["Value"]) in [[JobStatus.RECEIVED], sorted([JobStatus.RECEIVED, JobStatus.KILLED])]
res = jobMonitor.getMinorStates()
assert res["OK"] is True, res["Message"]
assert sorted(res["Value"]) in [
["Job accepted"],
sorted(["Job accepted", JobMinorStatus.RESCHEDULED]),
sorted(["Job accepted", "Marked for termination"]),
]
res = jobMonitor.getJobs()
assert res["OK"] is True, res["Message"]
assert {str(x) for x in jobIDs} <= set(res["Value"])
# res = jobMonitor.getCounters(attrList)
# self.assertTrue(res['OK'], res.get('Message'))
res = jobMonitor.getJobsSummary(jobIDs)
assert res["OK"] is True, res["Message"]
res = jobMonitor.getJobPageSummaryWeb({}, [], 0, 100)
assert res["OK"] is True, res["Message"]
res = jobStateUpdate.setJobStatusBulk(
jobID,
{
str(datetime.datetime.utcnow()): {
"Status": JobStatus.CHECKING,
"MinorStatus": "MinorStatus",
"Source": "Unknown",
}
},
False,
)
assert res["OK"] is True, res["Message"]
res = jobMonitor.getJobSummary(int(jobID))
assert res["OK"] is True, res["Message"]
assert res["Value"]["Status"] == JobStatus.CHECKING
assert res["Value"]["MinorStatus"] == "MinorStatus"
res = jobStateUpdate.setJobStatusBulk(
jobID,
{
str(datetime.datetime.utcnow() + datetime.timedelta(hours=1)): {
"Status": JobStatus.WAITING,
"MinorStatus": "MinorStatus",
"Source": "Unknown",
},
str(datetime.datetime.utcnow() + datetime.timedelta(hours=2)): {
"Status": JobStatus.MATCHED,
"MinorStatus": "MinorStatus-matched",
"Source": "Unknown",
},
},
False,
)
assert res["OK"] is True, res["Message"]
res = jobMonitor.getJobSummary(int(jobID))
assert res["OK"] is True, res["Message"]
assert res["Value"]["Status"] == JobStatus.MATCHED
assert res["Value"]["MinorStatus"] == "MinorStatus-matched"
res = jobStateUpdate.setJobsParameter({jobID: ["Whatever", "booh"]})
assert res["OK"] is True, res["Message"]
res = jobMonitor.getJobSummary(int(jobID))
assert res["OK"] is True, res["Message"]
assert res["Value"]["Status"] == JobStatus.MATCHED
assert res["Value"]["MinorStatus"] == "MinorStatus-matched"
res = jobStateUpdate.setJobAttribute(jobID, "Status", JobStatus.RUNNING)
assert res["OK"] is True, res["Message"]
res = jobMonitor.getJobSummary(int(jobID))
assert res["OK"] is True, res["Message"]
assert res["Value"]["Status"], JobStatus.RUNNING
# delete the jobs - this will just set its status to "deleted"
wmsClient.deleteJob(jobIDs)
|
def test_JobStateUpdateAndJobMonitoringMultuple(wmsClient: WMSClient):
"""Now, let's submit some jobs. Different sites, types, inputs"""
jobMonitor = JobMonitoringClient()
jobStateUpdate = JobStateUpdateClient()
jobIDs = []
lfnss = [["/vo/1.txt", "/vo/2.txt"], ["/vo/1.txt", "/vo/3.txt", "/vo/4.txt"], []]
types = ["User", "Test"]
for lfns in lfnss:
for jobType in types:
job = helloWorldJob()
job.setDestination("DIRAC.Jenkins.ch")
job.setInputData(lfns)
job.setType(jobType)
jobDescription = createFile(job)
res = wmsClient.submitJob(job._toJDL(xmlFile=jobDescription))
assert res["OK"] is True, res["Message"]
jobID = res["Value"]
jobIDs.append(jobID)
res = jobMonitor.getSites()
assert res["OK"] is True, res["Message"]
assert set(res["Value"]) <= {"ANY", "DIRAC.Jenkins.ch", "Site"}
res = jobMonitor.getJobTypes()
assert res["OK"] is True, res["Message"]
assert sorted(res["Value"]) == sorted(types)
res = jobMonitor.getApplicationStates()
assert res["OK"] is True, res["Message"]
assert res["Value"] == ["app status", "Unknown"]
res = jobMonitor.getOwners()
assert res["OK"] is True, res["Message"]
res = jobMonitor.getOwnerGroup()
assert res["OK"] is True, res["Message"]
res = jobMonitor.getJobGroups()
assert res["OK"] is True, res["Message"]
resJG_empty = res["Value"]
res = jobMonitor.getJobGroups(None, datetime.datetime.utcnow())
assert res["OK"] is True, res["Message"]
resJG_olderThanNow = res["Value"]
assert resJG_empty == resJG_olderThanNow
res = jobMonitor.getJobGroups(None, datetime.datetime.utcnow() - datetime.timedelta(days=365))
assert res["OK"] is True, res["Message"]
resJG_olderThanOneYear = res["Value"]
assert set(resJG_olderThanOneYear).issubset(set(resJG_olderThanNow))
res = jobMonitor.getStates()
assert res["OK"] is True, res["Message"]
assert sorted(res["Value"]) in [[JobStatus.RECEIVED], sorted([JobStatus.RECEIVED, JobStatus.KILLED])]
res = jobMonitor.getMinorStates()
assert res["OK"] is True, res["Message"]
assert sorted(res["Value"]) in [
["Job accepted"],
sorted(["Job accepted", JobMinorStatus.RESCHEDULED]),
sorted(["Job accepted", "Marked for termination"]),
]
res = jobMonitor.getJobs()
assert res["OK"] is True, res["Message"]
assert {str(x) for x in jobIDs} <= set(res["Value"])
# res = jobMonitor.getCounters(attrList)
# self.assertTrue(res['OK'], res.get('Message'))
res = jobMonitor.getJobsSummary(jobIDs)
assert res["OK"] is True, res["Message"]
res = jobMonitor.getJobPageSummaryWeb({}, [], 0, 100)
assert res["OK"] is True, res["Message"]
res = jobStateUpdate.setJobStatusBulk(
jobID,
{
str(datetime.datetime.utcnow()): {
"Status": JobStatus.CHECKING,
"MinorStatus": "MinorStatus",
"Source": "Unknown",
}
},
False,
)
assert res["OK"] is True, res["Message"]
res = jobMonitor.getJobSummary(int(jobID))
assert res["OK"] is True, res["Message"]
assert res["Value"]["Status"] == JobStatus.CHECKING
assert res["Value"]["MinorStatus"] == "MinorStatus"
res = jobStateUpdate.setJobStatusBulk(
jobID,
{
str(datetime.datetime.utcnow() + datetime.timedelta(hours=1)): {
"Status": JobStatus.WAITING,
"MinorStatus": "MinorStatus",
"Source": "Unknown",
},
str(datetime.datetime.utcnow() + datetime.timedelta(hours=2)): {
"Status": JobStatus.MATCHED,
"MinorStatus": "MinorStatus-matched",
"Source": "Unknown",
},
},
False,
)
assert res["OK"] is True, res["Message"]
res = jobMonitor.getJobSummary(int(jobID))
assert res["OK"] is True, res["Message"]
assert res["Value"]["Status"] == JobStatus.MATCHED
assert res["Value"]["MinorStatus"] == "MinorStatus-matched"
res = jobStateUpdate.setJobsParameter({jobID: ["Whatever", "booh"]})
assert res["OK"] is True, res["Message"]
res = jobMonitor.getJobSummary(int(jobID))
assert res["OK"] is True, res["Message"]
assert res["Value"]["Status"] == JobStatus.MATCHED
assert res["Value"]["MinorStatus"] == "MinorStatus-matched"
res = jobStateUpdate.setJobAttribute(jobID, "Status", JobStatus.RUNNING)
assert res["OK"] is True, res["Message"]
res = jobMonitor.getJobSummary(int(jobID))
assert res["OK"] is True, res["Message"]
assert res["Value"]["Status"] == JobStatus.RUNNING
# delete the jobs - this will just set its status to "deleted"
wmsClient.deleteJob(jobIDs)
|
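The fix in the pair above replaces `assert res["Value"]["Status"], JobStatus.RUNNING` with a real equality check. A standalone sketch (plain Python, no DIRAC imports) of why the original form never fails:

```python
# `assert x, y` treats y as the failure message and only checks that x is truthy,
# so the intended comparison is silently skipped.
status = "Running"
expected = "Failed"

assert status, expected        # always passes: only `status` being truthy is checked

try:
    assert status == expected  # the intended check; raises here because the values differ
except AssertionError:
    print("the real comparison ran and failed, as it should")
```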
27,741 |
def test_strtobooL():
assert _strtobool("YES")
assert not _strtobool("NO")
with pytest.raises(ValueError):
_strtobool("unknown")
|
def test_strtobool():
assert _strtobool("YES")
assert not _strtobool("NO")
with pytest.raises(ValueError):
_strtobool("unknown")
|
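For context, a minimal sketch of a `_strtobool` helper consistent with the test above (a hypothetical reimplementation in the spirit of `distutils.util.strtobool`; the project's actual helper may differ):

```python
def _strtobool(value: str) -> bool:
    """Map common truthy/falsy strings to bool; raise ValueError otherwise."""
    truthy = {"y", "yes", "t", "true", "on", "1"}
    falsy = {"n", "no", "f", "false", "off", "0"}
    v = value.strip().lower()
    if v in truthy:
        return True
    if v in falsy:
        return False
    raise ValueError(f"invalid truth value {value!r}")
```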
54,861 |
def laplacian_pe(g, k, padding=False, return_eigval=False):
r"""Laplacian Positional Encoding, as introduced in
`Benchmarking Graph Neural Networks
<https://arxiv.org/abs/2003.00982>`__
This function computes the laplacian positional encodings as the
k smallest non-trivial eigenvectors.
Parameters
----------
g : DGLGraph
The input graph. Must be homogeneous.
k : int
Number of smallest non-trivial eigenvectors to use for positional encoding.
padding : bool
If padding is False, raise an exception when k>=n.
Else return (n-1) laplacian positional encodings and (k-n+1) zero encodings
(padding) when k>=n.
n is the number of nodes in the given graph.
return_eigval : bool
If return_eigval is True, return laplacian eigenvalues together with eigenvectors.
Else return laplacian eigenvectors only.
Returns
-------
Tensor
The laplacian positional encodings of shape :math:`(N, k)`, where :math:`N` is the
number of nodes in the input graph.
Two tensors
The eigenvalues of shape :math:`N` and
the laplacian positional encodings of shape :math:`(N, k)`, where :math:`N` is the
number of nodes in the input graph.
Example
-------
>>> import dgl
>>> g = dgl.rand_graph(6, 20)
>>> dgl.laplacian_pe(g, 2)
tensor([[ 0.7251, -0.6224],
[-0.0000, 0.5390],
[-0.4065, 0.4042],
[-0.0744, 0.0519],
[-0.4694, -0.1556],
[ 0.2881, -0.3631]])
>>> dgl.laplacian_pe(g, 6, padding=True)
tensor([[-7.2513e-01, -6.2238e-01, -1.8517e-09, 1.8517e-09, 4.3006e-01, 0.0000e+00],
[ 0.0000e+00, 5.3900e-01, -0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00],
[ 4.0653e-01, 4.0425e-01, 6.4145e-09, -6.4145e-09, 2.8766e-01, 0.0000e+00],
[ 7.4425e-02, 5.1865e-02, -7.0711e-01, -7.0711e-01, -6.5471e-01, 0.0000e+00],
[ 4.6942e-01, -1.5560e-01, -7.4068e-09, 7.4068e-09, 3.3216e-01, 0.0000e+00],
[-2.8814e-01, -3.6306e-01, 7.0711e-01, 7.0711e-01, -4.3968e-01, 0.0000e+00]])
>>> dgl.laplacian_pe(g, 6, padding=True, return_eigval=True)
(tensor([0.5684, 0.7500, 1.0000, 1.0000, 1.5149, nan]),
tensor([[ 7.2513e-01, -6.2238e-01, 1.8517e-09, -1.8517e-09, -4.3006e-01, 0.0000e+00],
[-0.0000e+00, 5.3900e-01, 0.0000e+00, -0.0000e+00, -0.0000e+00, 0.0000e+00],
[-4.0653e-01, 4.0425e-01, -6.4145e-09, 6.4145e-09, -2.8766e-01, 0.0000e+00],
[-7.4425e-02, 5.1865e-02, 7.0711e-01, 7.0711e-01, 6.5471e-01, 0.0000e+00],
[-4.6942e-01, -1.5560e-01, 7.4068e-09, -7.4068e-09, -3.3216e-01, 0.0000e+00],
[ 2.8814e-01, -3.6306e-01, -7.0711e-01, -7.0711e-01, 4.3968e-01, 0.0000e+00]]))
"""
# check for the "k < n" constraint
n = g.num_nodes()
if not padding and n <= k:
assert "the number of eigenvectors k must be smaller than the number of nodes n, " + \
f"{k} and {n} detected."
# get laplacian matrix as I - D^-0.5 * A * D^-0.5
A = g.adj(scipy_fmt='csr') # adjacency matrix
N = sparse.diags(F.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float) # D^-1/2
L = sparse.eye(g.num_nodes()) - N * A * N
# select eigenvectors with smaller eigenvalues O(n + klogk)
EigVal, EigVec = np.linalg.eig(L.toarray())
max_freqs = min(n-1,k)
kpartition_indices = np.argpartition(EigVal, max_freqs)[:max_freqs+1]
topk_eigvals = EigVal[kpartition_indices]
topk_indices = kpartition_indices[topk_eigvals.argsort()][1:]
topk_EigVec = np.real(EigVec[:, topk_indices])
eigvals = F.tensor(np.real(EigVal[topk_indices]), dtype=F.float32)
# get random flip signs
rand_sign = 2 * (np.random.rand(max_freqs) > 0.5) - 1.
PE = F.astype(F.tensor(rand_sign * topk_EigVec), F.float32)
# add paddings
if n <= k:
temp_EigVec = F.zeros([n, k-n+1], dtype=F.float32, ctx=F.context(PE))
PE = F.cat([PE, temp_EigVec], dim=1)
temp_EigVal = F.tensor(np.full(k-n+1, np.nan), F.float32)
eigvals = F.cat([eigvals, temp_EigVal], dim=0)
if return_eigval:
return eigvals, PE
return PE
|
def laplacian_pe(g, k, padding=False, return_eigval=False):
r"""Laplacian Positional Encoding, as introduced in
`Benchmarking Graph Neural Networks
<https://arxiv.org/abs/2003.00982>`__
This function computes the laplacian positional encodings as the
k smallest non-trivial eigenvectors.
Parameters
----------
g : DGLGraph
The input graph. Must be homogeneous.
k : int
Number of smallest non-trivial eigenvectors to use for positional encoding.
padding : bool
If padding is False, raise an exception when k>=n.
Else return (n-1) laplacian positional encodings and (k-n+1) zero encodings
(padding) when k>=n.
n is the number of nodes in the given graph.
return_eigval : bool
If return_eigval is True, return laplacian eigenvalues together with eigenvectors.
Else return laplacian eigenvectors only.
Returns
-------
Tensor or (Tensor, Tensor)
Return the laplacian positional encodings of shape :math:`(N, k)`, where :math:`N` is the
number of nodes in the input graph, when :attr:`return_eigval` is False. The eigenvalues
of shape :math:`N` is additionally returned as the first element when :attr:`return_eigval`
is True.
Example
-------
>>> import dgl
>>> g = dgl.rand_graph(6, 20)
>>> dgl.laplacian_pe(g, 2)
tensor([[ 0.7251, -0.6224],
[-0.0000, 0.5390],
[-0.4065, 0.4042],
[-0.0744, 0.0519],
[-0.4694, -0.1556],
[ 0.2881, -0.3631]])
>>> dgl.laplacian_pe(g, 6, padding=True)
tensor([[-7.2513e-01, -6.2238e-01, -1.8517e-09, 1.8517e-09, 4.3006e-01, 0.0000e+00],
[ 0.0000e+00, 5.3900e-01, -0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00],
[ 4.0653e-01, 4.0425e-01, 6.4145e-09, -6.4145e-09, 2.8766e-01, 0.0000e+00],
[ 7.4425e-02, 5.1865e-02, -7.0711e-01, -7.0711e-01, -6.5471e-01, 0.0000e+00],
[ 4.6942e-01, -1.5560e-01, -7.4068e-09, 7.4068e-09, 3.3216e-01, 0.0000e+00],
[-2.8814e-01, -3.6306e-01, 7.0711e-01, 7.0711e-01, -4.3968e-01, 0.0000e+00]])
>>> dgl.laplacian_pe(g, 6, padding=True, return_eigval=True)
(tensor([0.5684, 0.7500, 1.0000, 1.0000, 1.5149, nan]),
tensor([[ 7.2513e-01, -6.2238e-01, 1.8517e-09, -1.8517e-09, -4.3006e-01, 0.0000e+00],
[-0.0000e+00, 5.3900e-01, 0.0000e+00, -0.0000e+00, -0.0000e+00, 0.0000e+00],
[-4.0653e-01, 4.0425e-01, -6.4145e-09, 6.4145e-09, -2.8766e-01, 0.0000e+00],
[-7.4425e-02, 5.1865e-02, 7.0711e-01, 7.0711e-01, 6.5471e-01, 0.0000e+00],
[-4.6942e-01, -1.5560e-01, 7.4068e-09, -7.4068e-09, -3.3216e-01, 0.0000e+00],
[ 2.8814e-01, -3.6306e-01, -7.0711e-01, -7.0711e-01, 4.3968e-01, 0.0000e+00]]))
"""
# check for the "k < n" constraint
n = g.num_nodes()
if not padding and n <= k:
assert "the number of eigenvectors k must be smaller than the number of nodes n, " + \
f"{k} and {n} detected."
# get laplacian matrix as I - D^-0.5 * A * D^-0.5
A = g.adj(scipy_fmt='csr') # adjacency matrix
N = sparse.diags(F.asnumpy(g.in_degrees()).clip(1) ** -0.5, dtype=float) # D^-1/2
L = sparse.eye(g.num_nodes()) - N * A * N
# select eigenvectors with smaller eigenvalues O(n + klogk)
EigVal, EigVec = np.linalg.eig(L.toarray())
max_freqs = min(n-1,k)
kpartition_indices = np.argpartition(EigVal, max_freqs)[:max_freqs+1]
topk_eigvals = EigVal[kpartition_indices]
topk_indices = kpartition_indices[topk_eigvals.argsort()][1:]
topk_EigVec = np.real(EigVec[:, topk_indices])
eigvals = F.tensor(np.real(EigVal[topk_indices]), dtype=F.float32)
# get random flip signs
rand_sign = 2 * (np.random.rand(max_freqs) > 0.5) - 1.
PE = F.astype(F.tensor(rand_sign * topk_EigVec), F.float32)
# add paddings
if n <= k:
temp_EigVec = F.zeros([n, k-n+1], dtype=F.float32, ctx=F.context(PE))
PE = F.cat([PE, temp_EigVec], dim=1)
temp_EigVal = F.tensor(np.full(k-n+1, np.nan), F.float32)
eigvals = F.cat([eigvals, temp_EigVal], dim=0)
if return_eigval:
return eigvals, PE
return PE
|
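The function above builds the symmetric-normalized Laplacian I - D^-1/2 A D^-1/2 and keeps the k smallest non-trivial eigenvectors. A NumPy-only sketch of that core step (assumptions: dense symmetric adjacency, a hypothetical helper name, and the random sign flips and padding of the full implementation are omitted):

```python
import numpy as np

def laplacian_pe_dense(adj: np.ndarray, k: int) -> np.ndarray:
    """Return the k smallest non-trivial eigenvectors of the normalized Laplacian.

    adj is a dense, symmetric 0/1 adjacency matrix of shape (n, n).
    """
    n = adj.shape[0]
    deg = adj.sum(axis=1).clip(min=1.0)
    d_inv_sqrt = np.diag(deg ** -0.5)
    lap = np.eye(n) - d_inv_sqrt @ adj @ d_inv_sqrt   # I - D^-1/2 A D^-1/2
    eigval, eigvec = np.linalg.eigh(lap)              # eigh returns ascending eigenvalues
    return eigvec[:, 1:k + 1]                         # drop the trivial first eigenvector

triangle = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]], dtype=float)  # 3-node cycle
print(laplacian_pe_dense(triangle, k=2).shape)  # (3, 2)
```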
38,568 |
def uniquify_point_set(
points: np.ndarray[Any, np.dtype[np.float64]], tol: float = 1e-8
) -> Tuple[
np.ndarray[Any, np.dtype[np.float64]],
np.ndarray[Any, np.dtype[np.int64]],
np.ndarray[Any, np.dtype[np.int64]],
]:
"""Uniquify a set of points so that no two sets of points are closer than a
distance tol from each other.
This function is partially overlapping by the function unique_columns_tol,
but the latter is more general, as it provides fast treatment of integer
arrays.
FIXME: It should be possible to unify the two implementations, however,
more experience is needed before doing so.
Parameters:
points (np.ndarray, nd x n_pts): Columns to be uniquified.
tol (double, optional): Tolerance for when columns are considered equal.
Should be seen in connection with distance between the points in
the points (due to rounding errors). Defaults to 1e-8.
Returns:
np.ndarray: Unique columns.
new_2_old: Indices of the points that are preserved.
old_2_new: Index of the representation of old points in the reduced
list.
"""
# The implementation uses Scipy's KDTree implementation to efficiently get
# the distance between points.
num_p = points.shape[1]
# Transpose needed to comply with KDTree.
tree = KDTree(points.T)
# Get all pairs of points closer than the tolerance.
pairs = tree.query_pairs(tol, output_type="ndarray")
if pairs.size == 0:
# No points were found, we can return
return points, np.arange(num_p), np.arange(num_p)
# Process information to arrive at a unique point set. This is technical,
# since we need to deal with cases where more than two points coincide
# (example: if the points p1, p2 and p3 coincide, they will be identified
# either by the pairs {(i1, i2), (i1, i3)}, by {(i1, i2), (i2, i3)},
# or by {(i1, i3), (i2, i3)}).
# Sort the index pairs of identical points for simpler identification.
# NOTE: pairs, as returned by KDTree, is a num_pairs x 2 array, thus
# sorting the pairs should be along axis 1.
pair_arr = np.sort(pairs, axis=1)
# Sort the pairs along axis=1. The lexsort will make the sorting first
# according to pair_arr[:, 0] (the point with the lowest index in each
# pair), and then according to the second index (pair_arr[:, 1]). The
result will be a lexicographically ordered array.
Also note the transpose back to a 2 x num_pairs array.
sorted_arr = pair_arr[np.lexsort((pair_arr[:, 1], pair_arr[:, 0]))].T
# Find points that are both in the first and second row. This will identify
# triplets identified by pairs {(i1, i2), (i2, i3)} as described above.
duplicate = np.isin(sorted_arr[0], sorted_arr[1])
# Array with duplicates of the type {(i1, i2), (i1, i3)} removed.
reduced_arr = sorted_arr[:, np.logical_not(duplicate)]
# Also identify points that are not involved in any pairs, these should be
# included in the unique set. Append these to the point array.
not_in_pairs = np.setdiff1d(np.arange(points.shape[1]), pair_arr.ravel())
reduced_arr = np.hstack((reduced_arr, np.tile(not_in_pairs, (2, 1))))
# The array can still contain pairs of type {(i1, i2), (i1, i3)} and
# {(i1, i3), (i1, i3)}. These can be identified by a unique on the first
# row.
ia = np.unique(reduced_arr[0])
# Make a mapping from all points to the reduced set.
ib = np.arange(num_p)
_, inv_map = np.unique(reduced_arr[0], return_inverse=True)
ib[reduced_arr[0]] = inv_map
ib[reduced_arr[1]] = ib[reduced_arr[0]]
# Uniquify points.
upoints = points[:, ia]
# Done.
return upoints, ia, ib
|
def uniquify_point_set(
points: np.ndarray[Any, np.dtype[np.float64]], tol: float = 1e-8
) -> Tuple[
np.ndarray[Any, np.dtype[np.float64]],
np.ndarray[Any, np.dtype[np.int64]],
np.ndarray[Any, np.dtype[np.int64]],
]:
"""Uniquify a set of points so that no two sets of points are closer than a
distance tol from each other.
This function partially overlaps the function unique_columns_tol,
but the latter is more general, as it provides fast treatment of integer
arrays.
FIXME: It should be possible to unify the two implementations, however,
more experience is needed before doing so.
Parameters:
points (np.ndarray, nd x n_pts): Columns to be uniquified.
tol (double, optional): Tolerance for when columns are considered equal.
Should be seen in connection with distance between the points in
the points (due to rounding errors). Defaults to 1e-8.
Returns:
np.ndarray: Unique columns.
new_2_old: Indices of the points that are preserved.
old_2_new: Index of the representation of old points in the reduced
list.
"""
# The implementation uses Scipy's KDTree implementation to efficiently get
# the distance between points.
num_p = points.shape[1]
# Transpose needed to comply with KDTree.
tree = KDTree(points.T)
# Get all pairs of points closer than the tolerance.
pairs = tree.query_pairs(tol, output_type="ndarray")
if pairs.size == 0:
# No points were found, we can return
return points, np.arange(num_p), np.arange(num_p)
# Process information to arrive at a unique point set. This is technical,
# since we need to deal with cases where more than two points coincide
# (example: if the points p1, p2 and p3 coincide, they will be identified
# either by the pairs {(i1, i2), (i1, i3)}, by {(i1, i2), (i2, i3)},
# or by {(i1, i3), (i2, i3)}).
# Sort the index pairs of identical points for simpler identification.
# NOTE: pairs, as returned by KDTree, is a num_pairs x 2 array, thus
# sorting the pairs should be along axis 1.
pair_arr = np.sort(pairs, axis=1)
# Sort the pairs along axis=1. The lexsort will make the sorting first
# according to pair_arr[:, 0] (the point with the lowest index in each
# pair), and then according to the second index (pair_arr[:, 1]). The
result will be a lexicographically ordered array.
Also note the transpose back to a 2 x num_pairs array.
sorted_arr = pair_arr[np.lexsort((pair_arr[:, 1], pair_arr[:, 0]))].T
# Find points that are both in the first and second row. This will identify
# triplets identified by pairs {(i1, i2), (i2, i3)} as described above.
duplicate = np.isin(sorted_arr[0], sorted_arr[1])
# Array with duplicates of the type {(i1, i2), (i1, i3)} removed.
reduced_arr = sorted_arr[:, np.logical_not(duplicate)]
# Also identify points that are not involved in any pairs, these should be
# included in the unique set. Append these to the point array.
not_in_pairs = np.setdiff1d(np.arange(points.shape[1]), pair_arr.ravel())
reduced_arr = np.hstack((reduced_arr, np.tile(not_in_pairs, (2, 1))))
# The array can still contain pairs of type {(i1, i2), (i1, i3)} and
# {(i1, i3), (i1, i3)}. These can be identified by a unique on the first
# row.
ia = np.unique(reduced_arr[0])
# Make a mapping from all points to the reduced set.
ib = np.arange(num_p)
_, inv_map = np.unique(reduced_arr[0], return_inverse=True)
ib[reduced_arr[0]] = inv_map
ib[reduced_arr[1]] = ib[reduced_arr[0]]
# Uniquify points.
upoints = points[:, ia]
# Done.
return upoints, ia, ib
|
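A hypothetical usage sketch of `uniquify_point_set` above, assuming numpy and scipy.spatial.KDTree are imported as in the surrounding module; the expected outputs are shown as comments:

```python
import numpy as np

# Two pairs of (near-)coincident columns: 0/1 and 2/3.
pts = np.array([[0.0, 0.0, 1.0, 1.0 + 1e-12],
                [0.0, 0.0, 2.0, 2.0]])

unique_pts, new_2_old, old_2_new = uniquify_point_set(pts, tol=1e-8)
print(unique_pts.shape[1])  # 2 -> two unique columns remain
print(new_2_old)            # [0 2] -> indices of the columns that were kept
print(old_2_new)            # [0 0 1 1] -> each original column's index in the reduced set
```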
5,345 |
def certificate_managed(
name, days_remaining=90, append_certs=None, managed_private_key=None, **kwargs
):
"""
Manage a Certificate
name
Path to the certificate
days_remaining : 90
Recreate the certificate if the number of days remaining on it
are less than this number. The value should be less than
``days_valid``, otherwise the certificate will be recreated
every time the state is run. A value of 0 disables automatic
renewal.
append_certs:
A list of certificates to be appended to the managed file.
They must be valid PEM files, otherwise an error will be thrown.
managed_private_key:
Has no effect since v2016.11 and will be removed in Salt Aluminium.
Use a separate x509.private_key_managed call instead.
kwargs:
Any arguments supported by :py:func:`x509.create_certificate
<salt.modules.x509.create_certificate>` or :py:func:`file.managed
<salt.states.file.managed>` are supported.
not_before:
Initial validity date for the certificate. This date must be specified
in the format '%Y-%m-%d %H:%M:%S'.
.. versionadded:: 3001
not_after:
Final validity date for the certificate. This date must be specified in
the format '%Y-%m-%d %H:%M:%S'.
.. versionadded:: 3001
Examples:
.. code-block:: yaml
/etc/pki/ca.crt:
x509.certificate_managed:
- signing_private_key: /etc/pki/ca.key
- CN: ca.example.com
- C: US
- ST: Utah
- L: Salt Lake City
- basicConstraints: "critical CA:true"
- keyUsage: "critical cRLSign, keyCertSign"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid,issuer:always
- days_valid: 3650
- days_remaining: 0
- backup: True
.. code-block:: yaml
/etc/ssl/www.crt:
x509.certificate_managed:
- ca_server: pki
- signing_policy: www
- public_key: /etc/ssl/www.key
- CN: www.example.com
- days_valid: 90
- days_remaining: 30
- backup: True
"""
if "path" in kwargs:
name = kwargs.pop("path")
if "ca_server" in kwargs and "signing_policy" not in kwargs:
raise salt.exceptions.SaltInvocationError(
"signing_policy must be specified if ca_server is."
)
if (
"public_key" not in kwargs
and "signing_private_key" not in kwargs
and "csr" not in kwargs
):
raise salt.exceptions.SaltInvocationError(
"public_key or signing_private_key must be specified."
)
if managed_private_key:
salt.utils.versions.warn_until(
"Aluminium",
"Passing 'managed_private_key' to x509.certificate_managed has no effect and "
"will be removed Salt Aluminium. Use a separate x509.private_key_managed call instead.",
)
ret = {"name": name, "result": False, "changes": {}, "comment": ""}
is_valid, invalid_reason, current_cert_info = _certificate_is_valid(
name, days_remaining, append_certs, **kwargs
)
if is_valid:
file_args, extra_args = _get_file_args(name, **kwargs)
return _certificate_file_managed(ret, file_args)
if __opts__["test"]:
file_args, extra_args = _get_file_args(name, **kwargs)
# Use empty contents for file.managed in test mode.
# We don't want to generate a new certificate, even in memory,
# for security reasons.
# Using an empty string instead of omitting it will at least
# show the old certificate in the diff.
file_args["contents"] = ""
ret = _certificate_file_managed(ret, file_args)
ret["result"] = None
ret["comment"] = "Certificate {} will be created".format(name)
ret["changes"]["Status"] = {
"Old": invalid_reason,
"New": "Certificate will be valid and up to date",
}
return ret
contents = __salt__["x509.create_certificate"](text=True, **kwargs)
# Check the module actually returned a cert and not an error message as a string
try:
__salt__["x509.read_certificate"](contents)
except salt.exceptions.SaltInvocationError as e:
ret["result"] = False
ret[
"comment"
] = "An error occurred creating the certificate {}. The result returned from x509.create_certificate is not a valid PEM file:\n{}".format(
name, str(e)
)
return ret
if not append_certs:
append_certs = []
for append_file in append_certs:
try:
append_file_contents = __salt__["x509.get_pem_entry"](
append_file, pem_type="CERTIFICATE"
)
contents += append_file_contents
except salt.exceptions.SaltInvocationError as e:
ret["result"] = False
ret[
"comment"
] = "{} is not a valid certificate file, cannot append it to the certificate {}.\nThe error returned by the x509 module was:\n{}".format(
append_file, name, str(e)
)
return ret
file_args, extra_args = _get_file_args(name, **kwargs)
file_args["contents"] = contents
ret = _certificate_file_managed(ret, file_args)
if ret["result"]:
ret["changes"]["Certificate"] = {
"Old": current_cert_info,
"New": __salt__["x509.read_certificate"](certificate=name),
}
ret["changes"]["Status"] = {
"Old": invalid_reason,
"New": "Certificate is valid and up to date",
}
return ret
|
def certificate_managed(
name, days_remaining=90, append_certs=None, managed_private_key=None, **kwargs
):
"""
Manage a Certificate
name
Path to the certificate
days_remaining : 90
Recreate the certificate if the number of days remaining on it
are less than this number. The value should be less than
``days_valid``, otherwise the certificate will be recreated
every time the state is run. A value of 0 disables automatic
renewal.
append_certs:
A list of certificates to be appended to the managed file.
They must be valid PEM files, otherwise an error will be thrown.
managed_private_key:
Has no effect since v2016.11 and will be removed in Salt Aluminium.
Use a separate x509.private_key_managed call instead.
kwargs:
Any arguments supported by :py:func:`x509.create_certificate
<salt.modules.x509.create_certificate>` or :py:func:`file.managed
<salt.states.file.managed>` are supported.
not_before:
Initial validity date for the certificate. This date must be specified
in the format '%Y-%m-%d %H:%M:%S'.
.. versionadded:: 3001
not_after:
Final validity date for the certificate. This date must be specified in
the format '%Y-%m-%d %H:%M:%S'.
.. versionadded:: 3001
Examples:
.. code-block:: yaml
/etc/pki/ca.crt:
x509.certificate_managed:
- signing_private_key: /etc/pki/ca.key
- CN: ca.example.com
- C: US
- ST: Utah
- L: Salt Lake City
- basicConstraints: "critical CA:true"
- keyUsage: "critical cRLSign, keyCertSign"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid,issuer:always
- days_valid: 3650
- days_remaining: 0
- backup: True
.. code-block:: yaml
/etc/ssl/www.crt:
x509.certificate_managed:
- ca_server: pki
- signing_policy: www
- public_key: /etc/ssl/www.key
- CN: www.example.com
- days_valid: 90
- days_remaining: 30
- backup: True
"""
if "path" in kwargs:
name = kwargs.pop("path")
if "ca_server" in kwargs and "signing_policy" not in kwargs:
raise salt.exceptions.SaltInvocationError(
"signing_policy must be specified if ca_server is."
)
if (
"public_key" not in kwargs
and "signing_private_key" not in kwargs
and "csr" not in kwargs
):
raise salt.exceptions.SaltInvocationError(
"public_key, signing_private_key, or csr must be specified."
)
if managed_private_key:
salt.utils.versions.warn_until(
"Aluminium",
"Passing 'managed_private_key' to x509.certificate_managed has no effect and "
"will be removed Salt Aluminium. Use a separate x509.private_key_managed call instead.",
)
ret = {"name": name, "result": False, "changes": {}, "comment": ""}
is_valid, invalid_reason, current_cert_info = _certificate_is_valid(
name, days_remaining, append_certs, **kwargs
)
if is_valid:
file_args, extra_args = _get_file_args(name, **kwargs)
return _certificate_file_managed(ret, file_args)
if __opts__["test"]:
file_args, extra_args = _get_file_args(name, **kwargs)
# Use empty contents for file.managed in test mode.
# We don't want to generate a new certificate, even in memory,
# for security reasons.
# Using an empty string instead of omitting it will at least
# show the old certificate in the diff.
file_args["contents"] = ""
ret = _certificate_file_managed(ret, file_args)
ret["result"] = None
ret["comment"] = "Certificate {} will be created".format(name)
ret["changes"]["Status"] = {
"Old": invalid_reason,
"New": "Certificate will be valid and up to date",
}
return ret
contents = __salt__["x509.create_certificate"](text=True, **kwargs)
# Check the module actually returned a cert and not an error message as a string
try:
__salt__["x509.read_certificate"](contents)
except salt.exceptions.SaltInvocationError as e:
ret["result"] = False
ret[
"comment"
] = "An error occurred creating the certificate {}. The result returned from x509.create_certificate is not a valid PEM file:\n{}".format(
name, str(e)
)
return ret
if not append_certs:
append_certs = []
for append_file in append_certs:
try:
append_file_contents = __salt__["x509.get_pem_entry"](
append_file, pem_type="CERTIFICATE"
)
contents += append_file_contents
except salt.exceptions.SaltInvocationError as e:
ret["result"] = False
ret[
"comment"
] = "{} is not a valid certificate file, cannot append it to the certificate {}.\nThe error returned by the x509 module was:\n{}".format(
append_file, name, str(e)
)
return ret
file_args, extra_args = _get_file_args(name, **kwargs)
file_args["contents"] = contents
ret = _certificate_file_managed(ret, file_args)
if ret["result"]:
ret["changes"]["Certificate"] = {
"Old": current_cert_info,
"New": __salt__["x509.read_certificate"](certificate=name),
}
ret["changes"]["Status"] = {
"Old": invalid_reason,
"New": "Certificate is valid and up to date",
}
return ret
|
14,028 |
def covered_by(data, other):
if compat.USE_PYGEOS:
return _binary_method("covers", data, other)
else:
raise NotImplementedError('covered_by is only implemented for pygeos, not shapely')
|
def covered_by(data, other):
if compat.USE_PYGEOS:
return _binary_method("covered_by", data, other)
else:
raise NotImplementedError('covered_by is only implemented for pygeos, not shapely')
|
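The fix above dispatches to "covered_by" instead of "covers". A small standalone Shapely sketch (recent Shapely versions expose both predicates as geometry methods) showing that the two relations are transposes of each other rather than interchangeable:

```python
from shapely.geometry import Point

small = Point(0, 0).buffer(1.0)   # disk of radius 1
big = Point(0, 0).buffer(2.0)     # disk of radius 2

print(small.covered_by(big))  # True: every point of `small` lies in `big`
print(small.covers(big))      # False: `small` does not contain `big`
print(big.covers(small))      # True: the transposed relation
```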
31,284 |
def main():
try:
args = demisto.args()
last_seen_gte = args.get('from')
last_seen_lte = args.get('to')
limit = args.get('limit', '100')
get_endpoints_args = {'limit': limit}
get_endpoints_args = {}
if last_seen_gte:
get_endpoints_args['last_seen_gte'] = last_seen_gte
if last_seen_lte and last_seen_lte != '0001-01-01T00:00:00Z':
get_endpoints_args['last_seen_lte'] = last_seen_lte
res = demisto.executeCommand('xdr-get-endpoints', get_endpoints_args)
if isError(res[0]):
return_error(f'Error occurred while trying to get XDR endpoints: {res[0].get("Contents")}')
endpoints = res[0]['Contents']
connected_endpoints = 0
for endpoint in endpoints:
if endpoint.get('endpoint_status') == 'CONNECTED':
connected_endpoints = connected_endpoints + 1
return_outputs(str(connected_endpoints))
except Exception as ex:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute XDRConnectedEndpoints. Error: {str(ex)}')
|
def main():
try:
args = demisto.args()
last_seen_gte = args.get('from')
last_seen_lte = args.get('to')
limit = args.get('limit', '100')
get_endpoints_args = {'limit': limit}
get_endpoints_args = {}
if last_seen_gte:
get_endpoints_args['last_seen_gte'] = last_seen_gte
if last_seen_lte and last_seen_lte != '0001-01-01T00:00:00Z':
get_endpoints_args['last_seen_lte'] = last_seen_lte
res = demisto.executeCommand('xdr-get-endpoints', get_endpoints_args)
if isError(res[0]):
return_error(f'Error occurred while trying to get XDR endpoints: {get_error(res[0])}')
endpoints = res[0]['Contents']
connected_endpoints = 0
for endpoint in endpoints:
if endpoint.get('endpoint_status') == 'CONNECTED':
connected_endpoints = connected_endpoints + 1
return_outputs(str(connected_endpoints))
except Exception as ex:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute XDRConnectedEndpoints. Error: {str(ex)}')
|
34,625 |
def set_train_in_chunks_arguments(parser: argparse.ArgumentParser) -> None:
"""Set the command line arguments for the command 'rasa train-in-chunks'.
Args:
parser: the parser to add the arguments to
"""
add_data_param(parser)
add_config_param(parser)
add_domain_param(parser)
add_out_param(parser, help_text="Directory where your models should be stored.")
add_augmentation_param(parser)
add_num_threads_param(parser)
add_model_name_param(parser)
add_force_param(parser)
|
def set_train_in_chunks_arguments(parser: argparse.ArgumentParser) -> None:
"""Sets the command line arguments for the command 'rasa train-in-chunks'.
Args:
parser: the parser to add the arguments to
"""
add_data_param(parser)
add_config_param(parser)
add_domain_param(parser)
add_out_param(parser, help_text="Directory where your models should be stored.")
add_augmentation_param(parser)
add_num_threads_param(parser)
add_model_name_param(parser)
add_force_param(parser)
|
37,244 |
def pulse_drawer(data: Union[SamplePulse, ScheduleComponent],
dt: int = 1,
style: Union[PulseStyle, SchedStyle] = None,
filename: str = None,
interp_method: Callable = None,
scale: float = None,
channel_scales: Dict[Channel, float] = None,
channels_to_plot: List[Channel] = None,
plot_all: bool = False,
plot_range: Tuple[Union[int, float], Union[int, float]] = None,
interactive: bool = False,
table: bool = True,
label: bool = False,
framechange: bool = True,
channels: List[Channel] = None,
scaling: float = None,
show_framechange_channels: bool = True,
draw_title: bool = False):
"""Plot the interpolated envelope of pulse and schedule.
Args:
data: Pulse or schedule object to plot.
dt: Time interval of samples. Pulses are visualized in the unit of
cycle time if not provided.
style: A style sheet to configure plot appearance.
See :mod:`~qiskit.visualization.pulse.qcstyle` for more information.
filename: Name required to save pulse image. The drawer just returns
`matplot.Figure` object if not provided.
interp_method: Interpolation function. Interpolation is disabled by default.
See :mod:`~qiskit.visualization.pulse.interpolation` for more information.
scale: Scaling of waveform amplitude. Pulses are automatically
scaled channel by channel if not provided.
channel_scales: Dictionary of scale factor for specific channels.
Scale of channels not specified here is overwritten by `scale`.
channels_to_plot: Deprecated, see `channels`.
plot_all: When set `True` plot empty channels.
plot_range: A tuple of time range to plot.
interactive: When set `True` show the circuit in a new window.
This depends on the matplotlib backend being used supporting this.
table: When set `True` draw event table for supported commands.
label: When set `True` draw label for individual instructions.
framechange: When set `True` draw framechange indicators.
scaling: Deprecated, see `scale`.
channels: A list of channel names to plot.
All non-empty channels are shown if not provided.
show_framechange_channels: When set `True` plot channels
with only framechange instructions.
draw_title: When set 'True' plot will have title.
Returns:
matplotlib.figure.Figure: A matplotlib figure object for the pulse envelope.
Example:
This example shows how to visualize your pulse schedule.
Pulse names are added to the plot, unimportant channels are removed
and the time window is truncated to draw out U3 pulse sequence of interest.
.. jupyter-execute::
import numpy as np
import qiskit
from qiskit import pulse
from qiskit.test.mock.backends.almaden import FakeAlmaden
inst_map = FakeAlmaden().defaults().instruction_schedule_map
sched = pulse.Schedule()
sched += inst_map.get('u3', 0, np.pi, 0, np.pi)
sched += inst_map.get('measure', list(range(20))) << sched.duration
channels = [pulse.DriveChannel(0), pulse.MeasureChannel(0)]
scales = {pulse.DriveChannel(0): 10}
qiskit.visualization.pulse_drawer(sched,
channels=channels,
plot_range=(0, 1000),
label=True,
channel_scales=scales)
You are also able to call visualization module from the instance method::
sched.draw(channels=channels, plot_range=(0, 1000), label=True, channel_scales=scales)
To customize the format of the schedule plot, you can setup your style sheet.
.. jupyter-execute::
import numpy as np
import qiskit
from qiskit import pulse
from qiskit.test.mock.backends.almaden import FakeAlmaden
inst_map = FakeAlmaden().defaults().instruction_schedule_map
sched = pulse.Schedule()
sched += inst_map.get('u3', 0, np.pi, 0, np.pi)
sched += inst_map.get('measure', list(range(20))) << sched.duration
# setup style sheet
my_style = qiskit.visualization.SchedStyle(
figsize = (10, 5),
bg_color='w',
d_ch_color = ['#32cd32', '#556b2f'])
channels = [pulse.DriveChannel(0), pulse.MeasureChannel(0)]
scales = {pulse.DriveChannel(0): 10}
qiskit.visualization.pulse_drawer(sched, style=my_style,
channels=channels,
plot_range=(0, 1000),
label=True,
channel_scales=scales)
Raises:
VisualizationError: when invalid data is given
ImportError: when matplotlib is not installed
"""
if scaling is not None:
warnings.warn('The parameter "scaling" is being replaced by "scale"',
DeprecationWarning, 3)
scale = scaling
if channels_to_plot:
warnings.warn('The parameter "channels_to_plot" is being replaced by "channels"',
DeprecationWarning, 3)
channels = channels_to_plot
if not HAS_MATPLOTLIB:
raise ImportError('Must have Matplotlib installed.')
if isinstance(data, SamplePulse):
drawer = _matplotlib.SamplePulseDrawer(style=style)
image = drawer.draw(data, dt=dt, interp_method=interp_method, scale=scale,
draw_title=draw_title)
elif isinstance(data, (Schedule, Instruction)):
drawer = _matplotlib.ScheduleDrawer(style=style)
image = drawer.draw(data, dt=dt, interp_method=interp_method, scale=scale,
channel_scales=channel_scales, plot_range=plot_range,
plot_all=plot_all, table=table, label=label,
framechange=framechange, channels=channels,
show_framechange_channels=show_framechange_channels,
draw_title=draw_title)
else:
raise VisualizationError('This data cannot be visualized.')
if filename:
image.savefig(filename, dpi=drawer.style.dpi, bbox_inches='tight')
if get_backend() in ['module://ipykernel.pylab.backend_inline',
'nbAgg']:
_matplotlib.plt.close(image)
if image and interactive:
image.show()
return image
|
def pulse_drawer(data: Union[SamplePulse, ScheduleComponent],
dt: int = 1,
style: Union[PulseStyle, SchedStyle] = None,
filename: str = None,
interp_method: Callable = None,
scale: float = None,
channel_scales: Dict[Channel, float] = None,
channels_to_plot: List[Channel] = None,
plot_all: bool = False,
plot_range: Tuple[Union[int, float], Union[int, float]] = None,
interactive: bool = False,
table: bool = True,
label: bool = False,
framechange: bool = True,
channels: List[Channel] = None,
scaling: float = None,
show_framechange_channels: bool = True,
draw_title: bool = False):
"""Plot the interpolated envelope of pulse and schedule.
Args:
data: Pulse or schedule object to plot.
dt: Time interval of samples. Pulses are visualized in the unit of
cycle time if not provided.
style: A style sheet to configure plot appearance.
See :mod:`~qiskit.visualization.pulse.qcstyle` for more information.
filename: Name required to save pulse image. The drawer just returns
`matplot.Figure` object if not provided.
interp_method: Interpolation function. Interpolation is disabled by default.
See :mod:`~qiskit.visualization.pulse.interpolation` for more information.
scale: Scaling of waveform amplitude. Pulses are automatically
scaled channel by channel if not provided.
channel_scales: Dictionary of scale factor for specific channels.
Scale of channels not specified here is overwritten by `scale`.
channels_to_plot: Deprecated, see `channels`.
plot_all: When set `True` plot empty channels.
plot_range: A tuple of time range to plot.
interactive: When set `True` show the circuit in a new window.
This depends on the matplotlib backend being used supporting this.
table: When set `True` draw event table for supported commands.
label: When set `True` draw label for individual instructions.
framechange: When set `True` draw framechange indicators.
scaling: Deprecated, see `scale`.
channels: A list of channel names to plot.
All non-empty channels are shown if not provided.
show_framechange_channels: When set `True` plot channels
with only framechange instructions.
draw_title: Add a title to the plot when set ``True``.
Returns:
matplotlib.figure.Figure: A matplotlib figure object for the pulse envelope.
Example:
This example shows how to visualize your pulse schedule.
Pulse names are added to the plot, unimportant channels are removed
and the time window is truncated to draw out U3 pulse sequence of interest.
.. jupyter-execute::
import numpy as np
import qiskit
from qiskit import pulse
from qiskit.test.mock.backends.almaden import FakeAlmaden
inst_map = FakeAlmaden().defaults().instruction_schedule_map
sched = pulse.Schedule()
sched += inst_map.get('u3', 0, np.pi, 0, np.pi)
sched += inst_map.get('measure', list(range(20))) << sched.duration
channels = [pulse.DriveChannel(0), pulse.MeasureChannel(0)]
scales = {pulse.DriveChannel(0): 10}
qiskit.visualization.pulse_drawer(sched,
channels=channels,
plot_range=(0, 1000),
label=True,
channel_scales=scales)
You are also able to call visualization module from the instance method::
sched.draw(channels=channels, plot_range=(0, 1000), label=True, channel_scales=scales)
To customize the format of the schedule plot, you can setup your style sheet.
.. jupyter-execute::
import numpy as np
import qiskit
from qiskit import pulse
from qiskit.test.mock.backends.almaden import FakeAlmaden
inst_map = FakeAlmaden().defaults().instruction_schedule_map
sched = pulse.Schedule()
sched += inst_map.get('u3', 0, np.pi, 0, np.pi)
sched += inst_map.get('measure', list(range(20))) << sched.duration
# setup style sheet
my_style = qiskit.visualization.SchedStyle(
figsize = (10, 5),
bg_color='w',
d_ch_color = ['#32cd32', '#556b2f'])
channels = [pulse.DriveChannel(0), pulse.MeasureChannel(0)]
scales = {pulse.DriveChannel(0): 10}
qiskit.visualization.pulse_drawer(sched, style=my_style,
channels=channels,
plot_range=(0, 1000),
label=True,
channel_scales=scales)
Raises:
VisualizationError: when invalid data is given
ImportError: when matplotlib is not installed
"""
if scaling is not None:
warnings.warn('The parameter "scaling" is being replaced by "scale"',
DeprecationWarning, 3)
scale = scaling
if channels_to_plot:
warnings.warn('The parameter "channels_to_plot" is being replaced by "channels"',
DeprecationWarning, 3)
channels = channels_to_plot
if not HAS_MATPLOTLIB:
raise ImportError('Must have Matplotlib installed.')
if isinstance(data, SamplePulse):
drawer = _matplotlib.SamplePulseDrawer(style=style)
image = drawer.draw(data, dt=dt, interp_method=interp_method, scale=scale,
draw_title=draw_title)
elif isinstance(data, (Schedule, Instruction)):
drawer = _matplotlib.ScheduleDrawer(style=style)
image = drawer.draw(data, dt=dt, interp_method=interp_method, scale=scale,
channel_scales=channel_scales, plot_range=plot_range,
plot_all=plot_all, table=table, label=label,
framechange=framechange, channels=channels,
show_framechange_channels=show_framechange_channels,
draw_title=draw_title)
else:
raise VisualizationError('This data cannot be visualized.')
if filename:
image.savefig(filename, dpi=drawer.style.dpi, bbox_inches='tight')
if get_backend() in ['module://ipykernel.pylab.backend_inline',
'nbAgg']:
_matplotlib.plt.close(image)
if image and interactive:
image.show()
return image
|
58,535 |
def execution_plan(workers: WorkerSet,
config: TrainerConfigDict) -> LocalIterator[dict]:
"""Execution plan of the MARWIL/BC algorithm. Defines the distributed
dataflow.
Args:
workers (WorkerSet): The WorkerSet for training the Polic(y/ies)
of the Trainer.
config (TrainerConfigDict): The trainer's configuration dict.
Returns:
LocalIterator[dict]: A local iterator over training metrics.
"""
rollouts = ParallelRollouts(workers, mode="bulk_sync")
replay_buffer = SimpleReplayBuffer(config["replay_buffer_size"])
store_op = rollouts \
.for_each(StoreToReplayBuffer(local_buffer=replay_buffer))
replay_op = Replay(local_buffer=replay_buffer) \
.combine(
ConcatBatches(
min_batch_size=config["train_batch_size"],
count_steps_by=config["multiagent"]["count_steps_by"],
)) \
.for_each(TrainOneStep(workers))
train_op = Concurrently(
[store_op, replay_op], mode="round_robin", output_indexes=[1])
return StandardMetricsReporting(train_op, workers, config)
|
def execution_plan(workers: WorkerSet,
config: TrainerConfigDict) -> LocalIterator[dict]:
"""Execution plan of the MARWIL/BC algorithm.
Defines the distributed dataflow.
Args:
workers (WorkerSet): The WorkerSet for training the Polic(y/ies)
of the Trainer.
config (TrainerConfigDict): The trainer's configuration dict.
Returns:
LocalIterator[dict]: A local iterator over training metrics.
"""
rollouts = ParallelRollouts(workers, mode="bulk_sync")
replay_buffer = SimpleReplayBuffer(config["replay_buffer_size"])
store_op = rollouts \
.for_each(StoreToReplayBuffer(local_buffer=replay_buffer))
replay_op = Replay(local_buffer=replay_buffer) \
.combine(
ConcatBatches(
min_batch_size=config["train_batch_size"],
count_steps_by=config["multiagent"]["count_steps_by"],
)) \
.for_each(TrainOneStep(workers))
train_op = Concurrently(
[store_op, replay_op], mode="round_robin", output_indexes=[1])
return StandardMetricsReporting(train_op, workers, config)
|
35,282 |
def non_negative_parafac_hals(tensor, rank, n_iter_max=100, init="svd", svd='numpy_svd', tol=1e-7,
sparsity_coefficients=[], fixed_modes=[],hals='approx',
verbose=False, return_errors=False):
"""
Non-negative CP decomposition
Uses HALS, which updates each factor columnwise while keeping all other columns fixed; see [1]_
Parameters
----------
tensor : ndarray
rank : int
number of components
n_iter_max : int
maximum number of iteration
init : {'svd', 'random'}, optional
svd : str, default is 'numpy_svd'
function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
tol : float, optional
tolerance: the algorithm stops when the variation in
the reconstruction error is less than the tolerance
Default: 1e-8
sparsity_coefficients: array of float (of length the number of modes)
The sparsity coefficients on each factor.
If set to None, the algorithm is computed without sparsity
Default: [],
fixed_modes: array of integers (between 0 and the number of modes)
Has to be set not to update a factor, 0 and 1 for U and V respectively
Default: []
verbose: boolean
Indicates whether the algorithm prints the successive
reconstruction errors or not
Default: False
return_errors: boolean
Indicates whether the algorithm should return all reconstruction errors
and computation time of each iteration or not
Default: False
Returns
-------
factors : ndarray list
list of positive factors of the CP decomposition
element `i` is of shape ``(tensor.shape[i], rank)``
errors: list
A list of reconstruction errors at each iteration of the algorithm.
toc: list
A list with accumulated time at each iteration.
References
----------
[1]: N. Gillis and F. Glineur, Accelerated Multiplicative Updates and
Hierarchical ALS Algorithms for Nonnegative Matrix Factorization,
Neural Computation 24 (4): 1085-1105, 2012.
"""
weights, factors = initialize_nn_cp(tensor, rank, init=init, svd=svd,
random_state=None,
normalize_factors=False)
norm_tensor = tl.norm(tensor, 2)
nb_modes = len(tensor.shape)
if sparsity_coefficients == None or len(sparsity_coefficients) != nb_modes:
#print(
# "Irrelevant number of sparsity coefficient (different from the number of modes), they have been set to None.")
sparsity_coefficients = [None for i in range(nb_modes)]
if fixed_modes == None:
fixed_modes = []
# Avoiding errors
for fixed_value in fixed_modes:
sparsity_coefficients[fixed_value] = None
# Generating the mode update sequence
modes_list = [mode for mode in range(tl.ndim(tensor)) if mode not in fixed_modes]
# initialisation - declare local variables
rec_errors = []
# Iteration
for iteration in range(n_iter_max):
# One pass of least squares on each updated mode
for mode in modes_list:
# Computing Hadamard of cross-products
pseudo_inverse = tl.tensor(tl.ones((rank, rank)), **tl.context(tensor))
for i, factor in enumerate(factors):
if i != mode:
pseudo_inverse = pseudo_inverse*tl.dot(tl.transpose(factor), factor)
if not iteration and weights is not None:
# Take into account init weights
mttkrp = unfolding_dot_khatri_rao(tensor, (weights, factors), mode)
else:
mttkrp = unfolding_dot_khatri_rao(tensor, (None, factors), mode)
# Call the hals resolution with nnls, optimizing the current mode
if hals=='approx':
factors[mode] = tl.transpose(
hals_nnls_approx(tl.transpose(mttkrp), pseudo_inverse, tl.transpose(factors[mode]),
maxiter=100,sparsity_coefficient=sparsity_coefficients[mode])[0])
elif hals=='exact':
factors[mode] = tl.transpose(
hals_nnls_exact(tl.transpose(mttkrp), pseudo_inverse, tl.transpose(factors[mode]),
maxiter=5000)[0])
if tol:
factors_norm = cp_norm((weights, factors))
iprod = tl.sum(tl.sum(mttkrp*factor, axis=0)*weights)
rec_error = tl.sqrt(tl.abs(norm_tensor**2 + factors_norm**2 - 2*iprod)) / norm_tensor
rec_errors.append(rec_error)
if iteration > 1:
if verbose:
print('reconstruction error={}, variation={}.'.format(
rec_errors[-1], rec_errors[-2] - rec_errors[-1]))
if tol and abs(rec_errors[-2] - rec_errors[-1]) < tol:
if verbose:
print('converged in {} iterations.'.format(iteration))
break
cp_tensor = CPTensor((weights, factors))
if return_errors:
return cp_tensor, rec_errors
else:
return cp_tensor
|
def non_negative_parafac_hals(tensor, rank, n_iter_max=100, init="svd", svd='numpy_svd', tol=1e-7,
sparsity_coefficients=[], fixed_modes=[],hals='approx',
verbose=False, return_errors=False):
"""
Non-negative CP decomposition
Uses HALS, which updates each factor columnwise while keeping all other columns fixed; see [1]_
Parameters
----------
tensor : ndarray
rank : int
number of components
n_iter_max : int
maximum number of iteration
init : {'svd', 'random'}, optional
svd : str, default is 'numpy_svd'
function to use to compute the SVD, acceptable values in tensorly.SVD_FUNS
tol : float, optional
tolerance: the algorithm stops when the variation in
the reconstruction error is less than the tolerance
Default: 1e-8
sparsity_coefficients: array of float (of length the number of modes)
The sparsity coefficients on each factor.
If set to None, the algorithm is computed without sparsity
Default: [],
fixed_modes: array of integers (between 0 and the number of modes)
Has to be set not to update a factor, 0 and 1 for U and V respectively
Default: []
verbose: boolean
Indicates whether the algorithm prints the successive
reconstruction errors or not
Default: False
return_errors: boolean
Indicates whether the algorithm should return all reconstruction errors
and computation time of each iteration or not
Default: False
Returns
-------
factors : ndarray list
list of positive factors of the CP decomposition
element `i` is of shape ``(tensor.shape[i], rank)``
errors: list
A list of reconstruction errors at each iteration of the algorithm.
toc: list
A list with accumulated time at each iteration.
References
----------
[1]: N. Gillis and F. Glineur, Accelerated Multiplicative Updates and
Hierarchical ALS Algorithms for Nonnegative Matrix Factorization,
Neural Computation 24 (4): 1085-1105, 2012.
"""
weights, factors = initialize_nn_cp(tensor, rank, init=init, svd=svd,
random_state=None,
normalize_factors=False)
norm_tensor = tl.norm(tensor, 2)
nb_modes = len(tensor.shape)
if sparsity_coefficients == None or len(sparsity_coefficients) != nb_modes:
#print(
# "Irrelevant number of sparsity coefficient (different from the number of modes), they have been set to None.")
sparsity_coefficients = [None for i in range(nb_modes)]
if fixed_modes is None:
fixed_modes = []
# Avoiding errors
for fixed_value in fixed_modes:
sparsity_coefficients[fixed_value] = None
# Generating the mode update sequence
modes_list = [mode for mode in range(tl.ndim(tensor)) if mode not in fixed_modes]
# initialisation - declare local variables
rec_errors = []
# Iteration
for iteration in range(n_iter_max):
# One pass of least squares on each updated mode
for mode in modes_list:
# Computing Hadamard of cross-products
pseudo_inverse = tl.tensor(tl.ones((rank, rank)), **tl.context(tensor))
for i, factor in enumerate(factors):
if i != mode:
pseudo_inverse = pseudo_inverse*tl.dot(tl.transpose(factor), factor)
if not iteration and weights is not None:
# Take into account init weights
mttkrp = unfolding_dot_khatri_rao(tensor, (weights, factors), mode)
else:
mttkrp = unfolding_dot_khatri_rao(tensor, (None, factors), mode)
# Call the hals resolution with nnls, optimizing the current mode
if hals=='approx':
factors[mode] = tl.transpose(
hals_nnls_approx(tl.transpose(mttkrp), pseudo_inverse, tl.transpose(factors[mode]),
maxiter=100,sparsity_coefficient=sparsity_coefficients[mode])[0])
elif hals=='exact':
factors[mode] = tl.transpose(
hals_nnls_exact(tl.transpose(mttkrp), pseudo_inverse, tl.transpose(factors[mode]),
maxiter=5000)[0])
if tol:
factors_norm = cp_norm((weights, factors))
iprod = tl.sum(tl.sum(mttkrp*factor, axis=0)*weights)
rec_error = tl.sqrt(tl.abs(norm_tensor**2 + factors_norm**2 - 2*iprod)) / norm_tensor
rec_errors.append(rec_error)
if iteration > 1:
if verbose:
print('reconstruction error={}, variation={}.'.format(
rec_errors[-1], rec_errors[-2] - rec_errors[-1]))
if tol and abs(rec_errors[-2] - rec_errors[-1]) < tol:
if verbose:
print('converged in {} iterations.'.format(iteration))
break
cp_tensor = CPTensor((weights, factors))
if return_errors:
return cp_tensor, rec_errors
else:
return cp_tensor
|
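The change above replaces `fixed_modes == None` with the identity check `fixed_modes is None`. Besides being the idiomatic form, `is None` avoids surprises with objects that overload `==`, such as NumPy arrays; a small standalone illustration:

```python
import numpy as np

fixed_modes = np.array([0, 1])

print(fixed_modes is None)   # False -> a plain bool, always unambiguous
print(fixed_modes == None)   # [False False] -> elementwise comparison, not a bool

try:
    if fixed_modes == None:  # using the array result as a condition is an error
        pass
except ValueError as err:
    print("ambiguous truth value:", err)
```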
38,733 |
def _make_json_friendly(obj):
'''Fallback type converter if json dump(s) raises a TypeError.'''
_valid_keys = {str, int, float, bool, type(None)}
if isinstance(obj, MutableMapping):
if not all((any(isinstance(k, t) for t in _valid_keys) for k in obj)):
newobj = type(obj)()
for k, v in obj.items():
newobj[util.toalphanum(str(k))] = v
return newobj
return None
|
def _make_json_friendly(obj):
'''Fallback type converter if json dump(s) raises a TypeError.'''
_valid_keys = {str, int, float, bool, type(None)}
if isinstance(obj, MutableMapping):
if not all(isinstance(k, tuple(_valid_keys)) for k in obj):
newobj = type(obj)()
for k, v in obj.items():
newobj[util.toalphanum(str(k))] = v
return newobj
return None
|
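The docstring above describes a fallback converter used when json.dumps raises a TypeError; a self-contained sketch of that retry pattern, where to_alphanum stands in for util.toalphanum:
import json

def to_alphanum(s):
    return ''.join(ch if ch.isalnum() else '_' for ch in s)

def make_json_friendly(obj):
    if isinstance(obj, dict):
        return {to_alphanum(str(k)): v for k, v in obj.items()}
    return None

data = {('run', 1): 'tuple keys are not JSON serializable'}
try:
    text = json.dumps(data)
except TypeError:
    text = json.dumps(make_json_friendly(data))
print(text)   # keys sanitised to alphanumeric strings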
39,187 |
def _load_lib(lib: str) -> bool:
"""Load extension module
Note:
In case `torchaudio` is deployed with `pex` format, the library file does not
exist as a stand alone file.
In this case, we expect that `libtorchaudio` is available somewhere
in the search path of dynamic loading mechanism, so that importing
`_torchaudio` will have library loader find and load `libtorchaudio`.
This is the reason why the function should not raising an error when the library
file is not found.
Returns:
bool:
False if the library file is not found.
True if the library file is found AND the library loaded without failure.
Raises:
Exception:
Exception thrown by the underlying `ctypes.CDLL`.
Expected case is `OSError` when a dynamic dependency is not found.
"""
path = _get_lib_path(lib)
if not path.exists():
return False
torch.ops.load_library(path)
torch.classes.load_library(path)
return True
|
def _load_lib(lib: str) -> bool:
"""Load extension module
Note:
In case `torchaudio` is deployed with `pex` format, the library file does not
exist as a stand alone file.
In this case, we expect that `libtorchaudio` is available somewhere
in the search path of dynamic loading mechanism, so that importing
`_torchaudio` will have library loader find and load `libtorchaudio`.
This is the reason why the function should not raise an error when the library
file is not found.
Returns:
bool:
False if the library file is not found.
True if the library file is found AND the library loaded without failure.
Raises:
Exception:
Exception thrown by the underlying `ctypes.CDLL`.
Expected case is `OSError` when a dynamic dependency is not found.
"""
path = _get_lib_path(lib)
if not path.exists():
return False
torch.ops.load_library(path)
torch.classes.load_library(path)
return True
|
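The same contract, returning False when the file is missing but letting loader errors propagate, sketched with ctypes instead of torch's loader (illustrative only, not how torchaudio loads its extension):
import ctypes
import ctypes.util

def load_optional_lib(name: str) -> bool:
    path = ctypes.util.find_library(name)
    if path is None:
        return False          # library not found: report it, don't raise
    ctypes.CDLL(path)         # an OSError from a missing dependency propagates
    return True

print(load_optional_lib("m"))             # usually True on Unix (libm)
print(load_optional_lib("no-such-lib"))   # False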
44,859 |
def test_filter_runs_by_user_id():
runs = [
Run(
run_info=RunInfo(
run_uuid="a",
run_id="a",
experiment_id=0,
user_id="user-id",
status=RunStatus.to_string(RunStatus.FINISHED),
start_time=1,
end_time=1,
lifecycle_stage=LifecycleStage.ACTIVE,
),
run_data=RunData(),
),
Run(
run_info=RunInfo(
run_uuid="b",
run_id="b",
experiment_id=0,
user_id="user-id2",
status=RunStatus.to_string(RunStatus.FINISHED),
start_time=1,
end_time=1,
lifecycle_stage=LifecycleStage.ACTIVE,
),
run_data=RunData(),
),
]
assert SearchUtils.filter(runs, "attribute.user_id = 'user-id2'") == runs[1]
|
def test_filter_runs_by_user_id():
runs = [
Run(
run_info=RunInfo(
run_uuid="a",
run_id="a",
experiment_id=0,
user_id="user-id",
status=RunStatus.to_string(RunStatus.FINISHED),
start_time=1,
end_time=1,
lifecycle_stage=LifecycleStage.ACTIVE,
),
run_data=RunData(),
),
Run(
run_info=RunInfo(
run_uuid="b",
run_id="b",
experiment_id=0,
user_id="user-id2",
status=RunStatus.to_string(RunStatus.FINISHED),
start_time=1,
end_time=1,
lifecycle_stage=LifecycleStage.ACTIVE,
),
run_data=RunData(),
),
]
assert SearchUtils.filter(runs, "attribute.user_id = 'user-id2'")[0] == runs[1]
|
25,205 |
def _infer_stmts(
stmts: Sequence[nodes.NodeNG | type[Uninferable] | Instance],
context: InferenceContext | None,
frame: nodes.NodeNG | Instance | str | None = None,
) -> collections.abc.Generator[InferenceResult, None, None]:
"""Return an iterator on statements inferred by each statement in *stmts*."""
inferred = False
if context is not None:
name = context.lookupname
context = context.clone()
else:
name = None
context = InferenceContext()
for stmt in stmts:
if stmt is Uninferable:
yield stmt
inferred = True
continue
# 'context' is always InferenceContext and Instances get '_infer_name' from ClassDef
context.lookupname = stmt._infer_name(frame, name) # type: ignore[union-attr]
try:
# Mypy doesn't recognize that 'stmt' can't be Uninferable
for inf in stmt.infer(context=context): # type: ignore[union-attr]
yield inf
inferred = True
except NameInferenceError:
continue
except InferenceError:
yield Uninferable
inferred = True
if not inferred:
raise InferenceError(
"Inference failed for all members of {stmts!r}.",
stmts=stmts,
frame=frame,
context=context,
)
|
def _infer_stmts(
stmts: Sequence[nodes.NodeNG | type[Uninferable] | Instance],
context: InferenceContext | None,
frame: nodes.NodeNG | Instance | None = None,
) -> collections.abc.Generator[InferenceResult, None, None]:
"""Return an iterator on statements inferred by each statement in *stmts*."""
inferred = False
if context is not None:
name = context.lookupname
context = context.clone()
else:
name = None
context = InferenceContext()
for stmt in stmts:
if stmt is Uninferable:
yield stmt
inferred = True
continue
# 'context' is always InferenceContext and Instances get '_infer_name' from ClassDef
context.lookupname = stmt._infer_name(frame, name) # type: ignore[union-attr]
try:
# Mypy doesn't recognize that 'stmt' can't be Uninferable
for inf in stmt.infer(context=context): # type: ignore[union-attr]
yield inf
inferred = True
except NameInferenceError:
continue
except InferenceError:
yield Uninferable
inferred = True
if not inferred:
raise InferenceError(
"Inference failed for all members of {stmts!r}.",
stmts=stmts,
frame=frame,
context=context,
)
|
57,649 |
def main():
d_args = demisto.args()
entry_id = d_args['entryID'] if 'entryID' in d_args else None
header_row = d_args['header_row'] if 'header_row' in d_args else None
search_column = d_args['column'] if 'column' in d_args else None
search_value = d_args['value'] if 'value' in d_args else None
res = demisto.getFilePath(entry_id)
if not res:
return_error("Entry {} not found".format(entry_id))
file_path = res['path']
file_name = res['name']
if not file_name.lower().endswith('.csv'):
return_error(
'"{}" is not in csv format. Please ensure the file is in correct format and has a ".csv" extension'.format(
file_name))
csv_data: list = []
with open(file_path) as f:
lines = f.read().splitlines()
if header_row:
headers = lines[0]
headers = headers.split(",")
lines = lines[1:]
for line in lines:
d = {}
row = line.split(",")
for i, h in enumerate(headers):
d[h] = row[i]
csv_data.append(d)
else:
for line in lines:
row = line.split(",")
csv_data.append(row)
# If we're searching the CSV
if search_column:
if header_row:
csv_data = search_dicts(search_column, search_value, csv_data)
else:
# Lists are 0-indexed but this makes it more human readable (column 0 is column 1)
try:
search_column = int(search_column) - 1
except ValueError:
return_error(
"CSV column spec must be integer if header_row not supplied (got {})".format(search_column))
csv_data = search_lists(search_column, search_value, csv_data)
if not csv_data:
output = {
'LookupCSV.missing': search_value
}
else:
output = {
'LookupCSV.result': csv_data
}
demisto.results({
"Type": entryTypes["note"],
"ContentsFormat": formats["json"],
"Contents": csv_data,
"EntryContext": output
})
|
def main():
d_args = demisto.args()
entry_id = d_args['entryID'] if 'entryID' in d_args else None
header_row = d_args['header_row'] if 'header_row' in d_args else None
search_column = d_args['column'] if 'column' in d_args else None
search_value = d_args['value'] if 'value' in d_args else None
res = demisto.getFilePath(entry_id)
if not res:
return_error("Entry {} not found".format(entry_id))
file_path = res['path']
file_name = res['name']
if not file_name.lower().endswith('.csv'):
return_error(
'"{}" is not in csv format. Please ensure the file is in correct format and has a ".csv" extension'.format(
file_name))
csv_data: list = []
with open(file_path) as f:
lines = f.read().splitlines()
if header_row:
headers = lines[0]
headers = headers.split(",")
lines = lines[1:]
for line in lines:
d = {}
row = line.split(",")
for i, h in enumerate(headers):
d[h] = row[i]
csv_data.append(d)
else:
for line in lines:
row = line.split(",")
csv_data.append(row)
# If we're searching the CSV
if search_column:
if header_row:
csv_data = search_dicts(search_column, search_value, csv_data)
else:
# Lists are 0-indexed but this makes it more human readable (column 0 is column 1)
try:
search_column = int(search_column) - 1
except ValueError:
return_error(
"CSV column spec must be integer if header_row not supplied (got {})".format(search_column))
csv_data = search_lists(search_column, search_value, csv_data)
output = {
'LookupCSV': {
'FoundResult': True if csv_data else False,
'Result': csv_data if csv_data else None
}
}
demisto.results({
"Type": entryTypes["note"],
"ContentsFormat": formats["json"],
"Contents": csv_data,
"EntryContext": output
})
|
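The manual line.split(",") above cannot handle commas inside quoted fields; a short sketch of the same header handling with the stdlib csv module (sample data is illustrative):
import csv
import io

raw = 'name,ip\n"Doe, Jane",10.0.0.1\n'
with io.StringIO(raw) as f:
    rows = list(csv.DictReader(f))   # header row -> one dict per data row
print(rows[0]['name'])               # 'Doe, Jane' survives the embedded comma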
24,755 |
def _different_parameters(
original: List[astroid.FunctionDef],
overridden: List[astroid.FunctionDef],
dummy_parameter_regex: Pattern,
):
"""Determine if the two methods have different parameters
They are considered to have different parameters if:
* they have different positional parameters, including different names
* one of the methods is having variadics, while the other is not
* they have different keyword only parameters.
"""
output_messages = []
original_parameters = _positional_parameters(original)
overridden_parameters = _positional_parameters(overridden)
# Copy kwonlyargs list so that we don't affect later function linting
original_kwonlyargs = original.args.kwonlyargs
# Allow positional/keyword variadic in overridden to match against any
# positional/keyword argument in original.
    # Keep any arguments that are found separately in overridden to satisfy
# later tests
if overridden.args.vararg:
overidden_names = [v.name for v in overridden_parameters]
original_parameters = [
v for v in original_parameters if v.name in overidden_names
]
if overridden.args.kwarg:
overidden_names = [v.name for v in overridden.args.kwonlyargs]
original_kwonlyargs = [
v for v in original.args.kwonlyargs if v.name in overidden_names
]
arguments = list(original.args.args)
# variable 'count' helps to check if the type of an argument has changed
# at the _has_different_parameters method
if any(arg.name == "self" for arg in arguments) and len(arguments) > 1:
count = 1
else:
count = 0
different_positional = _has_different_parameters(
original_parameters, overridden_parameters, dummy_parameter_regex, count
)
different_kwonly = _has_different_parameters(
original_kwonlyargs, overridden.args.kwonlyargs, dummy_parameter_regex, count
)
if len(different_kwonly) > 0:
output_messages += different_kwonly
if len(different_positional) > 0:
output_messages += different_positional
if original.name in PYMETHODS:
# Ignore the difference for special methods. If the parameter
# numbers are different, then that is going to be caught by
# unexpected-special-method-signature.
# If the names are different, it doesn't matter, since they can't
# be used as keyword arguments anyway.
different_positional = different_kwonly = False
# Arguments will only violate LSP if there are variadics in the original
# that are then removed from the overridden
kwarg_lost = original.args.kwarg and not overridden.args.kwarg
vararg_lost = original.args.vararg and not overridden.args.vararg
if kwarg_lost or vararg_lost:
output_messages += ["Variadics removed in"]
return output_messages
|
def _different_parameters(
original: List[astroid.FunctionDef],
overridden: List[astroid.FunctionDef],
dummy_parameter_regex: Pattern,
) -> List[str]:
"""Determine if the two methods have different parameters
They are considered to have different parameters if:
* they have different positional parameters, including different names
* one of the methods is having variadics, while the other is not
* they have different keyword only parameters.
"""
output_messages = []
original_parameters = _positional_parameters(original)
overridden_parameters = _positional_parameters(overridden)
# Copy kwonlyargs list so that we don't affect later function linting
original_kwonlyargs = original.args.kwonlyargs
# Allow positional/keyword variadic in overridden to match against any
# positional/keyword argument in original.
    # Keep any arguments that are found separately in overridden to satisfy
# later tests
if overridden.args.vararg:
overidden_names = [v.name for v in overridden_parameters]
original_parameters = [
v for v in original_parameters if v.name in overidden_names
]
if overridden.args.kwarg:
overidden_names = [v.name for v in overridden.args.kwonlyargs]
original_kwonlyargs = [
v for v in original.args.kwonlyargs if v.name in overidden_names
]
arguments = list(original.args.args)
# variable 'count' helps to check if the type of an argument has changed
# at the _has_different_parameters method
if any(arg.name == "self" for arg in arguments) and len(arguments) > 1:
count = 1
else:
count = 0
different_positional = _has_different_parameters(
original_parameters, overridden_parameters, dummy_parameter_regex, count
)
different_kwonly = _has_different_parameters(
original_kwonlyargs, overridden.args.kwonlyargs, dummy_parameter_regex, count
)
if len(different_kwonly) > 0:
output_messages += different_kwonly
if len(different_positional) > 0:
output_messages += different_positional
if original.name in PYMETHODS:
# Ignore the difference for special methods. If the parameter
# numbers are different, then that is going to be caught by
# unexpected-special-method-signature.
# If the names are different, it doesn't matter, since they can't
# be used as keyword arguments anyway.
different_positional = different_kwonly = False
# Arguments will only violate LSP if there are variadics in the original
# that are then removed from the overridden
kwarg_lost = original.args.kwarg and not overridden.args.kwarg
vararg_lost = original.args.vararg and not overridden.args.vararg
if kwarg_lost or vararg_lost:
output_messages += ["Variadics removed in"]
return output_messages
|
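A tiny illustration of what the final "Variadics removed in" branch flags: an override that drops the parent's variadics breaks keyword passthrough for callers of the base interface (hypothetical classes):
class Base:
    def run(self, *args, **kwargs):
        return args, kwargs

class Child(Base):
    def run(self, x):        # *args/**kwargs lost -> flagged by the check above
        return (x,), {}

Base().run(1, retries=3)     # fine
Child().run(1)               # callers relying on **kwargs can no longer pass them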
58,371 |
def validate_derivative_path(path, **kwargs):
# Collect all paths that contain a dataset_description.json
dd = path / 'dataset_description.json'
with dd.open('r', encoding='utf-8') as ddfd:
description = json.load(ddfd)
pipeline_names = [pipeline["Name"]
for pipeline in description.get("GeneratedBy", [])
if "Name" in pipeline]
if pipeline_names:
pipeline_name = pipeline_names[0]
elif "PipelineDescription" in description:
warnings.warn("The PipelineDescription field was superseded "
"by GeneratedBy in BIDS 1.4.0. You can use "
"``pybids upgrade`` to update your derivative "
"dataset.")
pipeline_name = description["PipelineDescription"].get("Name")
else:
pipeline_name = None
if pipeline_name is None:
raise BIDSDerivativesValidationError(
"Every valid BIDS-derivatives dataset must "
"have a GeneratedBy.Name field set "
"inside 'dataset_description.json'. "
"\nExample: %s" %
MANDATORY_DERIVATIVES_FIELDS['GeneratedBy'])
return pipeline_name
|
def validate_derivative_path(path, **kwargs):
# Collect all paths that contain a dataset_description.json
dd = path / 'dataset_description.json'
with dd.open('r', encoding='utf-8') as ddfd:
description = json.load(ddfd)
pipeline_names = [pipeline["Name"]
for pipeline in description.get("GeneratedBy", [])
if "Name" in pipeline]
if pipeline_names:
pipeline_name = pipeline_names[0]
elif "PipelineDescription" in description:
warnings.warn("The PipelineDescription field was superseded "
"by GeneratedBy in BIDS 1.4.0. You can use "
"``pybids upgrade`` to update your derivative "
"dataset.")
pipeline_name = description["PipelineDescription"].get("Name")
else:
pipeline_name = None
if pipeline_name is None:
raise BIDSDerivativesValidationError(
"Every valid BIDS-derivatives dataset must "
"have a GeneratedBy.Name field set "
"inside 'dataset_description.json'. "
"\nExample: %s" %
MANDATORY_DERIVATIVES_FIELDS['GeneratedBy'])
return pipeline_name
|
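A minimal dataset_description.json that satisfies the GeneratedBy.Name requirement enforced above (field values are illustrative):
import json

description = {
    "Name": "Example derivatives",
    "BIDSVersion": "1.4.0",
    "GeneratedBy": [{"Name": "fmriprep"}],   # the Name checked by validate_derivative_path
}
print(json.dumps(description, indent=2))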
13,532 |
def my_split(inLine):
    if sys.version_info > (3, 0): # is there a clean way to do it?
return shlex.split(inLine.strip())
else:
tempArray = shlex.split(inLine.strip().encode('utf-8'))
newArray = []
for item in tempArray:
newArray.append(item.decode('utf-8'))
return newArray
|
def my_split(inLine):
    if sys.version_info > (3, 0): # is there a clean way to do it?
return shlex.split(inLine.strip())
else:
tempArray = shlex.split(inLine.strip().encode('utf-8'))
        decoded = [item.decode('utf-8') for item in tempArray]
        return decoded
|
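For reference, the quote-aware splitting that the helper above wraps (the Python 3 branch):
import shlex

print(shlex.split('convert "my file.png" thumb.png'))   # ['convert', 'my file.png', 'thumb.png']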
41,492 |
def test_inferapi_pyhf_independence():
'''
pyhf.infer should eventually be factored out so it should be
infependent from pyhf internals. This is testing that
a much simpler model still can run through pyhf.infer.hypotest
'''
from pyhf import get_backend
class _NonPyhfConfig(object):
def __init__(self):
self.poi_index = 0
self.npars = 2
def suggested_init(self):
return [1.0, 1.0]
def suggested_bounds(self):
return [[0.0, 10.0], [0.0, 10.0]]
class NonPyhfModel(object):
def __init__(self, spec):
self.sig, self.nominal, self.uncert = spec
self.factor = (self.nominal / self.uncert) ** 2
self.aux = 1.0 * self.factor
self.config = _NonPyhfConfig()
def _make_main_pdf(self, pars):
mu, gamma = pars
expected_main = gamma * self.nominal + mu * self.sig
return pyhf.probability.Poisson(expected_main)
def _make_constraint_pdf(self, pars):
mu, gamma = pars
return pyhf.probability.Poisson(gamma * self.factor)
def expected_data(self, pars, include_auxdata=True):
tensorlib, _ = get_backend()
expected_main = tensorlib.astensor(
[self._make_main_pdf(pars).expected_data()]
)
aux_data = tensorlib.astensor(
[self._make_constraint_pdf(pars).expected_data()]
)
if not include_auxdata:
return expected_main
return tensorlib.concatenate([expected_main, aux_data])
def logpdf(self, pars, data):
tensorlib, _ = get_backend()
maindata, auxdata = data
main = self._make_main_pdf(pars).log_prob(maindata)
constraint = self._make_constraint_pdf(pars).log_prob(auxdata)
return tensorlib.astensor([main + constraint])
m = NonPyhfModel([5, 50, 7])
print(pyhf.infer.hypotest(1.0, m.expected_data(m.config.suggested_init()), m))
|
def test_inferapi_pyhf_independence():
'''
pyhf.infer should eventually be factored out so it should be
infependent from pyhf internals. This is testing that
a much simpler model still can run through pyhf.infer.hypotest
'''
from pyhf import get_backend
class _NonPyhfConfig(object):
def __init__(self):
self.poi_index = 0
self.npars = 2
def suggested_init(self):
return [1.0, 1.0]
def suggested_bounds(self):
return [[0.0, 10.0], [0.0, 10.0]]
class NonPyhfModel(object):
def __init__(self, spec):
self.sig, self.nominal, self.uncert = spec
self.factor = (self.nominal / self.uncert) ** 2
self.aux = 1.0 * self.factor
self.config = _NonPyhfConfig()
def _make_main_pdf(self, pars):
mu, gamma = pars
expected_main = gamma * self.nominal + mu * self.sig
return pyhf.probability.Poisson(expected_main)
def _make_constraint_pdf(self, pars):
mu, gamma = pars
return pyhf.probability.Poisson(gamma * self.factor)
def expected_data(self, pars, include_auxdata=True):
tensorlib, _ = get_backend()
expected_main = tensorlib.astensor(
[self._make_main_pdf(pars).expected_data()]
)
aux_data = tensorlib.astensor(
[self._make_constraint_pdf(pars).expected_data()]
)
if not include_auxdata:
return expected_main
return tensorlib.concatenate([expected_main, aux_data])
def logpdf(self, pars, data):
tensorlib, _ = get_backend()
maindata, auxdata = data
main = self._make_main_pdf(pars).log_prob(maindata)
constraint = self._make_constraint_pdf(pars).log_prob(auxdata)
return tensorlib.astensor([main + constraint])
model = NonPyhfModel([5, 50, 7])
    print(pyhf.infer.hypotest(1.0, model.expected_data(model.config.suggested_init()), model))
|
43,688 |
def edge_driver(graph, reward):
r"""Returns the edge-driver cost Hamiltonian component.
Given some graph, :math:`G`, this method will return a Hamiltonian that assigns
lower energies to two-bit bitstrings supplied in ``reward``. Each bitstring corresponds
to the state of some edge in :math:`G`, which is defined by the states of its vertex endpoints.
See usage details for more information.
Args:
graph (nx.Graph): The graph on which the Hamiltonian is defined
reward (list[str]): The list of two-bit bitstrings that are assigned a lower energy by the Hamiltonian
Returns:
.Hamiltonian
**Example**
>>> graph = nx.Graph([(0, 1), (1, 2)])
>>> hamiltonian = qaoa.edge_driver(graph, ["11", "10", "01"])
>>> print(hamiltonian)
(0.25) [Z0 Z1] + (0.25) [Z0] + (0.25) [Z1] + (0.25) [Z1 Z2] + (0.25) [Z2]
..UsageDetails::
The goal of many combinatorial problems that can be solved with QAOA is to
find a `Graph colouring <https://en.wikipedia.org/wiki/Graph_coloring>`__ of some supplied
graph :math:`G`, that minimizes some cost function. It is oftentimes natural to consider the class
of graph colouring problems that only admit two colours, as we can easily encode these two colours
using the :math:`|1\rangle` and :math:`|0\rangle` states of qubits. Therefore, given
some graph :math:`G`, each edge of the graph can be described by a pair of qubits, :math:`|00\rangle`,
    :math:`|01\rangle`, :math:`|10\rangle`, or :math:`|11\rangle`, corresponding to the colourings of its endpoints.
When constructing QAOA cost functions, one must "penalize" certain states of the graph, and "reward"
others, by assigning higher and lower energies to these respective configurations. Given a set of vertex-colour
pairs (which each describe a possible state of a graph edge), the `edge_driver`
method will output a Hamiltonian that rewards the edges in the set, and penalizes the others. For example,
given the set: :math:`\{|00\rangle, \ |01\rangle, \ |10\rangle}` and the graph :math:`G`,
the `edge_driver` method will output the following Hamiltonian:
..math:: H \ = \ \frac{1}{4} \displaystyle\sum_{(i, j) \in E(G)} \big( Z_{i} Z_{j} \ - \ Z_{i} \ - \ Z_{j} \big)
where :math:`E(G)` is the set of edges of :math:`G`, and :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire. As can be checked, this Hamiltonian assigns an energy of :math:`-1/4` to the states
:math:`|00\rangle`, :math:`|01\rangle` and :math:`|10\rangle`, and an energy of :math:`3/4` to the state
:math:`|11\rangle`.
.. Note::
        If either of the states :math:`|01\rangle` or :math:`|10\rangle` is contained in ``reward``, then so too
must :math:`|10\rangle` or :math:`|01\rangle`, respectively. Within a graph, there is no notion of "order"
of edge endpoints, so these two states are effectively the same.
"""
allowed = ["00", "01", "10", "11"]
if not all([e in allowed for e in reward]):
raise ValueError("Encountered invalid entry in 'reward', expected 2-bit bitstrings.")
if "01" in reward and "10" not in reward or "10" in reward and "01" not in reward:
raise ValueError(
"'reward' cannot contain either '10' or '01', must contain neither or both."
)
if not isinstance(graph, nx.Graph):
raise ValueError("Input graph must be a nx.Graph, got {}".format(type(graph).__name__))
coeffs = []
ops = []
if len(reward) == 0 or len(reward) == 4:
coeffs = [1 for _ in graph.nodes]
ops = [qml.Identity(v) for v in graph.nodes]
else:
reward = list(set(reward) - {"01"})
sign = -1
if len(reward) == 2:
reward = list({"00", "10", "11"} - set(reward))
sign = 1
reward = reward[0]
if reward == "00":
for e in graph.edges:
coeffs.extend([0.25 * sign, 0.25 * sign, 0.25 * sign])
ops.extend(
[qml.PauliZ(e[0]) @ qml.PauliZ(e[1]), qml.PauliZ(e[0]), qml.PauliZ(e[1])]
)
if reward == "10":
for e in graph.edges:
coeffs.append(-0.5 * sign)
ops.append(qml.PauliZ(e[0]) @ qml.PauliZ(e[1]))
if reward == "11":
for e in graph.edges:
coeffs.extend([0.25 * sign, -0.25 * sign, -0.25 * sign])
ops.extend(
[qml.PauliZ(e[0]) @ qml.PauliZ(e[1]), qml.PauliZ(e[0]), qml.PauliZ(e[1])]
)
return qml.Hamiltonian(coeffs, ops)
|
def edge_driver(graph, reward):
r"""Returns the edge-driver cost Hamiltonian component.
Given some graph, :math:`G`, this method will return a Hamiltonian that assigns
lower energies to two-bit bitstrings supplied in ``reward``. Each bitstring corresponds
to the state of some edge in :math:`G`, which is defined by the states of its vertex endpoints.
See usage details for more information.
Args:
graph (nx.Graph): The graph on which the Hamiltonian is defined
reward (list[str]): The list of two-bit bitstrings that are assigned a lower energy by the Hamiltonian
Returns:
.Hamiltonian
**Example**
>>> graph = nx.Graph([(0, 1), (1, 2)])
>>> hamiltonian = qaoa.edge_driver(graph, ["11", "10", "01"])
>>> print(hamiltonian)
(0.25) [Z0 Z1] + (0.25) [Z0] + (0.25) [Z1] + (0.25) [Z1 Z2] + (0.25) [Z2]
..UsageDetails::
The goal of many combinatorial problems that can be solved with QAOA is to
find a `Graph colouring <https://en.wikipedia.org/wiki/Graph_coloring>`__ of some supplied
graph :math:`G`, that minimizes some cost function. It is oftentimes natural to consider the class
of graph colouring problems that only admit two colours, as we can easily encode these two colours
using the :math:`|1\rangle` and :math:`|0\rangle` states of qubits. Therefore, given
some graph :math:`G`, each edge of the graph can be described by a pair of qubits, :math:`|00\rangle`,
    :math:`|01\rangle`, :math:`|10\rangle`, or :math:`|11\rangle`, corresponding to the colourings of its endpoints.
When constructing QAOA cost functions, one must "penalize" certain states of the graph, and "reward"
others, by assigning higher and lower energies to these respective configurations. Given a set of vertex-colour
pairs (which each describe a possible state of a graph edge), the `edge_driver`
method will output a Hamiltonian that rewards the edges in the set, and penalizes the others. For example,
given the set: :math:`\{|00\rangle, \ |01\rangle, \ |10\rangle\}` and the graph :math:`G`,
the `edge_driver` method will output the following Hamiltonian:
..math:: H \ = \ \frac{1}{4} \displaystyle\sum_{(i, j) \in E(G)} \big( Z_{i} Z_{j} \ - \ Z_{i} \ - \ Z_{j} \big)
where :math:`E(G)` is the set of edges of :math:`G`, and :math:`Z_i` is the Pauli-Z operator acting on the
:math:`i`-th wire. As can be checked, this Hamiltonian assigns an energy of :math:`-1/4` to the states
:math:`|00\rangle`, :math:`|01\rangle` and :math:`|10\rangle`, and an energy of :math:`3/4` to the state
:math:`|11\rangle`.
.. Note::
        If either of the states :math:`|01\rangle` or :math:`|10\rangle` is contained in ``reward``, then so too
must :math:`|10\rangle` or :math:`|01\rangle`, respectively. Within a graph, there is no notion of "order"
of edge endpoints, so these two states are effectively the same.
"""
allowed = ["00", "01", "10", "11"]
if not all([e in allowed for e in reward]):
raise ValueError("Encountered invalid entry in 'reward', expected 2-bit bitstrings.")
if "01" in reward and "10" not in reward or "10" in reward and "01" not in reward:
raise ValueError(
"'reward' cannot contain either '10' or '01', must contain neither or both."
)
if not isinstance(graph, nx.Graph):
raise ValueError("Input graph must be a nx.Graph, got {}".format(type(graph).__name__))
coeffs = []
ops = []
if len(reward) == 0 or len(reward) == 4:
coeffs = [1 for _ in graph.nodes]
ops = [qml.Identity(v) for v in graph.nodes]
else:
reward = list(set(reward) - {"01"})
sign = -1
if len(reward) == 2:
reward = list({"00", "10", "11"} - set(reward))
sign = 1
reward = reward[0]
if reward == "00":
for e in graph.edges:
coeffs.extend([0.25 * sign, 0.25 * sign, 0.25 * sign])
ops.extend(
[qml.PauliZ(e[0]) @ qml.PauliZ(e[1]), qml.PauliZ(e[0]), qml.PauliZ(e[1])]
)
if reward == "10":
for e in graph.edges:
coeffs.append(-0.5 * sign)
ops.append(qml.PauliZ(e[0]) @ qml.PauliZ(e[1]))
if reward == "11":
for e in graph.edges:
coeffs.extend([0.25 * sign, -0.25 * sign, -0.25 * sign])
ops.extend(
[qml.PauliZ(e[0]) @ qml.PauliZ(e[1]), qml.PauliZ(e[0]), qml.PauliZ(e[1])]
)
return qml.Hamiltonian(coeffs, ops)
|
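A quick numpy check of the energies quoted in the docstring for a single edge and reward {"00", "01", "10"}, i.e. H = (Z0 Z1 - Z0 - Z1) / 4 (independent of PennyLane):
import numpy as np

Z = np.diag([1.0, -1.0])
I2 = np.eye(2)
H = (np.kron(Z, Z) - np.kron(Z, I2) - np.kron(I2, Z)) / 4
print(np.diag(H))   # [-0.25 -0.25 -0.25  0.75] for |00>, |01>, |10>, |11>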
51,558 |
def _init_hoomd_dihedrals(structure, ref_energy=1.0):
""" Periodic dihedrals (dubbed harmonic dihedrals in HOOMD) """
# Identify the unique dihedral types before setting
    # need Hoomd 2.8.0 to use proper dihedral implementation
# from this PR https://github.com/glotzerlab/hoomd-blue/pull/492
version_numbers = _check_hoomd_version()
if float(version_numbers[0]) < 2 or float(version_numbers[1]) < 8:
from mbuild.exceptions import MBuildError
raise MBuildError("Please upgrade Hoomd to at least 2.8.0")
dihedral_type_params = {}
for dihedral in structure.dihedrals:
t1, t2 = dihedral.atom1.type, dihedral.atom2.type
t3, t4 = dihedral.atom3.type, dihedral.atom4.type
if [t2, t3] == sorted([t2, t3], key=natural_sort):
dihedral_type = "-".join((t1, t2, t3, t4))
else:
dihedral_type = "-".join((t4, t3, t2, t1))
if dihedral_type not in dihedral_type_params:
if isinstance(dihedral.type, pmd.DihedralType):
dihedral_type_params[dihedral_type] = dihedral.type
elif isinstance(dihedral.type, pmd.DihedralTypeList):
if len(dihedral.type) > 1:
warnings.warn(
"Multiple dihedral types detected"
+ " for single dihedral, will ignore all except "
+ " first diheral type"
)
dihedral_type_params[dihedral_type] = dihedral.type[0]
# Set the hoomd parameters
periodic_torsion = (
hoomd.md.dihedral.harmonic()
) # These are periodic torsions
for name, dihedral_type in dihedral_type_params.items():
periodic_torsion.dihedral_coeff.set(
name,
k=2 * dihedral_type.phi_k / ref_energy,
d=1,
n=dihedral_type.per,
phi_0=np.deg2rad(dihedral_type.phase),
)
return periodic_torsion
|
def _init_hoomd_dihedrals(structure, ref_energy=1.0):
""" Periodic dihedrals (dubbed harmonic dihedrals in HOOMD) """
# Identify the unique dihedral types before setting
    # need Hoomd 2.8.0 to use proper dihedral implementation
# from this PR https://github.com/glotzerlab/hoomd-blue/pull/492
version_numbers = _check_hoomd_version()
if float(version_numbers[0]) < 2 or float(version_numbers[1]) < 8:
from mbuild.exceptions import MBuildError
raise MBuildError("Please upgrade Hoomd to at least 2.8.0")
dihedral_type_params = {}
for dihedral in structure.dihedrals:
t1, t2 = dihedral.atom1.type, dihedral.atom2.type
t3, t4 = dihedral.atom3.type, dihedral.atom4.type
if [t2, t3] == sorted([t2, t3], key=natural_sort):
dihedral_type = "-".join((t1, t2, t3, t4))
else:
dihedral_type = "-".join((t4, t3, t2, t1))
if dihedral_type not in dihedral_type_params:
if isinstance(dihedral.type, pmd.DihedralType):
dihedral_type_params[dihedral_type] = dihedral.type
elif isinstance(dihedral.type, pmd.DihedralTypeList):
if len(dihedral.type) > 1:
warnings.warn(
"Multiple dihedral types detected"
+ " for single dihedral, will ignore all except "
+ " first diheral type."
+ "First dihedral type: {}".format(dihedral.type[0])
)
dihedral_type_params[dihedral_type] = dihedral.type[0]
# Set the hoomd parameters
periodic_torsion = (
hoomd.md.dihedral.harmonic()
) # These are periodic torsions
for name, dihedral_type in dihedral_type_params.items():
periodic_torsion.dihedral_coeff.set(
name,
k=2 * dihedral_type.phi_k / ref_energy,
d=1,
n=dihedral_type.per,
phi_0=np.deg2rad(dihedral_type.phase),
)
return periodic_torsion
|
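A version gate like the one above is often written as a tuple comparison, which also accepts a larger major version such as 3.0; a small illustrative sketch:
def at_least(version: str, required=(2, 8)) -> bool:
    major, minor = (int(p) for p in version.split('.')[:2])
    return (major, minor) >= required

print(at_least("2.9.7"), at_least("3.0.0"), at_least("2.7.1"))   # True True False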
20,459 |
def remove_activity_date_deadline_column(env):
activity_mixin_models = [k for k in env.registry if issubclass(
type(env[k]), type(env['mail.activity.mixin'])) and env[k]._auto]
_column_renames = {model._table: ('activity_date_deadline', None)
for model in activity_mixin_models}
openupgrade.rename_columns(env.cr, _column_renames)
|
def remove_activity_date_deadline_column(env):
activity_mixin_models = [k for k in env.registry if issubclass(
type(env[k]), type(env['mail.activity.mixin'])) and env[k]._auto]
_column_renames = {env[model]._table: ('activity_date_deadline', None)
for model in activity_mixin_models}
openupgrade.rename_columns(env.cr, _column_renames)
|
41,832 |
def get_extras_require() -> Dict[str, List[str]]:
requirements = {
"checking": ["black", "hacking", "mypy"],
"codecov": ["codecov", "pytest-cov"],
"doctest": [
"cma",
"pandas",
"plotly>=4.0.0",
"scikit-learn>=0.19.0",
"scikit-optimize",
"mlflow",
],
"document": ["sphinx", "sphinx_rtd_theme"],
"example": [
"catboost",
"chainer",
"lightgbm",
"mlflow",
"mpi4py",
"mxnet",
"nbval",
"pytorch-ignite",
"scikit-image",
"scikit-learn",
"thop",
"torch==1.4.0+cpu",
"torchvision==0.5.0+cpu",
"xgboost",
]
+ (
["allennlp<1", "fastai<2", "pytorch-lightning>=0.7.1"]
if (3, 5) < sys.version_info[:2] < (3, 8)
else []
)
+ (
["llvmlite<=0.31.0"] if (3, 5) == sys.version_info[:2] else []
) # Newer `llvmlite` is not distributed with wheels for Python 3.5.
+ (
["dask[dataframe]", "dask-ml", "keras", "tensorflow>=2.0.0", "tensorflow-datasets",]
if sys.version_info[:2] < (3, 8)
else []
),
"experimental": ["redis"],
"testing": [
# TODO(toshihikoyanase): Remove the version constraint after resolving the issue
# https://github.com/optuna/optuna/issues/1000.
"bokeh<2.0.0",
"chainer>=5.0.0",
"cma",
"fakeredis",
"fanova",
"lightgbm",
"mlflow",
"mpi4py",
"mxnet",
"pandas",
"plotly>=4.0.0",
"pytest",
"pytorch-ignite",
"scikit-learn>=0.19.0",
"scikit-optimize",
"torch==1.4.0+cpu",
"torchvision==0.5.0+cpu",
"xgboost",
]
+ (
["allennlp<1", "fastai<2", "pytorch-lightning>=0.7.1"]
if (3, 5) < sys.version_info[:2] < (3, 8)
else []
)
+ (
["keras", "tensorflow", "tensorflow-datasets"] if sys.version_info[:2] < (3, 8) else []
),
}
return requirements
|
def get_extras_require() -> Dict[str, List[str]]:
requirements = {
"checking": ["black", "hacking", "mypy"],
"codecov": ["codecov", "pytest-cov"],
"doctest": [
"cma",
"pandas",
"plotly>=4.0.0",
"scikit-learn>=0.19.0",
"scikit-optimize",
"mlflow",
],
"document": ["sphinx", "sphinx_rtd_theme"],
"example": [
"catboost",
"chainer",
"lightgbm",
"mlflow",
"mpi4py",
"mxnet",
"nbval",
"pytorch-ignite",
"scikit-image",
"scikit-learn",
"thop",
"torch==1.4.0+cpu",
"torchvision==0.5.0+cpu",
"xgboost",
]
+ (
["allennlp<1", "fastai<2", "pytorch-lightning>=0.7.1"]
if (3, 5) < sys.version_info[:2] < (3, 8)
else []
)
+ (
["llvmlite<=0.31.0"] if (3, 5) == sys.version_info[:2] else []
) # Newer `llvmlite` is not distributed with wheels for Python 3.5.
+ (
["dask[dataframe]", "dask-ml", "keras", "tensorflow>=2.0.0", "tensorflow-datasets"]
if sys.version_info[:2] < (3, 8)
else []
),
"experimental": ["redis"],
"testing": [
# TODO(toshihikoyanase): Remove the version constraint after resolving the issue
# https://github.com/optuna/optuna/issues/1000.
"bokeh<2.0.0",
"chainer>=5.0.0",
"cma",
"fakeredis",
"fanova",
"lightgbm",
"mlflow",
"mpi4py",
"mxnet",
"pandas",
"plotly>=4.0.0",
"pytest",
"pytorch-ignite",
"scikit-learn>=0.19.0",
"scikit-optimize",
"torch==1.4.0+cpu",
"torchvision==0.5.0+cpu",
"xgboost",
]
+ (
["allennlp<1", "fastai<2", "pytorch-lightning>=0.7.1"]
if (3, 5) < sys.version_info[:2] < (3, 8)
else []
)
+ (
["keras", "tensorflow", "tensorflow-datasets"] if sys.version_info[:2] < (3, 8) else []
),
}
return requirements
|
54,099 |
def get_feature_for_hac(n, buses_i, feature=None): #buses_i = n.buses.index
if feature is None:
feature = "solar+onwind-time"
carriers = feature.split('-')[0].split('+')
if "offwind" in carriers:
carriers.remove("offwind")
        carriers = np.append(carriers, n.generators.carrier.filter(like='offwind').unique())
if feature.split('-')[1] == 'cap':
feature_data = pd.DataFrame(index=buses_i, columns=carriers)
for carrier in carriers:
try:
feature_data[carrier] = (n.generators_t.p_max_pu.filter(like=carrier).mean()
.rename(index=lambda x: x.split(' ')[0]))
except:
feature_data[carrier] = (n.generators_t.p_max_pu.filter(like=carrier).mean()
.rename(index=lambda x: x.split(' ')[0] + ' ' + x.split(' ')[1]))
if feature.split('-')[1] == 'time':
feature_data = pd.DataFrame(columns=buses_i)
for carrier in carriers:
try:
# without simpl wildcard (bus names are "X X"):
feature_data = feature_data.append(n.generators_t.p_max_pu.filter(like=carrier)
.rename(columns=lambda x: x.split(' ')[0]))[buses_i]
except:
# with simpl wildcard (bus names are "X X X"):
feature_data = feature_data.append(n.generators_t.p_max_pu.filter(like=carrier)
.rename(columns=lambda x: x.split(' ')[0] + ' ' + x.split(' ')[1]))[buses_i]
feature_data = feature_data.T
feature_data.columns = feature_data.columns.astype(str) # Timestamp will raise error in sklearn>=v1.2
feature_data = feature_data.fillna(0)
return feature_data
|
def get_feature_for_hac(n, buses_i=None, feature=None):
if buses_i is None:
buses_i = n.buses.index
if feature is None:
feature = "solar+onwind-time"
carriers = feature.split('-')[0].split('+')
if "offwind" in carriers:
carriers.remove("offwind")
        carriers = np.append(carriers, n.generators.carrier.filter(like='offwind').unique())
if feature.split('-')[1] == 'cap':
feature_data = pd.DataFrame(index=buses_i, columns=carriers)
for carrier in carriers:
try:
feature_data[carrier] = (n.generators_t.p_max_pu.filter(like=carrier).mean()
.rename(index=lambda x: x.split(' ')[0]))
except:
feature_data[carrier] = (n.generators_t.p_max_pu.filter(like=carrier).mean()
.rename(index=lambda x: x.split(' ')[0] + ' ' + x.split(' ')[1]))
if feature.split('-')[1] == 'time':
feature_data = pd.DataFrame(columns=buses_i)
for carrier in carriers:
try:
# without simpl wildcard (bus names are "X X"):
feature_data = feature_data.append(n.generators_t.p_max_pu.filter(like=carrier)
.rename(columns=lambda x: x.split(' ')[0]))[buses_i]
except:
# with simpl wildcard (bus names are "X X X"):
feature_data = feature_data.append(n.generators_t.p_max_pu.filter(like=carrier)
.rename(columns=lambda x: x.split(' ')[0] + ' ' + x.split(' ')[1]))[buses_i]
feature_data = feature_data.T
feature_data.columns = feature_data.columns.astype(str) # Timestamp will raise error in sklearn>=v1.2
feature_data = feature_data.fillna(0)
return feature_data
|
58,680 |
def convert(args: argparse.Namespace):
output = Path(args.output[0])
if not os.path.exists(output):
print_error_and_exit(
f"The output path {output} doesn't exist. Please make sure to specify "
f"existing directory and try again."
)
return
for training_data_path in args.training_data:
if not os.path.exists(training_data_path):
print_error_and_exit(
f"The training data path {training_data_path} doesn't exist "
f"and will be skipped."
)
loop = asyncio.get_event_loop()
num_of_files_converted = 0
for file in os.listdir(training_data_path):
source_path = Path(training_data_path) / file
output_path = Path(output) / f"{source_path.stem}{CONVERTED_FILE_POSTFIX}"
if MarkdownReader.is_markdown_nlu_file(source_path):
convert_nlu(source_path, output_path, source_path)
num_of_files_converted += 1
elif MarkdownStoryReader.is_markdown_story_file(source_path):
loop.run_until_complete(
convert_core(source_path, output_path, source_path)
)
num_of_files_converted += 1
else:
print_warning(
f"Skipped file '{source_path}' since it's neither NLU "
"nor Core training data file."
)
print_info(f"Converted {num_of_files_converted} files, saved in '{output}'")
|
def convert(args: argparse.Namespace):
output = Path(args.output[0])
if not os.path.exists(output):
print_error_and_exit(
f"The output path {output} doesn't exist. Please make sure to specify "
f"existing directory and try again."
)
return
for training_data_path in args.training_data:
if not os.path.exists(training_data_path):
print_error_and_exit(
f"The training data path {training_data_path} doesn't exist "
f"and will be skipped."
)
loop = asyncio.get_event_loop()
num_of_files_converted = 0
for file in os.listdir(training_data_path):
source_path = Path(training_data_path) / file
output_path = Path(output) / f"{source_path.stem}{CONVERTED_FILE_POSTFIX}"
if MarkdownReader.is_markdown_nlu_file(source_path):
convert_nlu(source_path, output_path, source_path)
num_of_files_converted += 1
elif MarkdownStoryReader.is_markdown_story_file(source_path):
loop.run_until_complete(
convert_core(source_path, output_path, source_path)
)
num_of_files_converted += 1
else:
print_warning(
f"Skipped file '{source_path}' since it's neither an NLU "
"nor a Core training data file."
)
print_info(f"Converted {num_of_files_converted} files, saved in '{output}'")
|
65 |
def sync_completed_sponsored_books():
from internetarchive import search_items
params = {'page': 1, 'rows': 1000, 'scope': 'all'}
fields = ['identifier', 'openlibrary_edition']
q = 'collection:openlibraryscanningteam AND collection:inlibrary'
# XXX Note: This `search_items` query requires the `ia` tool (the
# one installed via virtualenv) to be configured with (scope:all)
# privileged s3 keys.
config = {'general': {'secure': False}}
items = search_items(q, fields=fields, params=params, config=config)
books = web.ctx.site.get_many([
'/books/%s' % i.get('openlibrary_edition') for i in items
])
unsynced = [b for b in books if not b.ocaid]
ocaid_lookup = dict(('/books/%s' % i.get('openlibrary_edition'), i.get('identifier')) for i in items)
for u in unsynced:
u.ocaid = ocaid_lookup[u.key]
print('saving: ' + u.ocaid)
# TODO: Perform save
# web.ctx.blah[u.key] = u ?
return unsynced
|
def sync_completed_sponsored_books():
from internetarchive import search_items
params = {'page': 1, 'rows': 1000, 'scope': 'all'}
fields = ['identifier', 'openlibrary_edition']
q = 'collection:openlibraryscanningteam AND collection:inlibrary'
# XXX Note: This `search_items` query requires the `ia` tool (the
# one installed via virtualenv) to be configured with (scope:all)
# privileged s3 keys.
config = {'general': {'secure': False}}
items = search_items(q, fields=fields, params=params, config=config)
books = web.ctx.site.get_many([
'/books/%s' % i.get('openlibrary_edition') for i in items
])
unsynced = [book for book in books if not book.ocaid]
ocaid_lookup = dict(('/books/%s' % i.get('openlibrary_edition'), i.get('identifier')) for i in items)
for u in unsynced:
u.ocaid = ocaid_lookup[u.key]
print('saving: ' + u.ocaid)
# TODO: Perform save
# web.ctx.blah[u.key] = u ?
return unsynced
|
22,285 |
def dist_css(*args):
"""
Transition function 'js' helper -- this is the modern way where all bundled
artifacts are in the unified 'dist'.
"""
urls = (url_for("/static/dist/%s.css?v=%s" % (name, server_starttime)) for name in args)
return stylesheet_link(*urls)
|
def dist_css(*args):
"""
Transition function 'css' helper -- this is the modern way where all bundled
artifacts are in the unified 'dist'.
"""
urls = (url_for("/static/dist/%s.css?v=%s" % (name, server_starttime)) for name in args)
return stylesheet_link(*urls)
|
1,606 |
def test_regressor_chain_w_fit_params():
rng = np.random.RandomState(0)
X, y = datasets.make_regression(n_targets=3)
weight = rng.random((y.shape[0], ))
model = RegressorChain(SGDRegressor())
# Fitting with params
fit_param = {'sample_weight': weight}
model.fit(X, y, **fit_param)
|
def test_regressor_chain_w_fit_params():
rng = np.random.RandomState(0)
X, y = datasets.make_regression(n_targets=3)
weight = rng.rand(y.shape[0])
model = RegressorChain(SGDRegressor())
# Fitting with params
fit_param = {'sample_weight': weight}
model.fit(X, y, **fit_param)
|
8,751 |
def rule(*patterns):
"""Decorate a function to be called when a line matches the given pattern
Each argument is a regular expression which will trigger the function.
This decorator can be used multiple times to add more rules.
If the Sopel instance is in a channel, or sent a PRIVMSG, where a string
matching this expression is said, the function will execute. Note that
captured groups here will be retrievable through the Trigger object later.
Inside the regular expression, some special directives can be used. $nick
will be replaced with the nick of the bot and , or :, and $nickname will be
replaced with the nick of the bot.
.. versionchanged:: 7.0
The :func:`rule` decorator can be called with many positional
arguments, each used to add a rule. This is equivalent as decorating
the same function many times with this decorator.
"""
def add_attribute(function):
if not hasattr(function, "rule"):
function.rule = []
for value in patterns:
if value not in function.rule:
function.rule.append(value)
return function
return add_attribute
|
def rule(*patterns):
"""Decorate a function to be called when a line matches the given pattern
Each argument is a regular expression which will trigger the function.
This decorator can be used multiple times to add more rules.
If the Sopel instance is in a channel, or sent a PRIVMSG, where a string
matching this expression is said, the function will execute. Note that
captured groups here will be retrievable through the Trigger object later.
Inside the regular expression, some special directives can be used. $nick
will be replaced with the nick of the bot and , or :, and $nickname will be
replaced with the nick of the bot.
.. versionchanged:: 7.0
The :func:`rule` decorator can be called with many positional
arguments, each used to add a rule. This is equivalent to decorating
the same function many times with this decorator.
"""
def add_attribute(function):
if not hasattr(function, "rule"):
function.rule = []
for value in patterns:
if value not in function.rule:
function.rule.append(value)
return function
return add_attribute
|
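A usage sketch of the decorator defined above, showing how repeated application accumulates patterns on the callable (the function body is a stand-in for a real Sopel callable):
@rule(r'hello!?', r'hi!?')
@rule(r'hey!?')
def greet(bot, trigger):
    pass

print(greet.rule)   # ['hey!?', 'hello!?', 'hi!?']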
7,029 |
def test_get_platform_warn_mode(caplog):
task_conf = {
'remote': {'host': 'cylcdevbox'},
'job': {
'batch system': 'pbs',
'batch submit command template': 'some template'
}
}
output = get_platform(task_conf, warn_only=True)
for forbiddenitem in (
'batch submit command template = some template',
'host = cylcdevbox',
'batch system = pbs'
):
assert forbiddenitem in output
|
def test_get_platform_warn_mode(caplog):
task_conf = {
'remote': {'host': 'cylcdevbox'},
'job': {
'batch system': 'pbs',
'batch submit command template': 'some template'
}
}
output = get_platform(task_conf, warn_only=True)
for forbidden_item in (
'batch submit command template = some template',
'host = cylcdevbox',
'batch system = pbs'
):
        assert forbidden_item in output
|
23,584 |
def read_epw(filename=None, coerce_year=None):
'''
Read an EPW file in to a pandas dataframe.
Note that values contained in the metadata dictionary are unchanged
from the EPW file.
EPW files are commonly used by building simulation professionals
and are widely available on the web. For example via:
https://energyplus.net/weather , http://climate.onebuilding.org or
http://www.ladybug.tools/epwmap/
Parameters
----------
filename : None or string, default None
If None, attempts to use a Tkinter file browser. A string can be
a relative file path, absolute file path, or url.
coerce_year : None or int, default None
If supplied, the year of the data will be set to this value. This can
be a useful feature because EPW data is composed of data from
different years.
Warning: EPW files always have 365*24 = 8760 data rows;
be careful with the use of leap years.
Returns
-------
Tuple of the form (data, metadata).
data : DataFrame
A pandas dataframe with the columns described in the table
below. For more detailed descriptions of each component, please
consult the EnergyPlus Auxiliary Programs documentation
available at: https://energyplus.net/documentation.
metadata : dict
The site metadata available in the file.
Notes
-----
The returned structures have the following fields.
=============== ====== ===================
key format description
=============== ====== ===================
loc String default identifier, not used
    city String site location
state-prov String state, province or region (if available)
country String site country code
data_type String type of original data source
WMO_code String WMO identifier
latitude Float site latitude
longitude Float site longitude
TZ Float UTC offset
altitude Float site elevation
=============== ====== ===================
============================= ======================================================================================================================================================
EPWData field description
============================= ======================================================================================================================================================
index A pandas datetime index. NOTE, the index is currently timezone unaware, and times are set to local standard time (daylight savings is not included)
year
month
day
hour
minute
data_source_unct Data source and uncertainty flags. See [1], chapter 2.13
t_drybulb Dry bulb temperature at the time indicated, deg C
t_dewpoint Dew-point temperature at the time indicated, deg C
    rel_hum Relative humidity at the time indicated, percent
atmospheric_pressure Station pressure at the time indicated, Pa
etr Extraterrestrial horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
etrn Extraterrestrial normal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
ghi_infrared Horizontal infrared radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
ghi Direct and diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
    dni Amount of direct normal radiation (modeled) recv'd during 60 minutes prior to timestamp, Wh/m^2
dhi Amount of diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
global_hor_illum Avg. total horizontal illuminance recv'd during the 60 minutes prior to timestamp, lx
direct_normal_illum Avg. direct normal illuminance recv'd during the 60 minutes prior to timestamp, lx
diffuse_horizontal_illum Avg. horizontal diffuse illuminance recv'd during the 60 minutes prior to timestamp, lx
zenith_luminance Avg. luminance at the sky's zenith during the 60 minutes prior to timestamp, cd/m^2
wind_direction Wind direction at time indicated, degrees from north (360 = north; 0 = undefined,calm)
wind_speed Wind speed at the time indicated, meter/second
    total_sky_cover Amount of sky dome covered by clouds or obscuring phenomena at time stamp, tenths of sky
    opaque_sky_cover Amount of sky dome covered by clouds or obscuring phenomena that prevent observing the sky at time stamp, tenths of sky
visibility Horizontal visibility at the time indicated, km
ceiling_height Height of cloud base above local terrain (7777=unlimited), meter
present_weather_observation Indicator for remaining fields: If 0, then the observed weather codes are taken from the following field. If 9, then “missing” weather is assumed. Since the primary use of these fields (Present Weather Observation and Present Weather Codes) is for rain/wet surfaces, a missing observation field or a missing weather code implies no rain.
present_weather_codes Present weather code, see [1], chapter 2.9.1.28
precipitable_water Total precipitable water contained in a column of unit cross section from earth to top of atmosphere, cm
aerosol_otpical_depth The broadband aerosol optical depth per unit of air mass due to extinction by aerosol component of atmosphere, unitless
snow_depth Snow depth in centimeters on the day indicated, (999 = missing data)
days_since_last_snowfall Number of days since last snowfall (maximum value of 88, where 88 = 88 or greater days; 99 = missing data)
albedo The ratio of reflected solar irradiance to global horizontal irradiance, unitless
liquid_precipitation_depth The amount of liquid precipitation observed at indicated time for the period indicated in the liquid precipitation quantity field, millimeter
liquid_precipitation_quantity The period of accumulation for the liquid precipitation depth field, hour
============================= ======================================================================================================================================================
References
----------
[1] EnergyPlus documentation, Auxiliary Programs
https://energyplus.net/documentation.
'''
if filename is None:
try:
filename = _interactive_load()
except ImportError:
raise ImportError('Interactive load failed. Tkinter not supported '
'on this system. Try installing X-Quartz and '
'reloading')
if filename.startswith('http'):
# Attempts to download online EPW file
# See comments above for possible online sources
request = Request(filename, headers={'User-Agent': (
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 '
'Safari/537.36')})
response = urlopen(request)
csvdata = io.StringIO(response.read().decode(errors='ignore'))
else:
# Assume it's accessible via the file system
csvdata = open(filename, 'r')
# Read line with metadata
firstline = csvdata.readline()
head = ['loc','city', 'state-prov', 'country', 'data_type','WMO_code',
'latitude', 'longitude', 'TZ','altitude']
meta = dict(zip(head, firstline.rstrip('\n').split(",")))
meta['altitude'] = float(meta['altitude'])
meta['latitude'] = float(meta['latitude'])
meta['longitude'] = float(meta['longitude'])
meta['TZ'] = float(meta['TZ'])
colnames = ['year', 'month', 'day', 'hour', 'minute', 'data_source_unct',
't_drybulb', 't_dewpoint', 'rel_hum', 'atmospheric_pressure',
'etr', 'etrn', 'ghi_infrared', 'ghi', 'dni', 'dhi',
'global_hor_illum', 'direct_normal_illum',
'diffuse_horizontal_illum', 'zenith_luminance',
'wind_direction', 'wind_speed', 'total_sky_cover',
'opaque_sky_cover', 'visibility', 'ceiling_height',
'present_weather_observation', 'present_weather_codes',
'precipitable_water', 'aerosol_otpical_depth', 'snow_depth',
'days_since_last_snowfall', 'albedo',
'liquid_precipitation_depth', 'liquid_precipitation_quantity']
    # We only have to skip 6 rows instead of 7 because we have already used
    # the readline call above.
    data = pd.read_csv(csvdata, skiprows=6, header=0, names=colnames)
# Shift one hour back because EPW's usage of hour 24
# and dateutil's inability to handle that.
data["hour"] = data["hour"].apply(lambda x: x - 1)
# Change to single year if requested
if coerce_year is not None:
data["year"] = coerce_year
# Update index with correct date information
data = data.set_index(pd.to_datetime(data[['year','month','day','hour']]))
# Localize time series
data = data.tz_localize(int(meta['TZ'] * 3600))
return data, meta
|
def read_epw(filename=None, coerce_year=None):
'''
Read an EPW file in to a pandas dataframe.
Note that values contained in the metadata dictionary are unchanged
from the EPW file.
EPW files are commonly used by building simulation professionals
and are widely available on the web. For example via:
https://energyplus.net/weather , http://climate.onebuilding.org or
http://www.ladybug.tools/epwmap/
Parameters
----------
filename : None or string, default None
If None, attempts to use a Tkinter file browser. A string can be
a relative file path, absolute file path, or url.
coerce_year : None or int, default None
If supplied, the year of the data will be set to this value. This can
be a useful feature because EPW data is composed of data from
different years.
Warning: EPW files always have 365*24 = 8760 data rows;
be careful with the use of leap years.
Returns
-------
Tuple of the form (data, metadata).
data : DataFrame
A pandas dataframe with the columns described in the table
below. For more detailed descriptions of each component, please
consult the EnergyPlus Auxiliary Programs documentation
available at: https://energyplus.net/documentation.
metadata : dict
The site metadata available in the file.
Notes
-----
The returned structures have the following fields.
=============== ====== ===================
key format description
=============== ====== ===================
loc String default identifier, not used
    city String site location
state-prov String state, province or region (if available)
country String site country code
data_type String type of original data source
WMO_code String WMO identifier
latitude Float site latitude
longitude Float site longitude
TZ Float UTC offset
altitude Float site elevation
=============== ====== ===================
============================= ======================================================================================================================================================
EPWData field description
============================= ======================================================================================================================================================
index A pandas datetime index. NOTE, the index is currently timezone unaware, and times are set to local standard time (daylight savings is not included)
year
month
day
hour
minute
data_source_unct Data source and uncertainty flags. See [1], chapter 2.13
t_drybulb Dry bulb temperature at the time indicated, deg C
t_dewpoint Dew-point temperature at the time indicated, deg C
    rel_hum Relative humidity at the time indicated, percent
atmospheric_pressure Station pressure at the time indicated, Pa
etr Extraterrestrial horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
etrn Extraterrestrial normal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
ghi_infrared Horizontal infrared radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
ghi Direct and diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
    dni Amount of direct normal radiation (modeled) recv'd during 60 minutes prior to timestamp, Wh/m^2
dhi Amount of diffuse horizontal radiation recv'd during 60 minutes prior to timestamp, Wh/m^2
global_hor_illum Avg. total horizontal illuminance recv'd during the 60 minutes prior to timestamp, lx
direct_normal_illum Avg. direct normal illuminance recv'd during the 60 minutes prior to timestamp, lx
diffuse_horizontal_illum Avg. horizontal diffuse illuminance recv'd during the 60 minutes prior to timestamp, lx
zenith_luminance Avg. luminance at the sky's zenith during the 60 minutes prior to timestamp, cd/m^2
wind_direction Wind direction at time indicated, degrees from north (360 = north; 0 = undefined,calm)
wind_speed Wind speed at the time indicated, meter/second
    total_sky_cover               Amount of sky dome covered by clouds or obscuring phenomena at time stamp, tenths of sky
    opaque_sky_cover              Amount of sky dome covered by clouds or obscuring phenomena that prevent observing the sky at time stamp, tenths of sky
visibility Horizontal visibility at the time indicated, km
ceiling_height Height of cloud base above local terrain (7777=unlimited), meter
present_weather_observation Indicator for remaining fields: If 0, then the observed weather codes are taken from the following field. If 9, then “missing” weather is assumed. Since the primary use of these fields (Present Weather Observation and Present Weather Codes) is for rain/wet surfaces, a missing observation field or a missing weather code implies no rain.
present_weather_codes Present weather code, see [1], chapter 2.9.1.28
precipitable_water Total precipitable water contained in a column of unit cross section from earth to top of atmosphere, cm
aerosol_otpical_depth The broadband aerosol optical depth per unit of air mass due to extinction by aerosol component of atmosphere, unitless
snow_depth Snow depth in centimeters on the day indicated, (999 = missing data)
days_since_last_snowfall Number of days since last snowfall (maximum value of 88, where 88 = 88 or greater days; 99 = missing data)
albedo The ratio of reflected solar irradiance to global horizontal irradiance, unitless
liquid_precipitation_depth The amount of liquid precipitation observed at indicated time for the period indicated in the liquid precipitation quantity field, millimeter
liquid_precipitation_quantity The period of accumulation for the liquid precipitation depth field, hour
============================= ======================================================================================================================================================
References
----------
[1] EnergyPlus documentation, Auxiliary Programs
https://energyplus.net/documentation.
'''
if filename is None:
try:
filename = _interactive_load()
except ImportError:
raise ImportError('Interactive load failed. Tkinter not supported '
'on this system. Try installing X-Quartz and '
'reloading')
if filename.startswith('http'):
# Attempts to download online EPW file
# See comments above for possible online sources
request = Request(filename, headers={'User-Agent': (
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 '
'Safari/537.36')})
response = urlopen(request)
csvdata = io.StringIO(response.read().decode(errors='ignore'))
else:
# Assume it's accessible via the file system
csvdata = open(filename, 'r')
# Read line with metadata
firstline = csvdata.readline()
    head = ['loc', 'city', 'state-prov', 'country', 'data_type', 'WMO_code',
            'latitude', 'longitude', 'TZ', 'altitude']
meta = dict(zip(head, firstline.rstrip('\n').split(",")))
meta['altitude'] = float(meta['altitude'])
meta['latitude'] = float(meta['latitude'])
meta['longitude'] = float(meta['longitude'])
meta['TZ'] = float(meta['TZ'])
colnames = ['year', 'month', 'day', 'hour', 'minute', 'data_source_unct',
't_drybulb', 't_dewpoint', 'rel_hum', 'atmospheric_pressure',
'etr', 'etrn', 'ghi_infrared', 'ghi', 'dni', 'dhi',
'global_hor_illum', 'direct_normal_illum',
'diffuse_horizontal_illum', 'zenith_luminance',
'wind_direction', 'wind_speed', 'total_sky_cover',
'opaque_sky_cover', 'visibility', 'ceiling_height',
'present_weather_observation', 'present_weather_codes',
'precipitable_water', 'aerosol_otpical_depth', 'snow_depth',
'days_since_last_snowfall', 'albedo',
'liquid_precipitation_depth', 'liquid_precipitation_quantity']
    # We only have to skip 6 rows instead of 7 because we have already used
    # the readline call above.
    data = pd.read_csv(csvdata, skiprows=6, header=0, names=colnames)
    # Shift one hour back because of EPW's use of hour 24
    # and dateutil's inability to handle it.
data["hour"] = data["hour"].apply(lambda x: x - 1)
# Change to single year if requested
if coerce_year is not None:
data["year"] = coerce_year
# Update index with correct date information
data = data.set_index(pd.to_datetime(data[['year', 'month', 'day', 'hour']]))
# Localize time series
data = data.tz_localize(int(meta['TZ'] * 3600))
return data, meta
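
# Minimal usage sketch for read_epw, assuming a local EPW file at the
# hypothetical path 'weather.epw' and the pandas import used above.
def _example_read_epw():
    data, metadata = read_epw('weather.epw', coerce_year=2020)
    # Site metadata parsed from the first line of the file.
    print(metadata['city'], metadata['country'], metadata['TZ'])
    # The index is localized to the site's fixed UTC offset and, because of
    # coerce_year, every timestamp now carries the year 2020.
    print(data.index[0], data.index[-1])
    print(data[['ghi', 'dni', 'dhi']].head())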
|
32,450 |
def main() -> None: # pragma: no cover
params = demisto.params()
url = params.get('url')
api_version = params.get('api_version')
token = demisto.params().get('credentials', {}).get('password')
base_url = urljoin(url, f'/api/{api_version}/')
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
first_fetch = params.get('first_fetch')
max_fetch = params.get('max_fetch')
vendor, product = params.get('vendor', 'netskope'), params.get('product', 'netskope')
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(base_url, token, api_version, verify_certificate, proxy)
last_run = demisto.getLastRun()
if 'alert' not in last_run and 'application' not in last_run and 'audit' not in last_run \
and 'network' not in last_run:
last_run = arg_to_seconds_timestamp(first_fetch)
last_run = {
'alert': last_run,
'application': last_run,
'audit': last_run,
'network': last_run
}
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client, api_version, last_run)
return_results(result)
elif demisto.command() == 'netskope-get-events':
if api_version == 'v1':
return_results(v1_get_events_command(client, demisto.args(), last_run))
else:
return_results(v2_get_events_command(client, demisto.args(), last_run))
elif demisto.command() == 'fetch-events':
if api_version == 'v1':
events = client.get_events_request_v1(last_run, max_fetch)
alerts = client.v1_get_alerts_request(last_run, max_fetch)
if alerts:
events.extend(alerts)
demisto.setLastRun(get_last_run(events, last_run))
demisto.debug(f'Setting the last_run to: {last_run}')
send_events_to_xsiam(events=events, vendor=vendor, product=product)
else:
events = client.get_events_request_v2(last_run, max_fetch)
demisto.setLastRun(get_last_run(events, last_run))
demisto.debug(f'Setting the last_run to: {last_run}')
send_events_to_xsiam(events=events, vendor=vendor, product=product)
# Log exceptions and return errors
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main() -> None: # pragma: no cover
params = demisto.params()
url = params.get('url')
api_version = params.get('api_version')
token = demisto.params().get('credentials', {}).get('password')
base_url = urljoin(url, f'/api/{api_version}/')
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
first_fetch = params.get('first_fetch')
max_fetch = params.get('max_fetch')
vendor, product = params.get('vendor', 'netskope'), params.get('product', 'netskope')
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(base_url, token, api_version, verify_certificate, proxy)
last_run = demisto.getLastRun()
if 'alert' not in last_run and 'application' not in last_run and 'audit' not in last_run \
and 'network' not in last_run:
first_fetch = arg_to_datetime(first_fetch).timestamp() // 100
last_run = {
'alert': last_run,
'application': last_run,
'audit': last_run,
'network': last_run
}
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client, api_version, last_run)
return_results(result)
elif demisto.command() == 'netskope-get-events':
if api_version == 'v1':
return_results(v1_get_events_command(client, demisto.args(), last_run))
else:
return_results(v2_get_events_command(client, demisto.args(), last_run))
elif demisto.command() == 'fetch-events':
if api_version == 'v1':
events = client.get_events_request_v1(last_run, max_fetch)
alerts = client.v1_get_alerts_request(last_run, max_fetch)
if alerts:
events.extend(alerts)
demisto.setLastRun(get_last_run(events, last_run))
demisto.debug(f'Setting the last_run to: {last_run}')
send_events_to_xsiam(events=events, vendor=vendor, product=product)
else:
events = client.get_events_request_v2(last_run, max_fetch)
demisto.setLastRun(get_last_run(events, last_run))
demisto.debug(f'Setting the last_run to: {last_run}')
send_events_to_xsiam(events=events, vendor=vendor, product=product)
# Log exceptions and return errors
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
45,815 |
def load_camera_data(file_name):
"""Loads the camera data using the syntel SDK and converts to torch.Tensor."""
if not os.path.isfile(file_name):
raise AssertionError("Invalid file {}".format(file_name))
import sintel_io
intrinsic, extrinsic = sintel_io.cam_read(file_name)
return intrinsic, extrinsic
|
def load_camera_data(file_name):
"""Loads the camera data using the syntel SDK and converts to torch.Tensor."""
if not os.path.isfile(file_name):
raise FileExistsError("Invalid file {}".format(file_name))
import sintel_io
intrinsic, extrinsic = sintel_io.cam_read(file_name)
return intrinsic, extrinsic
|
55,601 |
def export_original_specular(blender_material, export_settings):
specular_extension = {}
original_specular_socket = gltf2_blender_get.get_socket_old(blender_material, 'specular glTF')
original_specularcolor_socket = gltf2_blender_get.get_socket_old(blender_material, 'specularColor glTF')
if original_specular_socket is None or original_specularcolor_socket is None:
return None, None
specular_non_linked = isinstance(original_specular_socket, bpy.types.NodeSocket) and not original_specular_socket.is_linked
specularcolor_non_linked = isinstance(original_specularcolor_socket, bpy.types.NodeSocket) and not original_specularcolor_socket.is_linked
use_actives_uvmaps = []
if specular_non_linked is True:
fac = original_specular_socket.default_value
if fac != 1.0:
specular_extension['specularFactor'] = fac
else:
# Factor
fac = gltf2_blender_get.get_factor_from_socket(original_specular_socket, kind='VALUE')
if fac is not None and fac != 1.0:
specular_extension['specularFactor'] = fac
# Texture
if gltf2_blender_get.has_image_node_from_socket(original_specular_socket):
original_specular_texture, original_specular_use_active_uvmap, _ = gltf2_blender_gather_texture_info.gather_texture_info(
original_specular_socket,
(original_specular_socket,),
export_settings,
)
specular_extension['specularTexture'] = original_specular_texture
if original_specular_use_active_uvmap:
use_actives_uvmaps.append("specularTexture")
if specularcolor_non_linked is True:
color = original_specularcolor_socket.default_value[:3]
if color != [1.0, 1.0, 1.0]:
specular_extension['specularColorFactor'] = color
else:
# Factor
fac = gltf2_blender_get.get_factor_from_socket(original_specularcolor_socket, kind='RGB')
if fac is not None and fac != [1.0, 1.0, 1.0]:
specular_extension['specularColorFactor'] = fac
# Texture
if gltf2_blender_get.has_image_node_from_socket(original_specularcolor_socket):
original_specularcolor_texture, original_specularcolor_use_active_uvmap, _ = gltf2_blender_gather_texture_info.gather_texture_info(
original_specularcolor_socket,
(original_specularcolor_socket,),
export_settings,
)
specular_extension['specularColorTexture'] = original_specularcolor_texture
if original_specularcolor_use_active_uvmap:
use_actives_uvmaps.append("specularColorTexture")
return Extension('KHR_materials_specular', specular_extension, False), use_actives_uvmaps
|
def export_original_specular(blender_material, export_settings):
specular_extension = {}
original_specular_socket = gltf2_blender_get.get_socket_old(blender_material, 'Specular')
original_specularcolor_socket = gltf2_blender_get.get_socket_old(blender_material, 'Specular Color')
if original_specular_socket is None or original_specularcolor_socket is None:
return None, None
specular_non_linked = isinstance(original_specular_socket, bpy.types.NodeSocket) and not original_specular_socket.is_linked
specularcolor_non_linked = isinstance(original_specularcolor_socket, bpy.types.NodeSocket) and not original_specularcolor_socket.is_linked
use_actives_uvmaps = []
if specular_non_linked is True:
fac = original_specular_socket.default_value
if fac != 1.0:
specular_extension['specularFactor'] = fac
else:
# Factor
fac = gltf2_blender_get.get_factor_from_socket(original_specular_socket, kind='VALUE')
if fac is not None and fac != 1.0:
specular_extension['specularFactor'] = fac
# Texture
if gltf2_blender_get.has_image_node_from_socket(original_specular_socket):
original_specular_texture, original_specular_use_active_uvmap, _ = gltf2_blender_gather_texture_info.gather_texture_info(
original_specular_socket,
(original_specular_socket,),
export_settings,
)
specular_extension['specularTexture'] = original_specular_texture
if original_specular_use_active_uvmap:
use_actives_uvmaps.append("specularTexture")
if specularcolor_non_linked is True:
color = original_specularcolor_socket.default_value[:3]
if color != [1.0, 1.0, 1.0]:
specular_extension['specularColorFactor'] = color
else:
# Factor
fac = gltf2_blender_get.get_factor_from_socket(original_specularcolor_socket, kind='RGB')
if fac is not None and fac != [1.0, 1.0, 1.0]:
specular_extension['specularColorFactor'] = fac
# Texture
if gltf2_blender_get.has_image_node_from_socket(original_specularcolor_socket):
original_specularcolor_texture, original_specularcolor_use_active_uvmap, _ = gltf2_blender_gather_texture_info.gather_texture_info(
original_specularcolor_socket,
(original_specularcolor_socket,),
export_settings,
)
specular_extension['specularColorTexture'] = original_specularcolor_texture
if original_specularcolor_use_active_uvmap:
use_actives_uvmaps.append("specularColorTexture")
return Extension('KHR_materials_specular', specular_extension, False), use_actives_uvmaps
|
4,305 |
def _read_nedf_eeg(filename: str):
"""
Read header info and EEG data from an .nedf file
Parameters
----------
filename : str
Path to the .nedf file.
Returns
-------
eeg : array, shape (n_samples, n_channels)
Unscaled EEG data
info : dict
Information from the file header
triggers : array, shape (n_annots, 2)
Start samples and values of each trigger
scale : float
Scaling factor for the EEG data
"""
info, dt = parse_nedf_header(filename)
# to quote the original matlab implementation:
# "binary data will always start at byte 5120"
binstart = 10240
with open(filename, mode='rb') as f:
f.seek(binstart, os.SEEK_SET)
data = np.fromfile(f, dtype=dt)
# convert uint8-triplet -> float32
eeg = data['data']['eeg'] @ [1 << 16, 1 << 8, 1.]
eeg = eeg.reshape((-1, info['nchan']))
# convert sign if necessary
eeg[eeg > (1 << 23)] -= 1 << 24
triggers = data['data']['trig'].flatten()
triggerind = triggers.nonzero()[0]
triggers = np.stack((triggerind, triggers[triggerind])).T
# scale channels accordingly (here: to volts)
scale = 2.4 / (6.0 * 8388607)
return eeg, info, triggers, scale
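
# Minimal usage sketch for _read_nedf_eeg, assuming a hypothetical recording at
# 'recording.nedf' and the numpy/parse_nedf_header helpers used above.
def _example_read_nedf():
    eeg, info, triggers, scale = _read_nedf_eeg('recording.nedf')
    # Apply the returned scale factor to convert the raw counts to volts.
    eeg_volts = eeg * scale
    print(eeg_volts.shape, info['nchan'])
    # Each trigger row holds (start sample, trigger value).
    print(triggers[:5])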
|
def _read_nedf_eeg(filename: str):
"""
Read header info and EEG data from an .nedf file
Parameters
----------
filename : str
Path to the .nedf file.
Returns
-------
eeg : array, shape (n_samples, n_channels)
Unscaled EEG data
info : dict
Information from the file header
triggers : array, shape (n_annots, 2)
Start samples and values of each trigger
scale : float
Scaling factor for the EEG data.
"""
info, dt = parse_nedf_header(filename)
# to quote the original matlab implementation:
# "binary data will always start at byte 5120"
binstart = 10240
with open(filename, mode='rb') as f:
f.seek(binstart, os.SEEK_SET)
data = np.fromfile(f, dtype=dt)
# convert uint8-triplet -> float32
eeg = data['data']['eeg'] @ [1 << 16, 1 << 8, 1.]
eeg = eeg.reshape((-1, info['nchan']))
# convert sign if necessary
eeg[eeg > (1 << 23)] -= 1 << 24
triggers = data['data']['trig'].flatten()
triggerind = triggers.nonzero()[0]
triggers = np.stack((triggerind, triggers[triggerind])).T
# scale channels accordingly (here: to volts)
scale = 2.4 / (6.0 * 8388607)
return eeg, info, triggers, scale
|
35,569 |
def pad(img: Tensor, padding: List[int], fill: int, padding_mode: str = "constant") -> Tensor:
r"""Pad the given Tensor Image on all sides with specified padding mode and fill value.
Args:
img (Tensor): Image to be padded.
padding (int or tuple or list): Padding on each border. If a single int is provided this
is used to pad all borders. If tuple of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a tuple of length 4 is provided
this is the padding for the left, top, right and bottom borders
respectively.
fill (int): Pixel fill value for constant fill. Default is 0.
This value is only used when the padding_mode is constant
padding_mode (str): Type of padding. Only constant supported for Tensors.
- constant: pads with a constant value, this value is specified with fill
Returns:
Tensor: Padded image.
"""
if not _is_tensor_a_torch_image(img):
raise TypeError("tensor is not a torch image.")
if not isinstance(padding, (int, tuple, list)):
raise TypeError("Got inappropriate padding arg")
if not isinstance(fill, (int, float)):
raise TypeError("Got inappropriate fill arg")
if not isinstance(padding_mode, str):
raise TypeError("Got inappropriate padding_mode arg")
if isinstance(padding, tuple):
padding = list(padding)
if isinstance(padding, list) and len(padding) not in [1, 2, 4]:
raise ValueError("Padding must be an int or a 2, or 4 element tuple, not a " +
"{} element tuple".format(len(padding)))
if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
raise ValueError("Only constant padding_mode supported for torch tensors")
if isinstance(padding, int):
if torch.jit.is_scripting():
raise ValueError("padding can't be an int while torchscripting, set it as a list [value, ]")
pad_left = pad_right = pad_top = pad_bottom = padding
elif len(padding) == 1:
pad_left = pad_right = pad_top = pad_bottom = padding[0]
elif len(padding) == 2:
pad_left = pad_right = padding[0]
pad_top = pad_bottom = padding[1]
else:
pad_left = padding[0]
pad_top = padding[1]
pad_right = padding[2]
pad_bottom = padding[3]
p = [pad_left, pad_right, pad_top, pad_bottom]
img = torch.nn.functional.pad(img, p, mode=padding_mode, value=float(fill))
return img
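
# Minimal usage sketch for pad, assuming torch is importable. A 1x3x4x4 image
# is padded with (left=1, top=2, right=3, bottom=4) and constant fill 0, so the
# height grows to 4 + 2 + 4 = 10 and the width to 4 + 1 + 3 = 8.
def _example_pad():
    img = torch.zeros(1, 3, 4, 4)
    out = pad(img, [1, 2, 3, 4], fill=0, padding_mode="constant")
    print(out.shape)  # torch.Size([1, 3, 10, 8])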
|
def pad(img: Tensor, padding: List[int], fill: int, padding_mode: str = "constant") -> Tensor:
r"""Pad the given Tensor Image on all sides with specified padding mode and fill value.
Args:
img (Tensor): Image to be padded.
padding (int or tuple or list): Padding on each border. If a single int is provided this
is used to pad all borders. If tuple of length 2 is provided this is the padding
on left/right and top/bottom respectively. If a tuple of length 4 is provided
this is the padding for the left, top, right and bottom borders
respectively.
fill (int): Pixel fill value for constant fill. Default is 0.
This value is only used when the padding_mode is constant
padding_mode (str): Type of padding. Only constant supported for Tensors.
- constant: pads with a constant value, this value is specified with fill
Returns:
Tensor: Padded image.
"""
if not _is_tensor_a_torch_image(img):
raise TypeError("tensor is not a torch image.")
if not isinstance(padding, (int, tuple, list)):
raise TypeError("Got inappropriate padding arg")
if not isinstance(fill, (int, float)):
raise TypeError("Got inappropriate fill arg")
if not isinstance(padding_mode, str):
raise TypeError("Got inappropriate padding_mode arg")
if isinstance(padding, tuple):
padding = list(padding)
if isinstance(padding, list) and len(padding) not in [1, 2, 4]:
raise ValueError("Padding must be an int or a 2, or 4 element tuple, not a " +
"{} element tuple".format(len(padding)))
if padding_mode not in ["constant"]:
raise ValueError("Only constant padding_mode supported for torch tensors")
if isinstance(padding, int):
if torch.jit.is_scripting():
raise ValueError("padding can't be an int while torchscripting, set it as a list [value, ]")
pad_left = pad_right = pad_top = pad_bottom = padding
elif len(padding) == 1:
pad_left = pad_right = pad_top = pad_bottom = padding[0]
elif len(padding) == 2:
pad_left = pad_right = padding[0]
pad_top = pad_bottom = padding[1]
else:
pad_left = padding[0]
pad_top = padding[1]
pad_right = padding[2]
pad_bottom = padding[3]
p = [pad_left, pad_right, pad_top, pad_bottom]
img = torch.nn.functional.pad(img, p, mode=padding_mode, value=float(fill))
return img
|
2,827 |
def chi2_kernel(X, Y=None, gamma=1.0):
"""Compute the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Input array/matrix X.
Y : ndarray of shape (n_samples_Y, n_features), default=None
Input array/matrix Y.
gamma : float, default=1
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
        Returns the exponential chi-squared kernel between X and Y.
See Also
--------
additive_chi2_kernel : The additive version of this kernel.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
https://hal.archives-ouvertes.fr/hal-00171412/document
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
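
# Minimal numeric sketch for chi2_kernel, assuming numpy (np) and the helpers
# imported alongside this function. Identical rows give exp(0) = 1.
def _example_chi2_kernel():
    X = np.array([[0.5, 0.5], [1.0, 0.0]])
    Y = np.array([[0.5, 0.5], [0.0, 1.0]])
    K = chi2_kernel(X, Y, gamma=1.0)
    print(K.shape)   # (2, 2)
    print(K[0, 0])   # 1.0 because X[0] == Y[0]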
|
def chi2_kernel(X, Y=None, gamma=1.0):
"""Compute the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Input array/matrix X.
Y : ndarray of shape (n_samples_Y, n_features), default=None
An optional second feature array. If `None`, uses `Y=X`.
gamma : float, default=1
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : ndarray of shape (n_samples_X, n_samples_Y)
        Returns the exponential chi-squared kernel between X and Y.
See Also
--------
additive_chi2_kernel : The additive version of this kernel.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
https://hal.archives-ouvertes.fr/hal-00171412/document
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
|
42,349 |
def get_role_argspec(role, collection=None, playbook_dir=None, **kwargs):
'''
Run an ``ansible-doc`` command to get a role argument specification.
.. note:: Version added: 2.2
:param str role: Simple role name, or fully qualified collection role name, to query.
:param str collection: If specified, will be combined with the role name to form a fully qualified collection role name.
If this is supplied, the ``role`` param should not be fully qualified.
    :param str playbook_dir: This parameter is used to set the relative path to handle playbook adjacent installed roles.
:param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
:param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
the work directory within container.
:param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param bool quiet: Disable all output
:param bool json_mode: Store event data in place of stdout on the console and in the stdout file
:param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
    :param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` or ``subprocess`` invocation
        (based on ``runner_mode`` selected) while executing the command. If the timeout is triggered, it will force cancel the execution.
:param bool process_isolation: Enable process isolation, using a container engine (e.g. podman).
:param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
    :param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels'. (default: None)
:param list container_options: List of container options to pass to execution engine.
:param str container_workdir: The working directory within the container.
:param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param str private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param str ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param function status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout)
:param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
        value is set to 'True' it will raise 'AnsibleRunnerException' exception; if set to 'False' it logs a debug message and continues execution.
Default value is 'False'
:returns: A tuple of response and error string. The response is a python dictionary object
(as returned by ansible-doc JSON output) containing each role found, or an empty dict
if none are found.
'''
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
rd = DocConfig(**kwargs)
rd.prepare_role_argspec_command(role, collection, playbook_dir)
r = Runner(rd,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
r.run()
response = r.stdout.read()
error = r.stderr.read()
if response:
response = json.loads(sanitize_json_response(response))
return response, error
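
# Minimal usage sketch for get_role_argspec. The role, collection and private
# data directory names below are hypothetical placeholders, not values implied
# by the source.
def _example_get_role_argspec():
    response, error = get_role_argspec(
        'my_role',
        collection='my_namespace.my_collection',
        private_data_dir='/tmp/runner-doc',
        quiet=True,
    )
    if error:
        print('ansible-doc stderr:', error)
    # The response maps each role found to its argument specification.
    for role_name, argspec in (response or {}).items():
        print(role_name, list(argspec.keys()))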
|
def get_role_argspec(role, collection=None, playbook_dir=None, **kwargs):
'''
Run an ``ansible-doc`` command to get a role argument specification.
.. note:: Version added: 2.2
:param str role: Simple role name, or fully qualified collection role name, to query.
:param str collection: If specified, will be combined with the role name to form a fully qualified collection role name.
If this is supplied, the ``role`` param should not be fully qualified.
    :param str playbook_dir: This parameter is used to set the relative path to handle playbook adjacent installed roles.
:param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
:param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
the work directory within container.
:param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param bool quiet: Disable all output
:param bool json_mode: Store event data in place of stdout on the console and in the stdout file
:param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
    :param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` or ``subprocess`` invocation
        (based on ``runner_mode`` selected) while executing the command. If the timeout is triggered, it will force cancel the execution.
:param bool process_isolation: Enable process isolation, using a container engine (e.g. podman).
:param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
    :param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels'. (default: None)
:param list container_options: List of container options to pass to execution engine.
:param str container_workdir: The working directory within the container.
:param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param str private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param str ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param function status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout)
:param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if
value is set to 'True' it will raise 'AnsibleRunnerException' exception. If set to 'False', log a debug message and continue execution.
Default value is 'False'
:returns: A tuple of response and error string. The response is a python dictionary object
(as returned by ansible-doc JSON output) containing each role found, or an empty dict
if none are found.
'''
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
rd = DocConfig(**kwargs)
rd.prepare_role_argspec_command(role, collection, playbook_dir)
r = Runner(rd,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
r.run()
response = r.stdout.read()
error = r.stderr.read()
if response:
response = json.loads(sanitize_json_response(response))
return response, error
|
55,030 |
def pauli_mult_with_phase(pauli_1, pauli_2, wire_map=None):
r"""Multiply two Pauli words together including the global phase.
Two Pauli operations can be multiplied together by taking the additive
OR of their binary symplectic representations. The phase is computed by
looking at the number of times we have the products XY, YZ, or ZX (adds a
phase of :math:`i`), or YX, ZY, XZ (adds a phase of :math:`-i`).
Args:
pauli_1 (.Operation): A Pauli word.
pauli_2 (.Operation): A Pauli word to multiply with the first one.
wire_map (dict[Union[str, int], int]): dictionary containing all wire labels used in the Pauli
word as keys, and unique integer labels as their values. If no wire map is
provided, the map will be constructed from the set of wires acted on
by the input Pauli words.
Returns:
tuple[.Operation, complex]: The product of ``pauli_1`` and ``pauli_2``, and the
global phase.
**Example**
This function works the same as ``pauli_mult`` but also returns the global
phase accumulated as a result of the Pauli product rules
:math:`\sigma_i \sigma_j = i \sigma_k`.
>>> from pennylane.pauli import pauli_mult_with_phase
>>> pauli_1 = qml.PauliX(0) @ qml.PauliZ(1)
>>> pauli_2 = qml.PauliY(0) @ qml.PauliZ(1)
>>> product, phase = pauli_mult_with_phase(pauli_1, pauli_2)
>>> product
PauliZ(wires=[0])
>>> phase
1j
"""
# If no wire map is specified, generate one from the union of wires
# in both Paulis.
if wire_map is None:
wire_labels = set(pauli_1.wires.labels + pauli_2.wires.labels)
wire_map = {label: i for i, label in enumerate(wire_labels)}
# Get the product; use our earlier function
pauli_product = pauli_mult(pauli_1, pauli_2, wire_map)
pauli_1_names = [pauli_1.name] if isinstance(pauli_1.name, str) else pauli_1.name
pauli_2_names = [pauli_2.name] if isinstance(pauli_2.name, str) else pauli_2.name
pauli_1_placeholder = 0
pauli_2_placeholder = 0
phase = 1
for wire in wire_map.keys():
if wire in pauli_1.wires:
pauli_1_op_name = pauli_1_names[pauli_1_placeholder]
pauli_1_placeholder += 1
else:
pauli_1_op_name = "Identity"
if wire in pauli_2.wires:
pauli_2_op_name = pauli_2_names[pauli_2_placeholder]
pauli_2_placeholder += 1
else:
pauli_2_op_name = "Identity"
# If we have identities anywhere we don't pick up a phase
if pauli_1_op_name == "Identity" or pauli_2_op_name == "Identity":
continue
# Likewise, no additional phase if the Paulis are the same
if pauli_1_op_name == pauli_2_op_name:
continue
# Use Pauli commutation rules to determine the phase
pauli_ordering = (pauli_1_op_name, pauli_2_op_name)
pos_phases = [("PauliX", "PauliY"), ("PauliY", "PauliZ"), ("PauliZ", "PauliX")]
if pauli_ordering in pos_phases:
phase *= 1j
else:
phase *= -1j
return pauli_product, phase
|
def pauli_mult_with_phase(pauli_1, pauli_2, wire_map=None):
r"""Multiply two Pauli words together including the global phase.
Two Pauli operations can be multiplied together by taking the additive
OR of their binary symplectic representations. The phase is computed by
looking at the number of times we have the products XY, YZ, or ZX (adds a
phase of :math:`i`), or YX, ZY, XZ (adds a phase of :math:`-i`).
Args:
pauli_1 (.Operation): A Pauli word.
pauli_2 (.Operation): A Pauli word to multiply with the first one.
wire_map (dict[Union[str, int], int]): dictionary containing all wire labels used in the Pauli
word as keys, and unique integer labels as their values. If no wire map is
provided, the map will be constructed from the set of wires acted on
by the input Pauli words.
Returns:
tuple[.Operation, complex]: The product of ``pauli_1`` and ``pauli_2``, and the
global phase.
**Example**
This function works the same as ``pauli_mult`` but also returns the global
phase accumulated as a result of the Pauli product rules
:math:`\sigma_i \sigma_j = i \sigma_k`.
>>> from pennylane.pauli import pauli_mult_with_phase
>>> pauli_1 = qml.PauliX(0) @ qml.PauliZ(1)
>>> pauli_2 = qml.PauliY(0) @ qml.PauliZ(1)
>>> product, phase = pauli_mult_with_phase(pauli_1, pauli_2)
>>> product
PauliZ(wires=[0])
>>> phase
1j
"""
# If no wire map is specified, generate one from the union of wires
# in both Paulis.
if wire_map is None:
wire_labels = set(pauli_1.wires.labels + pauli_2.wires.labels)
wire_map = {label: i for i, label in enumerate(wire_labels)}
# Get the product; use our earlier function
pauli_product = pauli_mult(pauli_1, pauli_2, wire_map)
pauli_1_names = [pauli_1.name] if isinstance(pauli_1.name, str) else pauli_1.name
pauli_2_names = [pauli_2.name] if isinstance(pauli_2.name, str) else pauli_2.name
pauli_1_placeholder = 0
pauli_2_placeholder = 0
phase = 1
for wire in wire_map.keys():
if wire in pauli_1.wires:
pauli_1_op_name = pauli_1_names[pauli_1_placeholder]
pauli_1_placeholder += 1
else:
pauli_1_op_name = "Identity"
if wire in pauli_2.wires:
pauli_2_op_name = pauli_2_names[pauli_2_placeholder]
pauli_2_placeholder += 1
else:
pauli_2_op_name = "Identity"
# If we have identities anywhere we don't pick up a phase
if pauli_1_op_name == "Identity" or pauli_2_op_name == "Identity":
continue
# Likewise, no additional phase if the Paulis are the same
if pauli_1_op_name == pauli_2_op_name:
continue
# Use Pauli commutation rules to determine the phase
pauli_ordering = (pauli_1_op_name, pauli_2_op_name)
pos_phases = [("PauliX", "PauliY"), ("PauliY", "PauliZ"), ("PauliZ", "PauliX")]
if pauli_ordering in pos_phases:
phase *= 1j
else:
phase *= -1j
return pauli_product, phase
|
20,524 |
def get_parser():
# initialize default param
param_default = Param()
# Initialize the parser
parser = argparse.ArgumentParser(
description="Utility function for label image.",
formatter_class=SmartFormatter,
add_help=None,
prog=os.path.basename(__file__).strip(".py")
)
mandatory = parser.add_argument_group("\nMANDATORY ARGUMENTS")
mandatory.add_argument(
'-i',
metavar=Metavar.file,
required=True,
help="Input image. Example: tw_labels.nii.gz"
)
optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit."
)
optional.add_argument(
'-add',
metavar=Metavar.int,
type=int,
help="Add value to all labels. Value can be negative."
)
optional.add_argument(
'-create',
metavar=Metavar.list,
type=list_type(':', Coordinate),
help=("Create labels in a new image. List labels as: x1,y1,z1,value1:x2,y2,z2,value2."
"Example: 12,34,32,1:12,35,33,2")
)
optional.add_argument(
'-create-add',
metavar=Metavar.list,
type=list_type(':', Coordinate),
help=("Same as '-create', but add labels to the input image instead of creating a new image."
"Example: 12,34,32,1:12,35,33,2")
)
optional.add_argument(
'-create-seg',
metavar=Metavar.list,
type=list_type(':', str),
help=("R|Create labels along cord segmentation (or centerline) defined by '-i'. First value is 'z', second is "
"the value of the label. Separate labels with ':'. Example: 5,1:14,2:23,3. \n"
"To select the mid-point in the superior-inferior direction, set z to '-1'. For example if you know that "
"C2-C3 disc is centered in the S-I direction, then enter: -1,3")
)
optional.add_argument(
'-create-viewer',
metavar=Metavar.list,
type=list_type(',', int),
help="Manually label from a GUI a list of labels IDs, separated with ','. Example: 2,3,4,5"
)
optional.add_argument(
'-ilabel',
metavar=Metavar.file,
help="File that contain labels that you want to correct. It is possible to add new points with this option. "
"Use with -create-viewer. Example: t2_labels_auto.nii.gz"
)
optional.add_argument(
'-cubic-to-point',
action="store_true",
help="Compute the center-of-mass for each label value."
)
optional.add_argument(
'-display',
action="store_true",
help="Display all labels (i.e. non-zero values)."
)
optional.add_argument(
'-increment',
action="store_true",
help=("Takes all non-zero values, sort them along the inverse z direction, and attributes the values "
"1, 2, 3, etc.")
)
optional.add_argument(
'-vert-body',
metavar=Metavar.list,
type=list_type(',', int),
help=("R|From vertebral labeling, create points that are centered at the mid-vertebral levels. Separate "
"desired levels with ','. Example: 3,8\n"
"To get all levels, enter '0'.")
)
optional.add_argument(
'-vert-continuous',
action="store_true",
help="Convert discrete vertebral labeling to continuous vertebral labeling.",
)
optional.add_argument(
'-MSE',
metavar=Metavar.file,
help="Compute Mean Square Error between labels from input and reference image. Specify reference image here."
)
optional.add_argument(
'-remove-reference',
metavar=Metavar.file,
help="Remove labels from input image (-i) that are not in reference image (specified here)."
)
optional.add_argument(
'-remove-sym',
metavar=Metavar.file,
help=("Remove labels from input image (-i) and reference image (specified here) that don't match. You must "
"provide two output names separated by ','.")
)
optional.add_argument(
'-remove',
metavar=Metavar.list,
type=list_type(',', int),
help="Remove labels of specific value (specified here) from reference image."
)
optional.add_argument(
'-keep',
metavar=Metavar.list,
type=list_type(',', int),
help="Keep labels of specific value (specified here) from reference image."
)
optional.add_argument(
'-msg',
metavar=Metavar.str,
help="Display a message to explain the labeling task. Use with -create-viewer"
)
optional.add_argument(
'-o',
metavar=Metavar.list,
type=list_type(',', str),
default="labels.nii.gz",
help="Output image(s). t2_labels_cross.nii.gz"
)
optional.add_argument(
'-v',
choices=['0', '1', '2'],
default=param_default.verbose,
help="Verbose. 0: nothing. 1: basic. 2: extended."
)
optional.add_argument(
'-qc',
metavar=Metavar.folder,
action=ActionCreateFolder,
help="The path where the quality control generated content will be saved."
)
optional.add_argument(
'-qc-dataset',
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the dataset the process was run on."
)
optional.add_argument(
'-qc-subject',
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the subject the process was run on."
)
return parser
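
# Minimal usage sketch for get_parser, assuming a hypothetical input file name.
# It parses a '-display' invocation and inspects the resulting namespace.
def _example_get_parser():
    parser = get_parser()
    args = parser.parse_args(['-i', 'tw_labels.nii.gz', '-display'])
    print(args.i, args.display, args.o)  # tw_labels.nii.gz True labels.nii.gz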
|
def get_parser():
# initialize default param
param_default = Param()
# Initialize the parser
parser = argparse.ArgumentParser(
description="Utility function for label images.",
formatter_class=SmartFormatter,
add_help=None,
prog=os.path.basename(__file__).strip(".py")
)
mandatory = parser.add_argument_group("\nMANDATORY ARGUMENTS")
mandatory.add_argument(
'-i',
metavar=Metavar.file,
required=True,
help="Input image. Example: tw_labels.nii.gz"
)
optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit."
)
optional.add_argument(
'-add',
metavar=Metavar.int,
type=int,
help="Add value to all labels. Value can be negative."
)
optional.add_argument(
'-create',
metavar=Metavar.list,
type=list_type(':', Coordinate),
help=("Create labels in a new image. List labels as: x1,y1,z1,value1:x2,y2,z2,value2."
"Example: 12,34,32,1:12,35,33,2")
)
optional.add_argument(
'-create-add',
metavar=Metavar.list,
type=list_type(':', Coordinate),
help=("Same as '-create', but add labels to the input image instead of creating a new image."
"Example: 12,34,32,1:12,35,33,2")
)
optional.add_argument(
'-create-seg',
metavar=Metavar.list,
type=list_type(':', str),
help=("R|Create labels along cord segmentation (or centerline) defined by '-i'. First value is 'z', second is "
"the value of the label. Separate labels with ':'. Example: 5,1:14,2:23,3. \n"
"To select the mid-point in the superior-inferior direction, set z to '-1'. For example if you know that "
"C2-C3 disc is centered in the S-I direction, then enter: -1,3")
)
optional.add_argument(
'-create-viewer',
metavar=Metavar.list,
type=list_type(',', int),
help="Manually label from a GUI a list of labels IDs, separated with ','. Example: 2,3,4,5"
)
optional.add_argument(
'-ilabel',
metavar=Metavar.file,
help="File that contain labels that you want to correct. It is possible to add new points with this option. "
"Use with -create-viewer. Example: t2_labels_auto.nii.gz"
)
optional.add_argument(
'-cubic-to-point',
action="store_true",
help="Compute the center-of-mass for each label value."
)
optional.add_argument(
'-display',
action="store_true",
help="Display all labels (i.e. non-zero values)."
)
optional.add_argument(
'-increment',
action="store_true",
help=("Takes all non-zero values, sort them along the inverse z direction, and attributes the values "
"1, 2, 3, etc.")
)
optional.add_argument(
'-vert-body',
metavar=Metavar.list,
type=list_type(',', int),
help=("R|From vertebral labeling, create points that are centered at the mid-vertebral levels. Separate "
"desired levels with ','. Example: 3,8\n"
"To get all levels, enter '0'.")
)
optional.add_argument(
'-vert-continuous',
action="store_true",
help="Convert discrete vertebral labeling to continuous vertebral labeling.",
)
optional.add_argument(
'-MSE',
metavar=Metavar.file,
help="Compute Mean Square Error between labels from input and reference image. Specify reference image here."
)
optional.add_argument(
'-remove-reference',
metavar=Metavar.file,
help="Remove labels from input image (-i) that are not in reference image (specified here)."
)
optional.add_argument(
'-remove-sym',
metavar=Metavar.file,
help=("Remove labels from input image (-i) and reference image (specified here) that don't match. You must "
"provide two output names separated by ','.")
)
optional.add_argument(
'-remove',
metavar=Metavar.list,
type=list_type(',', int),
help="Remove labels of specific value (specified here) from reference image."
)
optional.add_argument(
'-keep',
metavar=Metavar.list,
type=list_type(',', int),
help="Keep labels of specific value (specified here) from reference image."
)
optional.add_argument(
'-msg',
metavar=Metavar.str,
help="Display a message to explain the labeling task. Use with -create-viewer"
)
optional.add_argument(
'-o',
metavar=Metavar.list,
type=list_type(',', str),
default="labels.nii.gz",
help="Output image(s). t2_labels_cross.nii.gz"
)
optional.add_argument(
'-v',
choices=['0', '1', '2'],
default=param_default.verbose,
help="Verbose. 0: nothing. 1: basic. 2: extended."
)
optional.add_argument(
'-qc',
metavar=Metavar.folder,
action=ActionCreateFolder,
help="The path where the quality control generated content will be saved."
)
optional.add_argument(
'-qc-dataset',
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the dataset the process was run on."
)
optional.add_argument(
'-qc-subject',
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the subject the process was run on."
)
return parser
|
48,790 |
def chain(*tasks: Union[BaseOperator, "XComArg", Sequence[BaseOperator], Sequence["XComArg"]]):
r"""
Given a number of tasks, builds a dependency chain.
Support mix airflow.models.BaseOperator, List[airflow.models.BaseOperator], XComArg, and
List[airflow.models.XComArg]. If you want to chain between two List[airflow.models.BaseOperator]
or List[airflow.models.XComArg], you have to make sure they have same length.
.. code-block:: python
chain(t1, [t2, t3], [t4, t5], t6)
is equivalent to::
/ -> t2 -> t4 \
t1 -> t6
\ -> t3 -> t5 /
.. code-block:: python
t1.set_downstream(t2)
t1.set_downstream(t3)
t2.set_downstream(t4)
t3.set_downstream(t5)
t4.set_downstream(t6)
t5.set_downstream(t6)
:param tasks: List of tasks, List[airflow.models.BaseOperator], XComArg, or List[airflow.models.XComArg]
to set dependencies
:type tasks: List[airflow.models.BaseOperator], airflow.models.BaseOperator, List[airflow.models.XComArg],
or XComArg
"""
from airflow.models.xcom_arg import XComArg
for index, up_task in enumerate(tasks[:-1]):
down_task = tasks[index + 1]
if isinstance(up_task, (BaseOperator, XComArg)):
up_task.set_downstream(down_task)
continue
if isinstance(down_task, (BaseOperator, XComArg)):
down_task.set_upstream(up_task)
continue
if not isinstance(up_task, Sequence) or not isinstance(down_task, Sequence):
raise TypeError(
"Chain not supported between instances of {up_type} and {down_type}".format(
up_type=type(up_task), down_type=type(down_task)
)
)
up_task_list = up_task
down_task_list = down_task
if len(up_task_list) != len(down_task_list):
raise AirflowException(
f"Chain not supported different length Iterable "
f"but get {len(up_task_list)} and {len(down_task_list)}"
)
for up_t, down_t in zip(up_task_list, down_task_list):
up_t.set_downstream(down_t)
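
# Minimal usage sketch for chain. The DAG parameters and the DummyOperator
# import path are assumptions (any BaseOperator subclass works); the sketch
# reproduces the diamond-shaped dependency graph from the docstring above.
def _example_chain():
    from datetime import datetime
    from airflow import DAG
    from airflow.operators.dummy import DummyOperator
    with DAG('chain_example', start_date=datetime(2021, 1, 1), schedule_interval=None):
        t1, t2, t3, t4, t5, t6 = (DummyOperator(task_id=f't{i}') for i in range(1, 7))
        chain(t1, [t2, t3], [t4, t5], t6)
        print(sorted(t1.downstream_task_ids))  # ['t2', 't3']
        print(sorted(t6.upstream_task_ids))    # ['t4', 't5']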
|
def chain(*tasks: Union[BaseOperator, "XComArg", Sequence[BaseOperator], Sequence["XComArg"]]):
r"""
Given a number of tasks, builds a dependency chain.
Support mix airflow.models.BaseOperator, List[airflow.models.BaseOperator], XComArg, and
List[airflow.models.XComArg]. If you want to chain between two List[airflow.models.BaseOperator]
or List[airflow.models.XComArg], you have to make sure they have same length.
.. code-block:: python
chain(t1, [t2, t3], [t4, t5], t6)
is equivalent to::
/ -> t2 -> t4 \
t1 -> t6
\ -> t3 -> t5 /
.. code-block:: python
t1.set_downstream(t2)
t1.set_downstream(t3)
t2.set_downstream(t4)
t3.set_downstream(t5)
t4.set_downstream(t6)
t5.set_downstream(t6)
:param tasks: List of tasks, List[airflow.models.BaseOperator], XComArg, or List[airflow.models.XComArg]
to set dependencies
:type tasks: List[airflow.models.BaseOperator], airflow.models.BaseOperator, List[airflow.models.XComArg],
or XComArg
"""
from airflow.models.xcom_arg import XComArg
for index, up_task in enumerate(tasks[:-1]):
down_task = tasks[index + 1]
if isinstance(up_task, (BaseOperator, XComArg)):
up_task.set_downstream(down_task)
continue
if isinstance(down_task, (BaseOperator, XComArg)):
down_task.set_upstream(up_task)
continue
if not isinstance(up_task, Sequence) or not isinstance(down_task, Sequence):
raise TypeError(
"Chain not supported between instances of {up_type} and {down_type}".format(
up_type=type(up_task), down_type=type(down_task)
)
)
up_task_list = up_task
down_task_list = down_task
if len(up_task_list) != len(down_task_list):
raise AirflowException(
                f'Chain not supported for Iterables of different lengths, '
                f'got {len(up_task_list)} and {len(down_task_list)}'
)
for up_t, down_t in zip(up_task_list, down_task_list):
up_t.set_downstream(down_t)
|
39,189 |
def kenlm_lexicon_decoder(
lexicon: str,
tokens: Union[str, List[str]],
kenlm: str,
nbest: int = 1,
beam_size: int = 50,
beam_size_token: Optional[int] = None,
beam_threshold: float = 50,
lm_weight: float = 2,
word_score: float = 0,
unk_score: float = float("-inf"),
sil_score: float = 0,
log_add: bool = False,
blank_token: str = "-",
sil_token: str = "|",
unk_word: str = "<unk>",
) -> LexiconDecoder:
"""
Builds Ken LM CTC Lexicon Decoder with given parameters
Args:
lexicon (str): lexicon file containing the possible words and corresponding spellings.
Each line consists of a word and its space separated spelling
tokens (str or List[str]): file or list containing valid tokens. If using a file, the expected
format is for tokens mapping to the same index to be on the same line
        kenlm (str): file containing language model, or empty string if not using a language model
nbest (int, optional): number of best decodings to return (Default: 1)
beam_size (int, optional): max number of hypos to hold after each decode step (Default: 50)
beam_size_token (int, optional): max number of tokens to consider at each decode step.
If None, it is set to the total number of tokens (Default: None)
beam_threshold (float, optional): threshold for pruning hypothesis (Default: 50)
lm_weight (float, optional): weight of language model (Default: 2)
word_score (float, optional): word insertion score (Default: 0)
unk_score (float, optional): unknown word insertion score (Default: -inf)
sil_score (float, optional): silence insertion score (Default: 0)
log_add (bool, optional): whether or not to use logadd when merging hypotheses (Default: False)
blank_token (str, optional): token corresponding to blank (Default: "-")
sil_token (str, optional): token corresponding to silence (Default: "|")
unk_word (str, optional): word corresponding to unknown (Default: "<unk>")
Returns:
LexiconDecoder: decoder
Example
>>> decoder = kenlm_lexicon_decoder(
>>> lexicon="lexicon.txt",
>>> tokens="tokens.txt",
>>> kenlm="kenlm.bin",
>>> )
>>> results = decoder(emissions) # List of shape (B, nbest) of Hypotheses
"""
lexicon = _load_words(lexicon)
word_dict = _create_word_dict(lexicon)
lm = _KenLM(kenlm, word_dict) if kenlm != "" else _ZeroLM()
tokens_dict = _Dictionary(tokens)
decoder_options = _LexiconDecoderOptions(
beam_size=beam_size,
beam_size_token=beam_size_token or tokens_dict.index_size(),
beam_threshold=beam_threshold,
lm_weight=lm_weight,
word_score=word_score,
unk_score=unk_score,
sil_score=sil_score,
log_add=log_add,
criterion_type=_CriterionType.CTC,
)
return LexiconDecoder(
nbest=nbest,
lexicon=lexicon,
word_dict=word_dict,
tokens_dict=tokens_dict,
lm=lm,
decoder_options=decoder_options,
blank_token=blank_token,
sil_token=sil_token,
unk_word=unk_word,
)
|
def kenlm_lexicon_decoder(
lexicon: str,
tokens: Union[str, List[str]],
kenlm: str,
nbest: int = 1,
beam_size: int = 50,
beam_size_token: Optional[int] = None,
beam_threshold: float = 50,
lm_weight: float = 2,
word_score: float = 0,
unk_score: float = float("-inf"),
sil_score: float = 0,
log_add: bool = False,
blank_token: str = "-",
sil_token: str = "|",
unk_word: str = "<unk>",
) -> LexiconDecoder:
"""
Builds Ken LM CTC Lexicon Decoder with given parameters
Args:
lexicon (str): lexicon file containing the possible words and corresponding spellings.
Each line consists of a word and its space separated spelling
tokens (str or List[str]): file or list containing valid tokens. If using a file, the expected
format is for tokens mapping to the same index to be on the same line
kenlm (str): file containing language model, or empty string if not using a language model
nbest (int, optional): number of best decodings to return (Default: 1)
beam_size (int, optional): max number of hypos to hold after each decode step (Default: 50)
beam_size_token (int, optional): max number of tokens to consider at each decode step.
If None, it is set to the total number of tokens (Default: None)
beam_threshold (float, optional): threshold for pruning hypothesis (Default: 50)
lm_weight (float, optional): weight of language model (Default: 2)
word_score (float, optional): word insertion score (Default: 0)
unk_score (float, optional): unknown word insertion score (Default: -inf)
sil_score (float, optional): silence insertion score (Default: 0)
log_add (bool, optional): whether or not to use logadd when merging hypotheses (Default: False)
blank_token (str, optional): token corresponding to blank (Default: "-")
sil_token (str, optional): token corresponding to silence (Default: "|")
unk_word (str, optional): word corresponding to unknown (Default: "<unk>")
Returns:
LexiconDecoder: decoder
Example
>>> decoder = kenlm_lexicon_decoder(
>>> lexicon="lexicon.txt",
>>> tokens="tokens.txt",
>>> kenlm="kenlm.bin",
>>> )
>>> results = decoder(emissions) # List of shape (B, nbest) of Hypotheses
"""
lexicon = _load_words(lexicon)
word_dict = _create_word_dict(lexicon)
lm = _KenLM(kenlm, word_dict) if kenlm != "" else _ZeroLM()
tokens_dict = _Dictionary(tokens)
decoder_options = _LexiconDecoderOptions(
beam_size=beam_size,
beam_size_token=beam_size_token or tokens_dict.index_size(),
beam_threshold=beam_threshold,
lm_weight=lm_weight,
word_score=word_score,
unk_score=unk_score,
sil_score=sil_score,
log_add=log_add,
criterion_type=_CriterionType.CTC,
)
return LexiconDecoder(
nbest=nbest,
lexicon=lexicon,
word_dict=word_dict,
tokens_dict=tokens_dict,
lm=lm,
decoder_options=decoder_options,
blank_token=blank_token,
sil_token=sil_token,
unk_word=unk_word,
)
|
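A hedged usage sketch for the decoder factory above: the file paths are placeholders, the emission shape (batch, frames, num_tokens) and the Hypothesis fields `words`/`score` are assumptions drawn from the docstring, and the random tensor stands in for real CTC emissions.

# Hedged sketch only; paths are placeholders and the emissions are random.
import torch

decoder = kenlm_lexicon_decoder(
    lexicon="lexicon.txt",   # placeholder lexicon file
    tokens="tokens.txt",     # placeholder tokens file
    kenlm="kenlm.bin",       # placeholder KenLM binary
    nbest=3,
)
emissions = torch.randn(1, 100, 32)  # assumed shape: (batch, frames, num_tokens)
hypotheses = decoder(emissions)      # List of shape (B, nbest) of Hypotheses
best = hypotheses[0][0]
print(" ".join(best.words), best.score)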
22,747 |
def _build_snap(target, archs, status, lock):
status[target] = {arch: '...' for arch in archs}
if target == 'certbot':
workspace = CERTBOT_DIR
else:
workspace = join(CERTBOT_DIR, target)
subprocess.check_output(
('"{0}" tools/strip_hashes.py letsencrypt-auto-source/pieces/dependency-requirements.txt '
'| grep -v python-augeas > "{1}/snap-constraints.txt"').format(sys.executable, workspace),
shell=True, cwd=CERTBOT_DIR)
retry = 3
while retry:
exit_code, process_output = _execute_build(target, archs, status, workspace)
print(f'Build {target} for {",".join(archs)} (attempt {4-retry}/3) ended with exit code {exit_code}.')
sys.stdout.flush()
with lock:
failed_archs = [arch for arch in archs if status[target][arch] == 'Failed to build']
if exit_code == 0 and not failed_archs:
# We expect to have all target snaps available, or something bad happened.
snaps_list = glob.glob(join(workspace, '*.snap'))
if not len(snaps_list) == len(archs):
print(f'Some of the expected snaps for a successful build are missing (current list: {snaps_list}).')
print('Dumping snapcraft remote-build output:')
print('\n'.join(process_output))
else:
break
if failed_archs:
# We expect each failed build to have a build output, or something bad happened.
missing_outputs = False
for arch in failed_archs:
if not exists(join(workspace, f'{target}_{arch}.txt')):
missing_outputs = True
print(f'Missing output on a failed build {target} for {arch}.')
if missing_outputs:
print('Dumping snapcraft remote-build output:')
print('\n'.join(process_output))
# Retry the remote build if it has been interrupted (non zero status code) or if some builds have failed.
retry = retry - 1
return {target: workspace}
|
def _build_snap(target, archs, status, lock):
status[target] = {arch: '...' for arch in archs}
if target == 'certbot':
workspace = CERTBOT_DIR
else:
workspace = join(CERTBOT_DIR, target)
subprocess.check_output(
('"{0}" tools/strip_hashes.py letsencrypt-auto-source/pieces/dependency-requirements.txt '
'| grep -v python-augeas > "{1}/snap-constraints.txt"').format(sys.executable, workspace),
shell=True, cwd=CERTBOT_DIR)
retry = 3
while retry:
exit_code, process_output = _execute_build(target, archs, status, workspace)
print(f'Build {target} for {",".join(archs)} (attempt {4-retry}/3) ended with exit code {exit_code}.')
sys.stdout.flush()
with lock:
failed_archs = [arch for arch in archs if status[target][arch] == 'Failed to build']
if exit_code == 0 and not failed_archs:
# We expect to have all target snaps available, or something bad happened.
snaps_list = glob.glob(join(workspace, '*.snap'))
if not len(snaps_list) == len(archs):
print(f'Some of the expected snaps for a successful build are missing (current list: {snaps_list}).')
print('Dumping snapcraft remote-build output:')
print('\n'.join(process_output))
else:
break
if failed_archs:
# We expect each failed build to have a log file, or something bad happened.
missing_outputs = False
for arch in failed_archs:
if not exists(join(workspace, f'{target}_{arch}.txt')):
missing_outputs = True
print(f'Missing output on a failed build {target} for {arch}.')
if missing_outputs:
print('Dumping snapcraft remote-build output:')
print('\n'.join(process_output))
# Retry the remote build if it has been interrupted (non zero status code) or if some builds have failed.
retry = retry - 1
return {target: workspace}
|
13,147 |
def test_merge_request_get(project):
mr = project.mergerequests.list()[0]
mr_iid = mr.iid
mr = project.mergerequests.get(mr_iid)
assert mr.iid == mr_iid
# Make sure a 'lazy' get works
mr = project.mergerequests.get(mr_iid, lazy=True)
assert mr.iid == mr_iid
|
def test_merge_request_get_lazy_returns_mr_iid(project):
mr = project.mergerequests.list()[0]
mr_iid = mr.iid
mr = project.mergerequests.get(mr_iid)
assert mr.iid == mr_iid
# Make sure a 'lazy' get works
mr = project.mergerequests.get(mr_iid, lazy=True)
assert mr.iid == mr_iid
|
57,158 |
def send_mail(
sender_email: str, recipient_email: str, subject: str,
plaintext_body: str, html_body: str, bcc_admin: Optional[bool] = False
) -> None:
"""Sends an email.
In general this function should only be called from
email_manager._send_email().
Args:
sender_email: str. The email address of the sender. This should be in
the form 'SENDER_NAME <SENDER_EMAIL_ADDRESS>' or
'SENDER_EMAIL_ADDRESS'. Format must be utf-8.
recipient_email: str. The email address of the recipient. Format must
be utf-8.
subject: str. The subject line of the email. Format must be utf-8.
plaintext_body: str. The plaintext body of the email. Format must be
utf-8.
html_body: str. The HTML body of the email. Must fit in a datastore
entity. Format must be utf-8.
bcc_admin: bool. Whether to bcc feconf.ADMIN_EMAIL_ADDRESS on the email.
Raises:
Exception. The configuration in feconf.py forbids emails from being
sent.
ValueError. Any recipient email address is malformed.
ValueError. Any sender email address is malformed.
Exception. The email was not sent correctly. In other words, the
send_email_to_recipients() function returned False
(signifying API returned bad status code).
"""
if not feconf.CAN_SEND_EMAILS:
raise Exception('This app cannot send emails to users.')
if not _is_email_valid(recipient_email):
raise ValueError(
'Malformed recipient email address: %s' % recipient_email)
if not _is_sender_email_valid(sender_email):
raise ValueError(
'Malformed sender email address: %s' % sender_email)
bcc = [feconf.ADMIN_EMAIL_ADDRESS] if bcc_admin else None
response = email_services.send_email_to_recipients(
sender_email, [recipient_email], subject,
plaintext_body, html_body, bcc, '', None)
if not response:
raise Exception((
'Email to %s failed to send. Please try again later or ' +
'contact us to report a bug at ' +
'https://www.oppia.org/contact.') % recipient_email)
|
def send_mail(
sender_email: str,
recipient_email: str,
subject: str,
plaintext_body: str,
html_body: str,
bcc_admin: Optional[bool] = False
) -> None:
"""Sends an email.
In general this function should only be called from
email_manager._send_email().
Args:
sender_email: str. The email address of the sender. This should be in
the form 'SENDER_NAME <SENDER_EMAIL_ADDRESS>' or
'SENDER_EMAIL_ADDRESS'. Format must be utf-8.
recipient_email: str. The email address of the recipient. Format must
be utf-8.
subject: str. The subject line of the email. Format must be utf-8.
plaintext_body: str. The plaintext body of the email. Format must be
utf-8.
html_body: str. The HTML body of the email. Must fit in a datastore
entity. Format must be utf-8.
bcc_admin: bool. Whether to bcc feconf.ADMIN_EMAIL_ADDRESS on the email.
Raises:
Exception. The configuration in feconf.py forbids emails from being
sent.
ValueError. Any recipient email address is malformed.
ValueError. Any sender email address is malformed.
Exception. The email was not sent correctly. In other words, the
send_email_to_recipients() function returned False
(signifying API returned bad status code).
"""
if not feconf.CAN_SEND_EMAILS:
raise Exception('This app cannot send emails to users.')
if not _is_email_valid(recipient_email):
raise ValueError(
'Malformed recipient email address: %s' % recipient_email)
if not _is_sender_email_valid(sender_email):
raise ValueError(
'Malformed sender email address: %s' % sender_email)
bcc = [feconf.ADMIN_EMAIL_ADDRESS] if bcc_admin else None
response = email_services.send_email_to_recipients(
sender_email, [recipient_email], subject,
plaintext_body, html_body, bcc, '', None)
if not response:
raise Exception((
'Email to %s failed to send. Please try again later or ' +
'contact us to report a bug at ' +
'https://www.oppia.org/contact.') % recipient_email)
|
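A minimal call-site sketch for send_mail, assuming feconf.CAN_SEND_EMAILS is enabled in the deployment; the addresses, subject and bodies are placeholders and only exercise the signature documented above.

# Hypothetical call site; addresses, subject and bodies are placeholders.
send_mail(
    sender_email='Site Admin <noreply@example.com>',
    recipient_email='user@example.com',
    subject='Welcome!',
    plaintext_body='Hello and welcome.',
    html_body='<p>Hello and welcome.</p>',
    bcc_admin=False,
)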
20,295 |
def detect_cpu_family(compilers: CompilersDict) -> str:
"""
Python is inconsistent in its platform module.
It returns different values for the same cpu.
For x86 it might return 'x86', 'i686' or somesuch.
Do some canonicalization.
"""
if mesonlib.is_windows():
trial = detect_windows_arch(compilers)
elif mesonlib.is_freebsd() or mesonlib.is_netbsd() or mesonlib.is_openbsd() or mesonlib.is_qnx() or mesonlib.is_aix():
trial = platform.processor().lower()
else:
trial = platform.machine().lower()
if trial.startswith('i') and trial.endswith('86'):
trial = 'x86'
elif trial == 'bepc':
trial = 'x86'
elif trial == 'arm64':
trial = 'aarch64'
elif trial.startswith('arm') or trial.startswith('earm'):
trial = 'arm'
elif trial.startswith(('powerpc64', 'ppc64')):
trial = 'ppc64'
elif trial.startswith(('powerpc', 'ppc')) or trial in {'macppc', 'power macintosh'}:
trial = 'ppc'
elif trial in ('amd64', 'x64', 'i86pc'):
trial = 'x86_64'
elif trial in {'sun4u', 'sun4v'}:
trial = 'sparc64'
elif trial.startswith('mips'):
if not '64' in trial:
trial = 'mips'
else:
trial = 'mips64'
elif trial in {'ip30', 'ip35'}:
trial = 'mips64'
# On Linux (and maybe others) there can be any mixture of 32/64 bit code in
# the kernel, Python, system, 32-bit chroot on 64-bit host, etc. The only
# reliable way to know is to check the compiler defines.
if trial == 'x86_64':
if any_compiler_has_define(compilers, '__i386__'):
trial = 'x86'
elif trial == 'aarch64':
if any_compiler_has_define(compilers, '__arm__'):
trial = 'arm'
# Add more quirks here as bugs are reported. Keep in sync with detect_cpu()
# below.
elif trial == 'parisc64':
# ATM there is no 64 bit userland for PA-RISC. Thus always
# report it as 32 bit for simplicity.
trial = 'parisc'
elif trial == 'ppc':
# AIX always returns powerpc, check here for 64-bit
if any_compiler_has_define(compilers, '__64BIT__'):
trial = 'ppc64'
if trial not in known_cpu_families:
mlog.warning('Unknown CPU family {!r}, please report this at '
'https://github.com/mesonbuild/meson/issues/new with the '
'output of `uname -a` and `cat /proc/cpuinfo`'.format(trial))
return trial
|
def detect_cpu_family(compilers: CompilersDict) -> str:
"""
Python is inconsistent in its platform module.
It returns different values for the same cpu.
For x86 it might return 'x86', 'i686' or somesuch.
Do some canonicalization.
"""
if mesonlib.is_windows():
trial = detect_windows_arch(compilers)
elif mesonlib.is_freebsd() or mesonlib.is_netbsd() or mesonlib.is_openbsd() or mesonlib.is_qnx() or mesonlib.is_aix():
trial = platform.processor().lower()
else:
trial = platform.machine().lower()
if trial.startswith('i') and trial.endswith('86'):
trial = 'x86'
elif trial == 'bepc':
trial = 'x86'
elif trial == 'arm64':
trial = 'aarch64'
elif trial.startswith('arm') or trial.startswith('earm'):
trial = 'arm'
elif trial.startswith(('powerpc64', 'ppc64')):
trial = 'ppc64'
elif trial.startswith(('powerpc', 'ppc')) or trial in {'macppc', 'power macintosh'}:
trial = 'ppc'
elif trial in ('amd64', 'x64', 'i86pc'):
trial = 'x86_64'
elif trial in {'sun4u', 'sun4v'}:
trial = 'sparc64'
elif trial.startswith('mips'):
if '64' not in trial:
trial = 'mips'
else:
trial = 'mips64'
elif trial in {'ip30', 'ip35'}:
trial = 'mips64'
# On Linux (and maybe others) there can be any mixture of 32/64 bit code in
# the kernel, Python, system, 32-bit chroot on 64-bit host, etc. The only
# reliable way to know is to check the compiler defines.
if trial == 'x86_64':
if any_compiler_has_define(compilers, '__i386__'):
trial = 'x86'
elif trial == 'aarch64':
if any_compiler_has_define(compilers, '__arm__'):
trial = 'arm'
# Add more quirks here as bugs are reported. Keep in sync with detect_cpu()
# below.
elif trial == 'parisc64':
# ATM there is no 64 bit userland for PA-RISC. Thus always
# report it as 32 bit for simplicity.
trial = 'parisc'
elif trial == 'ppc':
# AIX always returns powerpc, check here for 64-bit
if any_compiler_has_define(compilers, '__64BIT__'):
trial = 'ppc64'
if trial not in known_cpu_families:
mlog.warning('Unknown CPU family {!r}, please report this at '
'https://github.com/mesonbuild/meson/issues/new with the '
'output of `uname -a` and `cat /proc/cpuinfo`'.format(trial))
return trial
|
8,772 |
def is_triggerable(obj):
"""Check if ``obj`` can handle the bot's triggers.
:param obj: any :term:`function` to check.
:return: ``True`` if ``obj`` can handle the bot's triggers.
A triggerable is a callable that will be used by the bot to handle a
particular trigger (i.e. an IRC message): it can be a regex rule, an event,
an intent, a command, or a nickname command. However, it must not be a job
or a URL callback.
.. seealso::
The :mod:`sopel.module` defines decorators to make a function a
triggerable object.
"""
forbidden = (
'interval',
'url_regex',
)
must_not = not any(hasattr(obj, attr) for attr in forbidden)
allowed = (
'rule',
'event',
'intents',
'commands',
'nickname_commands',
)
return must_not and any(hasattr(obj, attr) for attr in allowed)
|
def is_triggerable(obj):
"""Check if ``obj`` can handle the bot's triggers.
:param obj: any :term:`function` to check.
:return: ``True`` if ``obj`` can handle the bot's triggers.
A triggerable is a callable that will be used by the bot to handle a
particular trigger (i.e. an IRC message): it can be a regex rule, an event,
an intent, a command, or a nickname command. However, it must not be a job
or a URL callback.
.. seealso::
The :mod:`sopel.module` defines decorators to make a function a
triggerable object.
"""
forbidden = (
'interval',
'url_regex',
)
must_not = not any(hasattr(obj, attr) for attr in forbidden)
allowed = (
'rule',
'event',
'intents',
'commands',
'nickname_commands',
)
return must_not and any(hasattr(obj, attr) for attr in allowed)
|
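A small sketch of what is_triggerable accepts and rejects, assuming the sopel.module decorators mentioned in the docstring attach the corresponding attributes (commands, interval) to the decorated functions.

# Hedged example; assumes sopel.module decorators set these attributes.
import sopel.module

@sopel.module.commands('hello')
def say_hello(bot, trigger):
    bot.say('Hello!')

@sopel.module.interval(60)
def periodic_job(bot):
    pass

assert is_triggerable(say_hello)         # has 'commands', so it handles triggers
assert not is_triggerable(periodic_job)  # has 'interval', so it is a job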
58,838 |
def _exec_fft(a, direction, value_type, norm, axis, overwrite_x,
out_size=None, out=None, plan=None):
fft_type = _convert_fft_type(a.dtype, value_type)
if axis % a.ndim != a.ndim - 1:
a = a.swapaxes(axis, -1)
if a.base is not None or not a.flags.c_contiguous:
a = a.copy()
elif (value_type == 'C2R' and not overwrite_x and
10010 <= cupy.cuda.runtime.runtimeGetVersion()):
# The input array may be modified in CUDA 10.1 and above.
# See #3763 for the discussion.
a = a.copy()
elif cupy.cuda.runtime.is_hip and value_type != 'C2C':
# hipFFT's R2C would overwrite input
# hipFFT's C2R needs a workaround (see below)
a = a.copy()
n = a.shape[-1]
if n < 1:
raise ValueError(
'Invalid number of FFT data points (%d) specified.' % n)
# Workaround for hipFFT/rocFFT:
# Both cuFFT and hipFFT/rocFFT have this requirement that 0-th and
# N/2-th element must be real, but cuFFT internally simply ignores it
# while hipFFT handles it badly in both Plan1d and PlanNd, so we must
# do the correction ourselves to ensure the condition is met.
if cupy.cuda.runtime.is_hip and value_type == 'C2R':
a[..., 0].imag = 0
if out_size is None:
a[..., -1].imag = 0
elif out_size % 2 == 0:
a[..., out_size // 2].imag = 0
if out_size is None:
out_size = n
batch = a.size // n
# plan search precedence:
# 1. plan passed in as an argument
# 2. plan as context manager
# 3. cached plan
# 4. create a new one
curr_plan = cufft.get_current_plan()
if curr_plan is not None:
if plan is None:
plan = curr_plan
else:
raise RuntimeError('Use the cuFFT plan either as a context manager'
' or as an argument.')
if plan is None:
devices = None if not config.use_multi_gpus else config._devices
# TODO(leofang): do we need to add the current stream to keys?
keys = (out_size, fft_type, batch, devices)
mgr = config.get_current_callback_manager()
if mgr is not None:
# to avoid a weird segfault, we generate and cache distinct plans
# for every possible (load_aux, store_aux) pairs; the plans are
# still generated from the same external Python module
load_aux = mgr.cb_load_aux_arr
store_aux = mgr.cb_store_aux_arr
keys += (mgr.cb_load, mgr.cb_store,
0 if load_aux is None else load_aux.data.ptr,
0 if store_aux is None else store_aux.data.ptr)
cache = get_plan_cache()
cached_plan = cache.get(keys)
if cached_plan is not None:
plan = cached_plan
elif mgr is None:
plan = cufft.Plan1d(out_size, fft_type, batch, devices=devices)
cache[keys] = plan
else: # has callback
# TODO(leofang): support multi-GPU callback (devices is ignored)
if devices:
raise NotImplementedError('multi-GPU cuFFT callbacks are not '
'yet supported')
plan = mgr.create_plan(('Plan1d', keys[:-5]))
mgr.set_callbacks(plan)
cache[keys] = plan
else:
# check plan validity
if not isinstance(plan, cufft.Plan1d):
raise ValueError('expected plan to have type cufft.Plan1d')
if fft_type != plan.fft_type:
raise ValueError('cuFFT plan dtype mismatch.')
if out_size != plan.nx:
raise ValueError('Target array size does not match the plan.',
out_size, plan.nx)
if batch != plan.batch:
raise ValueError('Batch size does not match the plan.')
if config.use_multi_gpus != (plan.gpus is not None):
raise ValueError('Unclear if multiple GPUs are to be used or not.')
if overwrite_x and value_type == 'C2C':
out = a
elif out is not None:
# verify that out has the expected shape and dtype
plan.check_output_array(a, out)
else:
out = plan.get_output_array(a)
if batch != 0:
plan.fft(a, out, direction)
sz = out.shape[-1]
if fft_type == cufft.CUFFT_R2C or fft_type == cufft.CUFFT_D2Z:
sz = n
if norm == 'backward':
if direction == cufft.CUFFT_INVERSE:
out /= sz
elif norm == 'ortho':
out /= math.sqrt(sz)
elif norm == 'forward':
if direction == cufft.CUFFT_FORWARD:
out /= sz
if axis % a.ndim != a.ndim - 1:
out = out.swapaxes(axis, -1)
return out
|
def _exec_fft(a, direction, value_type, norm, axis, overwrite_x,
out_size=None, out=None, plan=None):
fft_type = _convert_fft_type(a.dtype, value_type)
if axis % a.ndim != a.ndim - 1:
a = a.swapaxes(axis, -1)
if a.base is not None or not a.flags.c_contiguous:
a = a.copy()
elif (value_type == 'C2R' and not overwrite_x and
10010 <= cupy.cuda.runtime.runtimeGetVersion()):
# The input array may be modified in CUDA 10.1 and above.
# See #3763 for the discussion.
a = a.copy()
elif cupy.cuda.runtime.is_hip and value_type != 'C2C':
# hipFFT's R2C would overwrite input
# hipFFT's C2R needs a workaround (see below)
a = a.copy()
n = a.shape[-1]
if n < 1:
raise ValueError(
'Invalid number of FFT data points (%d) specified.' % n)
# Workaround for hipFFT/rocFFT:
# Both cuFFT and hipFFT/rocFFT have this requirement that 0-th and
# N/2-th element must be real, but cuFFT internally simply ignores it
# while hipFFT handles it badly in both Plan1d and PlanNd, so we must
# do the correction ourselves to ensure the condition is met.
if cupy.cuda.runtime.is_hip and value_type == 'C2R':
a[..., 0].imag = 0
if out_size is None:
a[..., -1].imag = 0
elif out_size % 2 == 0:
a[..., out_size // 2].imag = 0
if out_size is None:
out_size = n
batch = a.size // n
# plan search precedence:
# 1. plan passed in as an argument
# 2. plan as context manager
# 3. cached plan
# 4. create a new one
curr_plan = cufft.get_current_plan()
if curr_plan is not None:
if plan is None:
plan = curr_plan
else:
raise RuntimeError('Use the cuFFT plan either as a context manager'
' or as an argument.')
if plan is None:
devices = None if not config.use_multi_gpus else config._devices
# TODO(leofang): do we need to add the current stream to keys?
keys = (out_size, fft_type, batch, devices)
mgr = config.get_current_callback_manager()
if mgr is not None:
# to avoid a weird segfault, we generate and cache distinct plans
# for every possible (load_aux, store_aux) pairs; the plans are
# still generated from the same external Python module
load_aux = mgr.cb_load_aux_arr
store_aux = mgr.cb_store_aux_arr
keys += (mgr.cb_load, mgr.cb_store,
0 if load_aux is None else load_aux.data.ptr,
0 if store_aux is None else store_aux.data.ptr)
cache = get_plan_cache()
cached_plan = cache.get(keys)
if cached_plan is not None:
plan = cached_plan
elif mgr is None:
plan = cufft.Plan1d(out_size, fft_type, batch, devices=devices)
cache[keys] = plan
else: # has callback
# TODO(leofang): support multi-GPU callback (devices is ignored)
if devices:
raise NotImplementedError('multi-GPU cuFFT callbacks are not '
'yet supported')
plan = mgr.create_plan(('Plan1d', keys[:-5]))
mgr.set_callbacks(plan)
cache[keys] = plan
else:
# check plan validity
if not isinstance(plan, cufft.Plan1d):
raise ValueError('expected plan to have type cufft.Plan1d')
if fft_type != plan.fft_type:
raise ValueError('cuFFT plan dtype mismatch.')
if out_size != plan.nx:
raise ValueError('Target array size does not match the plan.',
out_size, plan.nx)
if batch != plan.batch:
raise ValueError('Batch size does not match the plan.')
if config.use_multi_gpus != (plan.gpus is not None):
raise ValueError('Unclear if multiple GPUs are to be used or not.')
if overwrite_x and value_type == 'C2C':
out = a
elif out is not None:
# verify that out has the expected shape and dtype
plan.check_output_array(a, out)
else:
out = plan.get_output_array(a)
if batch != 0:
plan.fft(a, out, direction)
sz = out.shape[-1]
if fft_type == cufft.CUFFT_R2C or fft_type == cufft.CUFFT_D2Z:
sz = n
if norm == 'backward':
if direction == cufft.CUFFT_INVERSE:
out /= sz
elif norm == 'ortho':
out /= math.sqrt(sz)
elif norm == 'forward' and direction == cufft.CUFFT_FORWARD:
out /= sz
if axis % a.ndim != a.ndim - 1:
out = out.swapaxes(axis, -1)
return out
|
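The plan-precedence comments above (explicit argument, context manager, cache, new plan) can be illustrated from CuPy's public API; the exact module paths below are assumptions and may vary between CuPy versions.

# Hedged illustration of passing a cuFFT plan explicitly vs. as a context manager.
import cupy
from cupyx.scipy import fftpack as cufftpack

a = cupy.random.random(1024).astype(cupy.complex64)
plan = cufftpack.get_fft_plan(a)       # Plan1d for this shape/dtype

out_arg = cufftpack.fft(a, plan=plan)  # 1. plan passed in as an argument
with plan:                             # 2. plan picked up as a context manager
    out_ctx = cupy.fft.fft(a)
assert cupy.allclose(out_arg, out_ctx)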
22,096 |
def rec_iter(
filenames: List[str],
sensor: Optional[str],
ignore_rules: Dict[str, Dict[str, List[Tuple[int, int]]]],
) -> Generator[Record, None, None]:
ignorenets = ignore_rules.get("IGNORENETS", {})
neverignore = ignore_rules.get("NEVERIGNORE", {})
for fname in filenames:
with P0fFile(fname) as fdesc:
for line in fdesc:
if not line:
continue
if "mod" not in line:
LOGGER.warning("no mod detected [%r]", line)
continue
if line["mod"] not in ["syn", "syn+ack"]:
continue
if "subj" not in line or line["subj"] not in line:
LOGGER.warning("no subj detected [%r]", line)
continue
if "raw_sig" not in line:
LOGGER.warning("no raw_sig detected [%r]", line)
continue
infos = {}
if "os" in line and line["os"] != "???":
infos["os"] = line["os"]
if "dist" in line:
infos["dist"] = line["dist"]
if "params" in line and line["params"].lower() != "none":
infos["params"] = line["params"]
host = line[line["subj"]].split("/")[0]
srvport = int(line["srv"].split("/")[1])
for rec in handle_rec(
# sensor
sensor,
# ignorenets,
ignorenets,
# neverignore,
neverignore,
# timestamp
timestamp=line["ts"],
# uid
uid=None,
# host
host=host,
# srvport
srvport=srvport,
# recon_type
recon_type="P0FV3_%s" % line["mod"].upper(),
# source
source="P0FV3",
# value
value=line["raw_sig"],
# targetval
targetval=None,
):
rec[1]["infos"] = infos
yield rec
|
def rec_iter(
filenames: List[str],
sensor: Optional[str],
ignore_rules: Dict[str, Dict[str, List[Tuple[int, int]]]],
) -> Generator[Record, None, None]:
ignorenets = ignore_rules.get("IGNORENETS", {})
neverignore = ignore_rules.get("NEVERIGNORE", {})
for fname in filenames:
with P0fFile(fname) as fdesc:
for line in fdesc:
if not line:
continue
if "mod" not in line:
LOGGER.warning("no mod detected [%r]", line)
continue
if line["mod"] not in ["syn", "syn+ack"]:
continue
if "subj" not in line or line["subj"] not in line:
LOGGER.warning("no subj detected [%r]", line)
continue
if "raw_sig" not in line:
LOGGER.warning("no raw_sig detected [%r]", line)
continue
infos = {}
if "os" in line and line["os"] != "???":
infos["os"] = line["os"]
if "dist" in line:
infos["dist"] = line["dist"]
if "params" in line and line["params"].lower() != "none":
infos["params"] = line["params"]
host = line[line["subj"]].split("/")[0]
srvport = int(line["srv"].split("/")[1])
for rec in handle_rec(
# sensor
sensor,
# ignorenets,
ignorenets,
# neverignore,
neverignore,
# timestamp
timestamp=line["ts"],
# uid
uid=None,
# host
host=host,
# srvport
srvport=srvport,
# recon_type
recon_type="P0FV3_%s" % line["mod"].upper(),
# source
source="P0FV3",
# value
value=line["raw_sig"],
# targetval
targetval=None,
):
rec[1]["infos"] = infos
yield rec
|
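A hedged driver sketch for rec_iter; the p0f log path and sensor name are placeholders, and P0fFile/handle_rec are assumed to come from the same module as the generator above.

# Hypothetical caller; the log path and sensor name are placeholders.
ignore_rules = {"IGNORENETS": {}, "NEVERIGNORE": {}}
for rec in rec_iter(["/var/log/p0f/p0f.log"], sensor="SENSOR1", ignore_rules=ignore_rules):
    print(rec)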
31,186 |
def main() -> None:
params = demisto.params()
args = demisto.args()
hostname = params.get('hostname')
api_id = params.get('api_id')
api_key = params.get('api_key')
handle_proxy()
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
commands = {
'cloudshare-get-envs': get_envs_command,
'cloudshare-get-projects': get_projects_command,
'cloudshare-get-project': get_project_command,
'cloudshare-get-project-policies': get_project_policies_command,
'cloudshare-get-project-blueprints': get_project_blueprints_command,
'cloudshare-get-project-blueprint': get_project_blueprint_command,
'cloudshare-get-classes': get_classes_command,
'cloudshare-get-class': get_class_command,
'cloudshare-delete-class': delete_class_command,
'cloudshare-delete-class-environemtns': delete_class_environments_command, # This is here for maintaining BC
'cloudshare-delete-class-environments': delete_class_environments_command,
'cloudshare-get-classes-countries': get_classes_countries_command,
'cloudshare-get-classes-customfields': get_classes_customfields_command,
'cloudshare-get-classes-detailed': get_classes_detailed_command,
'cloudshare-get-classes-instructors': get_classes_instructors_command,
'cloudshare-create-class': create_class_command,
'cloudshare-send-class-invitations': send_class_invitations_command,
'cloudshare-suspend-class-environments': suspend_class_environments_command,
'cloudshare-modify-class': modify_class_command,
'cloudshare-get-students': get_students_command,
'cloudshare-get-student': get_student_command,
'cloudshare-delete-student': delete_student_command,
'cloudshare-register-student': register_student_command,
'cloudshare-modify-student': modify_student_command,
'cloudshare-get-regions': get_regions_command,
'cloudshare-get-timezones': get_timezones_command,
'cloudshare-get-env-resource': get_env_resources_command,
'cloudshare-get-env-extended': get_env_extended_command,
'cloudshare-get-env-extended-vanity': get_env_extended_vanity_command,
'cloudshare-get-env-extended-token': get_env_extended_token_command,
'cloudshare-get-env-multiple-resources': get_env_multiple_resources_command,
'cloudshare-extend-env': extend_env_command,
'cloudshare-postpone-env-suspend': postpone_env_suspend_command,
'cloudshare-resume-env': resume_env_command,
'cloudshare-revert-env': revert_env_command,
'cloudshare-suspend-env': suspend_env_command,
'cloudshare-get-env': get_env_command,
'cloudshare-delete-env': delete_env_command,
'cloudshare-create-env': create_env_command,
'cloudshare-modify-env': modify_env_command,
'cloudshare-delete-vm': delete_vm_command,
'cloudshare-check-vm-execution-status': vm_check_execution_status_command,
'cloudshare-get-vm-remote-access-file': vm_get_remote_command,
'cloudshare-execute-vm-command': vm_execute_command,
'cloudshare-modify-vm-hardware': vm_modify_hardware_command,
'cloudshare-reboot-vm': reboot_vm_command,
'cloudshare-revert-vm': revert_vm_command,
'cloudshare-get-cloud-folders': get_cloud_folders_command,
'cloudshare-get-env-cloud-folders': get_env_cloud_folders_command,
'cloudshare-generate-cloud-folder-password': generate_password_folder_command,
'cloudshare-unmount-env-folders': unmount_env_folders_command,
'cloudshare-get-templates': get_templates_command,
'cloudshare-get-snapshot': get_snapshot_command,
'cloudshare-get-env-snapshots': get_env_snapshots_command,
'cloudshare-mark-default-snapshot': mark_default_snapshot_command,
'cloudshare-take-snapshot-env': take_snapshot_env_command,
'cloudshare-get-teams': get_teams_command,
'cloudshare-invite-user-poc': invite_user_poc_command,
'cloudshare-get-poc-invitations': get_poc_invitations_command
}
client = Client(
hostname,
api_id=api_id,
api_key=api_key
)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
test_module_command(client, args)
else:
commands[command](client, args)
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
def main() -> None:
params = demisto.params()
args = demisto.args()
hostname = params.get('hostname')
api_id = params.get('api_id')
api_key = params.get('api_key')
handle_proxy()
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
commands = {
'cloudshare-get-envs': get_envs_command,
'cloudshare-get-projects': get_projects_command,
'cloudshare-get-project': get_project_command,
'cloudshare-get-project-policies': get_project_policies_command,
'cloudshare-get-project-blueprints': get_project_blueprints_command,
'cloudshare-get-project-blueprint': get_project_blueprint_command,
'cloudshare-get-classes': get_classes_command,
'cloudshare-get-class': get_class_command,
'cloudshare-delete-class': delete_class_command,
'cloudshare-delete-class-environemtns': delete_class_environments_command, # This is here for maintaining BC
'cloudshare-delete-class-environments': delete_class_environments_command,
'cloudshare-get-classes-countries': get_classes_countries_command,
'cloudshare-get-classes-customfields': get_classes_customfields_command,
'cloudshare-get-classes-detailed': get_classes_detailed_command,
'cloudshare-get-classes-instructors': get_classes_instructors_command,
'cloudshare-create-class': create_class_command,
'cloudshare-send-class-invitations': send_class_invitations_command,
'cloudshare-suspend-class-environments': suspend_class_environments_command,
'cloudshare-modify-class': modify_class_command,
'cloudshare-get-students': get_students_command,
'cloudshare-get-student': get_students_command, # This is here for maintaining BC
'cloudshare-delete-student': delete_student_command,
'cloudshare-register-student': register_student_command,
'cloudshare-modify-student': modify_student_command,
'cloudshare-get-regions': get_regions_command,
'cloudshare-get-timezones': get_timezones_command,
'cloudshare-get-env-resource': get_env_resources_command,
'cloudshare-get-env-extended': get_env_extended_command,
'cloudshare-get-env-extended-vanity': get_env_extended_vanity_command,
'cloudshare-get-env-extended-token': get_env_extended_token_command,
'cloudshare-get-env-multiple-resources': get_env_multiple_resources_command,
'cloudshare-extend-env': extend_env_command,
'cloudshare-postpone-env-suspend': postpone_env_suspend_command,
'cloudshare-resume-env': resume_env_command,
'cloudshare-revert-env': revert_env_command,
'cloudshare-suspend-env': suspend_env_command,
'cloudshare-get-env': get_env_command,
'cloudshare-delete-env': delete_env_command,
'cloudshare-create-env': create_env_command,
'cloudshare-modify-env': modify_env_command,
'cloudshare-delete-vm': delete_vm_command,
'cloudshare-check-vm-execution-status': vm_check_execution_status_command,
'cloudshare-get-vm-remote-access-file': vm_get_remote_command,
'cloudshare-execute-vm-command': vm_execute_command,
'cloudshare-modify-vm-hardware': vm_modify_hardware_command,
'cloudshare-reboot-vm': reboot_vm_command,
'cloudshare-revert-vm': revert_vm_command,
'cloudshare-get-cloud-folders': get_cloud_folders_command,
'cloudshare-get-env-cloud-folders': get_env_cloud_folders_command,
'cloudshare-generate-cloud-folder-password': generate_password_folder_command,
'cloudshare-unmount-env-folders': unmount_env_folders_command,
'cloudshare-get-templates': get_templates_command,
'cloudshare-get-snapshot': get_snapshot_command,
'cloudshare-get-env-snapshots': get_env_snapshots_command,
'cloudshare-mark-default-snapshot': mark_default_snapshot_command,
'cloudshare-take-snapshot-env': take_snapshot_env_command,
'cloudshare-get-teams': get_teams_command,
'cloudshare-invite-user-poc': invite_user_poc_command,
'cloudshare-get-poc-invitations': get_poc_invitations_command
}
client = Client(
hostname,
api_id=api_id,
api_key=api_key
)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
test_module_command(client, args)
else:
commands[command](client, args)
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
55,162 |
def apply_to_measurement(func: Callable):
"""
Apply an arbitrary function to a `MeasurementValue` or set of `MeasurementValue`s.
(func should be a "pure" function)
Ex:
.. code-block:: python
m0 = qml.measure(0)
m0_sin = qml.apply_to_measurement(np.sin)(m0)
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
partial = MeasurementLeaf()
for arg in args:
if not isinstance(arg, MeasurementValue):
arg = MeasurementLeaf(arg)
partial = partial.merge(arg)
partial.transform_leaves_inplace(
lambda *unwrapped: func(*unwrapped, **kwargs) # pylint: disable=unnecessary-lambda
)
return partial
return wrapper
|
def apply_to_measurement(func: Callable):
"""
Apply an arbitrary function to a `MeasurementValue` or set of `MeasurementValue`s.
The applied function should be pure, i.e., it only depends on its input
values and doesn't have any side-effects.
Ex:
.. code-block:: python
m0 = qml.measure(0)
m0_sin = qml.apply_to_measurement(np.sin)(m0)
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
partial = MeasurementLeaf()
for arg in args:
if not isinstance(arg, MeasurementValue):
arg = MeasurementLeaf(arg)
partial = partial.merge(arg)
partial.transform_leaves_inplace(
lambda *unwrapped: func(*unwrapped, **kwargs) # pylint: disable=unnecessary-lambda
)
return partial
return wrapper
|
1,362 |
def plot_dendrogram(model, **kwargs):
# Create linkage matrix and then plot the dendrogram
# create the counts of samples in each node
counts = np.zeros(model.children_.shape[0])
n_samples = len(model.labels_)
for i, merge in enumerate(model.children_):
current_count = 0
for j in [0, 1]:
if merge[j] < n_samples:
current_count += 1
else:
current_count += counts[merge[j] - n_samples]
counts[i] = current_count
linkage_matrix = np.column_stack([model.children_, model.distances_,
counts]).astype(float)
# Plot the corresponding dendrogram
dendrogram(linkage_matrix, **kwargs)
|
def plot_dendrogram(model, **kwargs):
# Create linkage matrix and then plot the dendrogram
# create the counts of samples in each node
counts = np.zeros(model.children_.shape[0])
n_samples = len(model.labels_)
for i, merge in enumerate(model.children_):
current_count = 0
for j in [0, 1]:
if merge[j] < n_samples:
current_count += 1
else:
current_count += counts[merge[j] - n_samples]
counts[i] = current_count
linkage_matrix = np.column_stack([model.children_, model.distances_,
counts]).astype(float)
# Plot the corresponding dendrogram
dendrogram(linkage_matrix, **kwargs)
|
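plot_dendrogram assumes numpy and scipy.cluster.hierarchy.dendrogram are already imported in the surrounding module and that the model exposes children_, distances_ and labels_. A hedged end-to-end sketch in the style of the scikit-learn example it resembles:

# Hedged usage sketch; requires numpy, scipy, matplotlib and scikit-learn.
import numpy as np
from matplotlib import pyplot as plt
from scipy.cluster.hierarchy import dendrogram
from sklearn.cluster import AgglomerativeClustering
from sklearn.datasets import load_iris

X = load_iris().data
# distance_threshold=0 forces the full tree so that model.distances_ is populated.
model = AgglomerativeClustering(distance_threshold=0, n_clusters=None).fit(X)
plot_dendrogram(model, truncate_mode="level", p=3)
plt.show()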
30,690 |
def rasterize_image_command():
args = demisto.args()
entry_id = args.get('EntryID')
w = args.get('width', DEFAULT_W).rstrip('px')
h = args.get('height', DEFAULT_H).rstrip('px')
file_path = demisto.getFilePath(entry_id).get('path')
filename = 'document.pdf'
with open(file_path, 'rb') as f, open('output_image', 'w') as image:
data = base64.b64encode(f.read()).decode('utf-8')
image.write(data)
output = rasterize(path=f'file://{os.path.realpath(f.name)}', width=w, height=h, r_type='pdf')
res = fileResult(filename=filename, data=output)
res['Type'] = entryTypes['image']
demisto.results(res)
|
def rasterize_image_command():
args = demisto.args()
entry_id = args.get('EntryID')
w = args.get('width', DEFAULT_W).rstrip('px')
h = args.get('height', DEFAULT_H).rstrip('px')
file_path = demisto.getFilePath(entry_id).get('path')
filename = 'entry_id.pdf'
with open(file_path, 'rb') as f, open('output_image', 'w') as image:
data = base64.b64encode(f.read()).decode('utf-8')
image.write(data)
output = rasterize(path=f'file://{os.path.realpath(f.name)}', width=w, height=h, r_type='pdf')
res = fileResult(filename=filename, data=output)
res['Type'] = entryTypes['image']
demisto.results(res)
|
3,914 |
def graph_str(graph, with_labels=True, sources=None, write=None, ascii_only=False):
"""
Creates a nice text representation of a graph
This works via a depth-first traversal the graph and writing a line for
each unique node encountered. Non-tree edges are written to the right of
each node, and connection to a non-tree edge is indicated with an ellipsis.
This representation works best when the input graph is a forest, but any
graph can be represented.
Parameters
----------
graph : nx.DiGraph | nx.Graph
Graph to represent
with_labels : bool
If True will use the "label" attribute of a node to display if it
exists otherwise it will use the node value itself. Defaults to True.
sources : List
Specifies which nodes to start traversal from. Note: nodes that are not
reachable from one of these sources may not be shown. If unspecified,
the minimal set of nodes needed to reach all others will be used.
write : callable
Function to use to write to, if None new lines are appended to
a list and returned. If set to the `print` function, lines will
be written to stdout as they are generated. If specified,
this function will return None. Defaults to None.
ascii_only : Boolean
If True only ASCII characters are used to construct the visualization
Returns
-------
str | None :
text representation of the graph
Example
-------
>>> graph = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph)
>>> print(nx.graph_str(graph))
╙── 0
├─╼ 1
│ ├─╼ 3
│ └─╼ 4
└─╼ 2
├─╼ 5
└─╼ 6
>>> # A near tree with one non-tree edge
>>> graph.add_edge(5, 1)
>>> print(nx.graph_str(graph))
╙── 0
├─╼ 1 ╾ 5
│ ├─╼ 3
│ └─╼ 4
└─╼ 2
├─╼ 5
│ └─╼ ...
└─╼ 6
>>> graph = nx.cycle_graph(5)
>>> print(nx.graph_str(graph))
╙── 0
├── 1
│ └── 2
│ └── 3
│ └── 4 ─ 0
└── ...
>>> graph = nx.generators.barbell_graph(4, 2)
>>> print(nx.graph_str(graph))
╙── 4
├── 5
│ └── 6
│ ├── 7
│ │ ├── 8 ─ 6
│ │ │ └── 9 ─ 6, 7
│ │ └── ...
│ └── ...
└── 3
├── 0
│ ├── 1 ─ 3
│ │ └── 2 ─ 0, 3
│ └── ...
└── ...
>>> graph = nx.complete_graph(5, create_using=nx.Graph)
>>> print(nx.graph_str(graph))
╙── 0
├── 1
│ ├── 2 ─ 0
│ │ ├── 3 ─ 0, 1
│ │ │ └── 4 ─ 0, 1, 2
│ │ └── ...
│ └── ...
└── ...
>>> graph = nx.complete_graph(3, create_using=nx.DiGraph)
>>> print(nx.graph_str(graph))
╙── 0 ╾ 1, 2
├─╼ 1 ╾ 2
│ ├─╼ 2 ╾ 0
│ │ └─╼ ...
│ └─╼ ...
└─╼ ...
"""
from collections import defaultdict
import networkx as nx
printbuf = []
if write is None:
_write = printbuf.append
else:
_write = write
# Define glyphs
# Notes on available box and arrow characters
# https://en.wikipedia.org/wiki/Box-drawing_character
# https://stackoverflow.com/questions/2701192/triangle-arrow
if ascii_only:
glyph_empty = "+"
glyph_newtree_last = "+-- "
glyph_newtree_mid = "+-- "
glyph_endof_forest = " "
glyph_within_forest = ": "
glyph_within_tree = "| "
glyph_directed_last = "L-> "
glyph_directed_mid = "|-> "
glyph_directed_backedge = "<-"
glyph_undirected_last = "L-- "
glyph_undirected_mid = "|-- "
glyph_undirected_backedge = "-"
else:
glyph_empty = "╙"
glyph_newtree_last = "╙── "
glyph_newtree_mid = "╟── "
glyph_endof_forest = " "
glyph_within_forest = "╎ "
glyph_within_tree = "│ "
glyph_directed_last = "└─╼ "
glyph_directed_mid = "├─╼ "
glyph_directed_backedge = "╾"
glyph_undirected_last = "└── "
glyph_undirected_mid = "├── "
glyph_undirected_backedge = "─"
if len(graph.nodes) == 0:
_write(glyph_empty)
else:
is_directed = graph.is_directed()
if is_directed:
glyph_last = glyph_directed_last
glyph_mid = glyph_directed_mid
glyph_backedge = glyph_directed_backedge
succ = graph.succ
pred = graph.pred
else:
glyph_last = glyph_undirected_last
glyph_mid = glyph_undirected_mid
glyph_backedge = glyph_undirected_backedge
succ = graph.adj
pred = graph.adj
if sources is None:
# For each connected part of the graph, choose at least
# one node as a starting point, preferably without a parent
if is_directed:
# Choose one node from each SCC with minimum in_degree
sccs = list(nx.strongly_connected_components(graph))
# condensing the SCCs forms a dag, the nodes in this graph with
# 0 in-degree correspond to the SCCs from which the minimum set
# of nodes from which all other nodes can be reached.
scc_graph = nx.condensation(graph, sccs)
supernode_to_nodes = {sn: [] for sn in scc_graph.nodes()}
# Note: the order of mapping differs between pypy and cpython
# so we have to loop over graph nodes for consistency
mapping = scc_graph.graph["mapping"]
for n in graph.nodes:
sn = mapping[n]
supernode_to_nodes[sn].append(n)
sources = []
for sn in scc_graph.nodes():
if scc_graph.in_degree[sn] == 0:
scc = supernode_to_nodes[sn]
node = min(scc, key=lambda n: graph.in_degree[n])
sources.append(node)
else:
# For undirected graph, the entire graph will be reachable as
# long as we consider one node from every connected component
sources = [
min(cc, key=lambda n: graph.degree[n])
for cc in nx.connected_components(graph)
]
sources = sorted(sources, key=lambda n: graph.degree[n])
# Populate the stack with each source node, empty indentation, and mark
# the final node. Reverse the stack so sources are popped in the
# correct order.
last_idx = len(sources) - 1
stack = [
(None, node, "", (idx == last_idx)) for idx, node in enumerate(sources)
][::-1]
num_skipped_children = defaultdict(lambda: 0)
seen_nodes = set()
while stack:
parent, node, indent, this_islast = stack.pop()
if node is not Ellipsis:
skip = node in seen_nodes
if skip:
# Mark that we skipped a parent's child
num_skipped_children[parent] += 1
if this_islast:
# If we reached the last child of a parent, and we skipped
# any of that parent's children, then we should emit an
# ellipsis at the end after this.
if num_skipped_children[parent] and parent is not None:
# Append the ellipsis to be emitted last
next_islast = True
try_frame = (node, Ellipsis, indent, next_islast)
stack.append(try_frame)
# Redo this frame, but not as a last object
next_islast = False
try_frame = (parent, node, indent, next_islast)
stack.append(try_frame)
continue
if skip:
continue
seen_nodes.add(node)
if not indent:
# Top level items (i.e. trees in the forest) get different
# glyphs to indicate they are not actually connected
if this_islast:
this_prefix = indent + glyph_newtree_last
next_prefix = indent + glyph_endof_forest
else:
this_prefix = indent + glyph_newtree_mid
next_prefix = indent + glyph_within_forest
else:
# For individual tree edges distinguish between directed and
# undirected cases
if this_islast:
this_prefix = indent + glyph_last
next_prefix = indent + glyph_endof_forest
else:
this_prefix = indent + glyph_mid
next_prefix = indent + glyph_within_tree
if node is Ellipsis:
label = " ..."
suffix = ""
children = []
else:
if with_labels:
label = str(graph.nodes[node].get("label", node))
else:
label = str(node)
# Determine:
# (1) children to traverse into after showing this node.
# (2) parents to immediately show to the right of this node.
if is_directed:
# In the directed case we must show every successor node
# note: it may be skipped later, but we don't have that
# information here.
children = list(succ[node])
# In the directed case we must show every predecessor
# except for parent we directly traversed from.
handled_parents = {parent}
else:
# Showing only the unseen children results in a more
# concise representation for the undirected case.
children = [
child for child in succ[node] if child not in seen_nodes
]
# In the undirected case, parents are also children, so we
# only need to immediately show the ones we can no longer
# traverse
handled_parents = {*children, parent}
# The other parents are other predecessors of this node that
# are not handled elsewhere.
other_parents = [p for p in pred[node] if p not in handled_parents]
if other_parents:
if with_labels:
other_parents_labels = ", ".join(
[str(graph.nodes[p].get("label", p)) for p in other_parents]
)
else:
other_parents_labels = ", ".join(
[str(p) for p in other_parents]
)
suffix = " ".join(["", glyph_backedge, other_parents_labels])
else:
suffix = ""
# Emit the line for this node, this will be called for each node
# exactly once.
_write(this_prefix + label + suffix)
# Push children on the stack in reverse order so they are popped in
# the original order.
for idx, child in enumerate(children[::-1]):
next_islast = idx == 0
try_frame = (node, child, next_prefix, next_islast)
stack.append(try_frame)
if write is None:
# Only return a string if the custom write function was not specified
return "\n".join(printbuf)
|
def graph_str(graph, with_labels=True, sources=None, write=None, ascii_only=False):
"""
Creates a nice text representation of a graph
This works via a depth-first traversal of the graph and writing a line for
each unique node encountered. Non-tree edges are written to the right of
each node, and connection to a non-tree edge is indicated with an ellipsis.
This representation works best when the input graph is a forest, but any
graph can be represented.
Parameters
----------
graph : nx.DiGraph | nx.Graph
Graph to represent
with_labels : bool
If True will use the "label" attribute of a node to display if it
exists otherwise it will use the node value itself. Defaults to True.
sources : List
Specifies which nodes to start traversal from. Note: nodes that are not
reachable from one of these sources may not be shown. If unspecified,
the minimal set of nodes needed to reach all others will be used.
write : callable
Function to use to write to, if None new lines are appended to
a list and returned. If set to the `print` function, lines will
be written to stdout as they are generated. If specified,
this function will return None. Defaults to None.
ascii_only : Boolean
If True only ASCII characters are used to construct the visualization
Returns
-------
str | None :
text representation of the graph
Example
-------
>>> graph = nx.balanced_tree(r=2, h=2, create_using=nx.DiGraph)
>>> print(nx.graph_str(graph))
╙── 0
├─╼ 1
│ ├─╼ 3
│ └─╼ 4
└─╼ 2
├─╼ 5
└─╼ 6
>>> # A near tree with one non-tree edge
>>> graph.add_edge(5, 1)
>>> print(nx.graph_str(graph))
╙── 0
├─╼ 1 ╾ 5
│ ├─╼ 3
│ └─╼ 4
└─╼ 2
├─╼ 5
│ └─╼ ...
└─╼ 6
>>> graph = nx.cycle_graph(5)
>>> print(nx.graph_str(graph))
╙── 0
├── 1
│ └── 2
│ └── 3
│ └── 4 ─ 0
└── ...
>>> graph = nx.generators.barbell_graph(4, 2)
>>> print(nx.graph_str(graph))
╙── 4
├── 5
│ └── 6
│ ├── 7
│ │ ├── 8 ─ 6
│ │ │ └── 9 ─ 6, 7
│ │ └── ...
│ └── ...
└── 3
├── 0
│ ├── 1 ─ 3
│ │ └── 2 ─ 0, 3
│ └── ...
└── ...
>>> graph = nx.complete_graph(5, create_using=nx.Graph)
>>> print(nx.graph_str(graph))
╙── 0
├── 1
│ ├── 2 ─ 0
│ │ ├── 3 ─ 0, 1
│ │ │ └── 4 ─ 0, 1, 2
│ │ └── ...
│ └── ...
└── ...
>>> graph = nx.complete_graph(3, create_using=nx.DiGraph)
>>> print(nx.graph_str(graph))
╙── 0 ╾ 1, 2
├─╼ 1 ╾ 2
│ ├─╼ 2 ╾ 0
│ │ └─╼ ...
│ └─╼ ...
└─╼ ...
"""
from collections import defaultdict
import networkx as nx
printbuf = []
if write is None:
_write = printbuf.append
else:
_write = write
# Define glyphs
# Notes on available box and arrow characters
# https://en.wikipedia.org/wiki/Box-drawing_character
# https://stackoverflow.com/questions/2701192/triangle-arrow
if ascii_only:
glyph_empty = "+"
glyph_newtree_last = "+-- "
glyph_newtree_mid = "+-- "
glyph_endof_forest = " "
glyph_within_forest = ": "
glyph_within_tree = "| "
glyph_directed_last = "L-> "
glyph_directed_mid = "|-> "
glyph_directed_backedge = "<-"
glyph_undirected_last = "L-- "
glyph_undirected_mid = "|-- "
glyph_undirected_backedge = "-"
else:
glyph_empty = "╙"
glyph_newtree_last = "╙── "
glyph_newtree_mid = "╟── "
glyph_endof_forest = " "
glyph_within_forest = "╎ "
glyph_within_tree = "│ "
glyph_directed_last = "└─╼ "
glyph_directed_mid = "├─╼ "
glyph_directed_backedge = "╾"
glyph_undirected_last = "└── "
glyph_undirected_mid = "├── "
glyph_undirected_backedge = "─"
if len(graph.nodes) == 0:
_write(glyph_empty)
else:
is_directed = graph.is_directed()
if is_directed:
glyph_last = glyph_directed_last
glyph_mid = glyph_directed_mid
glyph_backedge = glyph_directed_backedge
succ = graph.succ
pred = graph.pred
else:
glyph_last = glyph_undirected_last
glyph_mid = glyph_undirected_mid
glyph_backedge = glyph_undirected_backedge
succ = graph.adj
pred = graph.adj
if sources is None:
# For each connected part of the graph, choose at least
# one node as a starting point, preferably without a parent
if is_directed:
# Choose one node from each SCC with minimum in_degree
sccs = list(nx.strongly_connected_components(graph))
# condensing the SCCs forms a dag, the nodes in this graph with
# 0 in-degree correspond to the SCCs from which the minimum set
# of nodes from which all other nodes can be reached.
scc_graph = nx.condensation(graph, sccs)
supernode_to_nodes = {sn: [] for sn in scc_graph.nodes()}
# Note: the order of mapping differs between pypy and cpython
# so we have to loop over graph nodes for consistency
mapping = scc_graph.graph["mapping"]
for n in graph.nodes:
sn = mapping[n]
supernode_to_nodes[sn].append(n)
sources = []
for sn in scc_graph.nodes():
if scc_graph.in_degree[sn] == 0:
scc = supernode_to_nodes[sn]
node = min(scc, key=lambda n: graph.in_degree[n])
sources.append(node)
else:
# For undirected graph, the entire graph will be reachable as
# long as we consider one node from every connected component
sources = [
min(cc, key=lambda n: graph.degree[n])
for cc in nx.connected_components(graph)
]
sources = sorted(sources, key=lambda n: graph.degree[n])
# Populate the stack with each source node, empty indentation, and mark
# the final node. Reverse the stack so sources are popped in the
# correct order.
last_idx = len(sources) - 1
stack = [
(None, node, "", (idx == last_idx)) for idx, node in enumerate(sources)
][::-1]
num_skipped_children = defaultdict(lambda: 0)
seen_nodes = set()
while stack:
parent, node, indent, this_islast = stack.pop()
if node is not Ellipsis:
skip = node in seen_nodes
if skip:
# Mark that we skipped a parent's child
num_skipped_children[parent] += 1
if this_islast:
# If we reached the last child of a parent, and we skipped
# any of that parent's children, then we should emit an
# ellipsis at the end after this.
if num_skipped_children[parent] and parent is not None:
# Append the ellipsis to be emitted last
next_islast = True
try_frame = (node, Ellipsis, indent, next_islast)
stack.append(try_frame)
# Redo this frame, but not as a last object
next_islast = False
try_frame = (parent, node, indent, next_islast)
stack.append(try_frame)
continue
if skip:
continue
seen_nodes.add(node)
if not indent:
# Top level items (i.e. trees in the forest) get different
# glyphs to indicate they are not actually connected
if this_islast:
this_prefix = indent + glyph_newtree_last
next_prefix = indent + glyph_endof_forest
else:
this_prefix = indent + glyph_newtree_mid
next_prefix = indent + glyph_within_forest
else:
# For individual tree edges distinguish between directed and
# undirected cases
if this_islast:
this_prefix = indent + glyph_last
next_prefix = indent + glyph_endof_forest
else:
this_prefix = indent + glyph_mid
next_prefix = indent + glyph_within_tree
if node is Ellipsis:
label = " ..."
suffix = ""
children = []
else:
if with_labels:
label = str(graph.nodes[node].get("label", node))
else:
label = str(node)
# Determine:
# (1) children to traverse into after showing this node.
# (2) parents to immediately show to the right of this node.
if is_directed:
# In the directed case we must show every successor node
# note: it may be skipped later, but we don't have that
# information here.
children = list(succ[node])
# In the directed case we must show every predecessor
# except for parent we directly traversed from.
handled_parents = {parent}
else:
# Showing only the unseen children results in a more
# concise representation for the undirected case.
children = [
child for child in succ[node] if child not in seen_nodes
]
# In the undirected case, parents are also children, so we
# only need to immediately show the ones we can no longer
# traverse
handled_parents = {*children, parent}
# The other parents are other predecessors of this node that
# are not handled elsewhere.
other_parents = [p for p in pred[node] if p not in handled_parents]
if other_parents:
if with_labels:
other_parents_labels = ", ".join(
[str(graph.nodes[p].get("label", p)) for p in other_parents]
)
else:
other_parents_labels = ", ".join(
[str(p) for p in other_parents]
)
suffix = " ".join(["", glyph_backedge, other_parents_labels])
else:
suffix = ""
# Emit the line for this node, this will be called for each node
# exactly once.
_write(this_prefix + label + suffix)
# Push children on the stack in reverse order so they are popped in
# the original order.
for idx, child in enumerate(children[::-1]):
next_islast = idx == 0
try_frame = (node, child, next_prefix, next_islast)
stack.append(try_frame)
if write is None:
# Only return a string if the custom write function was not specified
return "\n".join(printbuf)
|
41,166 |
def _pad_setting(
max_setting: InitObsSetting,
qubits: List['cirq.Qid'],
pad_init_state_with=value.KET_ZERO,
pad_obs_with: 'cirq.Gate' = ops.Z,
) -> InitObsSetting:
"""Pad `max_setting`'s `init_state` and `observable` with `pad_xx_with` operations
(defaults: |0> and Z) so each max_setting has the same qubits. We need this
to be the case so we can fill in all the parameters, see `_get_params_for_setting`.
"""
obs = max_setting.observable
assert obs.coefficient == 1, "Only max_setting's should be padded."
for qubit in qubits:
if not qubit in obs:
obs *= pad_obs_with(qubit)
init_state = max_setting.init_state
init_state_original_qubits = init_state.qubits
for qubit in qubits:
if not qubit in init_state_original_qubits:
init_state *= pad_init_state_with(qubit)
return InitObsSetting(init_state=init_state, observable=obs)
|
def _pad_setting(
max_setting: InitObsSetting,
qubits: List['cirq.Qid'],
pad_init_state_with=value.KET_ZERO,
pad_obs_with: 'cirq.Gate' = ops.Z,
) -> InitObsSetting:
"""Pad `max_setting`'s `init_state` and `observable` with `pad_xx_with` operations
(defaults: |0> and Z) so each max_setting has the same qubits. We need this
to be the case so we can fill in all the parameters, see `_get_params_for_setting`.
"""
obs = max_setting.observable
assert obs.coefficient == 1, "Only the max_setting should be padded."
for qubit in qubits:
if not qubit in obs:
obs *= pad_obs_with(qubit)
init_state = max_setting.init_state
init_state_original_qubits = init_state.qubits
for qubit in qubits:
if not qubit in init_state_original_qubits:
init_state *= pad_init_state_with(qubit)
return InitObsSetting(init_state=init_state, observable=obs)
|
31,657 |
def main():
params = demisto.params()
args = demisto.args()
url = params.get('url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {}
mock_data = str(args.get('mock-data', ''))
if mock_data.lower() == "true":
headers['Mock-Data'] = "True"
headers['Authorization'] = f'Bearer {params["api_key"]}'
headers['Soar-Integration-Origin'] = "Cortex XSOAR"
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
requests.packages.urllib3.disable_warnings()
client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
commands = {
'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-case':
check_the_status_of_an_action_requested_on_a_case_command,
'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-threat':
check_the_status_of_an_action_requested_on_a_threat_command,
'abxcortexxsoar-get-a-list-of-abnormal-cases-identified-by-abnormal-security':
get_a_list_of_abnormal_cases_identified_by_abnormal_security_command,
'abxcortexxsoar-get-a-list-of-threats':
get_a_list_of_threats_command,
'abxcortexxsoar-get-details-of-a-threat':
get_details_of_a_threat_command,
'abxcortexxsoar-get-details-of-an-abnormal-case':
get_details_of_an_abnormal_case_command,
'abxcortexxsoar-get-the-latest-threat-intel-feed': get_the_latest_threat_intel_feed_command,
'abxcortexxsoar-manage-a-threat-identified-by-abnormal-security':
manage_a_threat_identified_by_abnormal_security_command,
'abxcortexxsoar-manage-an-abnormal-case':
manage_an_abnormal_case_command,
'abxcortexxsoar-submit-an-inquiry-to-request-a-report-on-misjudgement-by-abnormal-security':
submit_an_inquiry_to_request_a_report_on_misjudgement_by_abnormal_security_command,
}
if command == 'test-module':
headers['Mock-Data'] = "True"
test_client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
test_module(test_client)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(str(e))
|
def main():
params = demisto.params()
args = demisto.args()
url = params.get('url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {}
mock_data = str(args.get('mock-data', ''))
if mock_data.lower() == "true":
headers['Mock-Data'] = "True"
headers['Authorization'] = f'Bearer {params["api_key"]}'
headers['Soar-Integration-Origin'] = "Cortex XSOAR"
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
requests.packages.urllib3.disable_warnings()
client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
commands = {
'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-case':
check_the_status_of_an_action_requested_on_a_case_command,
'abxcortexxsoar-check-the-status-of-an-action-requested-on-a-threat':
check_the_status_of_an_action_requested_on_a_threat_command,
'abxcortexxsoar-get-a-list-of-abnormal-cases-identified-by-abnormal-security':
get_a_list_of_abnormal_cases_identified_by_abnormal_security_command,
'abxcortexxsoar-get-a-list-of-threats':
get_a_list_of_threats_command,
'abnormal-security-get-threat':
get_details_of_a_threat_command,
'abxcortexxsoar-get-details-of-an-abnormal-case':
get_details_of_an_abnormal_case_command,
'abxcortexxsoar-get-the-latest-threat-intel-feed': get_the_latest_threat_intel_feed_command,
'abxcortexxsoar-manage-a-threat-identified-by-abnormal-security':
manage_a_threat_identified_by_abnormal_security_command,
'abxcortexxsoar-manage-an-abnormal-case':
manage_an_abnormal_case_command,
'abxcortexxsoar-submit-an-inquiry-to-request-a-report-on-misjudgement-by-abnormal-security':
submit_an_inquiry_to_request_a_report_on_misjudgement_by_abnormal_security_command,
}
if command == 'test-module':
headers['Mock-Data'] = "True"
test_client = Client(urljoin(url, ''), verify_certificate, proxy, headers=headers, auth=None)
test_module(test_client)
elif command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(str(e))
|
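This pair renames a single dispatch key, from 'abxcortexxsoar-get-details-of-a-threat' to 'abnormal-security-get-threat'; the handler itself is unchanged. A hedged sketch of one way to keep the old name working during such a rename (an assumption for illustration, not what the integration actually does):
# Sketch (hypothetical handler): map both the new and the legacy command name to the
# same function so callers that still use the old name keep working.
def get_threat(client, args):
    return "threat details"

commands = {
    'abnormal-security-get-threat': get_threat,             # new name
    'abxcortexxsoar-get-details-of-a-threat': get_threat,   # legacy alias
}

assert commands['abnormal-security-get-threat'] is commands['abxcortexxsoar-get-details-of-a-threat']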
39,392 |
def test_pointset(pointset):
assert pointset.n_points == pointset.points.shape[0]
assert pointset.n_cells == 0
arr_name = 'arr'
pointset.point_data[arr_name] = np.random.random(10)
assert arr_name in pointset.point_data
# test that points can be modified
pointset.points[:] = 0
assert np.allclose(pointset.points, 0)
pointset.points = np.ones((10, 3))
assert np.allclose(pointset.points, 1)
filename = str(tmpdir.mkdir("tmpdir").join(f'tmp.xyz'))
pointset.save("")
|
def test_pointset(pointset):
assert pointset.n_points == pointset.points.shape[0]
assert pointset.n_cells == 0
arr_name = 'arr'
pointset.point_data[arr_name] = np.random.random(10)
assert arr_name in pointset.point_data
# test that points can be modified
pointset.points[:] = 0
assert np.allclose(pointset.points, 0)
pointset.points = np.ones((10, 3))
assert np.allclose(pointset.points, 1)
filename = str(tmpdir.mkdir("tmpdir").join(f'tmp.xyz'))
pointset.save("filename")
|
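The modified test replaces pointset.save("") with pointset.save("filename"), which still passes a literal string rather than the filename variable built on the previous line; the test also references a tmpdir fixture that is not in its signature, and the f'tmp.xyz' f-string has no placeholders. A hedged sketch of the save step using pytest's tmp_path fixture (assumes a pointset fixture exposing a save method):
# Sketch only: pass the constructed path, not the literal string "filename".
def test_pointset_save(pointset, tmp_path):
    filename = str(tmp_path / "tmp.xyz")
    pointset.save(filename)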
32,458 |
def reply_email_command(client: MsGraphClient, args):
"""
    Reply to an email from the user's mailbox; the sent message will appear in the Sent Items folder.
"""
email_to = argToList(args.get('to'))
email_from = args.get('from', client._mailbox_to_fetch)
message_id = args.get('inReplyTo')
reply_to = argToList(args.get('replyTo'))
email_body = args.get('body', "")
email_subject = args.get('subject', "")
email_subject = f'Re: {email_subject}'
attach_ids = argToList(args.get('attachIDs'))
email_cc = argToList(args.get('cc'))
email_bcc = argToList(args.get('bcc'))
html_body = args.get('htmlBody')
attach_names = argToList(args.get('attachNames'))
attach_cids = argToList(args.get('attachCIDs'))
message_body = html_body or email_body
reply = client.build_message_to_reply(email_to, email_cc, email_bcc, email_subject, message_body, attach_ids,
attach_names, attach_cids, reply_to)
less_than_3mb_attachments, more_than_3mb_attachments = divide_attachments_according_to_size(
attachments=reply.get('attachments')
)
if more_than_3mb_attachments:
reply['attachments'] = less_than_3mb_attachments
client.send_mail_with_upload_session_flow(
email=email_from,
json_data={'message': reply, 'comment': message_body},
attachments_more_than_3mb=more_than_3mb_attachments,
message_id=message_id
)
else:
client.send_reply(
email_from=email_from, message_id=message_id, json_data={'message': reply, 'comment': message_body}
)
return prepare_outputs_for_reply_mail_command(reply, email_to, message_id)
|
def reply_email_command(client: MsGraphClient, args):
"""
    Reply to an email from the user's mailbox; the sent message will appear in the Sent Items folder.
"""
email_to = argToList(args.get('to'))
email_from = args.get('from', client._mailbox_to_fetch)
message_id = args.get('inReplyTo')
reply_to = argToList(args.get('replyTo'))
email_body = args.get('body', "")
email_subject = args.get('subject', "")
email_subject = f'Re: {email_subject}'
attach_ids = argToList(args.get('attachIDs'))
email_cc = argToList(args.get('cc'))
email_bcc = argToList(args.get('bcc'))
html_body = args.get('htmlBody')
attach_names = argToList(args.get('attachNames'))
attach_cids = argToList(args.get('attachCIDs'))
message_body = html_body or email_body
reply = client.build_message_to_reply(email_to, email_cc, email_bcc, email_subject, message_body, attach_ids,
attach_names, attach_cids, reply_to)
less_than_3mb_attachments, more_than_3mb_attachments = divide_attachments_according_to_size(
attachments=reply.get('attachments')
)
if more_than_3mb_attachments:
reply['attachments'] = less_than_3mb_attachments
client.send_mail_with_upload_session_flow(
email=email_from,
json_data={'message': reply, 'comment': message_body},
attachments_more_than_3mb=more_than_3mb_attachments,
reply_to_message_id=message_id
)
else:
client.send_reply(
email_from=email_from, message_id=message_id, json_data={'message': reply, 'comment': message_body}
)
return prepare_outputs_for_reply_mail_command(reply, email_to, message_id)
|
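Here the call to send_mail_with_upload_session_flow switches the keyword from message_id= to reply_to_message_id=, which only works if the client method declares a parameter with that name. A small self-contained sketch of that call-site/signature agreement (the class and values below are hypothetical):
# Sketch: the keyword used at the call site must match the method's parameter name.
class FakeClient:
    def send_mail_with_upload_session_flow(self, email, json_data,
                                            attachments_more_than_3mb,
                                            reply_to_message_id=None):
        return {"email": email, "reply_to": reply_to_message_id}

resp = FakeClient().send_mail_with_upload_session_flow(
    email="user@example.com", json_data={}, attachments_more_than_3mb=[],
    reply_to_message_id="msg-123")
assert resp["reply_to"] == "msg-123"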
30,653 |
def find_ip_by_mac(client, args):
mac_address = args.get('mac')
search_query = "assets | where mac_address match " + mac_address
try:
title = ("%s - Results for the Search Query" % INTEGRATION_NAME)
raws = []
nozomi_ec = []
raw_response = client.query(search_query)['result']
if raw_response:
for item in raw_response:
if 'ip' in item and item['ip'][0] is not None and len(item['ip'][0].split('.')) == 4:
raws.append(item)
nozomi_ec.append({
'IP': item['ip'],
'MAC': mac_address
})
if not raws:
return "%s - Could not find any results for given query" % INTEGRATION_NAME
context_entry = {
"NozomiGuardian": {"Mappings": nozomi_ec}
}
human_readable = tableToMarkdown(t=context_entry['NozomiGuardian']['Mappings'], name=title)
return [human_readable, context_entry, raws]
return_error("Could not find the mac address: %s" % mac_address)
except Exception as e:
LOG(e)
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
def find_ip_by_mac(client, args):
mac_address = args.get('mac')
search_query = "assets | where mac_address match " + mac_address
try:
title = (f'{INTEGRATION_NAME} - Results for the Search Query')
raws = []
nozomi_ec = []
raw_response = client.query(search_query)['result']
if raw_response:
for item in raw_response:
if 'ip' in item and item['ip'][0] is not None and len(item['ip'][0].split('.')) == 4:
raws.append(item)
nozomi_ec.append({
'IP': item['ip'],
'MAC': mac_address
})
if not raws:
return "%s - Could not find any results for given query" % INTEGRATION_NAME
context_entry = {
"NozomiGuardian": {"Mappings": nozomi_ec}
}
human_readable = tableToMarkdown(t=context_entry['NozomiGuardian']['Mappings'], name=title)
return [human_readable, context_entry, raws]
return_error("Could not find the mac address: %s" % mac_address)
except Exception as e:
LOG(e)
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
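The only change in this pair is the title string, moved from %-formatting to an f-string. Separately, both versions treat any value with four dot-separated parts as an IP address; a stricter standard-library check is possible, shown here as a hypothetical helper rather than part of the integration:
# Sketch: validate IPv4 strings with the standard library instead of counting dots.
import ipaddress

def is_ipv4(value):
    try:
        ipaddress.IPv4Address(value)
        return True
    except ValueError:
        return False

assert is_ipv4("192.168.0.1")
assert not is_ipv4("300.1.1.1")   # octets must be in 0-255
assert not is_ipv4("a.b.c.d")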
46,763 |
def append_filename_template_legend(
text=None,
pdf_format=False,
):
if text is None:
return
md = MarkdownIt().disable("image").enable("table")
legend = """
- Filename entities or folders between square brackets
(for example, `[_ses-<label>]`) are OPTIONAL.
- Some entities may only allow specific values,
in which case those values are listed in `<>`, separated by `|`.
- `_<suffix>` means that there are several (>6) valid suffixes for this filename pattern.
- `.<extension>` means that there are several (>6) valid extensions for this file type.
- `[.gz]` means that both the unzipped and gzipped versions of the extension are valid.
"""
if pdf_format:
text += f"""
**Legend**:
{legend}
"""
else:
text += f"""
<details>
<summary><strong>Legend:</strong></summary>
<ul>
{md.render(legend)}
</ul>
</details>
"""
return text
|
def append_filename_template_legend(text, pdf_format=False):
md = MarkdownIt().disable("image").enable("table")
legend = """
- Filename entities or folders between square brackets
(for example, `[_ses-<label>]`) are OPTIONAL.
- Some entities may only allow specific values,
in which case those values are listed in `<>`, separated by `|`.
- `_<suffix>` means that there are several (>6) valid suffixes for this filename pattern.
- `.<extension>` means that there are several (>6) valid extensions for this file type.
- `[.gz]` means that both the unzipped and gzipped versions of the extension are valid.
"""
if pdf_format:
text += f"""
**Legend**:
{legend}
"""
else:
text += f"""
<details>
<summary><strong>Legend:</strong></summary>
<ul>
{md.render(legend)}
</ul>
</details>
"""
return text
|
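The modified version drops the text=None default and the early return, so text becomes a required argument; calling it with None would now raise a TypeError on the +=. The non-PDF branch renders the legend with markdown-it-py, roughly as in this sketch (assumes the markdown_it package is installed):
# Sketch: render a small markdown legend to HTML, as the non-PDF branch does.
from markdown_it import MarkdownIt

md = MarkdownIt().disable("image").enable("table")
html = md.render("- `[_ses-<label>]` entities are OPTIONAL.")
assert "<li>" in html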
117 |
def print_dump(json_records, filter=None, print=print):
"""Print the given json_records in the dump format."""
for i, raw_json_data in enumerate(json_records):
if i % 1_000_000 == 0:
log(f"{i:,}")
d = json.loads(raw_json_data)
d.pop("id", None)
d = _process_data(d)
key = web.safestr(d["key"])
# skip user and admin pages
if key.startswith(("/people/", "/admin/")):
continue
# skip obsolete pages. Obsolete pages include volumes, scan_records and users
# marked as spam.
if key.startswith(("/b/", "/scan", "/old/")) or not key.startswith("/"):
continue
if filter and not filter(d):
continue
type_key = d["type"]["key"]
timestamp = d["last_modified"]["value"]
json_data = json.dumps(d)
print("\t".join([type_key, key, str(d["revision"]), timestamp, json_data]))
|
def print_dump(json_records, filter=None, print=print):
"""Print the given json_records in the dump format."""
for i, raw_json_data in enumerate(json_records):
if i % 1_000_000 == 0:
log(f"{i:,}")
d = json.loads(raw_json_data)
d.pop("id", None)
d = _process_data(d)
key = web.safestr(d["key"])
# skip user and admin pages
if key.startswith(("/people/", "/admin/")):
continue
# skip obsolete pages. Obsolete pages include volumes, scan_records and users
# marked as spam.
if key.startswith(("/b/", "/scan", "/old/")) or not key.startswith("/"):
continue
if filter and filter(d) is False:
continue
type_key = d["type"]["key"]
timestamp = d["last_modified"]["value"]
json_data = json.dumps(d)
print("\t".join([type_key, key, str(d["revision"]), timestamp, json_data]))
|
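The filter check changes from "not filter(d)" to "filter(d) is False", which is a behavioural change: a filter that returns a falsy non-boolean value (such as None) used to cause the record to be skipped and now lets it through. A small illustration of the difference, with a made-up record:
# Sketch: only an explicit False skips a record under the new condition.
def returns_none(record):
    return None

record = {"key": "/works/OL1W"}
old_skip = not returns_none(record)        # True  -> record was skipped before
new_skip = returns_none(record) is False   # False -> record is kept now
assert old_skip is True and new_skip is False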
46,112 |
def save(panel, filename, title=None, resources=None, template=None,
template_variables=None, embed=False, max_states=1000,
max_opts=3, embed_json=False, json_prefix='', save_path='./',
load_path=None):
"""
Saves Panel objects to file.
Arguments
---------
panel: Viewable
The Panel Viewable to save to file
filename: string or file-like object
Filename to save the plot to
title: string
Optional title for the plot
resources: bokeh resources
One of the valid bokeh.resources (e.g. CDN or INLINE)
template:
template file, as used by bokeh.file_html. If None will use bokeh defaults
template_variables:
template_variables file dict, as used by bokeh.file_html
embed: bool
Whether the state space should be embedded in the saved file. If None use bokeh defaults
max_states: int
The maximum number of states to embed
max_opts: int
The maximum number of states for a single widget
    embed_json: boolean (default=False)
Whether to export the data to json files
json_prefix: str (default='')
      Prefix for the randomly generated json directory
save_path: str (default='./')
The path to save json files to
load_path: str (default=None)
The path or URL the json files will be loaded from.
"""
doc = Document()
comm = Comm()
with config.set(embed=embed):
model = panel.get_root(doc, comm)
if embed:
embed_state(panel, model, doc, max_states, max_opts,
embed_json, json_prefix, save_path, load_path)
else:
add_to_doc(model, doc, True)
if isinstance(filename, string_types):
if filename.endswith('png'):
save_png(model, filename=filename)
return
if not filename.endswith('.html'):
filename = filename + '.html'
kwargs = {}
if title is None:
title = 'Panel'
if resources is None:
resources = CDN
if template:
kwargs['template'] = template
if template_variables:
kwargs['template_variables'] = template_variables
html = file_html(doc, resources, title, **kwargs)
if hasattr(filename, 'write'):
html = decode_utf8(html)
if isinstance(filename, io.BytesIO):
html = html.encode('utf-8')
filename.write(html)
return
with io.open(filename, mode="w", encoding="utf-8") as f:
f.write(decode_utf8(html))
|
def save(panel, filename, title=None, resources=None, template=None,
template_variables=None, embed=False, max_states=1000,
max_opts=3, embed_json=False, json_prefix='', save_path='./',
load_path=None):
"""
Saves Panel objects to file.
Arguments
---------
panel: Viewable
The Panel Viewable to save to file
filename: string or file-like object
Filename to save the plot to
title: string
Optional title for the plot
resources: bokeh resources
One of the valid bokeh.resources (e.g. CDN or INLINE)
template:
template file, as used by bokeh.file_html. If None will use bokeh defaults
template_variables:
template_variables file dict, as used by bokeh.file_html
embed: bool
Whether the state space should be embedded in the saved file.
max_states: int
The maximum number of states to embed
max_opts: int
The maximum number of states for a single widget
    embed_json: boolean (default=False)
Whether to export the data to json files
json_prefix: str (default='')
      Prefix for the randomly generated json directory
save_path: str (default='./')
The path to save json files to
load_path: str (default=None)
The path or URL the json files will be loaded from.
"""
doc = Document()
comm = Comm()
with config.set(embed=embed):
model = panel.get_root(doc, comm)
if embed:
embed_state(panel, model, doc, max_states, max_opts,
embed_json, json_prefix, save_path, load_path)
else:
add_to_doc(model, doc, True)
if isinstance(filename, string_types):
if filename.endswith('png'):
save_png(model, filename=filename)
return
if not filename.endswith('.html'):
filename = filename + '.html'
kwargs = {}
if title is None:
title = 'Panel'
if resources is None:
resources = CDN
if template:
kwargs['template'] = template
if template_variables:
kwargs['template_variables'] = template_variables
html = file_html(doc, resources, title, **kwargs)
if hasattr(filename, 'write'):
html = decode_utf8(html)
if isinstance(filename, io.BytesIO):
html = html.encode('utf-8')
filename.write(html)
return
with io.open(filename, mode="w", encoding="utf-8") as f:
f.write(decode_utf8(html))
|
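This pair only trims the embed docstring, removing the trailing "If None use bokeh defaults" (the default is False, not None). A hedged usage sketch of the function above, assuming the panel package is installed and that writing demo.html to the working directory is acceptable:
# Sketch: save a simple Panel component to a standalone HTML file.
import panel as pn

column = pn.Column("# Title", pn.widgets.FloatSlider(name="x"))
save(column, "demo.html", title="Demo")   # resources default to CDN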
31,816 |
def main():
"""
Client is created with a session id. if a session id was given as argument
use it, else use the session id from the integration context.
"""
params = demisto.params()
args = demisto.args()
username = demisto.get(params, 'username.identifier')
password = demisto.get(params, 'username.password')
domain_arg = params.get('domain', '')
sid_arg = args.pop('session_id', None)
login_args = {'username': username,
'password': password,
'session_timeout': args.get('session_timeout', 600),
'domain_arg': domain_arg}
server = params.get('server')
port = params.get('port')
proxy = params.get('proxy', False)
verify_certificate = not params.get('insecure', False)
if server[-1] == "/":
server = server[:-1]
client = Client(base_url=f'https://{server}:{port}/web_api/',
use_ssl=verify_certificate,
use_proxy=proxy,
sid=sid_arg)
try:
# commands that perform login
command = demisto.command()
if demisto.command() == 'test-module':
client.login(**login_args)
return_results(client.test_connection())
client.logout()
return
elif command == 'checkpoint-login-and-get-session-id':
return_results(client.login(**login_args))
# note that the "if client.has_logged in: client.logout()" mechanism is NOT used here, to allow sid reuse
return
elif command == 'checkpoint-logout':
return_results(checkpoint_logout_command(client,sid_arg))
return
else:
if not client.sid: # client.sid is None if `sid_arg in {None, "None"}`
client.restore_sid_from_context_or_login(**login_args)
demisto.info(f'Command being called is {demisto.command()}')
if command == 'checkpoint-host-list':
return_results(checkpoint_list_hosts_command(client, **demisto.args()))
elif command == 'checkpoint-host-get':
return_results(checkpoint_get_host_command(client, **demisto.args()))
elif command == 'checkpoint-host-add':
return_results(checkpoint_add_host_command(client, **demisto.args()))
elif command == 'checkpoint-host-update':
return_results(checkpoint_update_host_command(client, **demisto.args()))
elif command == 'checkpoint-host-delete':
return_results(checkpoint_delete_host_command(client, **demisto.args()))
elif command == 'checkpoint-group-list':
return_results(checkpoint_list_groups_command(client, **demisto.args()))
elif command == 'checkpoint-group-get':
return_results(checkpoint_get_group_command(client, **demisto.args()))
elif command == 'checkpoint-group-add':
return_results(checkpoint_add_group_command(client, **demisto.args()))
elif command == 'checkpoint-group-update':
return_results(checkpoint_update_group_command(client, **demisto.args()))
elif command == 'checkpoint-group-delete':
return_results(checkpoint_delete_group_command(client, **demisto.args()))
elif command == 'checkpoint-address-range-list':
return_results(checkpoint_list_address_range_command(client, **demisto.args()))
elif command == 'checkpoint-address-range-get':
return_results(checkpoint_get_address_range_command(client, **demisto.args()))
elif command == 'checkpoint-address-range-add':
return_results(checkpoint_add_address_range_command(client, **demisto.args()))
elif command == 'checkpoint-address-range-update':
return_results(checkpoint_update_address_range_command(client, **demisto.args()))
elif command == 'checkpoint-address-range-delete':
return_results(checkpoint_delete_address_range_command(client, **demisto.args()))
elif command == 'checkpoint-threat-indicator-list':
return_results(checkpoint_list_threat_indicator_command(client, **demisto.args()))
elif command == 'checkpoint-threat-indicator-get':
return_results(checkpoint_get_threat_indicator_command(client, **demisto.args()))
elif command == 'checkpoint-threat-indicator-add':
return_results(checkpoint_add_threat_indicator_command(client, **demisto.args()))
elif command == 'checkpoint-threat-indicator-update':
return_results(checkpoint_update_threat_indicator_command(client, **demisto.args()))
elif command == 'checkpoint-threat-indicator-delete':
return_results(checkpoint_delete_threat_indicator_command(client, **demisto.args()))
elif command == 'checkpoint-access-rule-list':
return_results(checkpoint_list_access_rule_command(client, **demisto.args()))
elif command == 'checkpoint-access-rule-add':
return_results(checkpoint_add_access_rule_command(client, **demisto.args()))
elif command == 'checkpoint-access-rule-update':
return_results(checkpoint_update_access_rule_command(client, **demisto.args()))
elif command == 'checkpoint-access-rule-delete':
return_results(checkpoint_delete_access_rule_command(client, **demisto.args()))
elif command == 'checkpoint-application-site-list':
return_results(checkpoint_list_application_site_command(client, **demisto.args()))
elif command == 'checkpoint-application-site-add':
return_results(checkpoint_add_application_site_command(client, **demisto.args()))
elif command == 'checkpoint-application-site-update':
return_results(checkpoint_update_application_site_command(client, **demisto.args()))
elif command == 'checkpoint-application-site-delete':
return_results(checkpoint_delete_application_site_command(client, **demisto.args()))
elif command == 'checkpoint-application-site-category-list':
return_results(checkpoint_list_application_site_categories_command(client,
**demisto.args()))
elif command == 'checkpoint-application-site-category-get':
return_results(checkpoint_get_application_site_category_command(client,
**demisto.args()))
elif command == 'checkpoint-application-site-category-add':
return_results(checkpoint_add_application_site_category_command(client,
**demisto.args()))
elif command == 'checkpoint-packages-list':
return_results(checkpoint_list_packages_command(client, **demisto.args()))
elif command == 'checkpoint-gateways-list':
return_results(checkpoint_list_gateways_command(client, **demisto.args()))
elif command == 'checkpoint-show-objects':
return_results(checkpoint_list_objects_command(client, **demisto.args()))
elif command == 'checkpoint-show-task':
return_results(checkpoint_show_task_command(client, **demisto.args()))
elif command == 'checkpoint-publish':
return_results(checkpoint_publish_command(client))
elif command == 'checkpoint-install-policy':
return_results(checkpoint_install_policy_command(client, **demisto.args()))
elif command == 'checkpoint-verify-policy':
return_results(checkpoint_verify_policy_command(client, **demisto.args()))
elif command == 'checkpoint-package-list':
return_results(checkpoint_list_package_command(client, **demisto.args()))
else:
raise NotImplementedError(f"Unknown command {demisto.command()}.")
if client.has_performed_login:
# this party is not reached when login() is explicitly called
demisto.debug("main: client.has_performed_login==True, logging out.")
client.logout()
except DemistoException as e:
error_text_parts = [f'Failed to execute {demisto.command()} command.']
e_message = e.args[0]
if e.res:
status = e.res.http_status
if status == 401:
error_text_parts.extend(
('The current session is unreachable. All changes done after last publish are saved.',
'Please contact IT for more information.'))
demisto.setIntegrationContext({})
elif status == 500:
error_text_parts.append('Server Error: make sure Server URL and Server Port are correctly set')
demisto.setIntegrationContext({})
elif 'Missing header: [X-chkp-sid]' in e_message \
or 'Authentication to server failed' in e_message:
error_text_parts.append('Wrong credentials! '
'Please check the username and password you entered and try again.')
demisto.setIntegrationContext({})
error_text_parts.append(f'\nError: {str(e)}')
return_error("\n".join(error_text_parts))
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
def main():
"""
Client is created with a session id. if a session id was given as argument
use it, else use the session id from the integration context.
"""
params = demisto.params()
args = demisto.args()
username = demisto.get(params, 'username.identifier')
password = demisto.get(params, 'username.password')
domain_arg = params.get('domain', '')
sid_arg = args.pop('session_id', None)
login_args = {'username': username,
'password': password,
'session_timeout': args.get('session_timeout', 600),
'domain_arg': domain_arg}
server = params.get('server')
port = params.get('port')
proxy = params.get('proxy', False)
verify_certificate = not params.get('insecure', False)
if server[-1] == "/":
server = server[:-1]
client = Client(base_url=f'https://{server}:{port}/web_api/',
use_ssl=verify_certificate,
use_proxy=proxy,
sid=sid_arg)
try:
# commands that perform login
command = demisto.command()
if demisto.command() == 'test-module':
client.login(**login_args)
return_results(client.test_connection())
client.logout()
return
elif command == 'checkpoint-login-and-get-session-id':
return_results(client.login(**login_args))
# note that the "if client.has_logged in: client.logout()" mechanism is NOT used here, to allow sid reuse
return
elif command == 'checkpoint-logout':
return_results(checkpoint_logout_command(client,sid_arg))
return
else:
if not client.sid: # client.sid is None if `sid_arg in {None, "None"}`
client.restore_sid_from_context_or_login(**login_args)
demisto.info(f'Command being called is {demisto.command()}')
if command == 'checkpoint-host-list':
return_results(checkpoint_list_hosts_command(client, **demisto.args()))
elif command == 'checkpoint-host-get':
return_results(checkpoint_get_host_command(client, **demisto.args()))
elif command == 'checkpoint-host-add':
return_results(checkpoint_add_host_command(client, **demisto.args()))
elif command == 'checkpoint-host-update':
return_results(checkpoint_update_host_command(client, **demisto.args()))
elif command == 'checkpoint-host-delete':
return_results(checkpoint_delete_host_command(client, **demisto.args()))
elif command == 'checkpoint-group-list':
return_results(checkpoint_list_groups_command(client, **demisto.args()))
elif command == 'checkpoint-group-get':
return_results(checkpoint_get_group_command(client, **demisto.args()))
elif command == 'checkpoint-group-add':
return_results(checkpoint_add_group_command(client, **demisto.args()))
elif command == 'checkpoint-group-update':
return_results(checkpoint_update_group_command(client, **demisto.args()))
elif command == 'checkpoint-group-delete':
return_results(checkpoint_delete_group_command(client, **demisto.args()))
elif command == 'checkpoint-address-range-list':
return_results(checkpoint_list_address_range_command(client, **demisto.args()))
elif command == 'checkpoint-address-range-get':
return_results(checkpoint_get_address_range_command(client, **demisto.args()))
elif command == 'checkpoint-address-range-add':
return_results(checkpoint_add_address_range_command(client, **demisto.args()))
elif command == 'checkpoint-address-range-update':
return_results(checkpoint_update_address_range_command(client, **demisto.args()))
elif command == 'checkpoint-address-range-delete':
return_results(checkpoint_delete_address_range_command(client, **demisto.args()))
elif command == 'checkpoint-threat-indicator-list':
return_results(checkpoint_list_threat_indicator_command(client, **demisto.args()))
elif command == 'checkpoint-threat-indicator-get':
return_results(checkpoint_get_threat_indicator_command(client, **demisto.args()))
elif command == 'checkpoint-threat-indicator-add':
return_results(checkpoint_add_threat_indicator_command(client, **demisto.args()))
elif command == 'checkpoint-threat-indicator-update':
return_results(checkpoint_update_threat_indicator_command(client, **demisto.args()))
elif command == 'checkpoint-threat-indicator-delete':
return_results(checkpoint_delete_threat_indicator_command(client, **demisto.args()))
elif command == 'checkpoint-access-rule-list':
return_results(checkpoint_list_access_rule_command(client, **demisto.args()))
elif command == 'checkpoint-access-rule-add':
return_results(checkpoint_add_access_rule_command(client, **demisto.args()))
elif command == 'checkpoint-access-rule-update':
return_results(checkpoint_update_access_rule_command(client, **demisto.args()))
elif command == 'checkpoint-access-rule-delete':
return_results(checkpoint_delete_access_rule_command(client, **demisto.args()))
elif command == 'checkpoint-application-site-list':
return_results(checkpoint_list_application_site_command(client, **demisto.args()))
elif command == 'checkpoint-application-site-add':
return_results(checkpoint_add_application_site_command(client, **demisto.args()))
elif command == 'checkpoint-application-site-update':
return_results(checkpoint_update_application_site_command(client, **demisto.args()))
elif command == 'checkpoint-application-site-delete':
return_results(checkpoint_delete_application_site_command(client, **demisto.args()))
elif command == 'checkpoint-application-site-category-list':
return_results(checkpoint_list_application_site_categories_command(client,
**demisto.args()))
elif command == 'checkpoint-application-site-category-get':
return_results(checkpoint_get_application_site_category_command(client,
**demisto.args()))
elif command == 'checkpoint-application-site-category-add':
return_results(checkpoint_add_application_site_category_command(client,
**demisto.args()))
elif command == 'checkpoint-packages-list':
return_results(checkpoint_list_packages_command(client, **demisto.args()))
elif command == 'checkpoint-gateways-list':
return_results(checkpoint_list_gateways_command(client, **demisto.args()))
elif command == 'checkpoint-show-objects':
return_results(checkpoint_list_objects_command(client, **demisto.args()))
elif command == 'checkpoint-show-task':
return_results(checkpoint_show_task_command(client, **demisto.args()))
elif command == 'checkpoint-publish':
return_results(checkpoint_publish_command(client))
elif command == 'checkpoint-install-policy':
return_results(checkpoint_install_policy_command(client, **demisto.args()))
elif command == 'checkpoint-verify-policy':
return_results(checkpoint_verify_policy_command(client, **demisto.args()))
elif command == 'checkpoint-package-list':
return_results(checkpoint_list_package_command(client, **demisto.args()))
else:
raise NotImplementedError(f"Unknown command {demisto.command()}.")
if client.has_performed_login:
# this part is not reached when login() is explicitly called
demisto.debug("main: client.has_performed_login==True, logging out.")
client.logout()
except DemistoException as e:
error_text_parts = [f'Failed to execute {demisto.command()} command.']
e_message = e.args[0]
if e.res:
status = e.res.http_status
if status == 401:
error_text_parts.extend(
('The current session is unreachable. All changes done after last publish are saved.',
'Please contact IT for more information.'))
demisto.setIntegrationContext({})
elif status == 500:
error_text_parts.append('Server Error: make sure Server URL and Server Port are correctly set')
demisto.setIntegrationContext({})
elif 'Missing header: [X-chkp-sid]' in e_message \
or 'Authentication to server failed' in e_message:
error_text_parts.append('Wrong credentials! '
'Please check the username and password you entered and try again.')
demisto.setIntegrationContext({})
error_text_parts.append(f'\nError: {str(e)}')
return_error("\n".join(error_text_parts))
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
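The only change in this long pair is a comment typo, 'party' corrected to 'part'. The command routing itself is a long elif chain; a table-driven dispatch, like the commands dict in the earlier main() row, is sketched below with a hypothetical handler:
# Sketch: dict-based dispatch instead of a long elif chain (hypothetical handler).
def list_hosts(client, **kwargs):
    return "hosts"

COMMANDS = {
    'checkpoint-host-list': list_hosts,
    # one entry per supported command ...
}

command = 'checkpoint-host-list'
handler = COMMANDS.get(command)
if handler is None:
    raise NotImplementedError(f"Unknown command {command}.")
assert handler(client=None) == "hosts"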
40,477 |
def coalesce(
edge_index: Tensor,
edge_attr: Optional[Union[Tensor, List[Tensor]]] = None,
num_nodes: Optional[int] = None,
reduce: str = "add",
is_sorted: bool = False,
sort_by_row: bool = True,
) -> Union[Tensor, Tuple[Tensor, Tensor], Tuple[Tensor, List[Tensor]]]:
"""Row-wise sorts :obj:`edge_index` and removes its duplicated entries.
Duplicate entries in :obj:`edge_attr` are merged by scattering them
together according to the given :obj:`reduce` option.
Args:
edge_index (LongTensor): The edge indices.
edge_attr (Tensor or List[Tensor], optional): Edge weights or multi-
dimensional edge features.
If given as a list, will re-shuffle and remove duplicates for all
its entries. (default: :obj:`None`)
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
reduce (string, optional): The reduce operation to use for merging edge
features (:obj:`"add"`, :obj:`"mean"`, :obj:`"min"`, :obj:`"max"`,
:obj:`"mul"`). (default: :obj:`"add"`)
is_sorted (bool, optional): If set to :obj:`True`, will expect
:obj:`edge_index` to be already sorted row-wise.
sort_by_row (bool, optional): If set to :obj:`False`, will sort
:obj:`edge_index` column-wise.
:rtype: :class:`LongTensor` if :attr:`edge_attr` is :obj:`None`, else
(:class:`LongTensor`, :obj:`Tensor` or :obj:`List[Tensor]]`)
Examples:
>>> edge_index = torch.tensor([[1, 1, 2, 3], [3, 3, 1, 2]])
>>> edge_attr = torch.tensor([1., 1., 1., 1.])
>>> coalesce(edge_index)
tensor([[1, 2, 3],
[3, 1, 2]])
>>> # Sort `edge_index` column-wise
>>> coalesce(edge_index, sort_by_row=False)
tensor([[2, 3, 1],
[1, 2, 3]])
>>> coalesce(edge_index, edge_attr)
(tensor([[1, 2, 3],
[3, 1, 2]]),
tensor([2., 1., 1.]))
>>> # Use 'mean' operation to merge edge features
>>> coalesce(edge_index, edge_attr, reduce='mean')
(tensor([[1, 2, 3],
[3, 1, 2]]),
tensor([1., 1., 1.]))
"""
nnz = edge_index.size(1)
num_nodes = maybe_num_nodes(edge_index, num_nodes)
idx = edge_index.new_empty(nnz + 1)
idx[0] = -1
idx[1:] = edge_index[1 - int(sort_by_row)]
idx[1:].mul_(num_nodes).add_(edge_index[int(sort_by_row)])
if not is_sorted:
idx[1:], perm = idx[1:].sort()
edge_index = edge_index[:, perm]
if edge_attr is not None and isinstance(edge_attr, Tensor):
edge_attr = edge_attr[perm]
elif edge_attr is not None:
edge_attr = [e[perm] for e in edge_attr]
mask = idx[1:] > idx[:-1]
# Only perform expensive merging in case there exists duplicates:
if mask.all():
return edge_index if edge_attr is None else (edge_index, edge_attr)
edge_index = edge_index[:, mask]
if edge_attr is None:
return edge_index
dim_size = edge_index.size(1)
idx = torch.arange(0, nnz, device=edge_index.device)
idx.sub_(mask.logical_not_().cumsum(dim=0))
if isinstance(edge_attr, Tensor):
edge_attr = scatter(edge_attr, idx, 0, None, dim_size, reduce)
else:
edge_attr = [
scatter(e, idx, 0, None, dim_size, reduce) for e in edge_attr
]
return edge_index, edge_attr
|
def coalesce(
edge_index: Tensor,
edge_attr: Optional[Union[Tensor, List[Tensor]]] = None,
num_nodes: Optional[int] = None,
reduce: str = "add",
is_sorted: bool = False,
sort_by_row: bool = True,
) -> Union[Tensor, Tuple[Tensor, Tensor], Tuple[Tensor, List[Tensor]]]:
"""Row-wise sorts :obj:`edge_index` and removes its duplicated entries.
Duplicate entries in :obj:`edge_attr` are merged by scattering them
together according to the given :obj:`reduce` option.
Args:
edge_index (LongTensor): The edge indices.
edge_attr (Tensor or List[Tensor], optional): Edge weights or multi-
dimensional edge features.
If given as a list, will re-shuffle and remove duplicates for all
its entries. (default: :obj:`None`)
num_nodes (int, optional): The number of nodes, *i.e.*
:obj:`max_val + 1` of :attr:`edge_index`. (default: :obj:`None`)
reduce (string, optional): The reduce operation to use for merging edge
features (:obj:`"add"`, :obj:`"mean"`, :obj:`"min"`, :obj:`"max"`,
:obj:`"mul"`). (default: :obj:`"add"`)
is_sorted (bool, optional): If set to :obj:`True`, will expect
:obj:`edge_index` to be already sorted row-wise.
sort_by_row (bool, optional): If set to :obj:`False`, will sort
:obj:`edge_index` column-wise.
:rtype: :class:`LongTensor` if :attr:`edge_attr` is :obj:`None`, else
(:class:`LongTensor`, :obj:`Tensor` or :obj:`List[Tensor]]`)
Example:
>>> edge_index = torch.tensor([[1, 1, 2, 3], [3, 3, 1, 2]])
>>> edge_attr = torch.tensor([1., 1., 1., 1.])
>>> coalesce(edge_index)
tensor([[1, 2, 3],
[3, 1, 2]])
>>> # Sort `edge_index` column-wise
>>> coalesce(edge_index, sort_by_row=False)
tensor([[2, 3, 1],
[1, 2, 3]])
>>> coalesce(edge_index, edge_attr)
(tensor([[1, 2, 3],
[3, 1, 2]]),
tensor([2., 1., 1.]))
>>> # Use 'mean' operation to merge edge features
>>> coalesce(edge_index, edge_attr, reduce='mean')
(tensor([[1, 2, 3],
[3, 1, 2]]),
tensor([1., 1., 1.]))
"""
nnz = edge_index.size(1)
num_nodes = maybe_num_nodes(edge_index, num_nodes)
idx = edge_index.new_empty(nnz + 1)
idx[0] = -1
idx[1:] = edge_index[1 - int(sort_by_row)]
idx[1:].mul_(num_nodes).add_(edge_index[int(sort_by_row)])
if not is_sorted:
idx[1:], perm = idx[1:].sort()
edge_index = edge_index[:, perm]
if edge_attr is not None and isinstance(edge_attr, Tensor):
edge_attr = edge_attr[perm]
elif edge_attr is not None:
edge_attr = [e[perm] for e in edge_attr]
mask = idx[1:] > idx[:-1]
# Only perform expensive merging in case there exists duplicates:
if mask.all():
return edge_index if edge_attr is None else (edge_index, edge_attr)
edge_index = edge_index[:, mask]
if edge_attr is None:
return edge_index
dim_size = edge_index.size(1)
idx = torch.arange(0, nnz, device=edge_index.device)
idx.sub_(mask.logical_not_().cumsum(dim=0))
if isinstance(edge_attr, Tensor):
edge_attr = scatter(edge_attr, idx, 0, None, dim_size, reduce)
else:
edge_attr = [
scatter(e, idx, 0, None, dim_size, reduce) for e in edge_attr
]
return edge_index, edge_attr
|
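The pair changes only the docstring heading, 'Examples:' to 'Example:'. The deduplication itself relies on encoding each edge (row, col) as the single integer row * num_nodes + col, so equal integers identify duplicate edges; a plain-Python rendering of that trick:
# Sketch: integer-encode edges, deduplicate, then decode.
edges = [(1, 3), (1, 3), (2, 1), (3, 2)]
num_nodes = 4
keys = sorted(set(r * num_nodes + c for r, c in edges))
decoded = [(k // num_nodes, k % num_nodes) for k in keys]
assert decoded == [(1, 3), (2, 1), (3, 2)]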
30,431 |
def getValueToSet(args):
value = args.get('value')
applyIfEmpty = True if args.get('applyIfEmpty') == 'true' else False
if value is None or (applyIfEmpty and len(value) < 1):
value = args.get('defaultValue')
return value
|
def getValueToSet(args):
value = args.get('value')
applyIfEmpty = True if args.get('applyIfEmpty', '').lower() == 'true' else False
if value is None or (applyIfEmpty and len(value) < 1):
value = args.get('defaultValue')
return value
|
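The modified check supplies a default and lowercases the value, so a missing or differently cased applyIfEmpty argument is handled; the "True if ... else False" wrapper is redundant either way, since the comparison already yields a bool. A compact sketch:
# Sketch: tolerant boolean parsing of a string argument.
def apply_if_empty(args):
    return args.get('applyIfEmpty', '').lower() == 'true'

assert apply_if_empty({'applyIfEmpty': 'True'}) is True
assert apply_if_empty({'applyIfEmpty': 'false'}) is False
assert apply_if_empty({}) is False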
8,438 |
def _snr_single_region(spectrum, region=None):
"""
Calculate the mean S/N of the spectrum based on the flux and uncertainty
in the spectrum.
Parameters
----------
spectrum : `~specutils.spectra.spectrum1d.Spectrum1D`
        The spectrum object over which the S/N will be calculated.
region: `~specutils.utils.SpectralRegion`
Region within the spectrum to calculate the SNR.
Returns
-------
snr : `~astropy.units.Quantity` or list (based on region input)
Signal to noise ratio of the spectrum or within the regions
Notes
-----
This is a helper function for the above `snr()` method.
"""
if region is not None:
calc_spectrum = extract_region(spectrum, region)
else:
calc_spectrum = spectrum
if hasattr(spectrum, 'mask') and spectrum.mask is not None:
flux = calc_spectrum.flux[~spectrum.mask]
uncertainty = calc_spectrum.uncertainty.array[~spectrum.mask] * \
spectrum.uncertainty.unit
else:
flux = calc_spectrum.flux
uncertainty = calc_spectrum.uncertainty.array * spectrum.uncertainty.unit
# the axis=-1 will enable this to run on single-dispersion, single-flux
# and single-dispersion, multiple-flux
return np.mean(flux / uncertainty, axis=-1)
|
def _snr_single_region(spectrum, region=None):
"""
Calculate the mean S/N of the spectrum based on the flux and uncertainty
in the spectrum.
Parameters
----------
spectrum : `~specutils.spectra.spectrum1d.Spectrum1D`
        The spectrum object over which the S/N will be calculated.
region: `~specutils.utils.SpectralRegion`
Region within the spectrum to calculate the SNR.
Returns
-------
snr : `~astropy.units.Quantity` or list (based on region input)
Signal to noise ratio of the spectrum or within the regions
Notes
-----
This is a helper function for the above `snr()` method.
"""
if region is not None:
calc_spectrum = extract_region(spectrum, region)
else:
calc_spectrum = spectrum
if hasattr(spectrum, 'mask') and spectrum.mask is not None:
flux = calc_spectrum.flux[~spectrum.mask]
uncertainty = calc_spectrum.uncertainty.array[~spectrum.mask] * \
spectrum.uncertainty.unit
else:
flux = calc_spectrum.flux
uncertainty = calc_spectrum.uncertainty.quantity
# the axis=-1 will enable this to run on single-dispersion, single-flux
# and single-dispersion, multiple-flux
return np.mean(flux / uncertainty, axis=-1)
|
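The else-branch now uses uncertainty.quantity instead of rebuilding the quantity by hand from array * unit. A short check of that equivalence, assuming a reasonably recent astropy where NDUncertainty exposes a quantity property:
# Sketch: .quantity bundles the uncertainty array with its unit.
import numpy as np
import astropy.units as u
from astropy.nddata import StdDevUncertainty

unc = StdDevUncertainty(np.array([0.1, 0.2]), unit=u.Jy)
assert np.all(unc.quantity == unc.array * unc.unit)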
40,797 |
def _test_distrib_all_gather_group(device):
if idist.get_world_size() > 1:
rank = idist.get_rank()
group = [0, 1]
t = torch.tensor([rank], device=idist.device())
res = idist.all_gather(t, group=group)
assert torch.equal(res, torch.tensor(group))
|
def _test_distrib_all_gather_group(device):
if idist.get_world_size() > 1:
rank = idist.get_rank()
group = [0, 1]
t = torch.tensor([rank], device=device)
res = idist.all_gather(t, group=group)
assert torch.equal(res, torch.tensor(group))
|
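The fix is to build the tensor on the device argument the test receives instead of on idist.device(), so the test exercises the device it was parametrized with. A tiny device-threading sketch in plain PyTorch, with no ignite dependency:
# Sketch: prefer the device passed in over a globally derived one.
import torch

def make_rank_tensor(rank, device):
    return torch.tensor([rank], device=device)

t = make_rank_tensor(0, "cpu")
assert t.device.type == "cpu" and t.item() == 0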
9,396 |
def main():
ssh_defaults = dict(
bits=0,
type='rsa',
passphrase=None,
comment='ansible-generated on %s' % socket.gethostname()
)
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
name=dict(type='str', required=True, aliases=['user']),
uid=dict(type='str'),
non_unique=dict(type='bool', default=False),
group=dict(type='str'),
groups=dict(type='list'),
comment=dict(type='str'),
home=dict(type='path'),
shell=dict(type='str'),
password=dict(type='str', no_log=True),
login_class=dict(type='str'),
# following options are specific to macOS
hidden=dict(type='bool'),
# following options are specific to selinux
seuser=dict(type='str'),
# following options are specific to userdel
force=dict(type='bool', default=False),
remove=dict(type='bool', default=False),
# following options are specific to useradd
create_home=dict(type='bool', default=True, aliases=['createhome']),
skeleton=dict(type='str'),
system=dict(type='bool', default=False),
# following options are specific to usermod
move_home=dict(type='bool', default=False),
append=dict(type='bool', default=False),
# following are specific to ssh key generation
generate_ssh_key=dict(type='bool'),
ssh_key_bits=dict(type='int', default=ssh_defaults['bits']),
ssh_key_type=dict(type='str', default=ssh_defaults['type']),
ssh_key_file=dict(type='path'),
ssh_key_comment=dict(type='str', default=ssh_defaults['comment']),
ssh_key_passphrase=dict(type='str', no_log=True),
update_password=dict(type='str', default='always', choices=['always', 'on_create']),
expires=dict(type='float'),
password_lock=dict(type='bool'),
local=dict(type='bool'),
profile=dict(type='str'),
authorization=dict(type='str'),
role=dict(type='str'),
),
supports_check_mode=True
)
user = User(module)
user.check_password_encrypted()
module.debug('User instantiated - platform %s' % user.platform)
if user.distribution:
module.debug('User instantiated - distribution %s' % user.distribution)
rc = None
out = ''
err = ''
result = {}
result['name'] = user.name
result['state'] = user.state
if user.state == 'absent':
if user.user_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = user.remove_user()
if rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
result['force'] = user.force
result['remove'] = user.remove
result['diff'] = {
'before': 'user exists\n',
'after': 'user removed\n',
}
elif user.state == 'present':
if not user.user_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = user.create_user()
if module.check_mode:
result['system'] = user.name
else:
result['system'] = user.system
result['create_home'] = user.create_home
result['diff'] = {
'before': 'user does not exist\n',
'after': 'user created\n',
}
else:
# modify user (note: this function is check mode aware)
(rc, out, err) = user.modify_user()
result['append'] = user.append
result['move_home'] = user.move_home
result['diff'] = {
'before':
''.join([
'%s = %s\n' % (key, oldv)
for key, (oldv, newv) in sorted(user.changes.items())
]),
'after':
''.join([
'%s = %s\n' % (key, newv)
for key, (oldv, newv) in sorted(user.changes.items())
]),
}
if rc is not None and rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
if user.password is not None:
result['password'] = 'NOT_LOGGING_PASSWORD'
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
if user.user_exists():
info = user.user_info()
if info is False:
result['msg'] = "failed to look up user name: %s" % user.name
result['failed'] = True
result['uid'] = info[2]
result['group'] = info[3]
result['comment'] = info[4]
result['home'] = info[5]
result['shell'] = info[6]
if user.groups is not None:
result['groups'] = user.groups
# handle missing homedirs
info = user.user_info()
if user.home is None:
user.home = info[5]
if not os.path.exists(user.home) and user.create_home:
if not module.check_mode:
user.create_homedir(user.home)
user.chown_homedir(info[2], info[3], user.home)
result['changed'] = True
result['diff']['after'] += 'created %s\n' % user.home
# deal with ssh key
if user.sshkeygen:
# generate ssh key (note: this function is check mode aware)
(rc, out, err) = user.ssh_key_gen()
if rc is not None and rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
if rc == 0:
result['changed'] = True
result['diff']['after'] += 'generated SSH key'
(rc, out, err) = user.ssh_key_fingerprint()
if rc == 0:
result['ssh_fingerprint'] = out.strip()
else:
result['ssh_fingerprint'] = err.strip()
result['ssh_key_file'] = user.get_ssh_key_path()
result['ssh_public_key'] = user.get_ssh_public_key()
module.exit_json(**result)
|
def main():
ssh_defaults = dict(
bits=0,
type='rsa',
passphrase=None,
comment='ansible-generated on %s' % socket.gethostname()
)
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'present']),
name=dict(type='str', required=True, aliases=['user']),
uid=dict(type='str'),
non_unique=dict(type='bool', default=False),
group=dict(type='str'),
groups=dict(type='list'),
comment=dict(type='str'),
home=dict(type='path'),
shell=dict(type='str'),
password=dict(type='str', no_log=True),
login_class=dict(type='str'),
# following options are specific to macOS
hidden=dict(type='bool'),
# following options are specific to selinux
seuser=dict(type='str'),
# following options are specific to userdel
force=dict(type='bool', default=False),
remove=dict(type='bool', default=False),
# following options are specific to useradd
create_home=dict(type='bool', default=True, aliases=['createhome']),
skeleton=dict(type='str'),
system=dict(type='bool', default=False),
# following options are specific to usermod
move_home=dict(type='bool', default=False),
append=dict(type='bool', default=False),
# following are specific to ssh key generation
generate_ssh_key=dict(type='bool'),
ssh_key_bits=dict(type='int', default=ssh_defaults['bits']),
ssh_key_type=dict(type='str', default=ssh_defaults['type']),
ssh_key_file=dict(type='path'),
ssh_key_comment=dict(type='str', default=ssh_defaults['comment']),
ssh_key_passphrase=dict(type='str', no_log=True),
update_password=dict(type='str', default='always', choices=['always', 'on_create']),
expires=dict(type='float'),
password_lock=dict(type='bool'),
local=dict(type='bool'),
profile=dict(type='str'),
authorization=dict(type='str'),
role=dict(type='str'),
),
supports_check_mode=True
)
user = User(module)
user.check_password_encrypted()
module.debug('User instantiated - platform %s' % user.platform)
if user.distribution:
module.debug('User instantiated - distribution %s' % user.distribution)
rc = None
out = ''
err = ''
result = {}
result['name'] = user.name
result['state'] = user.state
if user.state == 'absent':
if user.user_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = user.remove_user()
if rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
result['force'] = user.force
result['remove'] = user.remove
result['diff'] = {
'before': 'user exists\n',
'after': 'user removed\n',
}
elif user.state == 'present':
if not user.user_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = user.create_user()
if module.check_mode:
result['system'] = user.name
else:
result['system'] = user.system
result['create_home'] = user.create_home
result['diff'] = {
'before': '{0} does not exist\n'.format(user.name),
'after': 'user created\n',
}
else:
# modify user (note: this function is check mode aware)
(rc, out, err) = user.modify_user()
result['append'] = user.append
result['move_home'] = user.move_home
result['diff'] = {
'before':
''.join([
'%s = %s\n' % (key, oldv)
for key, (oldv, newv) in sorted(user.changes.items())
]),
'after':
''.join([
'%s = %s\n' % (key, newv)
for key, (oldv, newv) in sorted(user.changes.items())
]),
}
if rc is not None and rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
if user.password is not None:
result['password'] = 'NOT_LOGGING_PASSWORD'
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
if user.user_exists():
info = user.user_info()
if info is False:
result['msg'] = "failed to look up user name: %s" % user.name
result['failed'] = True
result['uid'] = info[2]
result['group'] = info[3]
result['comment'] = info[4]
result['home'] = info[5]
result['shell'] = info[6]
if user.groups is not None:
result['groups'] = user.groups
# handle missing homedirs
info = user.user_info()
if user.home is None:
user.home = info[5]
if not os.path.exists(user.home) and user.create_home:
if not module.check_mode:
user.create_homedir(user.home)
user.chown_homedir(info[2], info[3], user.home)
result['changed'] = True
result['diff']['after'] += 'created %s\n' % user.home
# deal with ssh key
if user.sshkeygen:
# generate ssh key (note: this function is check mode aware)
(rc, out, err) = user.ssh_key_gen()
if rc is not None and rc != 0:
module.fail_json(name=user.name, msg=err, rc=rc)
if rc == 0:
result['changed'] = True
result['diff']['after'] += 'generated SSH key'
(rc, out, err) = user.ssh_key_fingerprint()
if rc == 0:
result['ssh_fingerprint'] = out.strip()
else:
result['ssh_fingerprint'] = err.strip()
result['ssh_key_file'] = user.get_ssh_key_path()
result['ssh_public_key'] = user.get_ssh_public_key()
module.exit_json(**result)
|
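The only change is in the check-mode diff text for a new user: the 'before' line now names the user via str.format. A minimal rendering of that diff dict with a hypothetical user name:
# Sketch: the "before" text now carries the actual user name.
name = "deploy"
diff = {
    'before': '{0} does not exist\n'.format(name),
    'after': 'user created\n',
}
assert diff['before'] == 'deploy does not exist\n'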
33,852 |
def start_metrics_pusher(interval_s: float,
collection_callback: Callable[[], Dict[str, float]],
controller_handle):
"""Start a background thread to push metrics to controller.
Args:
interval_s(float): the push interval.
collection_callback: a callable that returns the metric data points to
be sent over the the controller.
controller_handle: actor handle to Serve controller.
"""
def send_once():
data = collection_callback()
# TODO(simon): maybe wait for ack or handle controller failure?
controller_handle.record_autoscaling_metrics.remote(
data=data, send_timestamp=time.time())
def send_forever():
while True:
start = time.time()
send_once()
duration_s = time.time() - start
remaining_time = interval_s - duration_s
if remaining_time > 0:
time.sleep(remaining_time)
timer = threading.Thread(target=send_forever)
timer.setDaemon(True)
timer.start()
|
def start_metrics_pusher(interval_s: float,
collection_callback: Callable[[], Dict[str, float]],
controller_handle):
"""Start a background thread to push metrics to controller.
Args:
interval_s(float): the push interval.
collection_callback: a callable that returns the metric data points to
be sent to controller.
controller_handle: actor handle to Serve controller.
"""
def send_once():
data = collection_callback()
# TODO(simon): maybe wait for ack or handle controller failure?
controller_handle.record_autoscaling_metrics.remote(
data=data, send_timestamp=time.time())
def send_forever():
while True:
start = time.time()
send_once()
duration_s = time.time() - start
remaining_time = interval_s - duration_s
if remaining_time > 0:
time.sleep(remaining_time)
timer = threading.Thread(target=send_forever)
timer.setDaemon(True)
timer.start()
|
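This pair fixes the docstring phrase "over the the controller"; the pusher logic is untouched. Its loop subtracts the callback duration from the interval so pushes stay roughly interval_s apart, as in this standalone sketch:
# Sketch: periodic loop that compensates for the time the callback itself takes.
import threading
import time

def run_every(interval_s, fn, stop_event):
    while not stop_event.is_set():
        start = time.time()
        fn()
        remaining = interval_s - (time.time() - start)
        if remaining > 0:
            stop_event.wait(remaining)

ticks = []
stop = threading.Event()
worker = threading.Thread(target=run_every, args=(0.01, lambda: ticks.append(1), stop), daemon=True)
worker.start()
time.sleep(0.05)
stop.set()
worker.join()
assert ticks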
22,087 |
def _ntlm_authenticate_info(request):
"""
Extract host information in an NTLM_AUTH message
"""
if (len(request) < 52):
LOGGER.warning("NTLM message is too short (%d) but should be at least "
"52 char long", len(request))
return None
value = []
offset, ln = struct.unpack('IH', request[32:36] + request[28:30])
if ln > 0:
value.append("domain:" + \
encode_b64(_extract_substr(request, offset, ln)).decode())
has_version = False
# Flags are not present in a NTLM_AUTH message when the data block starts
# before index 64
if offset >= 64 and len(request) > 64:
flags, = struct.unpack('I', request[60:64])
has_version = flags & flag_version
off, ln = struct.unpack('IH', request[40:44] + request[36:38])
if ln > 0:
value.append("user-name:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
off, ln = struct.unpack('IH', request[48:52] + request[44:46])
if ln > 0:
value.append("workstation:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
# Get OS Version if the `Negotiate Version` is set
# (NTLM_AUTH messages with a data block starting before index 72 do not
# contain information on the version)
if offset >= 72 and len(request) > 72 and has_version:
maj, minor, bld, ntlm_ver = struct.unpack('BBHB', request[64:65] +
request[65:66] +
request[66:68] +
request[71:72])
version = "{}.{}.{}".format(maj, minor, bld).encode()
value.append("ntlm-os:{}".format(encode_b64(version).decode()))
value.append("ntlm-version:{}".format(ntlm_ver))
return 'NTLM ' + ','.join(value)
|
def _ntlm_authenticate_info(request):
"""
Extract host information in an NTLM_AUTH message
"""
if (len(request) < 52):
LOGGER.warning("NTLM message is too short (%d) but should be at least "
"52 char long", len(request))
return None
value = []
offset, ln = struct.unpack('IH', request[32:36] + request[28:30])
if ln > 0:
value.append("domain:" + \
encode_b64(_extract_substr(request, offset, ln)).decode())
has_version = False
# Flags are not present in a NTLM_AUTH message when the data block starts
# before index 64
if offset >= 64 and len(request) > 64:
flags, = struct.unpack('I', request[60:64])
has_version = flags & flag_version
off, ln = struct.unpack('IH', request[40:44] + request[36:38])
if ln > 0:
value.append("user-name:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
ln, off = struct.unpack('H2xI', request[44:52])
if ln > 0:
value.append("workstation:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
# Get OS Version if the `Negotiate Version` is set
# (NTLM_AUTH messages with a data block starting before index 72 do not
# contain information on the version)
if offset >= 72 and len(request) > 72 and has_version:
maj, minor, bld, ntlm_ver = struct.unpack('BBHB', request[64:65] +
request[65:66] +
request[66:68] +
request[71:72])
version = "{}.{}.{}".format(maj, minor, bld).encode()
value.append("ntlm-os:{}".format(encode_b64(version).decode()))
value.append("ntlm-version:{}".format(ntlm_ver))
return 'NTLM ' + ','.join(value)
|
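The workstation field is now read with a single struct.unpack('H2xI', request[44:52]): a 16-bit length, two skipped pad bytes (the max-length field), and a 32-bit offset from one contiguous slice, instead of stitching two slices together. A self-contained check, with made-up field values, that the two forms agree:
# Sketch: 'H2xI' over one slice equals the old two-slice 'IH' unpack.
import struct

blob = struct.pack('HHI', 12, 0, 64)       # length=12, max-length (skipped), offset=64
ln, off = struct.unpack('H2xI', blob)
off2, ln2 = struct.unpack('IH', blob[4:8] + blob[0:2])
assert (ln, off) == (ln2, off2) == (12, 64)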
55,053 |
def inv(operation_list):
"""Invert a list of operations or a :doc:`template </introduction/templates>`.
If the inversion happens inside a QNode, the operations are removed and requeued
in the reversed order for proper inversion.
.. warning::
Use of :func:`~.qml.inv()` is deprecated and should be replaced with
:func:`~.qml.adjoint()`.
**Example:**
The following example illuminates the inversion of a template:
.. code-block:: python3
@qml.template
def ansatz(weights, wires):
for idx, wire in enumerate(wires):
qml.RX(weights[idx], wires=[wire])
for idx in range(len(wires) - 1):
qml.CNOT(wires=[wires[idx], wires[idx + 1]])
dev = qml.device('default.qubit', wires=2)
@qml.qnode(dev)
def circuit(weights):
qml.inv(ansatz(weights, wires=[0, 1]))
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
We may also invert an operation sequence:
.. code-block:: python3
dev = qml.device('default.qubit', wires=2)
@qml.qnode(dev)
def circuit1():
qml.T(wires=[0]).inv()
qml.Hadamard(wires=[0]).inv()
qml.S(wires=[0]).inv()
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
@qml.qnode(dev)
def circuit2():
qml.inv([qml.S(wires=[0]), qml.Hadamard(wires=[0]), qml.T(wires=[0])])
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
Double checking that both circuits produce the same output:
>>> ZZ1 = circuit1()
>>> ZZ2 = circuit2()
>>> assert ZZ1 == ZZ2
True
Args:
operation_list (Iterable[~.Operation]): An iterable of operations
Returns:
List[~.Operation]: The inverted list of operations
"""
warnings.warn(
"Use of qml.inv() is deprecated and should be replaced with qml.adjoint()",
UserWarning,
)
if isinstance(operation_list, qml.operation.Operation):
operation_list = [operation_list]
elif operation_list is None:
raise ValueError(
"None was passed as an argument to inv. "
"This could happen if inversion of a template without the template decorator is attempted."
)
elif callable(operation_list):
raise ValueError(
"A function was passed as an argument to inv. "
"This could happen if inversion of a template function is attempted. "
"Please use inv on the function including its arguments, as in inv(template(args))."
)
elif isinstance(operation_list, qml.tape.QuantumTape):
new_tape = operation_list.adjoint()
return new_tape
elif not isinstance(operation_list, Iterable):
raise ValueError("The provided operation_list is not iterable.")
non_ops = [
(idx, op)
for idx, op in enumerate(operation_list)
if not isinstance(op, qml.operation.Operation)
]
if non_ops:
string_reps = [" operation_list[{}] = {}".format(idx, op) for idx, op in non_ops]
raise ValueError(
"The given operation_list does not only contain Operations."
+ "The following elements of the iterable were not Operations:"
+ ",".join(string_reps)
)
for op in operation_list:
try:
# remove the queued operation to be inverted
# from the existing queuing context
qml.QueuingContext.remove(op)
except KeyError:
# operation to be inverted does not
# exist on the queuing context
pass
def qfunc():
for o in operation_list:
o.queue()
with qml.tape.QuantumTape() as tape:
qml.adjoint(qfunc)()
return tape
|
def inv(operation_list):
"""Invert a list of operations or a :doc:`template </introduction/templates>`.
If the inversion happens inside a QNode, the operations are removed and requeued
in the reversed order for proper inversion.
.. warning::
Use of :func:`~.inv()` is deprecated and should be replaced with
:func:`~.adjoint()`.
**Example:**
    The following example illustrates the inversion of a template:
.. code-block:: python3
@qml.template
def ansatz(weights, wires):
for idx, wire in enumerate(wires):
qml.RX(weights[idx], wires=[wire])
for idx in range(len(wires) - 1):
qml.CNOT(wires=[wires[idx], wires[idx + 1]])
dev = qml.device('default.qubit', wires=2)
@qml.qnode(dev)
def circuit(weights):
qml.inv(ansatz(weights, wires=[0, 1]))
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
We may also invert an operation sequence:
.. code-block:: python3
dev = qml.device('default.qubit', wires=2)
@qml.qnode(dev)
def circuit1():
qml.T(wires=[0]).inv()
qml.Hadamard(wires=[0]).inv()
qml.S(wires=[0]).inv()
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
@qml.qnode(dev)
def circuit2():
qml.inv([qml.S(wires=[0]), qml.Hadamard(wires=[0]), qml.T(wires=[0])])
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
Double checking that both circuits produce the same output:
>>> ZZ1 = circuit1()
>>> ZZ2 = circuit2()
    >>> ZZ1 == ZZ2
True
Args:
operation_list (Iterable[~.Operation]): An iterable of operations
Returns:
List[~.Operation]: The inverted list of operations
"""
warnings.warn(
"Use of qml.inv() is deprecated and should be replaced with qml.adjoint()",
UserWarning,
)
if isinstance(operation_list, qml.operation.Operation):
operation_list = [operation_list]
elif operation_list is None:
raise ValueError(
"None was passed as an argument to inv. "
"This could happen if inversion of a template without the template decorator is attempted."
)
elif callable(operation_list):
raise ValueError(
"A function was passed as an argument to inv. "
"This could happen if inversion of a template function is attempted. "
"Please use inv on the function including its arguments, as in inv(template(args))."
)
elif isinstance(operation_list, qml.tape.QuantumTape):
new_tape = operation_list.adjoint()
return new_tape
elif not isinstance(operation_list, Iterable):
raise ValueError("The provided operation_list is not iterable.")
non_ops = [
(idx, op)
for idx, op in enumerate(operation_list)
if not isinstance(op, qml.operation.Operation)
]
if non_ops:
string_reps = [" operation_list[{}] = {}".format(idx, op) for idx, op in non_ops]
raise ValueError(
"The given operation_list does not only contain Operations."
+ "The following elements of the iterable were not Operations:"
+ ",".join(string_reps)
)
for op in operation_list:
try:
# remove the queued operation to be inverted
# from the existing queuing context
qml.QueuingContext.remove(op)
except KeyError:
# operation to be inverted does not
# exist on the queuing context
pass
def qfunc():
for o in operation_list:
o.queue()
with qml.tape.QuantumTape() as tape:
qml.adjoint(qfunc)()
return tape
|
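Since both versions of inv() above point to qml.adjoint() as the replacement, here is a minimal sketch of the equivalent adjoint-based circuit. It assumes PennyLane with the default.qubit device is available; sequence and circuit3 are illustrative names, not part of the original code.
import pennylane as qml

dev = qml.device("default.qubit", wires=2)

def sequence():
    # same gate order as circuit2 in the docstring above
    qml.S(wires=[0])
    qml.Hadamard(wires=[0])
    qml.T(wires=[0])

@qml.qnode(dev)
def circuit3():
    # applies the daggered gates in reverse order (T†, H†, S†), replacing qml.inv([...])
    qml.adjoint(sequence)()
    return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))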
4,303 |
def _read_nedf_eeg(filename: str):
"""
Read header info and EEG data from an .nedf file
Parameters
----------
filename : str
Path to the .nedf file.
Returns
-------
eeg : array, shape (n_samples, n_channels)
Unscaled EEG data
info : dict
Information from the file header
triggers : array, shape (n_annots, 2)
Start samples and values of each trigger
scale : float
Scaling factor for the EEG data
"""
info, dt = parse_nedf_header(filename)
# to quote the original matlab implementation:
# "binary data will always start at byte 5120"
binstart = 10240
with open(filename, mode='rb') as f:
f.seek(binstart, os.SEEK_SET)
data = np.fromfile(f, dtype=dt)
# convert uint8-triplet -> float32
eeg = data['data']['eeg'] @ [1 << 16, 1 << 8, 1.]
eeg = eeg.reshape((-1, info['nchan']))
# convert sign if necessary
eeg[eeg > (1 << 23)] -= 1 << 24
triggers = data['data']['trig'].flatten()
triggerind = triggers.nonzero()[0]
triggers = np.stack((triggerind, triggers[triggerind])).T
# scale channels accordingly (here: to volts)
scale = 2.4 / (6.0 * 8388607)
return eeg, info, triggers, scale
|
def _read_nedf_eeg(filename: str):
"""
Read header info and EEG data from an .nedf file
Parameters
----------
filename : str
Path to the .nedf file.
Returns
-------
eeg : array, shape (n_samples, n_channels)
Unscaled EEG data
info : dict
Information from the file header.
triggers : array, shape (n_annots, 2)
Start samples and values of each trigger
scale : float
Scaling factor for the EEG data
"""
info, dt = parse_nedf_header(filename)
# to quote the original matlab implementation:
# "binary data will always start at byte 5120"
binstart = 10240
with open(filename, mode='rb') as f:
f.seek(binstart, os.SEEK_SET)
data = np.fromfile(f, dtype=dt)
# convert uint8-triplet -> float32
eeg = data['data']['eeg'] @ [1 << 16, 1 << 8, 1.]
eeg = eeg.reshape((-1, info['nchan']))
# convert sign if necessary
eeg[eeg > (1 << 23)] -= 1 << 24
triggers = data['data']['trig'].flatten()
triggerind = triggers.nonzero()[0]
triggers = np.stack((triggerind, triggers[triggerind])).T
# scale channels accordingly (here: to volts)
scale = 2.4 / (6.0 * 8388607)
return eeg, info, triggers, scale
|
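The byte juggling in _read_nedf_eeg is easier to see on a toy array: each EEG sample is stored as a big-endian unsigned 24-bit triplet, combined with a dot product and then sign-corrected. A self-contained sketch with synthetic bytes (not a real .nedf payload):
import numpy as np

raw = np.array([[0x00, 0x00, 0x01],   # +1
                [0xFF, 0xFF, 0xFF]],  # -1 after sign correction
               dtype=np.uint8)
vals = raw @ [1 << 16, 1 << 8, 1.]    # big-endian triplet -> unsigned 24-bit value
vals[vals > (1 << 23)] -= 1 << 24     # two's-complement sign correction
print(vals)                           # [ 1. -1.]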
28,545 |
def plot_kde(
values,
values2=None,
cumulative=False,
rug=False,
label=None,
bw=4.5,
quantiles=None,
rotated=False,
contour=True,
fill_last=True,
textsize=None,
plot_kwargs=None,
fill_kwargs=None,
rug_kwargs=None,
contour_kwargs=None,
ax=None,
legend=True,
):
"""1D or 2D KDE plot taking into account boundary conditions.
Parameters
----------
values : array-like
Values to plot
values2 : array-like, optional
Values to plot. If present, a 2D KDE will be estimated
cumulative : bool
If true plot the estimated cumulative distribution function. Defaults to False.
Ignored for 2D KDE
rug : bool
If True adds a rugplot. Defaults to False. Ignored for 2D KDE
label : string
Text to include as part of the legend
bw : float
Bandwidth scaling factor for 1D KDE. Should be larger than 0. The higher this number the
        smoother the KDE will be. Defaults to 4.5, which is essentially the same as Scott's
rule of thumb (the default rule used by SciPy).
quantiles : list
Quantiles in ascending order used to segment the KDE. Use [.25, .5, .75] for quartiles.
Defaults to None.
rotated : bool
Whether to rotate the 1D KDE plot 90 degrees.
contour : bool
If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
fill_last : bool
If True fill the last contour of the 2D KDE plot. Defaults to True.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on figsize.
plot_kwargs : dict
Keywords passed to the pdf line of a 1D KDE.
fill_kwargs : dict
Keywords passed to the fill under the line (use fill_kwargs={'alpha': 0} to disable fill).
Ignored for 2D KDE
rug_kwargs : dict
Keywords passed to the rug plot. Ignored if rug=False or for 2D KDE
Use `space` keyword (float) to control the position of the rugplot. The larger this number
the lower the rugplot.
contour_kwargs : dict
Keywords passed to the contourplot. Ignored for 1D KDE
ax : matplotlib axes
legend : bool
Add legend to the figure. By default True.
Returns
-------
ax : matplotlib axes
Examples
--------
Plot KDE with Rugplot
.. plot::
:context: close-figs
>>> import arviz as az
>>> non_centered = az.load_arviz_data('non_centered_eight')
>>> mu_posterior = np.concatenate(non_centered.posterior["mu"].values)
>>> az.plot_kde(mu_posterior, rug=True)
Default Kde without rugplot
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior)
Plot a cumulative distribution
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, cumulative=True)
Rotate plot 90 degrees
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, rotated=True)
Plot 2d contour KDE
.. plot::
:context: close-figs
>>> tau_posterior = np.concatenate(non_centered.posterior["tau"].values)
>>> az.plot_kde(mu_posterior, values2=tau_posterior)
Remove fill for last contour in 2d KDE
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, values2=tau_posterior, fill_last=False)
Plot 2d smooth KDE
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, values2=tau_posterior, contour=False)
"""
if ax is None:
ax = plt.gca()
figsize = ax.get_figure().get_size_inches()
figsize, *_, xt_labelsize, linewidth, markersize = _scale_fig_size(figsize, textsize, 1, 1)
if values2 is None:
if plot_kwargs is None:
plot_kwargs = {}
plot_kwargs.setdefault("color", "C0")
default_color = plot_kwargs.get("color")
if fill_kwargs is None:
fill_kwargs = {}
fill_kwargs.setdefault("color", default_color)
if rug_kwargs is None:
rug_kwargs = {}
rug_kwargs.setdefault("marker", "_" if rotated else "|")
rug_kwargs.setdefault("linestyle", "None")
rug_kwargs.setdefault("color", default_color)
rug_kwargs.setdefault("space", 0.2)
plot_kwargs.setdefault("linewidth", linewidth)
rug_kwargs.setdefault("markersize", 2 * markersize)
density, lower, upper = _fast_kde(values, cumulative, bw)
rug_space = max(density) * rug_kwargs.pop("space")
x = np.linspace(lower, upper, len(density))
if cumulative:
density_q = density
else:
density_q = np.cumsum(density)
fill_func = ax.fill_between
fill_x, fill_y = x, density
if rotated:
x, density = density, x
fill_func = ax.fill_betweenx
ax.tick_params(labelsize=xt_labelsize)
if rotated:
ax.set_xlim(0, auto=True)
rug_x, rug_y = np.zeros_like(values) - rug_space, values
else:
ax.set_ylim(0, auto=True)
rug_x, rug_y = values, np.zeros_like(values) - rug_space
if rug:
ax.plot(rug_x, rug_y, **rug_kwargs)
if quantiles is not None:
fill_kwargs.setdefault("alpha", 0.75)
idx = [np.sum(density_q < quant) for quant in quantiles]
fill_func(
fill_x,
fill_y,
where=np.isin(fill_x, fill_x[idx], invert=True, assume_unique=True),
**fill_kwargs
)
else:
fill_kwargs.setdefault("alpha", 0)
ax.plot(x, density, label=label, **plot_kwargs)
fill_func(fill_x, fill_y, **fill_kwargs)
if legend and label:
ax.legend()
else:
if contour_kwargs is None:
contour_kwargs = {}
contour_kwargs.setdefault("colors", "0.5")
gridsize = (128, 128) if contour else (256, 256)
density, xmin, xmax, ymin, ymax = _fast_kde_2d(values, values2, gridsize=gridsize)
g_s = complex(gridsize[0])
x_x, y_y = np.mgrid[xmin:xmax:g_s, ymin:ymax:g_s]
ax.grid(False)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
if contour:
qcfs = ax.contourf(x_x, y_y, density, antialiased=True)
if not fill_last:
qcfs.collections[0].set_alpha(0)
qcs = ax.contour(x_x, y_y, density, **contour_kwargs)
if not fill_last:
qcs.collections[0].set_alpha(0)
else:
ax.pcolormesh(x_x, y_y, density)
return ax
|
def plot_kde(
values,
values2=None,
cumulative=False,
rug=False,
label=None,
bw=4.5,
quantiles=None,
rotated=False,
contour=True,
fill_last=True,
textsize=None,
plot_kwargs=None,
fill_kwargs=None,
rug_kwargs=None,
contour_kwargs=None,
ax=None,
legend=True,
):
"""1D or 2D KDE plot taking into account boundary conditions.
Parameters
----------
values : array-like
Values to plot
values2 : array-like, optional
Values to plot. If present, a 2D KDE will be estimated
cumulative : bool
If true plot the estimated cumulative distribution function. Defaults to False.
Ignored for 2D KDE
rug : bool
If True adds a rugplot. Defaults to False. Ignored for 2D KDE
label : string
Text to include as part of the legend
bw : float
Bandwidth scaling factor for 1D KDE. Should be larger than 0. The higher this number the
        smoother the KDE will be. Defaults to 4.5, which is essentially the same as Scott's
rule of thumb (the default rule used by SciPy).
quantiles : list
Quantiles in ascending order used to segment the KDE. Use [.25, .5, .75] for quartiles.
Defaults to None.
rotated : bool
Whether to rotate the 1D KDE plot 90 degrees.
contour : bool
If True plot the 2D KDE using contours, otherwise plot a smooth 2D KDE. Defaults to True.
fill_last : bool
If True fill the last contour of the 2D KDE plot. Defaults to True.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on figsize.
plot_kwargs : dict
Keywords passed to the pdf line of a 1D KDE.
fill_kwargs : dict
Keywords passed to the fill under the line (use fill_kwargs={'alpha': 0} to disable fill).
Ignored for 2D KDE
rug_kwargs : dict
Keywords passed to the rug plot. Ignored if rug=False or for 2D KDE
Use `space` keyword (float) to control the position of the rugplot. The larger this number
the lower the rugplot.
contour_kwargs : dict
Keywords passed to the contourplot. Ignored for 1D KDE
ax : matplotlib axes
legend : bool
Add legend to the figure. By default True.
Returns
-------
ax : matplotlib axes
Examples
--------
Plot KDE with Rugplot
.. plot::
:context: close-figs
>>> import arviz as az
>>> non_centered = az.load_arviz_data('non_centered_eight')
>>> mu_posterior = np.concatenate(non_centered.posterior["mu"].values)
>>> az.plot_kde(mu_posterior, rug=True)
Default KDE without rugplot
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior)
Plot a cumulative distribution
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, cumulative=True)
Rotate plot 90 degrees
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, rotated=True)
Plot 2d contour KDE
.. plot::
:context: close-figs
>>> tau_posterior = np.concatenate(non_centered.posterior["tau"].values)
>>> az.plot_kde(mu_posterior, values2=tau_posterior)
Remove fill for last contour in 2d KDE
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, values2=tau_posterior, fill_last=False)
Plot 2d smooth KDE
.. plot::
:context: close-figs
>>> az.plot_kde(mu_posterior, values2=tau_posterior, contour=False)
"""
if ax is None:
ax = plt.gca()
figsize = ax.get_figure().get_size_inches()
figsize, *_, xt_labelsize, linewidth, markersize = _scale_fig_size(figsize, textsize, 1, 1)
if values2 is None:
if plot_kwargs is None:
plot_kwargs = {}
plot_kwargs.setdefault("color", "C0")
default_color = plot_kwargs.get("color")
if fill_kwargs is None:
fill_kwargs = {}
fill_kwargs.setdefault("color", default_color)
if rug_kwargs is None:
rug_kwargs = {}
rug_kwargs.setdefault("marker", "_" if rotated else "|")
rug_kwargs.setdefault("linestyle", "None")
rug_kwargs.setdefault("color", default_color)
rug_kwargs.setdefault("space", 0.2)
plot_kwargs.setdefault("linewidth", linewidth)
rug_kwargs.setdefault("markersize", 2 * markersize)
density, lower, upper = _fast_kde(values, cumulative, bw)
rug_space = max(density) * rug_kwargs.pop("space")
x = np.linspace(lower, upper, len(density))
if cumulative:
density_q = density
else:
density_q = np.cumsum(density)
fill_func = ax.fill_between
fill_x, fill_y = x, density
if rotated:
x, density = density, x
fill_func = ax.fill_betweenx
ax.tick_params(labelsize=xt_labelsize)
if rotated:
ax.set_xlim(0, auto=True)
rug_x, rug_y = np.zeros_like(values) - rug_space, values
else:
ax.set_ylim(0, auto=True)
rug_x, rug_y = values, np.zeros_like(values) - rug_space
if rug:
ax.plot(rug_x, rug_y, **rug_kwargs)
if quantiles is not None:
fill_kwargs.setdefault("alpha", 0.75)
idx = [np.sum(density_q < quant) for quant in quantiles]
fill_func(
fill_x,
fill_y,
where=np.isin(fill_x, fill_x[idx], invert=True, assume_unique=True),
**fill_kwargs
)
else:
fill_kwargs.setdefault("alpha", 0)
ax.plot(x, density, label=label, **plot_kwargs)
fill_func(fill_x, fill_y, **fill_kwargs)
if legend and label:
ax.legend()
else:
if contour_kwargs is None:
contour_kwargs = {}
contour_kwargs.setdefault("colors", "0.5")
gridsize = (128, 128) if contour else (256, 256)
density, xmin, xmax, ymin, ymax = _fast_kde_2d(values, values2, gridsize=gridsize)
g_s = complex(gridsize[0])
x_x, y_y = np.mgrid[xmin:xmax:g_s, ymin:ymax:g_s]
ax.grid(False)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
if contour:
qcfs = ax.contourf(x_x, y_y, density, antialiased=True)
if not fill_last:
qcfs.collections[0].set_alpha(0)
qcs = ax.contour(x_x, y_y, density, **contour_kwargs)
if not fill_last:
qcs.collections[0].set_alpha(0)
else:
ax.pcolormesh(x_x, y_y, density)
return ax
|
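For completeness, a tiny usage sketch of the 1D code path in plot_kde with synthetic draws, so it does not depend on the example datasets used in the docstring; quantiles, rug and label are the arguments documented above, and a working matplotlib backend is assumed.
import numpy as np
import arviz as az

rng = np.random.default_rng(0)
samples = rng.normal(size=1000)
ax = az.plot_kde(samples, quantiles=[0.25, 0.5, 0.75], rug=True, label="synthetic draws")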
3,253 |
def query(
selected_columns,
query,
params,
orderby=None,
offset=None,
limit=50,
reference_event=None,
referrer=None,
auto_fields=False,
use_aggregate_conditions=False,
conditions=None,
):
"""
High-level API for doing arbitrary user queries against events.
This function operates on the Discover public event schema and
virtual fields/aggregate functions for selected columns and
conditions are supported through this function.
The resulting list will have all internal field names mapped
back into their public schema names.
selected_columns (Sequence[str]) List of public aliases to fetch.
query (str) Filter query string to create conditions from.
params (Dict[str, str]) Filtering parameters with start, end, project_id, environment
orderby (None|str|Sequence[str]) The field to order results by.
offset (None|int) The record offset to read.
limit (int) The number of records to fetch.
reference_event (ReferenceEvent) A reference event object. Used to generate additional
conditions based on the provided reference.
referrer (str|None) A referrer string to help locate the origin of this query.
auto_fields (bool) Set to true to have project + eventid fields automatically added.
conditions (Sequence[any]) List of conditions that are passed directly to snuba without
any additional processing.
"""
if not selected_columns:
raise InvalidSearchQuery("No columns selected")
else:
# We clobber this value throughout this code, so copy the value
selected_columns = [c for c in selected_columns]
with sentry_sdk.start_span(
op="discover.discover", description="query.filter_transform"
) as span:
span.set_data("query", query)
snuba_filter = get_filter(query, params)
if not use_aggregate_conditions:
snuba_filter.having = []
# We need to run a separate query to be able to properly bucket the values for the histogram
# Do that here, and format the bucket number in to the columns before passing it through
# to event search.
idx = 0
function_translations = {}
for col in selected_columns:
if col.startswith("histogram("):
with sentry_sdk.start_span(
op="discover.discover", description="query.histogram_calculation"
) as span:
span.set_data("histogram", col)
histogram_column = find_histogram_buckets(col, params, snuba_filter.conditions)
selected_columns[idx] = histogram_column
snuba_name = get_function_alias(histogram_column)
sentry_name = get_function_alias(col)
function_translations[snuba_name] = sentry_name
# Since we're completely renaming the histogram function, we need to also check if we are
# ordering by the histogram values, and change that.
if orderby is not None:
orderby = list(orderby) if isinstance(orderby, (list, tuple)) else [orderby]
for i, ordering in enumerate(orderby):
if sentry_name == ordering.lstrip("-"):
ordering = "{}{}".format(
"-" if ordering.startswith("-") else "", snuba_name
)
orderby[i] = ordering
break
idx += 1
with sentry_sdk.start_span(op="discover.discover", description="query.field_translations"):
if orderby is not None:
orderby = list(orderby) if isinstance(orderby, (list, tuple)) else [orderby]
snuba_filter.orderby = [get_function_alias(o) for o in orderby]
snuba_filter.update_with(
resolve_field_list(selected_columns, snuba_filter, auto_fields=auto_fields)
)
if reference_event:
ref_conditions = create_reference_event_conditions(reference_event)
if ref_conditions:
snuba_filter.conditions.extend(ref_conditions)
# Resolve the public aliases into the discover dataset names.
snuba_filter, translated_columns = resolve_discover_aliases(
snuba_filter, function_translations
)
# Make sure that any aggregate conditions are also in the selected columns
for having_clause in snuba_filter.having:
# The first element of the having can be an alias, or a nested array of functions. Loop through to make sure
# any referenced functions are in the aggregations.
if isinstance(having_clause[0], (list, tuple)):
# Functions are of the form [fn, [args]]
args_to_check = [[having_clause[0]]]
conditions_not_in_aggregations = []
while len(args_to_check) > 0:
args = args_to_check.pop()
for arg in args:
if arg[0] in [SNUBA_AND, SNUBA_OR]:
args_to_check.extend(arg[1])
else:
alias = arg[1][0]
found = any(
alias == agg_clause[-1] for agg_clause in snuba_filter.aggregations
)
if not found:
conditions_not_in_aggregations.append(alias)
if len(conditions_not_in_aggregations) > 0:
raise InvalidSearchQuery(
u"Aggregate(s) {} used in a condition but are not in the selected columns.".format(
", ".join(conditions_not_in_aggregations)
)
)
else:
found = any(
having_clause[0] == agg_clause[-1] for agg_clause in snuba_filter.aggregations
)
if not found:
raise InvalidSearchQuery(
u"Aggregate {} used in a condition but is not a selected column.".format(
having_clause[0]
)
)
if conditions is not None:
snuba_filter.conditions.extend(conditions)
with sentry_sdk.start_span(op="discover.discover", description="query.snuba_query"):
result = raw_query(
start=snuba_filter.start,
end=snuba_filter.end,
groupby=snuba_filter.groupby,
conditions=snuba_filter.conditions,
aggregations=snuba_filter.aggregations,
selected_columns=snuba_filter.selected_columns,
filter_keys=snuba_filter.filter_keys,
having=snuba_filter.having,
orderby=snuba_filter.orderby,
dataset=Dataset.Discover,
limit=limit,
offset=offset,
referrer=referrer,
)
with sentry_sdk.start_span(
op="discover.discover", description="query.transform_results"
) as span:
span.set_data("result_count", len(result.get("data", [])))
return transform_results(result, translated_columns, snuba_filter, selected_columns)
|
def query(
selected_columns,
query,
params,
orderby=None,
offset=None,
limit=50,
reference_event=None,
referrer=None,
auto_fields=False,
use_aggregate_conditions=False,
conditions=None,
):
"""
High-level API for doing arbitrary user queries against events.
This function operates on the Discover public event schema and
virtual fields/aggregate functions for selected columns and
conditions are supported through this function.
The resulting list will have all internal field names mapped
back into their public schema names.
selected_columns (Sequence[str]) List of public aliases to fetch.
query (str) Filter query string to create conditions from.
params (Dict[str, str]) Filtering parameters with start, end, project_id, environment
orderby (None|str|Sequence[str]) The field to order results by.
offset (None|int) The record offset to read.
limit (int) The number of records to fetch.
reference_event (ReferenceEvent) A reference event object. Used to generate additional
conditions based on the provided reference.
referrer (str|None) A referrer string to help locate the origin of this query.
auto_fields (bool) Set to true to have project + eventid fields automatically added.
conditions (Sequence[any]) List of conditions that are passed directly to snuba without
any additional processing.
"""
if not selected_columns:
raise InvalidSearchQuery("No columns selected")
else:
# We clobber this value throughout this code, so copy the value
selected_columns = selected_columns[:]
with sentry_sdk.start_span(
op="discover.discover", description="query.filter_transform"
) as span:
span.set_data("query", query)
snuba_filter = get_filter(query, params)
if not use_aggregate_conditions:
snuba_filter.having = []
# We need to run a separate query to be able to properly bucket the values for the histogram
# Do that here, and format the bucket number in to the columns before passing it through
# to event search.
idx = 0
function_translations = {}
for col in selected_columns:
if col.startswith("histogram("):
with sentry_sdk.start_span(
op="discover.discover", description="query.histogram_calculation"
) as span:
span.set_data("histogram", col)
histogram_column = find_histogram_buckets(col, params, snuba_filter.conditions)
selected_columns[idx] = histogram_column
snuba_name = get_function_alias(histogram_column)
sentry_name = get_function_alias(col)
function_translations[snuba_name] = sentry_name
# Since we're completely renaming the histogram function, we need to also check if we are
# ordering by the histogram values, and change that.
if orderby is not None:
orderby = list(orderby) if isinstance(orderby, (list, tuple)) else [orderby]
for i, ordering in enumerate(orderby):
if sentry_name == ordering.lstrip("-"):
ordering = "{}{}".format(
"-" if ordering.startswith("-") else "", snuba_name
)
orderby[i] = ordering
break
idx += 1
with sentry_sdk.start_span(op="discover.discover", description="query.field_translations"):
if orderby is not None:
orderby = list(orderby) if isinstance(orderby, (list, tuple)) else [orderby]
snuba_filter.orderby = [get_function_alias(o) for o in orderby]
snuba_filter.update_with(
resolve_field_list(selected_columns, snuba_filter, auto_fields=auto_fields)
)
if reference_event:
ref_conditions = create_reference_event_conditions(reference_event)
if ref_conditions:
snuba_filter.conditions.extend(ref_conditions)
# Resolve the public aliases into the discover dataset names.
snuba_filter, translated_columns = resolve_discover_aliases(
snuba_filter, function_translations
)
# Make sure that any aggregate conditions are also in the selected columns
for having_clause in snuba_filter.having:
# The first element of the having can be an alias, or a nested array of functions. Loop through to make sure
# any referenced functions are in the aggregations.
if isinstance(having_clause[0], (list, tuple)):
# Functions are of the form [fn, [args]]
args_to_check = [[having_clause[0]]]
conditions_not_in_aggregations = []
while len(args_to_check) > 0:
args = args_to_check.pop()
for arg in args:
if arg[0] in [SNUBA_AND, SNUBA_OR]:
args_to_check.extend(arg[1])
else:
alias = arg[1][0]
found = any(
alias == agg_clause[-1] for agg_clause in snuba_filter.aggregations
)
if not found:
conditions_not_in_aggregations.append(alias)
if len(conditions_not_in_aggregations) > 0:
raise InvalidSearchQuery(
u"Aggregate(s) {} used in a condition but are not in the selected columns.".format(
", ".join(conditions_not_in_aggregations)
)
)
else:
found = any(
having_clause[0] == agg_clause[-1] for agg_clause in snuba_filter.aggregations
)
if not found:
raise InvalidSearchQuery(
u"Aggregate {} used in a condition but is not a selected column.".format(
having_clause[0]
)
)
if conditions is not None:
snuba_filter.conditions.extend(conditions)
with sentry_sdk.start_span(op="discover.discover", description="query.snuba_query"):
result = raw_query(
start=snuba_filter.start,
end=snuba_filter.end,
groupby=snuba_filter.groupby,
conditions=snuba_filter.conditions,
aggregations=snuba_filter.aggregations,
selected_columns=snuba_filter.selected_columns,
filter_keys=snuba_filter.filter_keys,
having=snuba_filter.having,
orderby=snuba_filter.orderby,
dataset=Dataset.Discover,
limit=limit,
offset=offset,
referrer=referrer,
)
with sentry_sdk.start_span(
op="discover.discover", description="query.transform_results"
) as span:
span.set_data("result_count", len(result.get("data", [])))
return transform_results(result, translated_columns, snuba_filter, selected_columns)
|
53,526 |
def find_even_number(lst):
for x in lst:
if x % 2 == 0:
return x
print("Did not find an even number")
|
def find_even_number(numbers):
for x in numbers:
if x % 2 == 0:
return x
print("Did not find an even number")
|
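One behavioural detail of the helper above worth noting: when no even number is present it only prints a message and implicitly returns None, so callers should be ready for that. A quick check with made-up inputs:
print(find_even_number([7, 9, 10, 12]))  # 10
result = find_even_number([1, 3, 5])     # prints "Did not find an even number"
print(result)                            # None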
23,773 |
def write_generators(conanfile, path, output):
""" produces auxiliary files, required to build a project or a package.
"""
for generator_name in set(conanfile.generators):
try:
generator_class = registered_generators[generator_name]
except KeyError:
raise ConanException("Invalid generator '%s'. Available types: %s" %
(generator_name, ", ".join(registered_generators.available)))
try:
generator = generator_class(conanfile)
except TypeError:
# To allow old-style generator packages to work (e.g. premake)
output.warn("Generator %s failed with new __init__(), trying old one")
generator = generator_class(conanfile.deps_cpp_info, conanfile.cpp_info)
try:
generator.output_path = path
content = generator.content
if isinstance(content, dict):
if generator.filename:
output.warn("Generator %s is multifile. Property 'filename' not used"
% (generator_name,))
for k, v in content.items():
if generator.normalize: # To not break existing behavior, to be removed 2.0
v = normalize(v)
output.info("Generator %s created %s" % (generator_name, k))
save(join(path, k), v, only_if_modified=True)
else:
content = normalize(content)
output.info("Generator %s created %s" % (generator_name, generator.filename))
save(join(path, generator.filename), content, only_if_modified=True)
except Exception as e:
if get_env("CONAN_VERBOSE_TRACEBACK", False):
output.error(traceback.format_exc())
output.error("Generator %s(file:%s) failed\n%s"
% (generator_name, generator.filename, str(e)))
raise ConanException(e)
|
def write_generators(conanfile, path, output):
""" produces auxiliary files, required to build a project or a package.
"""
for generator_name in sorted(set(conanfile.generators)):
try:
generator_class = registered_generators[generator_name]
except KeyError:
raise ConanException("Invalid generator '%s'. Available types: %s" %
(generator_name, ", ".join(registered_generators.available)))
try:
generator = generator_class(conanfile)
except TypeError:
# To allow old-style generator packages to work (e.g. premake)
output.warn("Generator %s failed with new __init__(), trying old one")
generator = generator_class(conanfile.deps_cpp_info, conanfile.cpp_info)
try:
generator.output_path = path
content = generator.content
if isinstance(content, dict):
if generator.filename:
output.warn("Generator %s is multifile. Property 'filename' not used"
% (generator_name,))
for k, v in content.items():
if generator.normalize: # To not break existing behavior, to be removed 2.0
v = normalize(v)
output.info("Generator %s created %s" % (generator_name, k))
save(join(path, k), v, only_if_modified=True)
else:
content = normalize(content)
output.info("Generator %s created %s" % (generator_name, generator.filename))
save(join(path, generator.filename), content, only_if_modified=True)
except Exception as e:
if get_env("CONAN_VERBOSE_TRACEBACK", False):
output.error(traceback.format_exc())
output.error("Generator %s(file:%s) failed\n%s"
% (generator_name, generator.filename, str(e)))
raise ConanException(e)
|
25,181 |
def is_old_setuptools_namespace_package(modname: str) -> bool:
"""Check for old types of namespace setuptools packages
See https://setuptools.pypa.io/en/latest/pkg_resources.html and
https://packaging.python.org/en/latest/guides/packaging-namespace-packages/
Because pkg_resources is a very large import we only do so if explicitly necessary
"""
try:
import pkg_resources # pylint: disable=import-outside-toplevel
except ImportError:
pkg_resources = None # type: ignore[assignment]
return (
pkg_resources is not None
and hasattr(pkg_resources, "_namespace_packages")
and modname in pkg_resources._namespace_packages # type: ignore[attr-defined]
)
|
def is_old_setuptools_namespace_package(modname: str) -> bool:
"""Check for old types of namespace setuptools packages
See https://setuptools.pypa.io/en/latest/pkg_resources.html and
https://packaging.python.org/en/latest/guides/packaging-namespace-packages/
Because pkg_resources is slow to import we only do so if absolutely necessary
"""
try:
import pkg_resources # pylint: disable=import-outside-toplevel
except ImportError:
pkg_resources = None # type: ignore[assignment]
return (
pkg_resources is not None
and hasattr(pkg_resources, "_namespace_packages")
and modname in pkg_resources._namespace_packages # type: ignore[attr-defined]
)
|
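The attribute probed above is pkg_resources' private registry of legacy namespace packages; a standalone sketch of the same lookup is below. Whether any given name appears depends entirely on what is installed locally, and _namespace_packages remains a private, undocumented attribute.
import pkg_resources

registry = getattr(pkg_resources, "_namespace_packages", {}) or {}
print("zope" in registry)  # True only if an old-style namespace package registered it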
2,881 |
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array(
[[0.5, 0.5], [0.1, 0.9], [0.01, 0.99], [0.9, 0.1], [0.75, 0.25], [0.001, 0.999]]
)
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > 0.5
loss = log_loss(y_true, y_pred, normalize=True, eps=0.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, 0.1, 0.9)))
# raise error if number of classes are not equal.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
with pytest.raises(ValueError):
log_loss(y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
# test labels option
y_true = [2, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5]]
y_score = np.array([[0.1, 0.9], [0.1, 0.9]])
error_str = (
r"y_true contains only one label \(2\). Please provide "
r"the true labels explicitly through the labels argument."
)
with pytest.raises(ValueError, match=error_str):
log_loss(y_true, y_pred)
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.2, 0.3]]
error_str = "Found input variables with inconsistent numbers of samples: [3, 2]"
(ValueError, error_str, log_loss, y_true, y_pred)
# works when the labels argument is used
true_log_loss = -np.mean(np.log(y_score[:, 1]))
calculated_log_loss = log_loss(y_true, y_score, labels=[1, 2])
assert_almost_equal(calculated_log_loss, true_log_loss)
# ensure labels work when len(np.unique(y_true)) != y_pred.shape[1]
y_true = [1, 2, 2]
y_score2 = [[0.2, 0.7, 0.3], [0.6, 0.5, 0.3], [0.3, 0.9, 0.1]]
loss = log_loss(y_true, y_score2, labels=[1, 2, 3])
assert_almost_equal(loss, 1.0630345, decimal=6)
# ensure np.float32 and np.float16 inputs give correct output
for dtype in [np.float16, np.float32]:
y_true = [1, 2, 2]
y_score2 = np.array(
[[0.2, 0.7, 0.3], [0.6, 0.5, 0.3], [0.3, 0.9, 0.1]], dtype=dtype
)
loss = log_loss(y_true, y_score2, labels=[1, 2, 3])
assert_almost_equal(loss, 1.0630345, decimal=3)
|
def test_log_loss():
# binary case with symbolic labels ("no" < "yes")
y_true = ["no", "no", "no", "yes", "yes", "yes"]
y_pred = np.array(
[[0.5, 0.5], [0.1, 0.9], [0.01, 0.99], [0.9, 0.1], [0.75, 0.25], [0.001, 0.999]]
)
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.8817971)
# multiclass case; adapted from http://bit.ly/RJJHWA
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7, 0.1], [0.6, 0.2, 0.2], [0.6, 0.1, 0.3]]
loss = log_loss(y_true, y_pred, normalize=True)
assert_almost_equal(loss, 0.6904911)
# check that we got all the shapes and axes right
# by doubling the length of y_true and y_pred
y_true *= 2
y_pred *= 2
loss = log_loss(y_true, y_pred, normalize=False)
assert_almost_equal(loss, 0.6904911 * 6, decimal=6)
# check eps and handling of absolute zero and one probabilities
y_pred = np.asarray(y_pred) > 0.5
loss = log_loss(y_true, y_pred, normalize=True, eps=0.1)
assert_almost_equal(loss, log_loss(y_true, np.clip(y_pred, 0.1, 0.9)))
# raise error if number of classes are not equal.
y_true = [1, 0, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1]]
with pytest.raises(ValueError):
log_loss(y_true, y_pred)
# case when y_true is a string array object
y_true = ["ham", "spam", "spam", "ham"]
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.4, 0.1], [0.7, 0.2]]
loss = log_loss(y_true, y_pred)
assert_almost_equal(loss, 1.0383217, decimal=6)
# test labels option
y_true = [2, 2]
y_pred = [[0.2, 0.7], [0.6, 0.5]]
y_score = np.array([[0.1, 0.9], [0.1, 0.9]])
error_str = (
r"y_true contains only one label \(2\). Please provide "
r"the true labels explicitly through the labels argument."
)
with pytest.raises(ValueError, match=error_str):
log_loss(y_true, y_pred)
y_pred = [[0.2, 0.7], [0.6, 0.5], [0.2, 0.3]]
error_str = "Found input variables with inconsistent numbers of samples: [3, 2]"
(ValueError, error_str, log_loss, y_true, y_pred)
# works when the labels argument is used
true_log_loss = -np.mean(np.log(y_score[:, 1]))
calculated_log_loss = log_loss(y_true, y_score, labels=[1, 2])
assert_almost_equal(calculated_log_loss, true_log_loss)
# ensure labels work when len(np.unique(y_true)) != y_pred.shape[1]
y_true = [1, 2, 2]
y_score2 = [[0.2, 0.7, 0.3], [0.6, 0.5, 0.3], [0.3, 0.9, 0.1]]
loss = log_loss(y_true, y_score2, labels=[1, 2, 3])
assert_almost_equal(loss, 1.0630345, decimal=6)
# ensure np.float32 and np.float16 inputs give correct output
for dtype in [np.float16, np.float32]:
y_true = [1, 2, 2]
y_score2 = np.array(
[[0.2, 0.7, 0.3], [0.6, 0.5, 0.3], [0.3, 0.9, 0.1]], dtype=dtype
)
loss = log_loss(y_true, y_score2, labels=[1, 2, 3], eps="auto")
assert_almost_equal(loss, 1.0630345, decimal=3)
|
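The 1.8817971 asserted in the binary case is just the mean negative log of the probability assigned to each true label (column 0 is "no", column 1 is "yes"); a quick numpy cross-check:
import numpy as np

p_true = np.array([0.5, 0.1, 0.01, 0.1, 0.25, 0.999])  # probability of the true class, row by row
print(-np.log(p_true).mean())                          # ~1.8817971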
24,620 |
def trilinear_approx(vspace, cell):
r"""
Returns a function whose input is a coordinate within a given grid cell
and returns the trilinearly approximated vector value at that particular
coordinate in that grid cell.
Parameters
----------
vspace: array_like
The vector space as constructed by the vector_space function which is
A 1 by 3 array with the first element containing the coordinates,
the second element containing the vector values,
and the third element containing the delta values for each dimension.
cell: array_like of integers
A grid cell, represented by a 1 by 3 array
of integers, which correspond to a grid cell
in the vector space.
Returns
-------
<class 'function'>
A function whose input is a coordinate within a given grid cell
and returns the trilinearly approximated vector value at that particular
coordinate in that grid cell.
Raises
------
This function does not raise any exceptions.
Warns
-----
This function does not raise any warnings.
Notes
-----
N/A
"""
# Calculating coefficients
ax, bx, cx, dx, ex, fx, gx, hx = trilinear_coeff_cal(vspace, cell)[0]
ay, by, cy, dy, ey, fy, gy, hy = trilinear_coeff_cal(vspace, cell)[1]
az, bz, cz, dz, ez, fz, gz, hz = trilinear_coeff_cal(vspace, cell)[2]
def approx_func(xInput, yInput, zInput):
Bx = (
ax
+ bx * xInput
+ cx * yInput
+ dx * zInput
+ ex * xInput * yInput
+ fx * xInput * zInput
+ gx * yInput * zInput
+ hx * xInput * yInput * zInput
)
By = (
ay
+ by * xInput
+ cy * yInput
+ dy * zInput
+ ey * xInput * yInput
+ fy * xInput * zInput
+ gy * yInput * zInput
+ hy * xInput * yInput * zInput
)
Bz = (
az
+ bz * xInput
+ cz * yInput
+ dz * zInput
+ ez * xInput * yInput
+ fz * xInput * zInput
+ gz * yInput * zInput
+ hz * xInput * yInput * zInput
)
return np.array([Bx, By, Bz])
return approx_func
|
def trilinear_approx(vspace, cell):
r"""
Return a function whose input is a coordinate within a given grid cell
and returns the trilinearly approximated vector value at that particular
coordinate in that grid cell.
Parameters
----------
vspace: array_like
The vector space as constructed by the vector_space function which is
A 1 by 3 array with the first element containing the coordinates,
the second element containing the vector values,
and the third element containing the delta values for each dimension.
cell: array_like of integers
A grid cell, represented by a 1 by 3 array
of integers, which correspond to a grid cell
in the vector space.
Returns
-------
<class 'function'>
A function whose input is a coordinate within a given grid cell
and returns the trilinearly approximated vector value at that particular
coordinate in that grid cell.
Raises
------
This function does not raise any exceptions.
Warns
-----
This function does not raise any warnings.
Notes
-----
N/A
"""
# Calculating coefficients
ax, bx, cx, dx, ex, fx, gx, hx = trilinear_coeff_cal(vspace, cell)[0]
ay, by, cy, dy, ey, fy, gy, hy = trilinear_coeff_cal(vspace, cell)[1]
az, bz, cz, dz, ez, fz, gz, hz = trilinear_coeff_cal(vspace, cell)[2]
def approx_func(xInput, yInput, zInput):
Bx = (
ax
+ bx * xInput
+ cx * yInput
+ dx * zInput
+ ex * xInput * yInput
+ fx * xInput * zInput
+ gx * yInput * zInput
+ hx * xInput * yInput * zInput
)
By = (
ay
+ by * xInput
+ cy * yInput
+ dy * zInput
+ ey * xInput * yInput
+ fy * xInput * zInput
+ gy * yInput * zInput
+ hy * xInput * yInput * zInput
)
Bz = (
az
+ bz * xInput
+ cz * yInput
+ dz * zInput
+ ez * xInput * yInput
+ fz * xInput * zInput
+ gz * yInput * zInput
+ hz * xInput * yInput * zInput
)
return np.array([Bx, By, Bz])
return approx_func
|
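The approximation itself is the standard trilinear polynomial in x, y and z with eight coefficients per field component; a standalone sketch with made-up coefficients (trilinear_coeff_cal normally supplies a..h separately for Bx, By and Bz):
a, b, c, d, e, f, g, h = 1.0, 0.5, -0.2, 0.1, 0.0, 0.3, -0.1, 0.05  # illustrative values only

def trilinear(x, y, z):
    # same eight-term form used for each component above
    return a + b * x + c * y + d * z + e * x * y + f * x * z + g * y * z + h * x * y * z

print(trilinear(0.25, 0.5, 0.75))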
30,460 |
def create_single_asset_result_and_enrich_endpoint_dict(asset, endpoint_dict, full_values):
asset_dict = {'ID': asset.get('id')}
if 'interfaces' in asset:
for interface in asset['interfaces']:
if full_values:
endpoint_dict['MACAddress'].append(interface['mac_address'])
for ip_address in interface['ip_addresses']:
endpoint_dict['IPAddress'].append(ip_address['value'])
if full_values:
if 'domain_id' in asset:
domain_name = get_domain_name(asset['domain_id'])
endpoint_dict['Domain'].append(domain_name)
# Adding values found in properties of the asset
enrich_dict_using_asset_properties(asset, asset_dict, endpoint_dict, full_values)
return asset_dict
|
def create_single_asset_result_and_enrich_endpoint_dict(asset, endpoint_dict, full_values):
asset_dict = {'ID': asset.get('id')}
if 'interfaces' in asset:
for interface in asset.get('interfaces', []):
if full_values:
endpoint_dict['MACAddress'].append(interface['mac_address'])
for ip_address in interface['ip_addresses']:
endpoint_dict['IPAddress'].append(ip_address['value'])
if full_values:
if 'domain_id' in asset:
domain_name = get_domain_name(asset['domain_id'])
endpoint_dict['Domain'].append(domain_name)
# Adding values found in properties of the asset
enrich_dict_using_asset_properties(asset, asset_dict, endpoint_dict, full_values)
return asset_dict
|
30,480 |
def get_general_paths(path, name_forpacks):
path_list = [
[path, '*'], #
['Packs', '*', name_forpacks, '*']
]
files = list()
for path in path_list:
files.extend(glob.glob(os.path.join(*path)))
return files
|
def get_general_paths(path, name_forpacks):
path_list = [
[path, '*'],
['Packs', '*', name_forpacks, '*']
]
files = list()
for path in path_list:
files.extend(glob.glob(os.path.join(*path)))
return files
|
10,425 |
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
key_name=dict(aliases=['keypair']),
id=dict(),
group=dict(type='list', aliases=['groups']),
group_id=dict(type='list'),
zone=dict(aliases=['aws_zone', 'ec2_zone']),
instance_type=dict(aliases=['type']),
spot_price=dict(),
spot_type=dict(default='one-time', choices=["one-time", "persistent"]),
spot_launch_group=dict(),
image=dict(),
kernel=dict(),
count=dict(type='int', default='1'),
monitoring=dict(type='bool', default=False),
ramdisk=dict(),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=300),
spot_wait_timeout=dict(type='int', default=600),
placement_group=dict(),
user_data=dict(),
instance_tags=dict(type='dict'),
vpc_subnet_id=dict(),
assign_public_ip=dict(type='bool'),
private_ip=dict(),
instance_profile_name=dict(),
instance_ids=dict(type='list', aliases=['instance_id']),
source_dest_check=dict(type='bool', default=None),
termination_protection=dict(type='bool', default=None),
state=dict(default='present', choices=['present', 'absent', 'running', 'restarted', 'stopped']),
instance_initiated_shutdown_behavior=dict(default='stop', choices=['stop', 'terminate']),
exact_count=dict(type='int', default=None),
count_tag=dict(type='raw'),
volumes=dict(type='list'),
ebs_optimized=dict(type='bool', default=False),
tenancy=dict(default='default', choices=['default', 'dedicated']),
network_interfaces=dict(type='list', aliases=['network_interface'])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[
# Can be uncommented when we finish the deprecation cycle.
# ['group', 'group_id'],
['exact_count', 'count'],
['exact_count', 'state'],
['exact_count', 'instance_ids'],
['network_interfaces', 'assign_public_ip'],
['network_interfaces', 'group'],
['network_interfaces', 'group_id'],
['network_interfaces', 'private_ip'],
['network_interfaces', 'vpc_subnet_id'],
],
)
if module.params.get('group') and module.params.get('group_id'):
module.deprecate(
msg='Support for passing both group and group_id has been deprecated. '
'Currently group_id is ignored, in future passing both will result in an error.',
version='2.14')
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
if module.params.get('region') or not module.params.get('ec2_url'):
ec2 = ec2_connect(module)
elif module.params.get('ec2_url'):
ec2 = connect_ec2_endpoint(ec2_url, **aws_connect_kwargs)
if 'region' not in aws_connect_kwargs:
aws_connect_kwargs['region'] = ec2.region
vpc = connect_vpc(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg="Failed to get connection: %s" % e.message, exception=traceback.format_exc())
tagged_instances = []
state = module.params['state']
if state == 'absent':
instance_ids = module.params['instance_ids']
if not instance_ids:
module.fail_json(msg='instance_ids list is required for absent state')
(changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids)
elif state in ('running', 'stopped'):
instance_ids = module.params.get('instance_ids')
instance_tags = module.params.get('instance_tags')
if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids)
(changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state, instance_tags)
elif state in ('restarted'):
instance_ids = module.params.get('instance_ids')
instance_tags = module.params.get('instance_tags')
if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids)
(changed, instance_dict_array, new_instance_ids) = restart_instances(module, ec2, instance_ids, state, instance_tags)
elif state == 'present':
# Changed is always set to true when provisioning new instances
if not module.params.get('image'):
module.fail_json(msg='image parameter is required for new instance')
if module.params.get('exact_count') is None:
(instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2, vpc)
else:
(tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2, vpc)
# Always return instances in the same order
if new_instance_ids:
new_instance_ids.sort()
if instance_dict_array:
instance_dict_array.sort(key=lambda x: x['id'])
if tagged_instances:
tagged_instances.sort(key=lambda x: x['id'])
module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances)
|
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
key_name=dict(aliases=['keypair']),
id=dict(),
group=dict(type='list', aliases=['groups']),
group_id=dict(type='list'),
zone=dict(aliases=['aws_zone', 'ec2_zone']),
instance_type=dict(aliases=['type']),
spot_price=dict(),
spot_type=dict(default='one-time', choices=["one-time", "persistent"]),
spot_launch_group=dict(),
image=dict(),
kernel=dict(),
count=dict(type='int', default='1'),
monitoring=dict(type='bool', default=False),
ramdisk=dict(),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=300),
spot_wait_timeout=dict(type='int', default=600),
placement_group=dict(),
user_data=dict(),
instance_tags=dict(type='dict'),
vpc_subnet_id=dict(),
assign_public_ip=dict(type='bool'),
private_ip=dict(),
instance_profile_name=dict(),
instance_ids=dict(type='list', aliases=['instance_id']),
source_dest_check=dict(type='bool', default=None),
termination_protection=dict(type='bool', default=None),
state=dict(default='present', choices=['present', 'absent', 'running', 'restarted', 'stopped']),
instance_initiated_shutdown_behavior=dict(default='stop', choices=['stop', 'terminate']),
exact_count=dict(type='int', default=None),
count_tag=dict(type='raw'),
volumes=dict(type='list'),
ebs_optimized=dict(type='bool', default=False),
tenancy=dict(default='default', choices=['default', 'dedicated']),
network_interfaces=dict(type='list', aliases=['network_interface'])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[
# Can be uncommented when we finish the deprecation cycle.
# ['group', 'group_id'],
['exact_count', 'count'],
['exact_count', 'state'],
['exact_count', 'instance_ids'],
['network_interfaces', 'assign_public_ip'],
['network_interfaces', 'group'],
['network_interfaces', 'group_id'],
['network_interfaces', 'private_ip'],
['network_interfaces', 'vpc_subnet_id'],
],
)
if module.params.get('group') and module.params.get('group_id'):
module.deprecate(
msg='Support for passing both group and group_id has been deprecated. '
'Currently group_id is ignored, in future passing both will result in an error',
version='2.14')
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
if module.params.get('region') or not module.params.get('ec2_url'):
ec2 = ec2_connect(module)
elif module.params.get('ec2_url'):
ec2 = connect_ec2_endpoint(ec2_url, **aws_connect_kwargs)
if 'region' not in aws_connect_kwargs:
aws_connect_kwargs['region'] = ec2.region
vpc = connect_vpc(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg="Failed to get connection: %s" % e.message, exception=traceback.format_exc())
tagged_instances = []
state = module.params['state']
if state == 'absent':
instance_ids = module.params['instance_ids']
if not instance_ids:
module.fail_json(msg='instance_ids list is required for absent state')
(changed, instance_dict_array, new_instance_ids) = terminate_instances(module, ec2, instance_ids)
elif state in ('running', 'stopped'):
instance_ids = module.params.get('instance_ids')
instance_tags = module.params.get('instance_tags')
if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids)
(changed, instance_dict_array, new_instance_ids) = startstop_instances(module, ec2, instance_ids, state, instance_tags)
elif state in ('restarted'):
instance_ids = module.params.get('instance_ids')
instance_tags = module.params.get('instance_tags')
if not (isinstance(instance_ids, list) or isinstance(instance_tags, dict)):
module.fail_json(msg='running list needs to be a list of instances or set of tags to run: %s' % instance_ids)
(changed, instance_dict_array, new_instance_ids) = restart_instances(module, ec2, instance_ids, state, instance_tags)
elif state == 'present':
# Changed is always set to true when provisioning new instances
if not module.params.get('image'):
module.fail_json(msg='image parameter is required for new instance')
if module.params.get('exact_count') is None:
(instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2, vpc)
else:
(tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2, vpc)
# Always return instances in the same order
if new_instance_ids:
new_instance_ids.sort()
if instance_dict_array:
instance_dict_array.sort(key=lambda x: x['id'])
if tagged_instances:
tagged_instances.sort(key=lambda x: x['id'])
module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances)
|
17,735 |
def changeDigit_base10_to_base62_alph_num(current_digit):
'''The supplimental digits for the base10_to_base62_alph_num function,
which Converts the base 10 to base 62 '''
    '''current_digit = the current digit for this base.
(i.e. in base10 it would be the one, ten, hundreds, or thousands places .....)'''
base62_No = 62
decimal = [10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61]
base62_Values = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J",
"K", "L", "M", "N", "O", "P", "Q", "R", "S", "T",
"U", "V", "W", "X", "Y", "Z", "a", "b", "c", "d",
"e", "f", "g", "h", "i", "j", "k", "l", "m", "n",
"o", "p", "q", "r", "s", "t", "u", "v", "w", "x",
"y", "z"]
for counter in range(int(base62_No-10)):
if current_digit == decimal[counter - 1]:
current_digit = base62_Values[counter - 1]
return current_digit
|
def changeDigit_base10_to_base62_alph_num(current_digit):
'''The supplemental digits for the base10_to_base62_alph_num function,
which Converts the base 10 to base 62 '''
    '''current_digit = the current digit for this base.
(i.e. in base10 it would be the one, ten, hundreds, or thousands places .....)'''
base62_No = 62
decimal = [10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61]
base62_Values = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J",
"K", "L", "M", "N", "O", "P", "Q", "R", "S", "T",
"U", "V", "W", "X", "Y", "Z", "a", "b", "c", "d",
"e", "f", "g", "h", "i", "j", "k", "l", "m", "n",
"o", "p", "q", "r", "s", "t", "u", "v", "w", "x",
"y", "z"]
for counter in range(int(base62_No-10)):
if current_digit == decimal[counter - 1]:
current_digit = base62_Values[counter - 1]
return current_digit
|
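A quick sanity check of the digit mapping above: values 0-9 fall through unchanged, 10 maps to 'A', 35 to 'Z' and 61 to 'z' (the counter - 1 indexing still pairs each decimal value with the right letter because decimal[-1] lines up with base62_Values[-1]):
for digit in (5, 10, 35, 61):
    print(digit, "->", changeDigit_base10_to_base62_alph_num(digit))
# 5 -> 5, 10 -> A, 35 -> Z, 61 -> z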
31,010 |
def test_get_multiple_packs_dirs(requests_mock):
"""
Scenario: Get a pack dir name from pull request files
Given
- A pull request
- A file in the pull request is in a pack
When
- Getting the pack dir name from a pull request
Then
- Ensure the pack dir name is returned correctly
"""
branch = 'contrib_branch'
pr_number = '1'
repo = 'contrib_repo'
requests_mock.get(
'https://api.github.com/repos/demisto/content/pulls/1/files',
[{'json': github_response_1, 'status_code': 200},
{'json': github_response_2, 'status_code': 200},
{'json': github_response_3, 'status_code': 200},
{'json': github_response_4, 'status_code': 200}]
)
pack_dir = get_pack_dir(branch, pr_number, repo)
assert pack_dir == ['Slack', 'Slack1']
|
def test_get_multiple_packs_dirs(requests_mock):
"""
Scenario: Get a pack dir name from pull request files
Given
- A pull request
- A file in the pull request is in a pack
When
- Getting the pack dir names from a pull request
Then
- Ensure the pack dir name is returned correctly
"""
branch = 'contrib_branch'
pr_number = '1'
repo = 'contrib_repo'
requests_mock.get(
'https://api.github.com/repos/demisto/content/pulls/1/files',
[{'json': github_response_1, 'status_code': 200},
{'json': github_response_2, 'status_code': 200},
{'json': github_response_3, 'status_code': 200},
{'json': github_response_4, 'status_code': 200}]
)
pack_dir = get_pack_dir(branch, pr_number, repo)
assert pack_dir == ['Slack', 'Slack1']
|
13,405 |
def test_03_verify_the_first_pool_created_with_encrypted_root_dataset_become_the_system_dataset(request, pool_data):
pool_disk = [POST('/disk/get_unused/').json()[0]['name']]
payload = {
'name': 'encrypted',
'encryption': True,
'encryption_options': {
'algorithm': 'AES-128-CCM',
'passphrase': 'my_pool_passphrase',
},
'topology': {
'data': [
{'type': 'STRIPE', 'disks': pool_disk}
],
}
}
results = POST('/pool/', payload)
assert results.status_code == 200, results.text
job_id = results.json()
job_status = wait_on_job(job_id, 240)
assert job_status['state'] == 'SUCCESS', str(job_status['results'])
pool_data['encrypted'] = job_status['results']['result']
results = GET("/systemdataset/")
assert results.status_code == 200, results.text
assert isinstance(results.json(), dict), results.text
assert results.json()['pool'] == 'encrypted', results.text
    assert results.json()['basename'] == 'encrypted/.system', results.text
|
def test_03_verify_sysds_is_moved_after_first_pool_is_created(request, pool_data):
pool_disk = [POST('/disk/get_unused/').json()[0]['name']]
payload = {
'name': 'encrypted',
'encryption': True,
'encryption_options': {
'algorithm': 'AES-128-CCM',
'passphrase': 'my_pool_passphrase',
},
'topology': {
'data': [
{'type': 'STRIPE', 'disks': pool_disk}
],
}
}
results = POST('/pool/', payload)
assert results.status_code == 200, results.text
job_id = results.json()
job_status = wait_on_job(job_id, 240)
assert job_status['state'] == 'SUCCESS', str(job_status['results'])
pool_data['encrypted'] = job_status['results']['result']
results = GET("/systemdataset/")
assert results.status_code == 200, results.text
assert isinstance(results.json(), dict), results.text
assert results.json()['pool'] == 'encrypted', results.text
    assert results.json()['basename'] == 'encrypted/.system', results.text
|
22,091 |
def _ntlm_authenticate_info(request):
"""
Extract host information in an NTLM_AUTH message
"""
if (len(request) < 52):
LOGGER.warning("NTLM message is too short (%d) but should be at least "
"52 char long", len(request))
return None
value = []
offset, ln = struct.unpack('IH', request[32:36] + request[28:30])
if ln > 0:
value.append("domain:" + \
encode_b64(_extract_substr(request, offset, ln)).decode())
has_version = False
# Flags are not present in a NTLM_AUTH message when the data block starts
# before index 64
if offset >= 64 and len(request) > 64:
flags, = struct.unpack('I', request[60:64])
has_version = flags & flag_version
off, ln = struct.unpack('IH', request[40:44] + request[36:38])
if ln > 0:
value.append("user-name:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
off, ln = struct.unpack('IH', request[48:52] + request[44:46])
if ln > 0:
value.append("workstation:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
# Get OS Version if the `Negotiate Version` is set
# (NTLM_AUTH messages with a data block starting before index 72 do not
# contain information on the version)
if offset >= 72 and len(request) > 72 and has_version:
maj, minor, bld, ntlm_ver = struct.unpack('BBHB', request[64:65] +
request[65:66] +
request[66:68] +
request[71:72])
version = "{}.{}.{}".format(maj, minor, bld).encode()
value.append("ntlm-os:{}".format(encode_b64(version).decode()))
value.append("ntlm-version:{}".format(ntlm_ver))
return 'NTLM ' + ','.join(value)
|
def _ntlm_authenticate_info(request):
"""
Extract host information in an NTLM_AUTH message
"""
if (len(request) < 52):
LOGGER.warning("NTLM message is too short (%d) but should be at least "
"52 char long", len(request))
return None
value = []
offset, ln = struct.unpack('IH', request[32:36] + request[28:30])
if ln > 0:
value.append("domain:" + \
encode_b64(_extract_substr(request, offset, ln)).decode())
has_version = False
# Flags are not present in a NTLM_AUTH message when the data block starts
# before index 64
if offset >= 64 and len(request) > 64:
flags, = struct.unpack('I', request[60:64])
has_version = flags & flag_version
off, ln = struct.unpack('IH', request[40:44] + request[36:38])
if ln > 0:
value.append("user-name:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
off, ln = struct.unpack('IH', request[48:52] + request[44:46])
if ln > 0:
value.append("workstation:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
# Get OS Version if the `Negotiate Version` is set
# (NTLM_AUTH messages with a data block starting before index 72 do not
# contain information on the version)
if offset >= 72 and len(request) > 72 and has_version:
maj, minor, bld, ntlm_ver = struct.unpack('BBHB', request[64:65] +
request[65:66] +
request[66:68] +
request[71:72])
version = "{}.{}.{}".format(maj, minor, bld).encode()
value.append("ntlm-os:{}".format(encode_b64(version).decode()))
value.append("ntlm-version:{}".format(ntlm_ver))
return 'NTLM %s' % ','.join(value)
|
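A self-contained sketch of how the offset/length pairs above are laid out, using a hand-built NTLM_AUTH-style buffer; the stubs for LOGGER, flag_version, encode_b64 and _extract_substr are assumptions standing in for the module's real helpers:
import base64
import logging
import struct

LOGGER = logging.getLogger(__name__)   # stand-in for the module logger
flag_version = 0x02000000              # assumed NTLMSSP_NEGOTIATE_VERSION flag
encode_b64 = base64.b64encode          # assumed to mirror the module helper

def _extract_substr(req, off, ln):     # assumed helper: raw byte slice
    return req[off:off + ln]

req = bytearray(83)
struct.pack_into('H', req, 28, 9)      # domain length
struct.pack_into('I', req, 32, 64)     # domain offset (payload starts at 64, so no version block)
struct.pack_into('H', req, 36, 5)      # user-name length
struct.pack_into('I', req, 40, 73)     # user-name offset
struct.pack_into('H', req, 44, 5)      # workstation length
struct.pack_into('I', req, 48, 78)     # workstation offset
req[64:73] = b'WORKGROUP'
req[73:78] = b'alice'
req[78:83] = b'HOST1'

print(_ntlm_authenticate_info(bytes(req)))
# NTLM domain:V09SS0dST1VQ,user-name:YWxpY2U=,workstation:SE9TVDE=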
10,608 |
def do_vault(data, secret, vaultid='default', wrap_object=False):
if not isinstance(secret, string_types):
raise AnsibleFilterTypeError("Secret passed is required to be as tring, instead we got: %s" % type(secret))
if not isinstance(data, string_types):
raise AnsibleFilterTypeError("Can only vault strings, instead we got: %s" % type(data))
vault = ''
vs = VaultSecret(to_bytes(secret))
vl = VaultLib()
try:
vault = vl.encrypt(to_bytes(data), vs, vaultid)
except Exception as e:
raise AnsibleFilterError("Unable to encrypt: %s" % to_native(e), orig_exc=e)
if wrap_object:
vault = AnsibleVaultEncryptedUnicode(vault)
else:
vault = to_native(vault)
return vault
|
def do_vault(data, secret, vaultid='default', wrap_object=False):
if not isinstance(secret, string_types):
raise AnsibleFilterTypeError("Secret passed is required to be a string, instead we got: %s" % type(secret))
if not isinstance(data, string_types):
raise AnsibleFilterTypeError("Can only vault strings, instead we got: %s" % type(data))
vault = ''
vs = VaultSecret(to_bytes(secret))
vl = VaultLib()
try:
vault = vl.encrypt(to_bytes(data), vs, vaultid)
except Exception as e:
raise AnsibleFilterError("Unable to encrypt: %s" % to_native(e), orig_exc=e)
if wrap_object:
vault = AnsibleVaultEncryptedUnicode(vault)
else:
vault = to_native(vault)
return vault
|
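A minimal usage sketch for the filter above, assuming it is evaluated in a context where its Ansible imports (VaultLib, VaultSecret, to_bytes, to_native, string_types and the filter exceptions) are available:
ciphertext = do_vault("top secret value", "my-vault-password")
print(ciphertext.splitlines()[0])   # the $ANSIBLE_VAULT;... header line

wrapped = do_vault("top secret value", "my-vault-password", wrap_object=True)
print(type(wrapped).__name__)       # AnsibleVaultEncryptedUnicode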
2,684 |
def sort_by_row_values(graph, copy=True):
"""Sort a sparse graph such that each row is stored with increasing values.
Parameters
----------
graph : sparse matrix, (n_samples, n_samples)
Distance matrix to other samples, where only non-zero elements are
considered neighbors. Matrix is converted to CSR format if not already.
copy : bool, optional (default=True)
If True, the graph is copied before sorting. If False, the sorting is
performed inplace. If graph is not of CSR format, a copy is always
returned.
Returns
-------
graph : sparse matrix, (n_samples, n_samples)
Distance matrix to other samples, where only non-zero elements are
considered neighbors. Matrix is of CSR format.
"""
if graph.format not in ("csr", "csc", "coo", "lil"):
raise TypeError(
"Sparse matrix in {!r} format is not supported due to "
"its handling of explicit zeros".format(graph.format)
)
elif graph.format != "csr":
graph = graph.asformat("csr")
elif copy:
graph = graph.copy()
# if each sample has the same number of provided neighbors
row_nnz = np.diff(graph.indptr)
if row_nnz.max() == row_nnz.min():
n_samples = graph.shape[0]
distances = graph.data.reshape(n_samples, -1)
order = np.argsort(distances, kind="mergesort")
order += np.arange(n_samples)[:, None] * row_nnz[0]
order = order.ravel()
graph.data = graph.data[order]
graph.indices = graph.indices[order]
else:
for start, stop in zip(graph.indptr, graph.indptr[1:]):
order = np.argsort(graph.data[start:stop], kind="mergesort")
graph.data[start:stop] = graph.data[start:stop][order]
graph.indices[start:stop] = graph.indices[start:stop][order]
return graph
|
def sort_by_row_values(graph, copy=True):
"""Sort a sparse graph such that each row is stored with increasing values.
Parameters
----------
graph : sparse matrix of shape (n_samples, n_samples)
Distance matrix to other samples, where only non-zero elements are
considered neighbors. Matrix is converted to CSR format if not already.
copy : bool, optional (default=True)
If True, the graph is copied before sorting. If False, the sorting is
performed inplace. If graph is not of CSR format, a copy is always
returned.
Returns
-------
graph : sparse matrix, (n_samples, n_samples)
Distance matrix to other samples, where only non-zero elements are
considered neighbors. Matrix is of CSR format.
"""
if graph.format not in ("csr", "csc", "coo", "lil"):
raise TypeError(
"Sparse matrix in {!r} format is not supported due to "
"its handling of explicit zeros".format(graph.format)
)
elif graph.format != "csr":
graph = graph.asformat("csr")
elif copy:
graph = graph.copy()
# if each sample has the same number of provided neighbors
row_nnz = np.diff(graph.indptr)
if row_nnz.max() == row_nnz.min():
n_samples = graph.shape[0]
distances = graph.data.reshape(n_samples, -1)
order = np.argsort(distances, kind="mergesort")
order += np.arange(n_samples)[:, None] * row_nnz[0]
order = order.ravel()
graph.data = graph.data[order]
graph.indices = graph.indices[order]
else:
for start, stop in zip(graph.indptr, graph.indptr[1:]):
order = np.argsort(graph.data[start:stop], kind="mergesort")
graph.data[start:stop] = graph.data[start:stop][order]
graph.indices[start:stop] = graph.indices[start:stop][order]
return graph
|
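An illustrative call, assuming numpy and scipy are importable alongside the function above; each row of the CSR matrix ends up stored with its values in increasing order:
import numpy as np
from scipy.sparse import csr_matrix

dist = csr_matrix(np.array([[0.0, 3.0, 1.0],
                            [2.0, 0.0, 5.0],
                            [4.0, 2.0, 0.0]]))
sorted_graph = sort_by_row_values(dist)
print(sorted_graph.data)     # [1. 3. 2. 5. 2. 4.] -- per-row values now ascending
print(sorted_graph.indices)  # column indices reordered to match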
32,614 |
def parse_file(get_file_path_res: dict[str, str], delimiter: str = ",") -> List[str]:
"""
Parses the given file line by line to list.
:param delimiter: delimiter by which the content of the list is seperated.
:param get_file_path_res: Object contains file ID, path and name
:return: bulk list of the elements in the file
"""
bulk_list = []
with open(get_file_path_res['path']) as file:
reader = csv.reader(file, delimiter=delimiter, skipinitialspace=True)
for row in reader:
for col in row:
bulk_list += col.split()
return bulk_list
|
def parse_file(get_file_path_res: dict[str, str], delimiter: str = ",") -> List[str]:
"""
Parses the given file line by line to list.
:param delimiter: delimiter by which the content of the list is seperated.
:param get_file_path_res: Object contains file ID, path and name
:return: bulk list of the elements in the file
"""
bulk_list = []
with open(get_file_path_res.get('path')) as file:
reader = csv.reader(file, delimiter=delimiter, skipinitialspace=True)
for row in reader:
for col in row:
bulk_list += col.split()
return bulk_list
|
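A small end-to-end sketch of the helper above; the file-entry dict mimics the id/path/name object mentioned in the docstring (the exact keys beyond 'path' are assumptions), and the imports are the ones parse_file itself relies on:
import csv
import tempfile
from typing import List

with tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False) as tmp:
    tmp.write("1.2.3.4, 5.6.7.8\n9.9.9.9\n")
    path = tmp.name

file_entry = {"id": "1", "path": path, "name": "indicators.csv"}
print(parse_file(file_entry))   # ['1.2.3.4', '5.6.7.8', '9.9.9.9']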
13,918 |
def add_registration_files_count(state, *args, **kwargs):
AbstractNode = state.get_model('osf.abstractnode')
BaseFileNode = state.get_model('osf.basefilenode')
ContentType = state.get_model('contenttypes', 'ContentType')
registrations = AbstractNode.objects.filter(type='osf.registration', is_deleted=False)
content_type = ContentType.objects.get(app_label='osf', model='abstractnode')
registrations_to_update = []
for registration in registrations:
job = registration.archive_jobs.first() if registration.archive_jobs.count() else None
archiving = job and not job.done and (job.status != 'SUCCESS')
if archiving:
# Skip stuck, failed, or archiving registrations.
continue
registration_files = BaseFileNode.objects.filter(
target_object_id=registration.id,
target_content_type=content_type,
type='osf.osfstoragefile',
deleted_on__isnull=True,
)
registration.files_count = registration_files.count()
registrations_to_update.append(registration)
bulk_update(registrations_to_update, update_fields=['files_count'], batch_size=1000)
logger.info('Populated `files_count` on a total of {} registrations'.format(len(registrations_to_update)))
|
def add_registration_files_count(state, *args, **kwargs):
AbstractNode = state.get_model('osf.abstractnode')
BaseFileNode = state.get_model('osf.basefilenode')
ContentType = state.get_model('contenttypes', 'ContentType')
registrations = AbstractNode.objects.filter(type='osf.registration', is_deleted=False)
content_type = ContentType.objects.get(app_label='osf', model='abstractnode')
registrations_to_update = []
for registration in registrations:
job = registration.archive_job
archiving = job and not job.done and (job.status != 'SUCCESS')
if archiving:
# Skip stuck, failed, or archiving registrations.
continue
registration_files = BaseFileNode.objects.filter(
target_object_id=registration.id,
target_content_type=content_type,
type='osf.osfstoragefile',
deleted_on__isnull=True,
)
registration.files_count = registration_files.count()
registrations_to_update.append(registration)
bulk_update(registrations_to_update, update_fields=['files_count'], batch_size=1000)
logger.info('Populated `files_count` on a total of {} registrations'.format(len(registrations_to_update)))
|
33,322 |
def account_download_filter(account_type, download_table, filters, account_level="treasury_account"):
if account_level not in ("treasury_account", "federal_account"):
raise InvalidParameterException(
'Invalid Parameter: account_level must be either "federal_account" or "treasury_account"'
)
query_filters = {}
tas_id = "treasury_account_identifier" if account_type == "account_balances" else "treasury_account"
if filters.get("agency") and filters["agency"] != "all":
if not ToptierAgency.objects.filter(toptier_agency_id=filters["agency"]).exists():
raise InvalidParameterException("Agency with that ID does not exist")
query_filters[f"{tas_id}__funding_toptier_agency_id"] = filters["agency"]
if filters.get("federal_account") and filters["federal_account"] != "all":
if not FederalAccount.objects.filter(id=filters["federal_account"]).exists():
raise InvalidParameterException("Federal Account with that ID does not exist")
query_filters[f"{tas_id}__federal_account__id"] = filters["federal_account"]
if filters.get("budget_function") and filters["budget_function"] != "all":
query_filters[f"{tas_id}__budget_function_code"] = filters["budget_function"]
if filters.get("budget_subfunction") and filters["budget_subfunction"] != "all":
query_filters[f"{tas_id}__budget_subfunction_code"] = filters["budget_subfunction"]
if tas_id == "treasury_account": # file A does not have DEFC field so we do not attempt to filter
if filters.get("def_codes") and len(filters.get("def_codes")) > 0:
query_filters["disaster_emergency_fund__code__in"] = filters["def_codes"]
submission_filter = get_submission_filter(account_type, filters)
# Make derivations based on the account level
if account_level == "treasury_account":
queryset = generate_treasury_account_query(download_table.objects, account_type, tas_id, filters)
elif account_level == "federal_account":
queryset = generate_federal_account_query(download_table.objects, account_type, tas_id, filters)
else:
raise InvalidParameterException(
'Invalid Parameter: account_level must be either "federal_account" or "treasury_account"'
)
# Apply filter and return
return queryset.filter(submission_filter, **query_filters)
|
def account_download_filter(account_type, download_table, filters, account_level="treasury_account"):
if account_level not in ("treasury_account", "federal_account"):
raise InvalidParameterException(
'Invalid Parameter: account_level must be either "federal_account" or "treasury_account"'
)
query_filters = {}
tas_id = "treasury_account_identifier" if account_type == "account_balances" else "treasury_account"
if filters.get("agency") and filters["agency"] != "all":
if not ToptierAgency.objects.filter(toptier_agency_id=filters["agency"]).exists():
raise InvalidParameterException("Agency with that ID does not exist")
query_filters[f"{tas_id}__funding_toptier_agency_id"] = filters["agency"]
if filters.get("federal_account") and filters["federal_account"] != "all":
if not FederalAccount.objects.filter(id=filters["federal_account"]).exists():
raise InvalidParameterException("Federal Account with that ID does not exist")
query_filters[f"{tas_id}__federal_account__id"] = filters["federal_account"]
if filters.get("budget_function") and filters["budget_function"] != "all":
query_filters[f"{tas_id}__budget_function_code"] = filters["budget_function"]
if filters.get("budget_subfunction") and filters["budget_subfunction"] != "all":
query_filters[f"{tas_id}__budget_subfunction_code"] = filters["budget_subfunction"]
if tas_id == "treasury_account": # file A does not have DEFC field so we do not attempt to filter
if len(filters.get("def_codes", 0)) > 0:
query_filters["disaster_emergency_fund__code__in"] = filters["def_codes"]
submission_filter = get_submission_filter(account_type, filters)
# Make derivations based on the account level
if account_level == "treasury_account":
queryset = generate_treasury_account_query(download_table.objects, account_type, tas_id, filters)
elif account_level == "federal_account":
queryset = generate_federal_account_query(download_table.objects, account_type, tas_id, filters)
else:
raise InvalidParameterException(
'Invalid Parameter: account_level must be either "federal_account" or "treasury_account"'
)
# Apply filter and return
return queryset.filter(submission_filter, **query_filters)
|
4,574 |
def _plot_surf_plotly(surf_mesh, surf_map=None, bg_map=None,
hemi='left', view='lateral', cmap=None,
colorbar=False, avg_method='mean',
threshold=None, output_file=None):
"""Helper function for plot_surf.
.. versionadded:: 0.8.1
This function handles surface plotting when the selected
engine is plotly.
"""
coords, faces = load_surf_mesh(surf_mesh)
x, y, z = coords.T
i, j, k = faces.T
if cmap is None:
cmap = cold_hot
if surf_map is not None:
surf_map_faces = _compute_surf_map_faces(
surf_map, faces, avg_method,
coords.shape[0], faces.shape[0]
)
colors = colorscale(cmap, surf_map_faces, threshold)
vertexcolor = _get_vertexcolor(
surf_map_faces,
colors["cmap"],
colors["norm"],
colors["abs_threshold"],
bg_map=bg_map,
)
else:
vertexcolor = None
if colorbar:
mesh_3d = go.Mesh3d(x=x, y=y, z=z, i=i, j=j, k=k,
intensity=surf_map_faces,
colorscale=colors['colors'])
else:
mesh_3d = go.Mesh3d(x=x, y=y, z=z, i=i, j=j, k=k,
vertexcolor=vertexcolor)
cameras_view = _set_view_plot_surf_plotly(hemi, view)
fig = go.Figure(data=[mesh_3d])
fig.update_layout(scene_camera=CAMERAS[cameras_view], **LAYOUT)
if output_file is not None:
fig.write_image(output_file)
return fig
|
def _plot_surf_plotly(surf_mesh, surf_map=None, bg_map=None,
hemi='left', view='lateral', cmap=None,
colorbar=False, avg_method='mean',
threshold=None, output_file=None):
"""Helper function for plot_surf.
.. versionadded:: 0.8.1
This function handles surface plotting when the selected
engine is plotly.
"""
coords, faces = load_surf_mesh(surf_mesh)
x, y, z = coords.T
i, j, k = faces.T
if cmap is None:
cmap = cold_hot
if surf_map is not None:
surf_map_faces = _compute_surf_map_faces(
surf_map, faces, avg_method,
coords.shape[0], faces.shape[0]
)
colors = colorscale(cmap, surf_map_faces, threshold)
vertexcolor = _get_vertexcolor(
surf_map_faces,
colors["cmap"],
colors["norm"],
colors["abs_threshold"],
bg_map=bg_map,
)
else:
vertexcolor = None
if colorbar:
mesh_3d = go.Mesh3d(x=x, y=y, z=z, i=i, j=j, k=k,
intensity=surf_map,
colorscale=colors['colors'])
else:
mesh_3d = go.Mesh3d(x=x, y=y, z=z, i=i, j=j, k=k,
vertexcolor=vertexcolor)
cameras_view = _set_view_plot_surf_plotly(hemi, view)
fig = go.Figure(data=[mesh_3d])
fig.update_layout(scene_camera=CAMERAS[cameras_view], **LAYOUT)
if output_file is not None:
fig.write_image(output_file)
return fig
|
34,446 |
def _get_reader_class(filename: Text) -> Optional[Any]:
from rasa.core.training.story_reader import yaml_story_reader, markdown_story_reader
module = None
if filename.endswith(".md"):
module = markdown_story_reader.MarkdownStoryReader
elif filename.endswith(".yml"):
module = yaml_story_reader.YAMLStoryReader
return module
|
def _get_reader_class(filename: Text) -> Optional[Type[StoryReader]]:
from rasa.core.training.story_reader import yaml_story_reader, markdown_story_reader
module = None
if filename.endswith(".md"):
module = markdown_story_reader.MarkdownStoryReader
elif filename.endswith(".yml"):
module = yaml_story_reader.YAMLStoryReader
return module
|
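The dispatch is purely extension-based; a quick sketch of the expected behaviour (assuming a Rasa source tree where both reader classes are importable):
print(_get_reader_class("data/stories.yml"))   # <class '...YAMLStoryReader'>
print(_get_reader_class("data/stories.md"))    # <class '...MarkdownStoryReader'>
print(_get_reader_class("data/stories.json"))  # None: unsupported extensions yield no reader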
43,973 |
def _pauli_mult(p1, p2):
r"""Return the result of multipication between two tensor product of pauli operators.
The Pauli operator ::math::`(P_0)` is denoted by [(0, 'P')], where ::math::`P` represents
::math::`X`, ::math::`Y` or ::math::`Z`.
Args:
p1 (list[list[tuple[int, str]]]): the first tensor product of pauli operators
p2 (list[list[tuple[int, str]]]): the second tensor product of pauli operators
Returns
tuple(list[tuple[int, str]], complex): list of the pauli operators and the coefficient
**Example**
>>> p1 = [(0, "X"), (1, "Y")], # X_0 @ Y_1
>>> p2 = [(0, "X"), (2, "Y")], # X_0 @ Y_2
>>> _pauli_mult(p1, p2)
([(2, "Y"), (1, "Y")], 1.0) # p1 @ p2 = X_0 @ Y_1 @ X_0 @ Y_2
"""
c = 1.0
t1 = [t[0] for t in p1]
t2 = [t[0] for t in p2]
k = []
for i in p1:
if i[0] in t1 and i[0] not in t2:
k.append((i[0], pauli_mult[i[1]]))
for j in p2:
if j[0] in t2 and j[0] not in t1:
k.append((j[0], pauli_mult[j[1]]))
if i[0] == j[0]:
if i[1] + j[1] in pauli_coeff:
k.append((i[0], pauli_mult[i[1] + j[1]]))
c = c * pauli_coeff[i[1] + j[1]]
else:
k.append((i[0], pauli_mult[i[1] + j[1]]))
k = [i for i in k if "I" not in i[1]]
for item in k:
k_ = [i for i, x in enumerate(k) if x == item]
if len(k_) >= 2:
for j in k_[::-1][:-1]:
del k[j]
return k, c
|
def _pauli_mult(p1, p2):
r"""Return the result of multipication between two tensor products of pauli operators.
The Pauli operator ::math::`(P_0)` is denoted by [(0, 'P')], where ::math::`P` represents
::math::`X`, ::math::`Y` or ::math::`Z`.
Args:
p1 (list[list[tuple[int, str]]]): the first tensor product of pauli operators
p2 (list[list[tuple[int, str]]]): the second tensor product of pauli operators
Returns
tuple(list[tuple[int, str]], complex): list of the pauli operators and the coefficient
**Example**
>>> p1 = [(0, "X"), (1, "Y")], # X_0 @ Y_1
>>> p2 = [(0, "X"), (2, "Y")], # X_0 @ Y_2
>>> _pauli_mult(p1, p2)
([(2, "Y"), (1, "Y")], 1.0) # p1 @ p2 = X_0 @ Y_1 @ X_0 @ Y_2
"""
c = 1.0
t1 = [t[0] for t in p1]
t2 = [t[0] for t in p2]
k = []
for i in p1:
if i[0] in t1 and i[0] not in t2:
k.append((i[0], pauli_mult[i[1]]))
for j in p2:
if j[0] in t2 and j[0] not in t1:
k.append((j[0], pauli_mult[j[1]]))
if i[0] == j[0]:
if i[1] + j[1] in pauli_coeff:
k.append((i[0], pauli_mult[i[1] + j[1]]))
c = c * pauli_coeff[i[1] + j[1]]
else:
k.append((i[0], pauli_mult[i[1] + j[1]]))
k = [i for i in k if "I" not in i[1]]
for item in k:
k_ = [i for i, x in enumerate(k) if x == item]
if len(k_) >= 2:
for j in k_[::-1][:-1]:
del k[j]
return k, c
|
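The helper leans on two module-level lookup tables, pauli_mult and pauli_coeff, which are not shown in this snippet. A minimal sketch of what they would contain, derived from ordinary Pauli algebra (X.Y = iZ and cyclic permutations; the exact entries here are assumptions), followed by the docstring example run against it:
pauli_mult = {
    "I": "I", "X": "X", "Y": "Y", "Z": "Z",   # single operators map to themselves
    "XX": "I", "YY": "I", "ZZ": "I",          # P.P = I
    "XY": "Z", "YX": "Z",
    "YZ": "X", "ZY": "X",
    "ZX": "Y", "XZ": "Y",
}
pauli_coeff = {                                # phases for the anticommuting products
    "XY": 1.0j, "YX": -1.0j,
    "YZ": 1.0j, "ZY": -1.0j,
    "ZX": 1.0j, "XZ": -1.0j,
}

p1 = [(0, "X"), (1, "Y")]   # X_0 @ Y_1
p2 = [(0, "X"), (2, "Y")]   # X_0 @ Y_2
print(_pauli_mult(p1, p2))  # ([(2, 'Y'), (1, 'Y')], 1.0): the X_0 factors cancel to identity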
28,057 |
def __convert_permissions(permissions) -> Dict:
""" Convert the given permissions to dictionary. """
ret = {"user_permissions": {}, "group_permissions": {}}
for user_name, perms in permissions.user.items():
ret["user_permissions"][user_name] = perms
for group_name, perms in permissions.group.items():
ret["group_permissions"][group_name] = perms
return ret
|
def __convert_permissions(permissions: AccessControl) -> Dict:
""" Convert the given permissions to dictionary. """
ret = {"user_permissions": {}, "group_permissions": {}}
for user_name, perms in permissions.user.items():
ret["user_permissions"][user_name] = perms
for group_name, perms in permissions.group.items():
ret["group_permissions"][group_name] = perms
return ret
|
39,871 |
def test_coexisting_configurations(click_runner,
custom_filepath,
mock_primary_registry_filepath,
testerchain):
# Parse node addresses
deployer, alice, ursula, another_ursula, *all_yall = testerchain.interface.w3.eth.accounts
envvars = {'NUCYPHER_KEYRING_PASSWORD': INSECURE_DEVELOPMENT_PASSWORD,
'NUCYPHER_MINER_ESCROW_SECRET': INSECURE_DEVELOPMENT_PASSWORD,
'NUCYPHER_POLICY_MANAGER_SECRET': INSECURE_DEVELOPMENT_PASSWORD,
'NUCYPHER_USER_ESCROW_PROXY_SECRET': INSECURE_DEVELOPMENT_PASSWORD,
'NUCYPHER_FELIX_DB_SECRET': INSECURE_DEVELOPMENT_PASSWORD}
# Future configuration filepaths for assertions...
public_keys_dir = os.path.join(custom_filepath, 'keyring', 'public')
known_nodes_dir = os.path.join(custom_filepath, 'known_nodes')
# ... Ensure they do not exist to begin with.
assert not os.path.isdir(public_keys_dir)
assert not os.path.isfile(known_nodes_dir)
# Deploy contracts
deploy_args = ('contracts',
'--registry-outfile', mock_primary_registry_filepath,
'--provider-uri', TEST_PROVIDER_URI,
'--deployer-address', deployer,
'--config-root', custom_filepath,
'--poa')
result = click_runner.invoke(deploy.deploy, deploy_args, input='Y', catch_exceptions=False, env=envvars)
assert result.exit_code == 0
# No keys have been generated...
with pytest.raises(FileNotFoundError):
assert len(os.listdir(public_keys_dir)) == 0
# No known nodes exist...
with pytest.raises(FileNotFoundError):
assert len(os.listdir(known_nodes_dir)) == 0
# Just the configuration root...
assert os.path.isdir(custom_filepath)
# and the fresh registry.
assert os.path.isfile(mock_primary_registry_filepath)
#
# Create
#
# Expected config files
felix_file_location = os.path.join(custom_filepath, 'felix.config')
alice_file_location = os.path.join(custom_filepath, 'alice.config')
ursula_file_location = os.path.join(custom_filepath, 'ursula.config')
another_ursula_configuration_file_location = os.path.join(custom_filepath, f'ursula-{another_ursula[:6]}.config')
# Felix creates a system configuration
felix_init_args = ('felix', 'init',
'--config-root', custom_filepath,
'--network', TEMPORARY_DOMAIN,
'--provider-uri', TEST_PROVIDER_URI,
'--checksum-address', deployer,
'--registry-filepath', mock_primary_registry_filepath
)
result = click_runner.invoke(nucypher_cli, felix_init_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert os.path.isfile(felix_file_location)
assert len(os.listdir(public_keys_dir)) == 3
# Use a custom local filepath to init an persistent Alice
alice_init_args = ('alice', 'init',
'--network', TEMPORARY_DOMAIN,
'--provider-uri', TEST_PROVIDER_URI,
'--checksum-address', alice,
'--registry-filepath', mock_primary_registry_filepath,
'--config-root', custom_filepath)
result = click_runner.invoke(nucypher_cli, alice_init_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert os.path.isfile(alice_file_location)
assert len(os.listdir(public_keys_dir)) == 5
# Use the same local filepath to init an persistent Ursula
init_args = ('ursula', 'init',
'--network', TEMPORARY_DOMAIN,
'--provider-uri', TEST_PROVIDER_URI,
'--checksum-address', ursula,
'--rest-host', MOCK_IP_ADDRESS,
'--registry-filepath', mock_primary_registry_filepath,
'--config-root', custom_filepath)
result = click_runner.invoke(nucypher_cli, init_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert len(os.listdir(public_keys_dir)) == 8
assert os.path.isfile(ursula_file_location)
# Use the same local filepath to init another persistent Ursula
init_args = ('ursula', 'init',
'--network', TEMPORARY_DOMAIN,
'--checksum-address', another_ursula,
'--rest-host', MOCK_IP_ADDRESS_2,
'--registry-filepath', mock_primary_registry_filepath,
'--provider-uri', TEST_PROVIDER_URI,
'--config-root', custom_filepath)
result = click_runner.invoke(nucypher_cli, init_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert os.path.isfile(ursula_file_location)
assert os.path.isfile(another_ursula_configuration_file_location)
assert len(os.listdir(public_keys_dir)) == 11
#
# Destroy
#
another_ursula_destruction_args = ('ursula', 'destroy', '--force',
'--config-file', another_ursula_configuration_file_location)
result = click_runner.invoke(nucypher_cli, another_ursula_destruction_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert len(os.listdir(public_keys_dir)) == 8
assert not os.path.isfile(another_ursula_configuration_file_location)
ursula_destruction_args = ('ursula', 'destroy', '--config-file', ursula_file_location)
result = click_runner.invoke(nucypher_cli, ursula_destruction_args, input='Y', catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert 'y/N' in result.output
assert len(os.listdir(public_keys_dir)) == 5
assert not os.path.isfile(ursula_file_location)
felix_destruction_args = ('alice', 'destroy', '--force', '--config-file', alice_file_location)
result = click_runner.invoke(nucypher_cli, felix_destruction_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert len(os.listdir(public_keys_dir)) == 3
assert not os.path.isfile(alice_file_location)
felix_destruction_args = ('felix', 'destroy', '--force', '--config-file', felix_file_location)
result = click_runner.invoke(nucypher_cli, felix_destruction_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert len(os.listdir(public_keys_dir)) == 0
assert not os.path.isfile(felix_file_location)
|
def test_coexisting_configurations(click_runner,
custom_filepath,
mock_primary_registry_filepath,
testerchain):
# Parse node addresses
deployer, alice, ursula, another_ursula, *all_yall = testerchain.interface.w3.eth.accounts
envvars = {'NUCYPHER_KEYRING_PASSWORD': INSECURE_DEVELOPMENT_PASSWORD,
'NUCYPHER_MINER_ESCROW_SECRET': INSECURE_DEVELOPMENT_PASSWORD,
'NUCYPHER_POLICY_MANAGER_SECRET': INSECURE_DEVELOPMENT_PASSWORD,
'NUCYPHER_USER_ESCROW_PROXY_SECRET': INSECURE_DEVELOPMENT_PASSWORD,
'NUCYPHER_FELIX_DB_SECRET': INSECURE_DEVELOPMENT_PASSWORD}
# Future configuration filepaths for assertions...
public_keys_dir = os.path.join(custom_filepath, 'keyring', 'public')
known_nodes_dir = os.path.join(custom_filepath, 'known_nodes')
# ... Ensure they do not exist to begin with.
assert not os.path.isdir(public_keys_dir)
assert not os.path.isfile(known_nodes_dir)
# Deploy contracts
deploy_args = ('contracts',
'--registry-outfile', mock_primary_registry_filepath,
'--provider-uri', TEST_PROVIDER_URI,
'--deployer-address', deployer,
'--config-root', custom_filepath,
'--poa')
result = click_runner.invoke(deploy.deploy, deploy_args, input='Y', catch_exceptions=False, env=envvars)
assert result.exit_code == 0
# No keys have been generated...
with pytest.raises(FileNotFoundError):
assert len(os.listdir(public_keys_dir)) == 0
# No known nodes exist...
with pytest.raises(FileNotFoundError):
assert len(os.listdir(known_nodes_dir)) == 0
# Just the configuration root...
assert os.path.isdir(custom_filepath)
# and the fresh registry.
assert os.path.isfile(mock_primary_registry_filepath)
#
# Create
#
# Expected config files
felix_file_location = os.path.join(custom_filepath, 'felix.config')
alice_file_location = os.path.join(custom_filepath, 'alice.config')
ursula_file_location = os.path.join(custom_filepath, 'ursula.config')
another_ursula_configuration_file_location = os.path.join(custom_filepath, f'ursula-{another_ursula[:6]}.config')
# Felix creates a system configuration
felix_init_args = ('felix', 'init',
'--config-root', custom_filepath,
'--network', TEMPORARY_DOMAIN,
'--provider-uri', TEST_PROVIDER_URI,
'--checksum-address', deployer,
'--registry-filepath', mock_primary_registry_filepath
)
result = click_runner.invoke(nucypher_cli, felix_init_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert os.path.isfile(felix_file_location)
assert len(os.listdir(public_keys_dir)) == 3
# Use a custom local filepath to init an persistent Alice
alice_init_args = ('alice', 'init',
# Use a custom local filepath to init a persistent Alice
'--provider-uri', TEST_PROVIDER_URI,
'--checksum-address', alice,
'--registry-filepath', mock_primary_registry_filepath,
'--config-root', custom_filepath)
result = click_runner.invoke(nucypher_cli, alice_init_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert os.path.isfile(alice_file_location)
assert len(os.listdir(public_keys_dir)) == 5
# Use the same local filepath to init an persistent Ursula
init_args = ('ursula', 'init',
'--network', TEMPORARY_DOMAIN,
'--provider-uri', TEST_PROVIDER_URI,
'--checksum-address', ursula,
'--rest-host', MOCK_IP_ADDRESS,
'--registry-filepath', mock_primary_registry_filepath,
'--config-root', custom_filepath)
result = click_runner.invoke(nucypher_cli, init_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert len(os.listdir(public_keys_dir)) == 8
assert os.path.isfile(ursula_file_location)
# Use the same local filepath to init another persistent Ursula
init_args = ('ursula', 'init',
'--network', TEMPORARY_DOMAIN,
'--checksum-address', another_ursula,
'--rest-host', MOCK_IP_ADDRESS_2,
'--registry-filepath', mock_primary_registry_filepath,
'--provider-uri', TEST_PROVIDER_URI,
'--config-root', custom_filepath)
result = click_runner.invoke(nucypher_cli, init_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert os.path.isfile(ursula_file_location)
assert os.path.isfile(another_ursula_configuration_file_location)
assert len(os.listdir(public_keys_dir)) == 11
#
# Destroy
#
another_ursula_destruction_args = ('ursula', 'destroy', '--force',
'--config-file', another_ursula_configuration_file_location)
result = click_runner.invoke(nucypher_cli, another_ursula_destruction_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert len(os.listdir(public_keys_dir)) == 8
assert not os.path.isfile(another_ursula_configuration_file_location)
ursula_destruction_args = ('ursula', 'destroy', '--config-file', ursula_file_location)
result = click_runner.invoke(nucypher_cli, ursula_destruction_args, input='Y', catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert 'y/N' in result.output
assert len(os.listdir(public_keys_dir)) == 5
assert not os.path.isfile(ursula_file_location)
felix_destruction_args = ('alice', 'destroy', '--force', '--config-file', alice_file_location)
result = click_runner.invoke(nucypher_cli, felix_destruction_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert len(os.listdir(public_keys_dir)) == 3
assert not os.path.isfile(alice_file_location)
felix_destruction_args = ('felix', 'destroy', '--force', '--config-file', felix_file_location)
result = click_runner.invoke(nucypher_cli, felix_destruction_args, catch_exceptions=False, env=envvars)
assert result.exit_code == 0
assert len(os.listdir(public_keys_dir)) == 0
assert not os.path.isfile(felix_file_location)
|
43,169 |
def to_cugraph(g):
"""Convert a DGL graph to a cugraph.Graph and return.
Parameters
----------
g : DGLGraph
A homogeneous graph.
Returns
-------
cugraph.Graph
The converted cugraph graph.
Notes
-----
The function only supports GPU graph input.
Examples
--------
The following example uses PyTorch backend.
>>> import dgl
>>> import cugraph
>>> import torch
>>> g = dgl.graph((torch.tensor([1, 2]), torch.tensor([1, 3]))).to('cuda')
>>> cugraph_g = g.to_cugraph()
>>> cugraph_g.edges()
src dst
0 2 3
1 1 1
"""
if g.device.type != 'cuda':
raise DGLError(f"Cannot convert a {g.device.type} graph to cugraph." +
"Call g.to('cuda') first.")
if not g.is_homogeneous:
raise DGLError("dgl.to_cugraph only supports homogeneous graphs.")
try:
import cugraph
import cudf
except ModuleNotFoundError:
raise ModuleNotFoundError("to_cugraph requires cugraph which could not be imported")
edgelist = g.edges()
src_ser = cudf.from_dlpack(F.zerocopy_to_dlpack(edgelist[0]))
dst_ser = cudf.from_dlpack(F.zerocopy_to_dlpack(edgelist[1]))
cudf_data = cudf.DataFrame({'source':src_ser, 'destination':dst_ser})
g_cugraph = cugraph.Graph(directed=True)
g_cugraph.from_cudf_edgelist(cudf_data,
source='source',
destination='destination')
return g_cugraph
|
def to_cugraph(g):
"""Convert a DGL graph to a cugraph.Graph and return.
Parameters
----------
g : DGLGraph
A homogeneous graph.
Returns
-------
cugraph.Graph
The converted cugraph graph.
Notes
-----
The function only supports GPU graph input.
Examples
--------
The following example uses PyTorch backend.
>>> import dgl
>>> import cugraph
>>> import torch
>>> g = dgl.graph((torch.tensor([1, 2]), torch.tensor([1, 3]))).to('cuda')
>>> cugraph_g = g.to_cugraph()
>>> cugraph_g.edges()
src dst
0 2 3
1 1 1
"""
if g.device.type != 'cuda':
raise DGLError(f"Cannot convert a {g.device.type} graph to cugraph." +
"Call g.to('cuda') first.")
if not g.is_homogeneous:
raise DGLError("dgl.to_cugraph only supports homogeneous graphs.")
try:
import cugraph
import cudf
except ModuleNotFoundError:
raise ModuleNotFoundError("to_cugraph requires cugraph which could not be imported")
edgelist = g.edges()
src_ser = cudf.from_dlpack(F.zerocopy_to_dlpack(edgelist[0]))
dst_ser = cudf.from_dlpack(F.zerocopy_to_dlpack(edgelist[1]))
cudf_data = cudf.DataFrame({'source':src_ser, 'destination':dst_ser})
g_cugraph = cugraph.Graph(directed=True)
g_cugraph.from_cudf_edgelist(cudf_data,
source='source',
destination='destination')
return g_cugraph
|
9,107 |
def enumerate_apk_packages(base_url, os_name, os_code_name, os_arch):
"""
Enumerate packages in an apk (Alpine Package) repository.
:param base_url: the apk repository base URL.
:param os_name: the name of the OS associated with the repository.
:param os_code_name: the OS version associated with the repository.
:param os_arch: the system architecture associated with the repository.
:returns: an enumeration of package entries.
"""
base_url = base_url.replace('$releasever', os_code_name)
apkindex_url = os.path.join(base_url, os_arch, 'APKINDEX.tar.gz')
print('Reading apk package metadata from ' + apkindex_url)
with open_gz_url(apkindex_url) as f:
with tarfile.open(mode='r|', fileobj=f) as tf:
index = None
for ti in tf:
if ti.name == 'APKINDEX':
index = tf.extractfile(ti)
break
if index is None:
raise RuntimeError('APKINDEX url did not contain an APKINDEX file')
for index_entry in parse_apkindex(index):
pkg_name, pkg_version, source_name = index_entry['P'], index_entry['V'], index_entry['o']
pkg_filename = '%s-%s.apk' % (pkg_name, pkg_version)
pkg_url = os.path.join(base_url, pkg_filename)
yield PackageEntry(pkg_name, pkg_version, pkg_url, source_name=source_name)
if 'p' in index_entry:
for d in parse_deps(index_entry['p']):
if d.type is None:
yield PackageEntry(d.name, pkg_version, pkg_url, source_name=source_name)
|
def enumerate_apk_packages(base_url, os_name, os_code_name, os_arch):
"""
Enumerate packages in an apk (Alpine Package) repository.
:param base_url: the apk repository base URL.
:param os_name: the name of the OS associated with the repository.
:param os_code_name: the OS version associated with the repository.
:param os_arch: the system architecture associated with the repository.
:returns: an enumeration of package entries.
"""
base_url = base_url.replace('$releasever', os_code_name)
apkindex_url = os.path.join(base_url, os_arch, 'APKINDEX.tar.gz')
print('Reading apk package metadata from ' + apkindex_url)
with open_gz_url(apkindex_url) as f:
with tarfile.open(mode='r|', fileobj=f) as tf:
index = None
for ti in tf:
if ti.name == 'APKINDEX':
index = tf.extractfile(ti)
break
if index is None:
raise RuntimeError('APKINDEX url did not contain an APKINDEX file')
for index_entry in parse_apkindex(index):
pkg_name, pkg_version, source_name = index_entry['P'], index_entry['V'], index_entry['o']
pkg_filename = '%s-%s.apk' % (pkg_name, pkg_version)
pkg_url = os.path.join(base_url, pkg_filename)
yield PackageEntry(pkg_name, pkg_version, pkg_url, source_name=source_name)
if 'p' in index_entry:
for d in parse_deps(index_entry['p']):
if d.type is None:
yield PackageEntry(d.name, pkg_version, pkg_url, source_name=source_name, binary_name=pkg_name)
|
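A hedged usage sketch; open_gz_url, parse_apkindex and PackageEntry are helpers assumed to live in the same module, and the mirror URL below is just an example Alpine repository showing the $releasever substitution:
repo = 'https://dl-cdn.alpinelinux.org/alpine/v$releasever/main'
for entry in enumerate_apk_packages(repo, 'alpine', '3.18', 'x86_64'):
    print(entry)   # one PackageEntry per package (plus one per virtual 'provides' name)
    break          # the full APKINDEX lists thousands of entries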
30,044 |
def test_miner_set_etherbase(web3_empty):
web3 = web3_empty
assert web3.eth.coinbase == web3.eth.accounts[0]
new_account = web3.personal.newAccount('this-is-a-password')
web3.geth.miner.set_etherBase(new_account)
assert web3.eth.coinbase == new_account
|
def test_miner_set_etherbase(web3_empty):
web3 = web3_empty
assert web3.eth.coinbase == web3.eth.accounts[0]
new_account = web3.personal.newAccount('this-is-a-password')
web3.geth.miner.set_etherbase(new_account)
assert web3.eth.coinbase == new_account
|