id
int64 11
59.9k
| original
stringlengths 33
150k
| modified
stringlengths 37
150k
|
---|---|---|
46,600 | def deprecated(
key: str,
message: str = "'$KEY' is deprecated. Change your code and config to use '$NEW_KEY'",
*,
_parent_: Container,
_node_: Optional[Node],
) -> Any:
from omegaconf._impl import select_node
if not isinstance(key, str):
raise ValueError(
f"oc.deprecated: interpolation key type is not a string ({type(key).__name__})"
)
if not isinstance(message, str):
raise ValueError(
f"oc.deprecated: interpolation message type is not a string ({type(message).__name__})"
)
assert _node_ is not None
full_key = _node_._get_full_key(key=None)
target_node = select_node(_parent_, key, absolute_key=True)
if target_node is None:
raise ConfigKeyError(
f"In oc.deprecate resolver at '{full_key}': Key not found: '{key}'"
)
new_key = target_node._get_full_key(key=None)
msg = string.Template(message).safe_substitute(
KEY=full_key,
NEW_KEY=new_key,
)
warnings.warn(category=UserWarning, message=msg)
return target_node
| def deprecated(
key: str,
message: str = "'$KEY' is deprecated. Change your code and config to use '$NEW_KEY'",
*,
_parent_: Container,
_node_: Optional[Node],
) -> Any:
from omegaconf._impl import select_node
if not isinstance(key, str):
raise TypeError(
f"oc.deprecated: interpolation key type is not a string ({type(key).__name__})"
)
if not isinstance(message, str):
raise ValueError(
f"oc.deprecated: interpolation message type is not a string ({type(message).__name__})"
)
assert _node_ is not None
full_key = _node_._get_full_key(key=None)
target_node = select_node(_parent_, key, absolute_key=True)
if target_node is None:
raise ConfigKeyError(
f"In oc.deprecate resolver at '{full_key}': Key not found: '{key}'"
)
new_key = target_node._get_full_key(key=None)
msg = string.Template(message).safe_substitute(
KEY=full_key,
NEW_KEY=new_key,
)
warnings.warn(category=UserWarning, message=msg)
return target_node
|
35,159 | def gmres(A, b, x0=None, tol=1e-5, restart=None, maxiter=None, M=None,
callback=None, atol=None, callback_type=None):
"""Uses Generalized Minimal RESidual iteration to solve ``Ax = b``.
Args:
A (cupy.ndarray or cupyx.scipy.sparse.spmatrix): The real or complex
matrix of the linear system with shape ``(n, n)``.
b (cupy.ndarray): Right hand side of the linear system with shape
``(n,)`` or ``(n, 1)``.
x0 (cupy.ndarray): Starting guess for the solution.
tol (float): Tolerance for convergence.
restart (int): Number of iterations between restarts. Larger values
increase iteration cost, but may be necessary for convergence.
maxiter (int): Maximum number of iterations.
M (cupy.ndarray or cupyx.scipy.sparse.spmatrix): Preconditioner for
``A``. The preconditioner should approximate the inverse of ``A``.
callback (function): User-specified function to call on every restart.
It is called as ``callback(arg)``, where ``arg`` is selected by
``callback_type``.
callback_type (str): 'x' or 'pr_norm'. If 'x', the current solution
vector is used as an argument of callback function. if `pr_norm`,
relative (preconditioned) residual norm is used as an arugment.
atol (float): Tolerance for convergence.
Returns:
tuple:
It returns ``x`` (cupy.ndarray) and ``info`` (int) where ``x`` is
the converged solution and ``info`` provides convergence
information.
Reference:
M. Wang, H. Klie, M. Parashar and H. Sudan, "Solving Sparse Linear
Systems on NVIDIA Tesla GPUs", ICCS 2009 (2009).
.. seealso:: :func:`scipy.sparse.linalg.gmres`
"""
if A.ndim != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape: {})'.format(A.shape))
if A.dtype.char not in 'fdFD':
raise TypeError('unsupprted dtype (actual: {})'.format(A.dtype))
n = A.shape[0]
if not (b.shape == (n,) or b.shape == (n, 1)):
raise ValueError('b has incompatible dimensins')
b = b.astype(A.dtype).ravel()
if n == 0:
return cupy.empty_like(b), 0
b_norm = cupy.linalg.norm(b)
if b_norm == 0:
return b, 0
if atol is None:
atol = tol * float(b_norm)
else:
atol = max(float(atol), tol * float(b_norm))
if x0 is None:
x = cupy.zeros((n,), dtype=A.dtype)
else:
if not (x0.shape == (n,) or x0.shape == (n, 1)):
raise ValueError('x0 has incompatible dimensins')
x = x0.astype(A.dtype).ravel()
if maxiter is None:
maxiter = n * 10
if restart is None:
restart = 20
restart = min(restart, n)
if callback_type is None:
callback_type = 'pr_norm'
if callback_type not in ('x', 'pr_norm'):
raise ValueError('Unknow callback_type: {}'.format(callback_type))
if callback is None:
callback_type = None
V = cupy.empty((n, restart), dtype=A.dtype, order='F')
H = cupy.zeros((restart+1, restart), dtype=A.dtype, order='F')
e = numpy.zeros((restart+1,), dtype=A.dtype)
matvec, psolve = _make_funcs(A, M)
compute_hu = _make_compute_hu(V)
iters = 0
while True:
mx = psolve(x)
r = b - matvec(mx)
r_norm = cublas.nrm2(r)
if callback_type == 'x':
callback(mx)
elif callback_type == 'pr_norm' and iters > 0:
callback(r_norm / b_norm)
if r_norm <= atol or iters >= maxiter:
break
v = r / r_norm
V[:, 0] = v
e[0] = r_norm
# Arnoldi iteration
for j in range(restart):
z = psolve(v)
u = matvec(z)
H[:j+1, j], u = compute_hu(u, j)
cublas.nrm2(u, out=H[j+1, j])
if j+1 < restart:
v = u / H[j+1, j]
V[:, j+1] = v
# Note: The least-square solution to equation Hy = e is computed on CPU
# because it is faster if tha matrix size is small.
ret = scipy.linalg.lstsq(cupy.asnumpy(H), e)
y = cupy.array(ret[0])
x += V @ y
iters += restart
info = 0
if iters == maxiter and not (r_norm <= atol):
info = iters
return mx, info
| def gmres(A, b, x0=None, tol=1e-5, restart=None, maxiter=None, M=None,
callback=None, atol=None, callback_type=None):
"""Uses Generalized Minimal RESidual iteration to solve ``Ax = b``.
Args:
A (cupy.ndarray or cupyx.scipy.sparse.spmatrix): The real or complex
matrix of the linear system with shape ``(n, n)``.
b (cupy.ndarray): Right hand side of the linear system with shape
``(n,)`` or ``(n, 1)``.
x0 (cupy.ndarray): Starting guess for the solution.
tol (float): Tolerance for convergence.
restart (int): Number of iterations between restarts. Larger values
increase iteration cost, but may be necessary for convergence.
maxiter (int): Maximum number of iterations.
M (cupy.ndarray or cupyx.scipy.sparse.spmatrix): Preconditioner for
``A``. The preconditioner should approximate the inverse of ``A``.
callback (function): User-specified function to call on every restart.
It is called as ``callback(arg)``, where ``arg`` is selected by
``callback_type``.
callback_type (str): 'x' or 'pr_norm'. If 'x', the current solution
vector is used as an argument of callback function. if `pr_norm`,
relative (preconditioned) residual norm is used as an arugment.
atol (float): Tolerance for convergence.
Returns:
tuple:
It returns ``x`` (cupy.ndarray) and ``info`` (int) where ``x`` is
the converged solution and ``info`` provides convergence
information.
Reference:
M. Wang, H. Klie, M. Parashar and H. Sudan, "Solving Sparse Linear
Systems on NVIDIA Tesla GPUs", ICCS 2009 (2009).
.. seealso:: :func:`scipy.sparse.linalg.gmres`
"""
if A.ndim != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape: {})'.format(A.shape))
if A.dtype.char not in 'fdFD':
raise TypeError('unsupprted dtype (actual: {})'.format(A.dtype))
n = A.shape[0]
if not (b.shape == (n,) or b.shape == (n, 1)):
raise ValueError('b has incompatible dimensions')
b = b.astype(A.dtype).ravel()
if n == 0:
return cupy.empty_like(b), 0
b_norm = cupy.linalg.norm(b)
if b_norm == 0:
return b, 0
if atol is None:
atol = tol * float(b_norm)
else:
atol = max(float(atol), tol * float(b_norm))
if x0 is None:
x = cupy.zeros((n,), dtype=A.dtype)
else:
if not (x0.shape == (n,) or x0.shape == (n, 1)):
raise ValueError('x0 has incompatible dimensins')
x = x0.astype(A.dtype).ravel()
if maxiter is None:
maxiter = n * 10
if restart is None:
restart = 20
restart = min(restart, n)
if callback_type is None:
callback_type = 'pr_norm'
if callback_type not in ('x', 'pr_norm'):
raise ValueError('Unknow callback_type: {}'.format(callback_type))
if callback is None:
callback_type = None
V = cupy.empty((n, restart), dtype=A.dtype, order='F')
H = cupy.zeros((restart+1, restart), dtype=A.dtype, order='F')
e = numpy.zeros((restart+1,), dtype=A.dtype)
matvec, psolve = _make_funcs(A, M)
compute_hu = _make_compute_hu(V)
iters = 0
while True:
mx = psolve(x)
r = b - matvec(mx)
r_norm = cublas.nrm2(r)
if callback_type == 'x':
callback(mx)
elif callback_type == 'pr_norm' and iters > 0:
callback(r_norm / b_norm)
if r_norm <= atol or iters >= maxiter:
break
v = r / r_norm
V[:, 0] = v
e[0] = r_norm
# Arnoldi iteration
for j in range(restart):
z = psolve(v)
u = matvec(z)
H[:j+1, j], u = compute_hu(u, j)
cublas.nrm2(u, out=H[j+1, j])
if j+1 < restart:
v = u / H[j+1, j]
V[:, j+1] = v
# Note: The least-square solution to equation Hy = e is computed on CPU
# because it is faster if tha matrix size is small.
ret = scipy.linalg.lstsq(cupy.asnumpy(H), e)
y = cupy.array(ret[0])
x += V @ y
iters += restart
info = 0
if iters == maxiter and not (r_norm <= atol):
info = iters
return mx, info
|
46,378 | def unit_get_expanded_info(country_name: str, unit_type, request_type: str) -> str:
original_name = unit_type.name and unit_type.name or unit_type.id
id = unit_type.id
default_value = None
faction_value = None
with UNITINFOTEXT_PATH.open("r", encoding="utf-8") as fdata:
data = json.load(fdata)
type_exists = data.get(id)
if type_exists is not None:
for faction in type_exists:
if default_value is None:
default_exists = faction.get("default")
if default_exists is not None:
default_value = default_exists.get(request_type)
if faction_value is None:
faction_exists = faction.get(country_name)
if faction_exists is not None:
faction_value = faction_exists.get(request_type)
if default_value is None:
if request_type == "text":
return "WIP - This unit doesn't have any description text yet."
if request_type == "name":
return original_name
else:
return "Unknown"
if faction_value is None:
return default_value
return faction_value
| def unit_get_expanded_info(country_name: str, unit_type, request_type: str) -> str:
original_name = unit_type.name and unit_type.name or unit_type.id
id = unit_type.id
default_value = None
faction_value = None
with UNITINFOTEXT_PATH.open("r", encoding="utf-8") as fdata:
data = json.load(fdata)
type_exists = data.get(unit_type.id)
if type_exists is not None:
for faction in type_exists:
if default_value is None:
default_exists = faction.get("default")
if default_exists is not None:
default_value = default_exists.get(request_type)
if faction_value is None:
faction_exists = faction.get(country_name)
if faction_exists is not None:
faction_value = faction_exists.get(request_type)
if default_value is None:
if request_type == "text":
return "WIP - This unit doesn't have any description text yet."
if request_type == "name":
return original_name
else:
return "Unknown"
if faction_value is None:
return default_value
return faction_value
|
7,241 | def test_uint_image():
img = np.random.randint(0, 255, (10, 10), dtype=np.uint8)
labels = np.zeros((10, 10), dtype=np.int64)
labels[1:3, 1:3]=1
labels[6:9, 6:9] = 2
output = label2rgb(labels, image=img, bg_label=0)
# Make sure that the output is made of floats and in the correct range
assert np.issubdtype(output.dtype, np.floating)
assert output.max() <= 1
| def test_uint_image():
img = np.random.randint(0, 255, (10, 10), dtype=np.uint8)
labels = np.zeros((10, 10), dtype=np.int64)
labels[1:3, 1:3] = 1
labels[6:9, 6:9] = 2
output = label2rgb(labels, image=img, bg_label=0)
# Make sure that the output is made of floats and in the correct range
assert np.issubdtype(output.dtype, np.floating)
assert output.max() <= 1
|
7,575 | def test_votable_tag():
xml = votable_xml_string('1.1')
assert 'xmlns="http://www.ivoa.net/xml/VOTable/v1.1"' in xml
assert 'xsi:noNamespaceSchemaLocation="http://www.ivoa.net/xml/VOTable/v1.1"' in xml
xml = votable_xml_string('1.2')
assert 'xmlns="http://www.ivoa.net/xml/VOTable/v1.2"' in xml
assert 'xsi:noNamespaceSchemaLocation="http://www.ivoa.net/xml/VOTable/v1.2"' in xml
xml = votable_xml_string('1.3')
assert 'xmlns="http://www.ivoa.net/xml/VOTable/v1.3"' in xml
assert 'xsi:schemaLocation="http://www.ivoa.net/xml/VOTable/v1.3 '
'https://www.ivoa.net/xml/VOTable/VOTable-1.3.xsd"' in xml
xml = votable_xml_string('1.4')
assert 'xmlns="http://www.ivoa.net/xml/VOTable/v1.3"' in xml
assert 'xsi:schemaLocation="http://www.ivoa.net/xml/VOTable/v1.3 '
'https://www.ivoa.net/xml/VOTable/VOTable-1.4.xsd"' in xml
| def test_votable_tag():
xml = votable_xml_string('1.1')
assert 'xmlns="http://www.ivoa.net/xml/VOTable/v1.1"' in xml
assert 'xsi:noNamespaceSchemaLocation="http://www.ivoa.net/xml/VOTable/v1.1"' in xml
xml = votable_xml_string('1.2')
assert 'xmlns="http://www.ivoa.net/xml/VOTable/v1.2"' in xml
assert 'xsi:noNamespaceSchemaLocation="http://www.ivoa.net/xml/VOTable/v1.2"' in xml
xml = votable_xml_string('1.3')
assert 'xmlns="http://www.ivoa.net/xml/VOTable/v1.3"' in xml
assert 'xsi:schemaLocation="http://www.ivoa.net/xml/VOTable/v1.3 '
assert 'https://www.ivoa.net/xml/VOTable/VOTable-1.3.xsd"' in xml
xml = votable_xml_string('1.4')
assert 'xmlns="http://www.ivoa.net/xml/VOTable/v1.3"' in xml
assert 'xsi:schemaLocation="http://www.ivoa.net/xml/VOTable/v1.3 '
'https://www.ivoa.net/xml/VOTable/VOTable-1.4.xsd"' in xml
|
30,710 | def download_zip_file_from_gc(current_feature_content_zip_file_path, extract_destination_path):
"""Save the content_new.zip file from the feature branch into artifacts folder.
Args:
gc_service_account: full path to service account json.
current_feature_content_zip_file_path (str): Content_new.zip file path in google cloud.
extract_destination_path: The folder path to download the content_new.zip file to.
Returns:
The new path of the content_new.zip file.
"""
storage_client = init_storage_client()
storage_bucket = storage_client.bucket(STORAGE_BUCKET_NAME)
index_blob = storage_bucket.blob(current_feature_content_zip_file_path)
if not os.path.exists(extract_destination_path):
os.mkdir(extract_destination_path)
index_blob.download_to_filename(extract_destination_path)
if os.path.exists(f'{extract_destination_path}/content_new.zip'):
return f'{extract_destination_path}/content_new.zip'
return ''
| def download_zip_file_from_gcp(current_feature_content_zip_file_path, extract_destination_path):
"""Save the content_new.zip file from the feature branch into artifacts folder.
Args:
gc_service_account: full path to service account json.
current_feature_content_zip_file_path (str): Content_new.zip file path in google cloud.
extract_destination_path: The folder path to download the content_new.zip file to.
Returns:
The new path of the content_new.zip file.
"""
storage_client = init_storage_client()
storage_bucket = storage_client.bucket(STORAGE_BUCKET_NAME)
index_blob = storage_bucket.blob(current_feature_content_zip_file_path)
if not os.path.exists(extract_destination_path):
os.mkdir(extract_destination_path)
index_blob.download_to_filename(extract_destination_path)
if os.path.exists(f'{extract_destination_path}/content_new.zip'):
return f'{extract_destination_path}/content_new.zip'
return ''
|
41,538 | def segment_volume(folder_model, fname_image, fname_roi=None):
"""Segment an image.
Segment an image (fname_image) using a already trained model given its
training parameters (both in folder_model). If provided, a RegionOfInterest (fname_roi)
is used to crop the image prior to segment it.
Args:
folder_model (string): foldername which contains
(1) the model ('model.pt') to use
(2) its configuration file ('model_metadata.json') used for the training,
see https://github.com/neuropoly/ivado-medical-imaging/wiki/configuration-file
fname_image (string): image filename (e.g. .nii.gz) to segment.
roi_fname (string): Binary image filename (e.g. .nii.gz) defining a region of interest,
e.g. spinal cord centerline, used to crop the image prior to segment it if provided.
Returns:
nibabelObject: Object containing the soft segmentation.
"""
# Define device
device = torch.device("cpu")
# Check if model folder exists
if os.path.isdir(folder_model):
# Check if model and model metadata exist
fname_model = os.path.join(folder_model, 'model.pt')
if not os.path.isfile(fname_model):
print('Model file not found: {}'.format(fname_model))
exit()
fname_model_metadata = os.path.join(folder_model, 'model_metadata.json')
if not os.path.isfile(fname_model_metadata):
print('Model metadata file not found: {}'.format(fname_model_metadata))
exit()
else:
print('Model folder not found: {}'.format(folder_model))
exit()
# Load model training config
with open(fname_model_metadata, "r") as fhandle:
context = json.load(fhandle)
# If ROI is not provided then force center cropping
if fname_roi is None and 'ROICrop2D' in context["transformation_validation"].keys():
context["transformation_validation"] = dict((key, value) if key != 'ROICrop2D'
else ('CenterCrop2D', value)
for (key, value) in context["transformation_validation"].items())
# Force labeled to False in transforms
context["transformation_validation"] = dict((key, {**value, **{"labeled": False}})
if not key.startswith('NormalizeInstance')
else (key, value)
for (key, value) in context["transformation_validation"].items())
# Compose transforms
do_transforms = compose_transforms(context['transformation_validation'])
# Undo Transforms
undo_transforms = ivadomed_transforms.UndoCompose(do_transforms)
# Load data
filename_pairs = [([fname_image], None, fname_roi, [{}])]
if not context['unet_3D']: # TODO: rename this param 'model_name' or 'kernel_dim'
ds = MRI2DSegmentationDataset(filename_pairs,
slice_axis=AXIS_DCT[context['slice_axis']],
cache=True,
transform=do_transforms,
slice_filter_fn=SliceFilter(**context["slice_filter"]),
canonical=True)
else:
print('\n3D unet is not implemented yet.')
exit()
# If fname_roi provided, then remove slices without ROI
if fname_roi is not None:
ds = ivadomed_loader.filter_roi(ds, nb_nonzero_thr=context["slice_filter_roi"])
if not context['unet_3D']:
print(f"\nLoaded {len(ds)} {context['slice_axis']} slices..")
# Data Loader
data_loader = DataLoader(ds, batch_size=context["batch_size"],
shuffle=False, pin_memory=True,
collate_fn=mt_datasets.mt_collate,
num_workers=0)
# Load model
model = torch.load(fname_model, map_location=device)
# Inference time
model.eval()
# Loop across batches
preds_list, sliceIdx_list = [], []
for i_batch, batch in enumerate(data_loader):
with torch.no_grad():
preds = model(batch['input'])
rdict = {}
rdict['gt'] = preds
batch.update(rdict)
# Reconstruct 3D object
for i_slice in range(len(batch['gt'])):
# Undo transformations
rdict = {}
# Import transformations parameters
for k in batch.keys():
rdict[k] = batch[k][i_slice]
rdict_undo = undo_transforms(rdict)
# Add new segmented slice to preds_list
# Convert PIL to numpy
pred_cur = np.array(rdict_undo['gt'])
preds_list.append(pred_cur)
# Store the slice index of pred_cur in the original 3D image
sliceIdx_list.append(int(rdict_undo['input_metadata']['slice_index']))
# If last batch and last sample of this batch, then reconstruct 3D object
if i_batch == len(data_loader) - 1 and i_slice == len(batch['gt']) - 1:
pred_nib = pred_to_nib(data_lst=preds_list,
z_lst=sliceIdx_list,
fname_ref=fname_image,
fname_out=None,
slice_axis=AXIS_DCT[context['slice_axis']],
kernel_dim='3d' if context['unet_3D'] else '2d',
debug=False,
bin_thr=-1)
return pred_nib
| def segment_volume(folder_model, fname_image, fname_roi=None):
"""Segment an image.
Segment an image (fname_image) using a pre-trained model (folder_model). If provided, a region of interest (fname_roi)
is used to crop the image prior to segment it.
Args:
folder_model (string): foldername which contains
(1) the model ('model.pt') to use
(2) its configuration file ('model_metadata.json') used for the training,
see https://github.com/neuropoly/ivado-medical-imaging/wiki/configuration-file
fname_image (string): image filename (e.g. .nii.gz) to segment.
roi_fname (string): Binary image filename (e.g. .nii.gz) defining a region of interest,
e.g. spinal cord centerline, used to crop the image prior to segment it if provided.
Returns:
nibabelObject: Object containing the soft segmentation.
"""
# Define device
device = torch.device("cpu")
# Check if model folder exists
if os.path.isdir(folder_model):
# Check if model and model metadata exist
fname_model = os.path.join(folder_model, 'model.pt')
if not os.path.isfile(fname_model):
print('Model file not found: {}'.format(fname_model))
exit()
fname_model_metadata = os.path.join(folder_model, 'model_metadata.json')
if not os.path.isfile(fname_model_metadata):
print('Model metadata file not found: {}'.format(fname_model_metadata))
exit()
else:
print('Model folder not found: {}'.format(folder_model))
exit()
# Load model training config
with open(fname_model_metadata, "r") as fhandle:
context = json.load(fhandle)
# If ROI is not provided then force center cropping
if fname_roi is None and 'ROICrop2D' in context["transformation_validation"].keys():
context["transformation_validation"] = dict((key, value) if key != 'ROICrop2D'
else ('CenterCrop2D', value)
for (key, value) in context["transformation_validation"].items())
# Force labeled to False in transforms
context["transformation_validation"] = dict((key, {**value, **{"labeled": False}})
if not key.startswith('NormalizeInstance')
else (key, value)
for (key, value) in context["transformation_validation"].items())
# Compose transforms
do_transforms = compose_transforms(context['transformation_validation'])
# Undo Transforms
undo_transforms = ivadomed_transforms.UndoCompose(do_transforms)
# Load data
filename_pairs = [([fname_image], None, fname_roi, [{}])]
if not context['unet_3D']: # TODO: rename this param 'model_name' or 'kernel_dim'
ds = MRI2DSegmentationDataset(filename_pairs,
slice_axis=AXIS_DCT[context['slice_axis']],
cache=True,
transform=do_transforms,
slice_filter_fn=SliceFilter(**context["slice_filter"]),
canonical=True)
else:
print('\n3D unet is not implemented yet.')
exit()
# If fname_roi provided, then remove slices without ROI
if fname_roi is not None:
ds = ivadomed_loader.filter_roi(ds, nb_nonzero_thr=context["slice_filter_roi"])
if not context['unet_3D']:
print(f"\nLoaded {len(ds)} {context['slice_axis']} slices..")
# Data Loader
data_loader = DataLoader(ds, batch_size=context["batch_size"],
shuffle=False, pin_memory=True,
collate_fn=mt_datasets.mt_collate,
num_workers=0)
# Load model
model = torch.load(fname_model, map_location=device)
# Inference time
model.eval()
# Loop across batches
preds_list, sliceIdx_list = [], []
for i_batch, batch in enumerate(data_loader):
with torch.no_grad():
preds = model(batch['input'])
rdict = {}
rdict['gt'] = preds
batch.update(rdict)
# Reconstruct 3D object
for i_slice in range(len(batch['gt'])):
# Undo transformations
rdict = {}
# Import transformations parameters
for k in batch.keys():
rdict[k] = batch[k][i_slice]
rdict_undo = undo_transforms(rdict)
# Add new segmented slice to preds_list
# Convert PIL to numpy
pred_cur = np.array(rdict_undo['gt'])
preds_list.append(pred_cur)
# Store the slice index of pred_cur in the original 3D image
sliceIdx_list.append(int(rdict_undo['input_metadata']['slice_index']))
# If last batch and last sample of this batch, then reconstruct 3D object
if i_batch == len(data_loader) - 1 and i_slice == len(batch['gt']) - 1:
pred_nib = pred_to_nib(data_lst=preds_list,
z_lst=sliceIdx_list,
fname_ref=fname_image,
fname_out=None,
slice_axis=AXIS_DCT[context['slice_axis']],
kernel_dim='3d' if context['unet_3D'] else '2d',
debug=False,
bin_thr=-1)
return pred_nib
|
47,876 | def generate(topology, topologyName, topologies_hdr, py_impl, cpp_impl):
name = topologyName.replace('-', '_').replace('.', '_')
# DLDT models come with multiple files foe different precision
files = topology['files']
assert(len(files) > 0), topologyName
if len(files) > 2:
assert(topology['framework'] == 'dldt'), topologyName
assert(len(files) % 2 == 0), topologyName
for i in range(len(files) / 2):
subTopology = topology.copy()
subTopologyName = topologyName
subTopology['files'] = [files[i * 2], files[i * 2 + 1]]
# Detect precision by the first file
precision = subTopology['files'][0]['name']
precision = precision[:precision.find('/')].lower()
if precision != 'fp32': # Keep origin name for FP32
subTopologyName += '_' + precision
registerVersionedName(name, precision)
generate(subTopology, subTopologyName, topologies_hdr, py_impl, cpp_impl)
return
registerVersionedName(name)
config = {}
config['framework'] = topology['framework']
config['topology_name'] = name
if 'model_optimizer_args' in topology:
config['model_optimizer_args'] = ' '.join(topology['model_optimizer_args'])
fileURL, fileSHA, fileName = getSource(files[0])
if fileName.endswith('.tar.gz') or fileName.endswith('.zip'):
config['archive_url'], config['archive_sha256'], config['archive_name'] = fileURL, fileSHA, fileName
else:
config['config_url'], config['config_sha256'], config['config_path'] = fileURL, fileSHA, fileName
if len(files) > 1:
config['model_url'], config['model_sha256'], config['model_path'] = getSource(files[1])
s = ', '.join(['{"%s", "%s"}' % (key, value) for key, value in config.items()])
for impl in [py_impl, cpp_impl]:
impl.write("""
Topology %s(bool download)
{
Topology t({%s});
if (download)
t.download();
return t;
}\n""" % (name, s))
topologies_hdr.write("""
/**
%s
License: %s
*/
CV_EXPORTS_W Topology %s(bool download = true);
""" % (topology['description'], topology['license'], name))
| def generate(topology, topologyName, topologies_hdr, py_impl, cpp_impl):
name = topologyName.replace('-', '_').replace('.', '_')
# DLDT models come with multiple files foe different precision
files = topology['files']
assert(len(files) > 0), topologyName
if len(files) > 2:
assert(topology['framework'] == 'dldt'), topologyName
assert(len(files) % 2 == 0), topologyName
for i in range(len(files) / 2):
subTopology = topology.copy()
subTopologyName = topologyName
subTopology['files'] = [files[i * 2], files[i * 2 + 1]]
# Detect precision by the first file
precision = subTopology['files'][0]['name']
precision = precision[:precision.find('/')].lower()
if precision != 'fp32': # Keep origin name for FP32
subTopologyName += '_' + precision
registerVersionedName(name, precision)
generate(subTopology, subTopologyName, topologies_hdr, py_impl, cpp_impl)
return
registerVersionedName(name)
config = {}
config['framework'] = topology['framework']
config['topology_name'] = name
if 'model_optimizer_args' in topology:
config['model_optimizer_args'] = ' '.join(topology['model_optimizer_args'])
fileURL, fileSHA, fileName = getSource(files[0])
if fileName.endswith('.tar.gz') or fileName.endswith('.zip'):
config['archive_url'], config['archive_sha256'], config['archive_name'] = fileURL, fileSHA, fileName
else:
config['config_url'], config['config_sha256'], config['config_path'] = fileURL, fileSHA, fileName
if len(files) > 1:
config['model_url'], config['model_sha256'], config['model_path'] = getSource(files[1])
s = ', '.join('{"%s", "%s"}' % (key, value) for key, value in config.items())
for impl in [py_impl, cpp_impl]:
impl.write("""
Topology %s(bool download)
{
Topology t({%s});
if (download)
t.download();
return t;
}\n""" % (name, s))
topologies_hdr.write("""
/**
%s
License: %s
*/
CV_EXPORTS_W Topology %s(bool download = true);
""" % (topology['description'], topology['license'], name))
|
41,505 | def test_qmu_tilde(caplog):
mu = 1.0
pdf = pyhf.simplemodels.hepdata_like([6], [9], [3])
data = [9] + pdf.config.auxdata
init_pars = pdf.config.suggested_init()
par_bounds = pdf.config.suggested_bounds()
par_bounds[pdf.config.poi_index] = [-10, 10]
with caplog.at_level(logging.WARNING, 'pyhf.infer.test_statistics'):
pyhf.infer.test_statistics.qmu_tilde(mu, data, pdf, init_pars, par_bounds)
assert 'WARNING qmu tilde test statistic used for fit' in caplog.text
| def test_qmu_tilde(caplog):
mu = 1.0
pdf = pyhf.simplemodels.hepdata_like([6], [9], [3])
data = [9] + pdf.config.auxdata
init_pars = pdf.config.suggested_init()
par_bounds = pdf.config.suggested_bounds()
par_bounds[pdf.config.poi_index] = [-10, 10]
with caplog.at_level(logging.WARNING, 'pyhf.infer.test_statistics'):
pyhf.infer.test_statistics.qmu_tilde(mu, data, pdf, init_pars, par_bounds)
assert 'WARNING qmu_tilde test statistic used for fit' in caplog.text
|
1,834 | def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, return_n_test_samples=False,
return_times=False, return_estimator=False,
error_score=np.nan):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
The target variable to try to predict in the case of
supervised learning.
scorer : A single callable or dict mapping scorer name to the callable
If it is a single callable, the return value for ``train_scores`` and
``test_scores`` is a single float.
For a dict, it should be one mapping the scorer name to the scorer
callable object / function.
The callable object / fn should have signature
``scorer(estimator, X, y)``.
train : array-like of shape (n_train_samples,)
Indices of training samples.
test : array-like of shape (n_test_samples,)
Indices of test samples.
verbose : int
The verbosity level.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised. This parameter
does not affect the refit step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : bool, default=False
Compute and return score on training set.
return_parameters : bool, default=False
Return parameters that has been used for the estimator.
return_n_test_samples : bool, default=False
Whether to return the ``n_test_samples``
return_times : bool, default=False
Whether to return the fit/score times.
return_estimator : bool, default=False
Whether to return the fitted estimator.
Returns
-------
result : dict with the following attributes
train_scores : dict of scorer name -> float
Score on training set (for all the scorers),
returned only if `return_train_score` is `True`.
test_scores : dict of scorer name -> float
Score on testing set (for all the scorers).
n_test_samples : int
Number of test samples.
fit_time : float
Time spent for fitting in seconds.
score_time : float
Time spent for scoring in seconds.
parameters : dict or None
The parameters that have been evaluated.
estimator : estimator object
The fitted estimator
"""
if verbose > 1:
if parameters is None:
msg = ''
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = _check_fit_params(X, fit_params, train)
if parameters is not None:
# clone after setting parameters in case any parameters
# are estimators (like pipeline steps)
# because pipeline doesn't clone steps in fit
cloned_parameters = {}
for k, v in parameters.items():
cloned_parameters[k] = clone(v, safe=False)
estimator = estimator.set_params(**cloned_parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
result = {}
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
# Note fit time as time until error
fit_time = time.time() - start_time
score_time = 0.0
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
if isinstance(scorer, dict):
test_scores = {name: error_score for name in scorer}
if return_train_score:
train_scores = test_scores.copy()
else:
test_scores = error_score
if return_train_score:
train_scores = error_score
warnings.warn("Estimator fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%s" %
(error_score, format_exc()),
FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)")
else:
fit_time = time.time() - start_time
test_scores = _score(estimator, X_test, y_test, scorer)
score_time = time.time() - start_time - fit_time
if return_train_score:
train_scores = _score(estimator, X_train, y_train, scorer)
if verbose > 2:
if isinstance(test_scores, dict):
for scorer_name in sorted(test_scores):
msg += ", %s=" % scorer_name
if return_train_score:
msg += "(train=%.3f," % train_scores[scorer_name]
msg += " test=%.3f)" % test_scores[scorer_name]
else:
msg += "%.3f" % test_scores[scorer_name]
else:
msg += ", score="
msg += ("%.3f" % test_scores if not return_train_score else
"(train=%.3f, test=%.3f)" % (train_scores, test_scores))
if verbose > 1:
total_time = score_time + fit_time
print(_message_with_time('CV', msg, total_time))
result["test_scores"] = test_scores
if return_train_score:
result["train_scores"] = train_scores
if return_n_test_samples:
result["n_test_samples"] = _num_samples(X_test)
if return_times:
result["fit_time"] = fit_time
result["score_time"] = score_time
if return_parameters:
result["parameters"] = parameters
if return_estimator:
result["estimator"] = estimator
return result
| def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, return_n_test_samples=False,
return_times=False, return_estimator=False,
error_score=np.nan):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape (n_samples, n_features)
The data to fit.
y : array-like of shape (n_samples,) or (n_samples, n_outputs) or None
The target variable to try to predict in the case of
supervised learning.
scorer : A single callable or dict mapping scorer name to the callable
If it is a single callable, the return value for ``train_scores`` and
``test_scores`` is a single float.
For a dict, it should be one mapping the scorer name to the scorer
callable object / function.
The callable object / fn should have signature
``scorer(estimator, X, y)``.
train : array-like of shape (n_train_samples,)
Indices of training samples.
test : array-like of shape (n_test_samples,)
Indices of test samples.
verbose : int
The verbosity level.
error_score : 'raise' or numeric, default=np.nan
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised.
If a numeric value is given, FitFailedWarning is raised. This parameter
does not affect the refit step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : bool, default=False
Compute and return score on training set.
return_parameters : bool, default=False
Return parameters that has been used for the estimator.
return_n_test_samples : bool, default=False
Whether to return the ``n_test_samples``
return_times : bool, default=False
Whether to return the fit/score times.
return_estimator : bool, default=False
Whether to return the fitted estimator.
Returns
-------
result : dict with the following attributes
train_scores : dict of scorer name -> float
Score on training set (for all the scorers),
returned only if `return_train_score` is `True`.
test_scores : dict of scorer name -> float
Score on testing set (for all the scorers).
n_test_samples : int
Number of test samples.
fit_time : float
Time spent for fitting in seconds.
score_time : float
Time spent for scoring in seconds.
parameters : dict or None
The parameters that have been evaluated.
estimator : estimator object
The fitted estimator.
"""
if verbose > 1:
if parameters is None:
msg = ''
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = _check_fit_params(X, fit_params, train)
if parameters is not None:
# clone after setting parameters in case any parameters
# are estimators (like pipeline steps)
# because pipeline doesn't clone steps in fit
cloned_parameters = {}
for k, v in parameters.items():
cloned_parameters[k] = clone(v, safe=False)
estimator = estimator.set_params(**cloned_parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
result = {}
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
# Note fit time as time until error
fit_time = time.time() - start_time
score_time = 0.0
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
if isinstance(scorer, dict):
test_scores = {name: error_score for name in scorer}
if return_train_score:
train_scores = test_scores.copy()
else:
test_scores = error_score
if return_train_score:
train_scores = error_score
warnings.warn("Estimator fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%s" %
(error_score, format_exc()),
FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)")
else:
fit_time = time.time() - start_time
test_scores = _score(estimator, X_test, y_test, scorer)
score_time = time.time() - start_time - fit_time
if return_train_score:
train_scores = _score(estimator, X_train, y_train, scorer)
if verbose > 2:
if isinstance(test_scores, dict):
for scorer_name in sorted(test_scores):
msg += ", %s=" % scorer_name
if return_train_score:
msg += "(train=%.3f," % train_scores[scorer_name]
msg += " test=%.3f)" % test_scores[scorer_name]
else:
msg += "%.3f" % test_scores[scorer_name]
else:
msg += ", score="
msg += ("%.3f" % test_scores if not return_train_score else
"(train=%.3f, test=%.3f)" % (train_scores, test_scores))
if verbose > 1:
total_time = score_time + fit_time
print(_message_with_time('CV', msg, total_time))
result["test_scores"] = test_scores
if return_train_score:
result["train_scores"] = train_scores
if return_n_test_samples:
result["n_test_samples"] = _num_samples(X_test)
if return_times:
result["fit_time"] = fit_time
result["score_time"] = score_time
if return_parameters:
result["parameters"] = parameters
if return_estimator:
result["estimator"] = estimator
return result
|
10,973 | def from_env(name, default=NoDefaultValue, kind=str):
"""
Get a configuration value from the environment.
Arguments
---------
name : str
The name of the environment variable to pull from for this
setting.
default : any
A default value of the return type in case the intended
environment variable is not set. If this argument is not passed,
the environment variable is considered to be required, and
``ImproperlyConfigured`` may be raised.
kind : callable
A callable that takes a string and returns a value of the return
type.
Returns
-------
any
A value of the type returned by ``kind``.
Raises
------
ImproperlyConfigured
If there is no ``default``, and the environment variable is not
set.
"""
try:
val = os.environ[name]
except KeyError:
if default == NoDefaultValue:
raise ImproperlyConfigured("Missing environment variable {}.".format(name))
val = default
val = kind(val)
return val
| def from_env(name, default=NoDefaultValue, kind=str):
"""
Get a configuration value from the environment.
Arguments
---------
name : str
The name of the environment variable to pull from for this
setting.
default : any
A default value of the return type in case the intended
environment variable is not set. If this argument is not passed,
the environment variable is considered to be required, and
``ImproperlyConfigured`` may be raised.
kind : callable
A callable that takes a string and returns a value of the return
type.
Returns
-------
any
A value of the type returned by ``kind``.
Raises
------
ImproperlyConfigured
If there is no ``default``, and the environment variable is not
set.
"""
try:
val = os.environ[name]
except KeyError:
if default is NoDefaultValue:
raise ImproperlyConfigured("Missing environment variable {}.".format(name))
val = default
val = kind(val)
return val
|
32,613 | def cymru_bulk_whois_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
"""
Returns results of 'cymru-bulk-whois' command
:type client: ``Client``
:param Client: client to use
:type args: ``Dict[str, Any]``
:param args: All command arguments - 'entry_id', 'delimiter'
:return: CommandResults object containing the results of the lookup action as returned from the API
and its readable output.
"""
if args.get('entry_id'):
demisto.debug('getting the path of the file from its entry_id')
get_file_path_res = demisto.getFilePath(args.get('entry_id'))
if not get_file_path_res:
raise ValueError('No file was found for given entry_id')
ips_list = parse_file(get_file_path_res, args.get('delimiter', ','))
else:
raise ValueError('No entry_id specified.')
return parse_ips_list(client, ips_list)
| def cymru_bulk_whois_command(client: Client, args: Dict[str, Any]) -> List[CommandResults]:
"""
Returns results of 'cymru-bulk-whois' command
:type client: ``Client``
:param Client: client to use
:type args: ``Dict[str, Any]``
:param args: All command arguments - 'entry_id', 'delimiter'
:return: CommandResults object containing the results of the lookup action as returned from the API
and its readable output.
"""
if args.get('entry_id'):
demisto.debug('getting the path of the file from its entry_id')
file_path = demisto.getFilePath(args.get('entry_id'))
if not get_file_path_res:
raise ValueError('No file was found for given entry_id')
ips_list = parse_file(get_file_path_res, args.get('delimiter', ','))
else:
raise ValueError('No entry_id specified.')
return parse_ips_list(client, ips_list)
|
31,698 | def fetch_last_emails(account, folder_name='Inbox', since_datetime=None, exclude_ids=None):
qs = get_folder_by_path(account, folder_name, is_public=IS_PUBLIC_FOLDER)
if since_datetime:
qs = qs.filter(datetime_received__gte=since_datetime)
else:
if not FETCH_ALL_HISTORY:
tz = EWSTimeZone.timezone('UTC')
first_fetch_datetime = dateparser.parse(FETCH_TIME)
first_fetch_ews_datetime = EWSDateTime.from_datetime(tz.localize(first_fetch_datetime))
qs = qs.filter(datetime_received__gte=first_fetch_ews_datetime)
qs = qs.filter().only(*map(lambda x: x.name, Message.FIELDS))
qs = qs.filter().order_by('datetime_received')
result = []
counter = 0
for item in qs:
counter += 1
try:
if isinstance(item, Message) and item.message_id not in exclude_ids:
result.append(item)
if len(result) >= MAX_FETCH:
break
except ValueError as exc:
future_utils.raise_from(ValueError(
'Got an error when pulling incidents. You might be using the wrong exchange version.'
), exc)
demisto.debug(f'EWS V2 - Got total of {counter} from ews query. {len(result)} results not excluded. ')
return result
| def fetch_last_emails(account, folder_name='Inbox', since_datetime=None, exclude_ids=None):
qs = get_folder_by_path(account, folder_name, is_public=IS_PUBLIC_FOLDER)
if since_datetime:
qs = qs.filter(datetime_received__gte=since_datetime)
else:
if not FETCH_ALL_HISTORY:
tz = EWSTimeZone.timezone('UTC')
first_fetch_datetime = dateparser.parse(FETCH_TIME)
first_fetch_ews_datetime = EWSDateTime.from_datetime(tz.localize(first_fetch_datetime))
qs = qs.filter(datetime_received__gte=first_fetch_ews_datetime)
qs = qs.filter().only(*map(lambda x: x.name, Message.FIELDS))
qs = qs.filter().order_by('datetime_received')
result = []
counter = 0
for item in qs:
counter += 1
try:
if isinstance(item, Message) and not item.message_id in exclude_ids:
result.append(item)
if len(result) >= MAX_FETCH:
break
except ValueError as exc:
future_utils.raise_from(ValueError(
'Got an error when pulling incidents. You might be using the wrong exchange version.'
), exc)
demisto.debug(f'EWS V2 - Got total of {counter} from ews query. {len(result)} results not excluded. ')
return result
|
24,688 | def get_available_rmw_implementations():
"""
Return the set of all available RMW implementations as registered in the ament index.
The result can be overridden by setting an environment variable named
``RMW_IMPLEMENTATIONS``.
The variable can contain RMW implementation names separated by the platform
specific path separator.
Including an unavailable RMW implementation results in a RuntimeError.
"""
available_rmw_implementations = ament_index_python.get_resources(
'rmw_typesupport')
available_rmw_implementations = {
name for name in available_rmw_implementations
if name != 'rmw_implementation'}
# filter by implementations in environment variable if provided
rmw_implementations = os.environ.get('RMW_IMPLEMENTATIONS')
if rmw_implementations:
rmw_implementations = rmw_implementations.split(os.pathsep)
missing_rmw_implementations = set(rmw_implementations) - \
available_rmw_implementations
if missing_rmw_implementations:
raise RuntimeError(
f'The RMW implementations {missing_rmw_implementations} '
"specified in 'RMW_IMPLEMENTATIONS' are not available ("
', '.join(sorted(available_rmw_implementations)) + ')')
available_rmw_implementations = {
name for name in available_rmw_implementations
if name in rmw_implementations}
return available_rmw_implementations
| def get_available_rmw_implementations():
"""
Return the set of all available RMW implementations as registered in the ament index.
The result can be overridden by setting an environment variable named
``RMW_IMPLEMENTATIONS``.
The variable can contain RMW implementation names separated by the platform
specific path separator.
Including an unavailable RMW implementation results in a RuntimeError.
"""
available_rmw_implementations = ament_index_python.get_resources(
'rmw_typesupport')
available_rmw_implementations = {
name for name in available_rmw_implementations
if name != 'rmw_implementation'}
# filter by implementations in environment variable if provided
rmw_implementations = os.environ.get('RMW_IMPLEMENTATIONS')
if rmw_implementations:
rmw_implementations = rmw_implementations.split(os.pathsep)
missing_rmw_implementations = set(rmw_implementations) - \
available_rmw_implementations
if missing_rmw_implementations:
raise RuntimeError(
f'The RMW implementations {missing_rmw_implementations} '
"specified in 'RMW_IMPLEMENTATIONS' are not available ("
f"{', '.join(sorted(available_rmw_implementations))})")
available_rmw_implementations = {
name for name in available_rmw_implementations
if name in rmw_implementations}
return available_rmw_implementations
|
23,803 | def set_dirty(folder):
dirty_file = os.path.normpath(folder) + _DIRTY_FOLDER
assert not os.path.exists(dirty_file)
save(dirty_file, "")
| def set_dirty(folder):
dirty_file = os.path.normpath(folder) + _DIRTY_FOLDER
assert not os.path.exists(dirty_file), "Folder '{}' is already dirty".format(folder)
save(dirty_file, "")
|
37,337 | def draw(program: Union[Waveform, ParametricPulse, Schedule],
style: Optional[Dict[str, Any]] = None,
backend: Optional[BaseBackend] = None,
time_range_dt: Optional[Tuple[int, int]] = None,
time_range_ns: Optional[Tuple[int, int]] = None,
disable_channels: Optional[List[PulseChannel]] = None,
show_snapshot: bool = True,
show_framechange: bool = True,
show_waveform_info: bool = True,
show_barrier: bool = True,
plotter: str = types.Plotter.Mpl2D,
axis: Optional[Any] = None,
filename: Optional[str] = None):
"""Generate visualization data for pulse programs.
Args:
program: Program to visualize. This program can be arbitrary Qiskit Pulse program,
such as :py:class:~`qiskit.pulse.Waveform`, :py:class:~`qiskit.pulse.ParametricPulse`,
and :py:class:~`qiskit.pulse.Schedule`.
style: Stylesheet options. This can be dictionary or preset stylesheet classes. See
:py:class:~`qiskit.visualization.pulse_v2.stylesheets.IqxStandard`,
:py:class:~`qiskit.visualization.pulse_v2.stylesheets.IqxPublication`, and
:py:class:~`qiskit.visualization.pulse_v2.stylesheets.IqxDebugging` for details of
preset stylesheets. See also the stylesheet section for details of configuration keys.
backend: Backend object to play the input pulse program. If this object is provided,
the input program is visualized with the details of hardware information.
time_range_dt: Set horizontal axis limit in units of dt.
time_range_ns: Set horizontal axis limit in units of ns. This is available only when
`backend` object is set to the canvas.
disable_channels: List of pulse channel instances not shown in the output image.
show_snapshot: Set `True` to show snapshot instructions.
show_framechange: Set `True` to show frame change instructions. The frame change
indicates instructions that modulate phase or frequency of pulse channels.
show_waveform_info: Set `True` to show additional information about waveforms.
show_barrier: Set `True` to show barrier lines.
plotter: Name of plotter API to generate an output image.
See plotter section for details.
axis: Arbitrary object passed to the plotter. If this object is provided,
the plotters uses given `axis` instead of internally initializing a figure object.
This object format depends on the plotter. See plotters section for details.
filename: Set file path string to output image.
Returns:
Image data. The generated data format depends on the `plotter`.
If matplotlib family is specified, this will be a `matplotlib.pyplot.Figure` data.
Examples:
To visualize a pulse program, you can call this function with set of
control arguments. Most of appearance of the output image can be controlled by the
stylesheet.
Drawing with the default stylesheet.
.. jupyter-execute::
from qiskit import QuantumCircuit, transpile, schedule
from qiskit.visualization.pulse_v2 import draw
from qiskit.test.mock import FakeAlmaden
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0, 1)
qc.measure_all()
qc = transpile(qc, FakeAlmaden())
sched = schedule(qc, FakeAlmaden())
# draw
draw(sched, backend=FakeAlmaden())
Drawing with the stylesheet suited for publication.
.. jupyter-execute::
from qiskit import QuantumCircuit, transpile, schedule
from qiskit.visualization.pulse_v2 import draw, IqxPublication
from qiskit.test.mock import FakeAlmaden
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0, 1)
qc.measure_all()
qc = transpile(qc, FakeAlmaden())
sched = schedule(qc, FakeAlmaden())
# draw
draw(sched, style=IqxPublication(), backend=FakeAlmaden())
Drawing with the stylesheet suited for program debugging.
.. jupyter-execute::
from qiskit import QuantumCircuit, transpile, schedule
from qiskit.visualization.pulse_v2 import draw, IqxDebugging
from qiskit.test.mock import FakeAlmaden
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0, 1)
qc.measure_all()
qc = transpile(qc, FakeAlmaden())
sched = schedule(qc, FakeAlmaden())
# draw
draw(sched, style=IqxDebugging(), backend=FakeAlmaden())
You can partially customize a preset stylesheet when call it.
```python
my_style = {
'formatter.channel_scaling.drive': 5,
'formatter.channel_scaling.control': 1,
'formatter.channel_scaling.measure': 5
}
style = IqxStandard(**my_style)
# draw
draw(sched, style=style, backend=FakeAlmaden())
```
In the same way as above, you can create custom generator or layout functions
and update existing stylesheet with custom functions.
This feature enables you to control the most of appearance of the output image
without modifying the codebase of the pulse drawer.
Plotters:
- `mpl2d`: Matplotlib API to generate 2D image. Charts are placed along y axis with
vertical offset. This API takes matplotlib.axes.Axes as `axis` input.
Stylesheet:
- formatter.general.fig_width: Width of output image (default `13`).
- formatter.general.fig_chart_height: Height of output image per chart.
The height of each chart is multiplied with this factor and the
sum of all chart heights becomes the height of output image (default `1.5`).
- formatter.general.dpi: Dot per inch of image if `filename` is set (default `150`).
- formatter.general.vertical_resolution: Vertical resolution of the pulse envelope.
The change of data points below this limit is ignored (default `1e-6`).
- formatter.general.max_scale: Maximum scaling factor of each chart. This factor is
considered when chart auto-scaling is enabled (default `100`).
- formatter.color.fill_waveform_w: List of color codes assigned to the real and
the imaginary part envelope of waveform or parametric pulse drawing
(default `['#648fff', '#002999']`).
- formatter.color.fill_waveform_d: List of color codes assigned to the real and
the imaginary part envelope of waveforms in the drive channels
in the schedule drawing (default `['#648fff', '#002999']`).
- formatter.color.fill_waveform_u: List of color codes assigned to the real and
the imaginary part envelope of waveforms in the control channels
in the schedule drawing (default `['#ffb000', '#994A00']`).
- formatter.color.fill_waveform_m: List of color codes assigned to the real and
the imaginary part envelope of waveforms in the measure channels
in the schedule drawing (default `['#dc267f', '#760019']`).
- formatter.color.fill_waveform_a: List of color codes assigned to the real and
the imaginary part envelope of waveforms in the acquire channels
in the schedule drawing (default `['#dc267f', '#760019']`).
- formatter.color.baseline: Color code of lines of zero line of each chart
(default `'#000000'`).
- formatter.color.barrier: Color code of lines of barrier (default `'#222222'`).
- formatter.color.background: Color code of the face color of canvas
(default `'#f2f3f4'`).
- formatter.color.fig_title: Color code of the figure title text
(default `'#000000'`).
- formatter.color.annotate: Color code of annotation texts in the canvas
(default `'#222222'`).
- formatter.color.frame_change: Color code of the symbol for frame changes
(default `'#000000'`).
- formatter.color.snapshot: Color code of the symbol for snapshot
(default `'#000000'`)
- formatter.color.axis_label: Color code of axis labels (default `'#000000'`).
- formatter.alpha.fill_waveform: Transparency of waveforms. A value in the range from
`0` to `1`. The value `0` gives completely transparent waveforms (default `0.3`).
- formatter.alpha.baseline: Transparency of base lines. A value in the range from
`0` to `1`. The value `0` gives completely transparent base lines (default `1.0`).
- formatter.alpha.barrier: Transparency of barrier lines. A value in the range from
`0` to `1`. The value `0` gives completely transparent barrier lines (default `0.7`).
- formatter.layer.fill_waveform: Layer index of waveforms. Larger number comes
in the front of the output image (default `2`).
- formatter.layer.baseline: Layer index of baselines. Larger number comes
in the front of the output image (default `1`).
- formatter.layer.barrier: Layer index of barrier lines. Larger number comes
in the front of the output image (default `1`).
- formatter.layer.annotate: Layer index of annotations. Larger number comes
in the front of the output image (default `5`).
- formatter.layer.axis_label: Layer index of axis labels. Larger number comes
in the front of the output image (default `5`).
- formatter.layer.frame_change: Layer index of frame change symbols. Larger number comes
in the front of the output image (default `4`).
- formatter.layer.snapshot: Layer index of snapshot symbols. Larger number comes
in the front of the output image (default `3`).
- formatter.layer.fig_title: Layer index of the figure title. Larger number comes
in the front of the output image (default `6`).
- formatter.margin.top: Margin from the top boundary of the figure canvas to
the surface of the first chart (default `0.5`).
- formatter.margin.bottom: Margin from the bottom boundary of the figure canvas to
the surface of the last chart (default `0.5`).
- formatter.margin.left_percent: Margin from the left boundary of the figure canvas to
the zero point of the horizontal axis. The value is in units of percentage of
the whole program duration. If the duration is 100 and the value of 0.5 is set,
this keeps left margin of 5 (default `0.05`).
- formatter.margin.right_percent: Margin from the right boundary of the figure canvas to
the left limit of the horizontal axis. The value is in units of percentage of
the whole program duration. If the duration is 100 and the value of 0.5 is set,
this keeps right margin of 5 (default `0.05`).
- formatter.margin.between_channel: Vertical margin between charts (default `0.2`).
- formatter.label_offset.pulse_name: Offset of pulse name annotations from the
chart baseline (default `0.3`).
- formatter.label_offset.chart_info: Offset of chart info annotations from the
chart baseline (default `0.3`).
- formatter.label_offset.frame_change: Offset of frame change annotations from the
chart baseline (default `0.3`).
- formatter.label_offset.snapshot: Offset of snapshot annotations from the
chart baseline (default `0.3`).
- formatter.text_size.axis_label: Text size of axis labels (default `15`).
- formatter.text_size.annotate: Text size of annotations (default `12`).
- formatter.text_size.frame_change: Text size of frame change symbols (default `20`).
- formatter.text_size.snapshot: Text size of snapshot symbols (default `20`).
- formatter.text_size.fig_title: Text size of the figure title (default `15`).
- formatter.text_size.axis_break_symbol: Text size of axis break symbols (default `15`).
- formatter.line_width.fill_waveform: Line width of the fringe of filled waveforms
(default `0`).
- formatter.line_width.axis_break: Line width of axis breaks.
The axis break line paints over other drawing objects with the background
face color (default `6`).
- formatter.line_width.baseline: Line width of base lines (default `1`)
- formatter.line_width.barrier: Line width of barrier lines (default `1`).
- formatter.line_style.fill_waveform: Line style of the fringe of filled waveforms. This
conforms to the line style spec of matplotlib (default `'-'`).
- formatter.line_style.baseline: Line style of base lines. This
conforms to the line style spec of matplotlib (default `'-'`).
- formatter.line_style.barrier: Line style of barrier lines. This
conforms to the line style spec of matplotlib (default `':'`).
- formatter.channel_scaling.drive: Default scaling value of drive channel
waveforms (default `1.0`).
- formatter.channel_scaling.control: Default scaling value of control channel
waveforms (default `1.0`).
- formatter.channel_scaling.measure: Default scaling value of measure channel
waveforms (default `1.0`).
- formatter.channel_scaling.acquire: Default scaling value of acquire channel
waveforms (default `1.0`).
- formatter.channel_scaling.pos_spacing: Minimum height of chart above the baseline.
Chart top is determined based on the maximum height of waveforms associated
with the chart. If the maximum height is below this value, this value is set
as the chart top (default 0.1).
- formatter.channel_scaling.neg_spacing: Minimum height of chart below the baseline.
Chart bottom is determined based on the minimum height of waveforms associated
with the chart. If the minimum height is above this value, this value is set
as the chart bottom (default -0.1).
- formatter.axis_break.length: Waveform or idle time duration that axis break is
applied. Intervals longer than this value are truncated.
The value is in units of data points (default `3000`).
- formatter.axis_break.max_length: Length of new waveform or idle time duration
after axis break is applied. Longer intervals are truncated to this length
(default `1000`).
- formatter.control.apply_phase_modulation: Set `True` to apply phase modulation
to the waveforms (default `True`).
- formatter.control.show_snapshot_channel: Set `True` to show snapshot instructions
(default `True`).
- formatter.control.show_acquire_channel: Set `True` to show acquire channels
(default `True`).
- formatter.control.show_empty_channel: Set `True` to show charts without any waveforms
(default `True`).
- formatter.control.auto_chart_scaling: Set `True` to apply auto-scaling to charts
(default `True`).
- formatter.control.axis_break: Set `True` to apply axis break for long intervals
(default `True`).
- formatter.unicode_symbol.frame_change: Text that represents the symbol of
frame change. This text is used when the plotter doesn't support latex
(default u'\u21BA').
- formatter.unicode_symbol.snapshot: Text that represents the symbol of
snapshot. This text is used when the plotter doesn't support latex
(default u'\u21AF').
- formatter.latex_symbol.frame_change: Latex text that represents the symbol of
frame change (default r'\\circlearrowleft').
- formatter.latex_symbol.snapshot: Latex text that represents the symbol of
snapshot (default '').
- generator.waveform: List of callback functions that generates drawing object
for waveforms. Arbitrary callback functions satisfying the generator format
can be set here. There are some default generators in the pulse drawer.
See :py:mod:~`qiskit.visualization.pulse_v2.generators.waveform` for more details.
No default generator is set.
- generator.frame: List of callback functions that generates drawing object
for frame changes. Arbitrary callback functions satisfying the generator format
can be set here. There are some default generators in the pulse drawer.
See :py:mod:~`qiskit.visualization.pulse_v2.generators.frame` for more details.
No default generator is set.
- generator.chart: List of callback functions that generates drawing object
for charts. Arbitrary callback functions satisfying the generator format
can be set here. There are some default generators in the pulse drawer.
See :py:mod:~`qiskit.visualization.pulse_v2.generators.chart` for more details.
No default generator is set.
- generator.snapshot: List of callback functions that generates drawing object
for snapshots. Arbitrary callback functions satisfying the generator format
can be set here. There are some default generators in the pulse drawer.
See :py:mod:~`qiskit.visualization.pulse_v2.generators.snapshot` for more details.
No default generator is set.
- generator.barrier: List of callback functions that generates drawing object
for barriers. Arbitrary callback functions satisfying the generator format
can be set here. There are some default generators in the pulse drawer.
See :py:mod:~`qiskit.visualization.pulse_v2.generators.barrier` for more details.
No default generator is set.
- layout.chart_channel_map: Callback function that determines the relationship
between pulse channels and charts.
See :py:mod:~`qiskit.visualization.pulse_v2.layout` for more details.
No default layout is set.
- layout.time_axis_map: Callback function that determines the layout of
horizontal axis labels.
See :py:mod:~`qiskit.visualization.pulse_v2.layout` for more details.
No default layout is set.
- layout.figure_title: Callback function that generates a string for
the figure title.
See :py:mod:~`qiskit.visualization.pulse_v2.layout` for more details.
No default layout is set.
Raises:
ImportError: When required visualization package is not installed.
VisualizationError: When invalid plotter API is specified.
"""
temp_style = stylesheet.QiskitPulseStyle()
temp_style.update(style or stylesheet.IqxStandard())
if backend:
device = device_info.OpenPulseBackendInfo.create_from_backend(backend)
else:
device = device_info.OpenPulseBackendInfo()
# create empty canvas and load program
canvas = core.DrawerCanvas(stylesheet=temp_style, device=device)
canvas.load_program(program=program)
#
# update configuration
#
# time range
if time_range_dt:
canvas.set_time_range(*time_range_dt, seconds=False)
if time_range_ns:
canvas.set_time_range(*time_range_ns, seconds=True)
# channels not shown
if disable_channels:
for chan in disable_channels:
canvas.set_disable_channel(chan, remove=True)
# show snapshots
if not show_snapshot:
canvas.set_disable_type(types.DrawingSymbol.SNAPSHOT, remove=True)
canvas.set_disable_type(types.DrawingLabel.SNAPSHOT, remove=True)
# show frame changes
if not show_framechange:
canvas.set_disable_type(types.DrawingSymbol.FRAME, remove=True)
canvas.set_disable_type(types.DrawingLabel.FRAME, remove=True)
# show waveform info
if not show_waveform_info:
canvas.set_disable_type(types.DrawingLabel.PULSE_INFO, remove=True)
canvas.set_disable_type(types.DrawingLabel.PULSE_NAME, remove=True)
# show barrier
if not show_barrier:
canvas.set_disable_type(types.DrawingLine.BARRIER, remove=True)
canvas.update()
#
# Call plotter API and generate image
#
if plotter == types.Plotter.Mpl2D:
try:
from qiskit.visualization.pulse_v2.plotters import Mpl2DPlotter
except ImportError:
raise ImportError('Must have Matplotlib installed.')
plotter_api = Mpl2DPlotter(canvas=canvas, axis=axis)
plotter_api.draw()
else:
raise VisualizationError('Plotter API {name} is not supported.'.format(name=plotter))
# save figure
if filename:
plotter_api.save_file(filename=filename)
return plotter_api.get_image()
| def draw(program: Union[Waveform, ParametricPulse, Schedule],
style: Optional[Dict[str, Any]] = None,
backend: Optional[BaseBackend] = None,
time_range_dt: Optional[Tuple[int, int]] = None,
time_range_ns: Optional[Tuple[int, int]] = None,
disable_channels: Optional[List[PulseChannel]] = None,
show_snapshot: bool = True,
show_framechange: bool = True,
show_waveform_info: bool = True,
show_barrier: bool = True,
plotter: str = types.Plotter.Mpl2D.value(),
axis: Optional[Any] = None,
filename: Optional[str] = None):
"""Generate visualization data for pulse programs.
Args:
program: Program to visualize. This program can be arbitrary Qiskit Pulse program,
such as :py:class:~`qiskit.pulse.Waveform`, :py:class:~`qiskit.pulse.ParametricPulse`,
and :py:class:~`qiskit.pulse.Schedule`.
style: Stylesheet options. This can be dictionary or preset stylesheet classes. See
:py:class:~`qiskit.visualization.pulse_v2.stylesheets.IqxStandard`,
:py:class:~`qiskit.visualization.pulse_v2.stylesheets.IqxPublication`, and
:py:class:~`qiskit.visualization.pulse_v2.stylesheets.IqxDebugging` for details of
preset stylesheets. See also the stylesheet section for details of configuration keys.
backend: Backend object to play the input pulse program. If this object is provided,
the input program is visualized with the details of hardware information.
time_range_dt: Set horizontal axis limit in units of dt.
time_range_ns: Set horizontal axis limit in units of ns. This is available only when
`backend` object is set to the canvas.
disable_channels: List of pulse channel instances not shown in the output image.
show_snapshot: Set `True` to show snapshot instructions.
show_framechange: Set `True` to show frame change instructions. The frame change
indicates instructions that modulate phase or frequency of pulse channels.
show_waveform_info: Set `True` to show additional information about waveforms.
show_barrier: Set `True` to show barrier lines.
plotter: Name of plotter API to generate an output image.
See plotter section for details.
axis: Arbitrary object passed to the plotter. If this object is provided,
the plotters uses given `axis` instead of internally initializing a figure object.
This object format depends on the plotter. See plotters section for details.
filename: Set file path string to output image.
Returns:
Image data. The generated data format depends on the `plotter`.
If matplotlib family is specified, this will be a `matplotlib.pyplot.Figure` data.
Examples:
To visualize a pulse program, you can call this function with set of
control arguments. Most of appearance of the output image can be controlled by the
stylesheet.
Drawing with the default stylesheet.
.. jupyter-execute::
from qiskit import QuantumCircuit, transpile, schedule
from qiskit.visualization.pulse_v2 import draw
from qiskit.test.mock import FakeAlmaden
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0, 1)
qc.measure_all()
qc = transpile(qc, FakeAlmaden())
sched = schedule(qc, FakeAlmaden())
# draw
draw(sched, backend=FakeAlmaden())
Drawing with the stylesheet suited for publication.
.. jupyter-execute::
from qiskit import QuantumCircuit, transpile, schedule
from qiskit.visualization.pulse_v2 import draw, IqxPublication
from qiskit.test.mock import FakeAlmaden
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0, 1)
qc.measure_all()
qc = transpile(qc, FakeAlmaden())
sched = schedule(qc, FakeAlmaden())
# draw
draw(sched, style=IqxPublication(), backend=FakeAlmaden())
Drawing with the stylesheet suited for program debugging.
.. jupyter-execute::
from qiskit import QuantumCircuit, transpile, schedule
from qiskit.visualization.pulse_v2 import draw, IqxDebugging
from qiskit.test.mock import FakeAlmaden
qc = QuantumCircuit(2)
qc.h(0)
qc.cx(0, 1)
qc.measure_all()
qc = transpile(qc, FakeAlmaden())
sched = schedule(qc, FakeAlmaden())
# draw
draw(sched, style=IqxDebugging(), backend=FakeAlmaden())
You can partially customize a preset stylesheet when call it.
```python
my_style = {
'formatter.channel_scaling.drive': 5,
'formatter.channel_scaling.control': 1,
'formatter.channel_scaling.measure': 5
}
style = IqxStandard(**my_style)
# draw
draw(sched, style=style, backend=FakeAlmaden())
```
In the same way as above, you can create custom generator or layout functions
and update existing stylesheet with custom functions.
This feature enables you to control the most of appearance of the output image
without modifying the codebase of the pulse drawer.
Plotters:
- `mpl2d`: Matplotlib API to generate 2D image. Charts are placed along y axis with
vertical offset. This API takes matplotlib.axes.Axes as `axis` input.
Stylesheet:
- formatter.general.fig_width: Width of output image (default `13`).
- formatter.general.fig_chart_height: Height of output image per chart.
The height of each chart is multiplied with this factor and the
sum of all chart heights becomes the height of output image (default `1.5`).
- formatter.general.dpi: Dot per inch of image if `filename` is set (default `150`).
- formatter.general.vertical_resolution: Vertical resolution of the pulse envelope.
The change of data points below this limit is ignored (default `1e-6`).
- formatter.general.max_scale: Maximum scaling factor of each chart. This factor is
considered when chart auto-scaling is enabled (default `100`).
- formatter.color.fill_waveform_w: List of color codes assigned to the real and
the imaginary part envelope of waveform or parametric pulse drawing
(default `['#648fff', '#002999']`).
- formatter.color.fill_waveform_d: List of color codes assigned to the real and
the imaginary part envelope of waveforms in the drive channels
in the schedule drawing (default `['#648fff', '#002999']`).
- formatter.color.fill_waveform_u: List of color codes assigned to the real and
the imaginary part envelope of waveforms in the control channels
in the schedule drawing (default `['#ffb000', '#994A00']`).
- formatter.color.fill_waveform_m: List of color codes assigned to the real and
the imaginary part envelope of waveforms in the measure channels
in the schedule drawing (default `['#dc267f', '#760019']`).
- formatter.color.fill_waveform_a: List of color codes assigned to the real and
the imaginary part envelope of waveforms in the acquire channels
in the schedule drawing (default `['#dc267f', '#760019']`).
- formatter.color.baseline: Color code of lines of zero line of each chart
(default `'#000000'`).
- formatter.color.barrier: Color code of lines of barrier (default `'#222222'`).
- formatter.color.background: Color code of the face color of canvas
(default `'#f2f3f4'`).
- formatter.color.fig_title: Color code of the figure title text
(default `'#000000'`).
- formatter.color.annotate: Color code of annotation texts in the canvas
(default `'#222222'`).
- formatter.color.frame_change: Color code of the symbol for frame changes
(default `'#000000'`).
- formatter.color.snapshot: Color code of the symbol for snapshot
(default `'#000000'`)
- formatter.color.axis_label: Color code of axis labels (default `'#000000'`).
- formatter.alpha.fill_waveform: Transparency of waveforms. A value in the range from
`0` to `1`. The value `0` gives completely transparent waveforms (default `0.3`).
- formatter.alpha.baseline: Transparency of base lines. A value in the range from
`0` to `1`. The value `0` gives completely transparent base lines (default `1.0`).
- formatter.alpha.barrier: Transparency of barrier lines. A value in the range from
`0` to `1`. The value `0` gives completely transparent barrier lines (default `0.7`).
- formatter.layer.fill_waveform: Layer index of waveforms. Larger number comes
in the front of the output image (default `2`).
- formatter.layer.baseline: Layer index of baselines. Larger number comes
in the front of the output image (default `1`).
- formatter.layer.barrier: Layer index of barrier lines. Larger number comes
in the front of the output image (default `1`).
- formatter.layer.annotate: Layer index of annotations. Larger number comes
in the front of the output image (default `5`).
- formatter.layer.axis_label: Layer index of axis labels. Larger number comes
in the front of the output image (default `5`).
- formatter.layer.frame_change: Layer index of frame change symbols. Larger number comes
in the front of the output image (default `4`).
- formatter.layer.snapshot: Layer index of snapshot symbols. Larger number comes
in the front of the output image (default `3`).
- formatter.layer.fig_title: Layer index of the figure title. Larger number comes
in the front of the output image (default `6`).
- formatter.margin.top: Margin from the top boundary of the figure canvas to
the surface of the first chart (default `0.5`).
- formatter.margin.bottom: Margin from the bottom boundary of the figure canvas to
the surface of the last chart (default `0.5`).
- formatter.margin.left_percent: Margin from the left boundary of the figure canvas to
the zero point of the horizontal axis. The value is in units of percentage of
the whole program duration. If the duration is 100 and the value of 0.5 is set,
this keeps left margin of 5 (default `0.05`).
- formatter.margin.right_percent: Margin from the right boundary of the figure canvas to
the left limit of the horizontal axis. The value is in units of percentage of
the whole program duration. If the duration is 100 and the value of 0.5 is set,
this keeps right margin of 5 (default `0.05`).
- formatter.margin.between_channel: Vertical margin between charts (default `0.2`).
- formatter.label_offset.pulse_name: Offset of pulse name annotations from the
chart baseline (default `0.3`).
- formatter.label_offset.chart_info: Offset of chart info annotations from the
chart baseline (default `0.3`).
- formatter.label_offset.frame_change: Offset of frame change annotations from the
chart baseline (default `0.3`).
- formatter.label_offset.snapshot: Offset of snapshot annotations from the
chart baseline (default `0.3`).
- formatter.text_size.axis_label: Text size of axis labels (default `15`).
- formatter.text_size.annotate: Text size of annotations (default `12`).
- formatter.text_size.frame_change: Text size of frame change symbols (default `20`).
- formatter.text_size.snapshot: Text size of snapshot symbols (default `20`).
- formatter.text_size.fig_title: Text size of the figure title (default `15`).
- formatter.text_size.axis_break_symbol: Text size of axis break symbols (default `15`).
- formatter.line_width.fill_waveform: Line width of the fringe of filled waveforms
(default `0`).
- formatter.line_width.axis_break: Line width of axis breaks.
The axis break line paints over other drawing objects with the background
face color (default `6`).
- formatter.line_width.baseline: Line width of base lines (default `1`)
- formatter.line_width.barrier: Line width of barrier lines (default `1`).
- formatter.line_style.fill_waveform: Line style of the fringe of filled waveforms. This
conforms to the line style spec of matplotlib (default `'-'`).
- formatter.line_style.baseline: Line style of base lines. This
conforms to the line style spec of matplotlib (default `'-'`).
- formatter.line_style.barrier: Line style of barrier lines. This
conforms to the line style spec of matplotlib (default `':'`).
- formatter.channel_scaling.drive: Default scaling value of drive channel
waveforms (default `1.0`).
- formatter.channel_scaling.control: Default scaling value of control channel
waveforms (default `1.0`).
- formatter.channel_scaling.measure: Default scaling value of measure channel
waveforms (default `1.0`).
- formatter.channel_scaling.acquire: Default scaling value of acquire channel
waveforms (default `1.0`).
- formatter.channel_scaling.pos_spacing: Minimum height of chart above the baseline.
Chart top is determined based on the maximum height of waveforms associated
with the chart. If the maximum height is below this value, this value is set
as the chart top (default 0.1).
- formatter.channel_scaling.neg_spacing: Minimum height of chart below the baseline.
Chart bottom is determined based on the minimum height of waveforms associated
with the chart. If the minimum height is above this value, this value is set
as the chart bottom (default -0.1).
- formatter.axis_break.length: Waveform or idle time duration that axis break is
applied. Intervals longer than this value are truncated.
The value is in units of data points (default `3000`).
- formatter.axis_break.max_length: Length of new waveform or idle time duration
after axis break is applied. Longer intervals are truncated to this length
(default `1000`).
- formatter.control.apply_phase_modulation: Set `True` to apply phase modulation
to the waveforms (default `True`).
- formatter.control.show_snapshot_channel: Set `True` to show snapshot instructions
(default `True`).
- formatter.control.show_acquire_channel: Set `True` to show acquire channels
(default `True`).
- formatter.control.show_empty_channel: Set `True` to show charts without any waveforms
(default `True`).
- formatter.control.auto_chart_scaling: Set `True` to apply auto-scaling to charts
(default `True`).
- formatter.control.axis_break: Set `True` to apply axis break for long intervals
(default `True`).
- formatter.unicode_symbol.frame_change: Text that represents the symbol of
frame change. This text is used when the plotter doesn't support latex
(default u'\u21BA').
- formatter.unicode_symbol.snapshot: Text that represents the symbol of
snapshot. This text is used when the plotter doesn't support latex
(default u'\u21AF').
- formatter.latex_symbol.frame_change: Latex text that represents the symbol of
frame change (default r'\\circlearrowleft').
- formatter.latex_symbol.snapshot: Latex text that represents the symbol of
snapshot (default '').
- generator.waveform: List of callback functions that generates drawing object
for waveforms. Arbitrary callback functions satisfying the generator format
can be set here. There are some default generators in the pulse drawer.
See :py:mod:~`qiskit.visualization.pulse_v2.generators.waveform` for more details.
No default generator is set.
- generator.frame: List of callback functions that generates drawing object
for frame changes. Arbitrary callback functions satisfying the generator format
can be set here. There are some default generators in the pulse drawer.
See :py:mod:~`qiskit.visualization.pulse_v2.generators.frame` for more details.
No default generator is set.
- generator.chart: List of callback functions that generates drawing object
for charts. Arbitrary callback functions satisfying the generator format
can be set here. There are some default generators in the pulse drawer.
See :py:mod:~`qiskit.visualization.pulse_v2.generators.chart` for more details.
No default generator is set.
- generator.snapshot: List of callback functions that generates drawing object
for snapshots. Arbitrary callback functions satisfying the generator format
can be set here. There are some default generators in the pulse drawer.
See :py:mod:~`qiskit.visualization.pulse_v2.generators.snapshot` for more details.
No default generator is set.
- generator.barrier: List of callback functions that generates drawing object
for barriers. Arbitrary callback functions satisfying the generator format
can be set here. There are some default generators in the pulse drawer.
See :py:mod:~`qiskit.visualization.pulse_v2.generators.barrier` for more details.
No default generator is set.
- layout.chart_channel_map: Callback function that determines the relationship
between pulse channels and charts.
See :py:mod:~`qiskit.visualization.pulse_v2.layout` for more details.
No default layout is set.
- layout.time_axis_map: Callback function that determines the layout of
horizontal axis labels.
See :py:mod:~`qiskit.visualization.pulse_v2.layout` for more details.
No default layout is set.
- layout.figure_title: Callback function that generates a string for
the figure title.
See :py:mod:~`qiskit.visualization.pulse_v2.layout` for more details.
No default layout is set.
Raises:
ImportError: When required visualization package is not installed.
VisualizationError: When invalid plotter API is specified.
"""
temp_style = stylesheet.QiskitPulseStyle()
temp_style.update(style or stylesheet.IqxStandard())
if backend:
device = device_info.OpenPulseBackendInfo.create_from_backend(backend)
else:
device = device_info.OpenPulseBackendInfo()
# create empty canvas and load program
canvas = core.DrawerCanvas(stylesheet=temp_style, device=device)
canvas.load_program(program=program)
#
# update configuration
#
# time range
if time_range_dt:
canvas.set_time_range(*time_range_dt, seconds=False)
if time_range_ns:
canvas.set_time_range(*time_range_ns, seconds=True)
# channels not shown
if disable_channels:
for chan in disable_channels:
canvas.set_disable_channel(chan, remove=True)
# show snapshots
if not show_snapshot:
canvas.set_disable_type(types.DrawingSymbol.SNAPSHOT, remove=True)
canvas.set_disable_type(types.DrawingLabel.SNAPSHOT, remove=True)
# show frame changes
if not show_framechange:
canvas.set_disable_type(types.DrawingSymbol.FRAME, remove=True)
canvas.set_disable_type(types.DrawingLabel.FRAME, remove=True)
# show waveform info
if not show_waveform_info:
canvas.set_disable_type(types.DrawingLabel.PULSE_INFO, remove=True)
canvas.set_disable_type(types.DrawingLabel.PULSE_NAME, remove=True)
# show barrier
if not show_barrier:
canvas.set_disable_type(types.DrawingLine.BARRIER, remove=True)
canvas.update()
#
# Call plotter API and generate image
#
if plotter == types.Plotter.Mpl2D:
try:
from qiskit.visualization.pulse_v2.plotters import Mpl2DPlotter
except ImportError:
raise ImportError('Must have Matplotlib installed.')
plotter_api = Mpl2DPlotter(canvas=canvas, axis=axis)
plotter_api.draw()
else:
raise VisualizationError('Plotter API {name} is not supported.'.format(name=plotter))
# save figure
if filename:
plotter_api.save_file(filename=filename)
return plotter_api.get_image()
|
16,341 | def setup_platform(hass, config, add_entities, discovery_info=None):
"""Find and return switches controlled by shell commands."""
setup_reload_service(hass, DOMAIN, PLATFORMS)
devices = config.get(CONF_SWITCHES, {})
switches = []
for object_id, device_config in devices.items():
icon_template = device_config.get(CONF_ICON_TEMPLATE)
value_template = device_config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = hass
switches.append(
CommandSwitch(
hass,
object_id,
device_config.get(CONF_FRIENDLY_NAME, object_id),
device_config[CONF_COMMAND_ON],
device_config[CONF_COMMAND_OFF],
device_config.get(CONF_COMMAND_STATE),
icon_template,
value_template,
device_config[CONF_COMMAND_TIMEOUT],
)
)
if not switches:
_LOGGER.error("No switches added")
return False
add_entities(switches)
| def setup_platform(hass, config, add_entities, discovery_info=None):
"""Find and return switches controlled by shell commands."""
setup_reload_service(hass, DOMAIN, PLATFORMS)
devices = config.get(CONF_SWITCHES, {})
switches = []
for object_id, device_config in devices.items():
icon_template = device_config.get(CONF_ICON_TEMPLATE)
value_template = device_config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = hass
switches.append(
CommandSwitch(
hass,
object_id,
device_config.get(CONF_FRIENDLY_NAME, object_id),
device_config[CONF_COMMAND_ON],
device_config[CONF_COMMAND_OFF],
device_config.get(CONF_COMMAND_STATE),
device_config.get(CONF_ICON_TEMPLATE),
value_template,
device_config[CONF_COMMAND_TIMEOUT],
)
)
if not switches:
_LOGGER.error("No switches added")
return False
add_entities(switches)
|
17,398 | def _get_engine_from_magic_number(filename_or_obj):
# check byte header to determine file type
if isinstance(filename_or_obj, bytes):
magic_number = filename_or_obj[:8]
else:
if filename_or_obj.tell() != 0:
raise ValueError(
"file-like object read/write pointer not at zero "
"please close and reopen, or use a context "
"manager"
)
magic_number = filename_or_obj.read(8)
filename_or_obj.seek(0)
if magic_number.startswith(b"CDF"):
engine = "scipy"
elif magic_number.startswith(b"\211HDF\r\n\032\n"):
engine = "h5netcdf"
else:
raise ValueError(
"{} is not the signature of any supported file format "
"did you mean to pass a string for a path instead?".format(magic_number)
)
return engine
| def _get_engine_from_magic_number(filename_or_obj):
# check byte header to determine file type
if isinstance(filename_or_obj, bytes):
magic_number = filename_or_obj[:8]
else:
if filename_or_obj.tell() != 0:
raise ValueError(
"file-like object read/write pointer not at zero "
"please close and reopen, or use a context "
"manager"
)
magic_number = filename_or_obj.read(8)
filename_or_obj.seek(0)
if magic_number.startswith(b"CDF"):
engine = "scipy"
elif magic_number.startswith(b"\211HDF\r\n\032\n"):
engine = "h5netcdf"
else:
raise ValueError(
f"{magic_number} is not the signature of any supported file format "
"did you mean to pass a string for a path instead?".format(magic_number)
)
return engine
|
6,819 | def send_one(email, smtpserver=None, auto_commit=True, now=False, from_test=False):
'''Send Email Queue with given smtpserver'''
email = frappe.db.sql('''select
name, status, communication, message, sender, reference_doctype,
reference_name, unsubscribe_param, unsubscribe_method, expose_recipients,
show_as_cc, add_unsubscribe_link, attachments, retry
from
`tabEmail Queue`
where
name=%s
for update''', email, as_dict=True)[0]
recipients_list = frappe.db.sql('''select name, recipient, status from
`tabEmail Queue Recipient` where parent=%s''',email.name,as_dict=1)
if frappe.are_emails_muted():
frappe.msgprint(_("Emails are muted"))
return
if cint(frappe.defaults.get_defaults().get("hold_queue"))==1 :
return
if email.status not in ('Not Sent','Partially Sent') :
# rollback to release lock and return
frappe.db.rollback()
return
frappe.db.sql("""update `tabEmail Queue` set status='Sending', modified=%s where name=%s""",
(now_datetime(), email.name), auto_commit=auto_commit)
if email.communication:
frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit)
try:
if not frappe.flags.in_test:
if not smtpserver: smtpserver = SMTPServer(append_to=email.reference_doctype, sender=email.sender)
smtpserver.setup_email_account(append_to=email.reference_doctype, sender=email.sender)
for recipient in recipients_list:
if recipient.status != "Not Sent":
continue
message = prepare_message(email, recipient.recipient, recipients_list)
if not frappe.flags.in_test:
smtpserver.sess.sendmail(email.sender, recipient.recipient, message)
recipient.status = "Sent"
frappe.db.sql("""update `tabEmail Queue Recipient` set status='Sent', modified=%s where name=%s""",
(now_datetime(), recipient.name), auto_commit=auto_commit)
#if all are sent set status
if any("Sent" == s.status for s in recipients_list):
frappe.db.sql("""update `tabEmail Queue` set status='Sent', modified=%s where name=%s""",
(now_datetime(), email.name), auto_commit=auto_commit)
else:
frappe.db.sql("""update `tabEmail Queue` set status='Error', error=%s
where name=%s""", ("No recipients to send to", email.name), auto_commit=auto_commit)
if frappe.flags.in_test:
frappe.flags.sent_mail = message
return
if email.communication:
frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit)
except (smtplib.SMTPServerDisconnected,
smtplib.SMTPConnectError,
smtplib.SMTPHeloError,
smtplib.SMTPAuthenticationError,
smtplib.SMTPRecipientsRefused,
JobTimeoutException):
# bad connection/timeout, retry later
if any("Sent" == s.status for s in recipients_list):
frappe.db.sql("""update `tabEmail Queue` set status='Partially Sent', modified=%s where name=%s""",
(now_datetime(), email.name), auto_commit=auto_commit)
else:
frappe.db.sql("""update `tabEmail Queue` set status='Not Sent', modified=%s where name=%s""",
(now_datetime(), email.name), auto_commit=auto_commit)
if email.communication:
frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit)
# no need to attempt further
return
except Exception as e:
frappe.db.rollback()
if email.retry < 3:
frappe.db.sql("""update `tabEmail Queue` set status='Not Sent', modified=%s, retry=retry+1 where name=%s""",
(now_datetime(), email.name), auto_commit=auto_commit)
else:
if any("Sent" == s.status for s in recipients_list):
frappe.db.sql("""update `tabEmail Queue` set status='Partially Errored', error=%s where name=%s""",
(text_type(e), email.name), auto_commit=auto_commit)
else:
frappe.db.sql("""update `tabEmail Queue` set status='Error', error=%s
where name=%s""", (text_type(e), email.name), auto_commit=auto_commit)
if email.communication:
frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit)
if now:
print(frappe.get_traceback())
raise e
else:
# log to Error Log
log('frappe.email.queue.flush', text_type(e))
| def send_one(email, smtpserver=None, auto_commit=True, now=False, from_test=False):
'''Send Email Queue with given smtpserver'''
email = frappe.db.sql('''select
name, status, communication, message, sender, reference_doctype,
reference_name, unsubscribe_param, unsubscribe_method, expose_recipients,
show_as_cc, add_unsubscribe_link, attachments, retry
from
`tabEmail Queue`
where
name=%s
for update''', email, as_dict=True)[0]
recipients_list = frappe.db.sql('''select name, recipient, status from
`tabEmail Queue Recipient` where parent=%s''',email.name,as_dict=1)
if frappe.are_emails_muted():
frappe.msgprint(_("Emails are muted"))
return
if cint(frappe.defaults.get_defaults().get("hold_queue"))==1 :
return
if email.status not in ('Not Sent','Partially Sent') :
# rollback to release lock and return
frappe.db.rollback()
return
frappe.db.sql("""update `tabEmail Queue` set status='Sending', modified=%s where name=%s""",
(now_datetime(), email.name), auto_commit=auto_commit)
if email.communication:
frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit)
try:
if not frappe.flags.in_test:
if not smtpserver: smtpserver = SMTPServer()
smtpserver.setup_email_account(append_to=email.reference_doctype, sender=email.sender)
for recipient in recipients_list:
if recipient.status != "Not Sent":
continue
message = prepare_message(email, recipient.recipient, recipients_list)
if not frappe.flags.in_test:
smtpserver.sess.sendmail(email.sender, recipient.recipient, message)
recipient.status = "Sent"
frappe.db.sql("""update `tabEmail Queue Recipient` set status='Sent', modified=%s where name=%s""",
(now_datetime(), recipient.name), auto_commit=auto_commit)
#if all are sent set status
if any("Sent" == s.status for s in recipients_list):
frappe.db.sql("""update `tabEmail Queue` set status='Sent', modified=%s where name=%s""",
(now_datetime(), email.name), auto_commit=auto_commit)
else:
frappe.db.sql("""update `tabEmail Queue` set status='Error', error=%s
where name=%s""", ("No recipients to send to", email.name), auto_commit=auto_commit)
if frappe.flags.in_test:
frappe.flags.sent_mail = message
return
if email.communication:
frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit)
except (smtplib.SMTPServerDisconnected,
smtplib.SMTPConnectError,
smtplib.SMTPHeloError,
smtplib.SMTPAuthenticationError,
smtplib.SMTPRecipientsRefused,
JobTimeoutException):
# bad connection/timeout, retry later
if any("Sent" == s.status for s in recipients_list):
frappe.db.sql("""update `tabEmail Queue` set status='Partially Sent', modified=%s where name=%s""",
(now_datetime(), email.name), auto_commit=auto_commit)
else:
frappe.db.sql("""update `tabEmail Queue` set status='Not Sent', modified=%s where name=%s""",
(now_datetime(), email.name), auto_commit=auto_commit)
if email.communication:
frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit)
# no need to attempt further
return
except Exception as e:
frappe.db.rollback()
if email.retry < 3:
frappe.db.sql("""update `tabEmail Queue` set status='Not Sent', modified=%s, retry=retry+1 where name=%s""",
(now_datetime(), email.name), auto_commit=auto_commit)
else:
if any("Sent" == s.status for s in recipients_list):
frappe.db.sql("""update `tabEmail Queue` set status='Partially Errored', error=%s where name=%s""",
(text_type(e), email.name), auto_commit=auto_commit)
else:
frappe.db.sql("""update `tabEmail Queue` set status='Error', error=%s
where name=%s""", (text_type(e), email.name), auto_commit=auto_commit)
if email.communication:
frappe.get_doc('Communication', email.communication).set_delivery_status(commit=auto_commit)
if now:
print(frappe.get_traceback())
raise e
else:
# log to Error Log
log('frappe.email.queue.flush', text_type(e))
|
5,261 | def _extract_tokens(lemmas, scores, ratio, words):
"""Extracts tokens from provided lemmas. Most scored lemmas are used if `words` not provided.
Parameters
----------
lemmas : list of str
Given lemmas.
scores : dict
Dictionary with lemmas and its scores.
ratio : float
Proportion of lemmas used for final result.
words : int
Number of used words. If no "words" option is selected, the number of
sentences is reduced by the provided ratio, else, the ratio is ignored.
Returns
-------
list of (float, str)
Scores and corresponded lemmas.
"""
lemmas.sort(key=lambda s: scores[s], reverse=True)
length = len(lemmas) * ratio if words is None else words if words <= len(lemmas) else len(lemmas)
return [(scores[lemmas[i]], lemmas[i],) for i in range(int(length))]
| def _extract_tokens(lemmas, scores, ratio, words):
"""Extracts tokens from provided lemmas. Most scored lemmas are used if `words` not provided.
Parameters
----------
lemmas : list of str
Given lemmas.
scores : dict
Dictionary with lemmas and its scores.
ratio : float
Proportion of lemmas used for final result.
words : int
Number of used words. If no "words" option is selected, the number of
sentences is reduced by the provided ratio, else, the ratio is ignored.
Returns
-------
list of (float, str)
Scores and corresponded lemmas.
"""
lemmas.sort(key=lambda s: scores[s], reverse=True)
length = len(lemmas) * ratio if words is None else min(words, len(lemmas))
return [(scores[lemmas[i]], lemmas[i],) for i in range(int(length))]
|
48,412 | def main():
module = AnsibleModule(
argument_spec=dict(
project_id=dict(required=True),
instance_id=dict(required=True),
),
supports_check_mode=True
)
# Get parameters
project_id = module.params.get('project_id')
instance_id = module.params.get('instance_id')
project = ""
instance = ""
result = ""
# Connect to OVH API
client = ovh.Client()
if not HAS_OVH:
module.fail_json(msg='python-ovh is required to run this module, see https://github.com/ovh/python-ovh')
# Check that the instance exists
try:
project = client.get('/cloud/project/{0}'.format(project_id))
except ovh.exceptions.ResourceNotFoundError:
module.fail_json(msg='project {0} does not exist'.format(project_id))
# Check that the instance exists
try:
instance = client.get('/cloud/project/{0}/instance/{1}'.format(project_id, instance_id))
except ovh.exceptions.ResourceNotFoundError:
module.fail_json(msg='instance {0} does not exist in project {1}'.format(instance_id, project_id))
# Is monthlyBilling already enabled or pending ?
if instance['monthlyBilling'] is not None:
if instance['monthlyBilling']['status'] in ['ok', 'activationPending']:
module.exit_json(changed=False, result=instance['monthlyBilling'])
if module.check_mode:
module.exit_json(changed=True, msg="Dry Run!")
else:
try:
result = client.post('/cloud/project/{0}/instance/{1}/activeMonthlyBilling'.format(project_id, instance_id))
module.exit_json(changed=True, result=result['monthlyBilling'])
except APIError as apiError:
module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
# We should never reach here
module.fail_json(msg='Internal ovh_monthly_billing module error')
| def main():
module = AnsibleModule(
argument_spec=dict(
project_id=dict(required=True),
instance_id=dict(required=True),
),
supports_check_mode=True
)
# Get parameters
project_id = module.params.get('project_id')
instance_id = module.params.get('instance_id')
project = ""
instance = ""
result = ""
# Connect to OVH API
client = ovh.Client()
if not HAS_OVH:
module.fail_json(msg='python-ovh is required to run this module, see https://github.com/ovh/python-ovh')
# Check that the instance exists
try:
project = client.get('/cloud/project/{0}'.format(project_id))
except ovh.exceptions.ResourceNotFoundError:
module.fail_json(msg='project {0} does not exist'.format(project_id))
# Check that the instance exists
try:
instance = client.get('/cloud/project/{0}/instance/{1}'.format(project_id, instance_id))
except ovh.exceptions.ResourceNotFoundError:
module.fail_json(msg='instance {0} does not exist in project {1}'.format(instance_id, project_id))
# Is monthlyBilling already enabled or pending ?
if instance['monthlyBilling'] is not None:
if instance['monthlyBilling']['status'] in ['ok', 'activationPending']:
module.exit_json(changed=False, result=instance['monthlyBilling'])
if module.check_mode:
module.exit_json(changed=True, msg="Dry Run!")
else:
try:
result = client.post('/cloud/project/{0}/instance/{1}/activeMonthlyBilling'.format(project_id, instance_id))
module.exit_json(changed=True, result=result['monthlyBilling'])
except APIError as apiError:
module.fail_json(changed=False, msg="Failed to call OVH API: {0}".format(apiError))
if not HAS_OVH:
self.fail_json(msg=missing_required_lib('python-ovh'), exception=OVH_IMPORT_ERROR)
# We should never reach here
module.fail_json(msg='Internal ovh_monthly_billing module error')
|
8,244 | def extract_along_coord(smap, coord):
"""
Return the value of the image array at every point along the coordinate.
For a given coordinate ``coord``, find all the pixels that cross the coordinate
and extract the values of the image array in ``smap`` at these points. This is done by applying
`Bresenham's line algorithm <http://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm>`_
between the consecutive pairs of points in the coordinate and then indexing the data
array of ``smap`` at those points.
Parameters
----------
smap : `~sunpy.map.GenericMap`
coord : `~astropy.coordinates.SkyCoord`
Coordinate along which to extract intensity
Returns
-------
intensity : `~astropy.units.Quantity`
loop_coord : `~astropy.coordinates.SkyCoord`
"""
if not len(coord.shape) or coord.shape[0] < 2:
raise ValueError('At least two points are required for extracting intensity along a '
'line. To extract points at single coordinates, use '
'sunpy.map.maputils.sample_at_coords.')
if not all(contains_coordinate(smap, coord)):
raise ValueError('At least one coordinate is not within the bounds of the map.'
'To extract the intensity along a coordinate, all points must fall within '
'the bounds of the map.')
# Find pixels between each loop segment
px, py = smap.wcs.world_to_array_index(coord)
pix = []
for i in range(len(px)-1):
b = _bresenham(px[i], py[i], px[i+1], py[i+1])
# Pop the last one, unless this is the final entry because the first point
# of the next section will be the same
if i < (len(px) - 2):
b = b[:-1]
pix.append(b)
pix = np.vstack(pix)
intensity = u.Quantity(smap.data[pix[:, 0], pix[:, 1]], smap.unit)
coord_new = smap.pixel_to_world(pix[:, 1]*u.pix, pix[:, 0]*u.pix)
return intensity, coord_new
| def extract_along_coord(smap, coord):
"""
Return the value of the image array at every point along the coordinate.
For a given coordinate ``coord``, find all the pixels that cross the coordinate
and extract the values of the image array in ``smap`` at these points. This is done by applying
`Bresenham's line algorithm <http://en.wikipedia.org/wiki/Bresenham%27s_line_algorithm>`_
between the consecutive pairs of points in the coordinate and then indexing the data
array of ``smap`` at those points.
Parameters
----------
smap : `~sunpy.map.GenericMap`
coord : `~astropy.coordinates.SkyCoord`
Coordinate along which to extract intensity
Returns
-------
values : `~astropy.units.Quantity`
loop_coord : `~astropy.coordinates.SkyCoord`
"""
if not len(coord.shape) or coord.shape[0] < 2:
raise ValueError('At least two points are required for extracting intensity along a '
'line. To extract points at single coordinates, use '
'sunpy.map.maputils.sample_at_coords.')
if not all(contains_coordinate(smap, coord)):
raise ValueError('At least one coordinate is not within the bounds of the map.'
'To extract the intensity along a coordinate, all points must fall within '
'the bounds of the map.')
# Find pixels between each loop segment
px, py = smap.wcs.world_to_array_index(coord)
pix = []
for i in range(len(px)-1):
b = _bresenham(px[i], py[i], px[i+1], py[i+1])
# Pop the last one, unless this is the final entry because the first point
# of the next section will be the same
if i < (len(px) - 2):
b = b[:-1]
pix.append(b)
pix = np.vstack(pix)
intensity = u.Quantity(smap.data[pix[:, 0], pix[:, 1]], smap.unit)
coord_new = smap.pixel_to_world(pix[:, 1]*u.pix, pix[:, 0]*u.pix)
return intensity, coord_new
|
17,695 | def _push(dspath, content, target, data, force, jobs, res_kwargs, pbars,
got_path_arg=False):
force_git_push = force in ('all', 'gitpush')
# nothing recursive in here, we only need a repo to work with
ds = Dataset(dspath)
repo = ds.repo
res_kwargs.update(type='dataset', path=dspath)
# content will be unique for every push (even on the same dataset)
pbar_id = 'push-{}-{}'.format(target, id(content))
# register for final orderly take down
pbars[pbar_id] = ds
log_progress(
lgr.info, pbar_id,
'Determine push target',
unit=' Steps',
label='Push',
total=4,
)
#
# First we must figure out where to push to, if needed
#
# will contain info to determine what refspecs need to be pushed
# and to which remote, if none is given
wannabe_gitpush = None
# pristine input arg
_target = target
# verified or auto-detected target sibling name
target, status, message, wannabe_gitpush = _get_push_target(repo, target)
if target is None:
yield dict(
res_kwargs,
status=status,
message=message,
)
return
log_progress(
lgr.info, pbar_id, "Push refspecs",
label="Push to '{}'".format(target), update=1, total=4)
# cache repo type
is_annex_repo = isinstance(ds.repo, AnnexRepo)
# handling pure special remotes is a lot simpler
target_is_git_remote = repo.config.get(
f'remote.{target}.url', None) is not None
# TODO would is be useful to also check whether the
# target is set 'annex-ignore' right here?
if target_is_git_remote:
# branch and refspec only need handling for Git remotes
refspecs2push = _get_refspecs2push(
repo, is_annex_repo, target, target_arg=_target,
wannabe_gitpush=wannabe_gitpush)
if not refspecs2push:
# nothing was set up for push, and we have no active branch
# this is a weird one, let's confess and stop here
# I don't think we need to support such a scenario
yield dict(
res_kwargs,
status='impossible',
message=
'There is no active branch, cannot determine remote '
'branch'
)
return
#
# We know where to push to, honor dependencies
# XXX we could do this right after we know the value of `target`,
# but this owuld mean we would also push to dependencies
# even when no actual push to the primary target is needed
#
# list of remotes that are publication dependencies for the
# target remote
publish_depends = ensure_list(ds.config.get(
f'remote.{target}.datalad-publish-depends', []))
if publish_depends:
lgr.debug("Discovered publication dependencies for '%s': %s'",
target, publish_depends)
# we know what to push and where, now dependency processing first
for r in publish_depends:
# simply make a call to this function again, all the same, but
# target is different
# TODO: what if a publication dependency doesn't have any of the
# determined refspecs2push, yet. Should we not attempt to push them,
# because the main target has it?
yield from _push(
dspath,
content,
# to this particular dependency
r,
data,
force,
jobs,
res_kwargs.copy(),
pbars,
got_path_arg=got_path_arg,
)
# and lastly the primary push target
# git-annex data copy
#
if is_annex_repo:
if data != "nothing":
log_progress(
lgr.info, pbar_id, "Transfer data",
label="Transfer data to '{}'".format(target), update=2, total=4)
yield from _transfer_data(
repo,
ds,
target,
content,
data,
force,
jobs,
res_kwargs.copy(),
got_path_arg=got_path_arg,
)
else:
lgr.debug("Data transfer to '%s' disabled by argument", target)
else:
lgr.debug("No data transfer: %s is not a git annex repository", repo)
if not target_is_git_remote or not refspecs2push:
# there is nothing that we need to push or sync with on the git-side
# of things with this remote
lgr.debug('No git-remote or no refspecs found that need to be pushed')
# TODO ensure progress bar is ended properly
return
log_progress(
lgr.info, pbar_id, "Update availability information",
label="Update availability for '{}'".format(target), update=3, total=4)
# TODO fetch is only needed if anything was actually transferred. Collect this
# info and make the following conditional on it
# after file transfer the remote might have different commits to
# the annex branch. They have to be merged locally, otherwise a
# push of it further down will fail
_sync_remote_annex_branch(repo, target, is_annex_repo)
# and push all relevant branches, plus the git-annex branch to announce
# local availability info too
yield from _push_refspecs(
repo,
target,
refspecs2push,
force_git_push,
res_kwargs.copy(),
)
| def _push(dspath, content, target, data, force, jobs, res_kwargs, pbars,
got_path_arg=False):
force_git_push = force in ('all', 'gitpush')
# nothing recursive in here, we only need a repo to work with
ds = Dataset(dspath)
repo = ds.repo
res_kwargs.update(type='dataset', path=dspath)
# content will be unique for every push (even on the same dataset)
pbar_id = 'push-{}-{}'.format(target, id(content))
# register for final orderly take down
pbars[pbar_id] = ds
log_progress(
lgr.info, pbar_id,
'Determine push target',
unit=' Steps',
label='Push',
total=4,
)
#
# First we must figure out where to push to, if needed
#
# will contain info to determine what refspecs need to be pushed
# and to which remote, if none is given
wannabe_gitpush = None
# pristine input arg
_target = target
# verified or auto-detected target sibling name
target, status, message, wannabe_gitpush = _get_push_target(repo, target)
if target is None:
yield dict(
res_kwargs,
status=status,
message=message,
)
return
log_progress(
lgr.info, pbar_id, "Push refspecs",
label="Push to '{}'".format(target), update=1, total=4)
# cache repo type
is_annex_repo = isinstance(ds.repo, AnnexRepo)
# handling pure special remotes is a lot simpler
target_is_git_remote = repo.config.get(
f'remote.{target}.url', None) is not None
# TODO would is be useful to also check whether the
# target is set 'annex-ignore' right here?
if target_is_git_remote:
# branch and refspec only need handling for Git remotes
refspecs2push = _get_refspecs2push(
repo, is_annex_repo, target, target_arg=_target,
wannabe_gitpush=wannabe_gitpush)
if not refspecs2push:
# nothing was set up for push, and we have no active branch
# this is a weird one, let's confess and stop here
# I don't think we need to support such a scenario
yield dict(
res_kwargs,
status='impossible',
message=
'There is no active branch, cannot determine remote '
'branch'
)
return
#
# We know where to push to, honor dependencies
# XXX we could do this right after we know the value of `target`,
# but this would mean we would also push to dependencies
# even when no actual push to the primary target is needed
#
# list of remotes that are publication dependencies for the
# target remote
publish_depends = ensure_list(ds.config.get(
f'remote.{target}.datalad-publish-depends', []))
if publish_depends:
lgr.debug("Discovered publication dependencies for '%s': %s'",
target, publish_depends)
# we know what to push and where, now dependency processing first
for r in publish_depends:
# simply make a call to this function again, all the same, but
# target is different
# TODO: what if a publication dependency doesn't have any of the
# determined refspecs2push, yet. Should we not attempt to push them,
# because the main target has it?
yield from _push(
dspath,
content,
# to this particular dependency
r,
data,
force,
jobs,
res_kwargs.copy(),
pbars,
got_path_arg=got_path_arg,
)
# and lastly the primary push target
# git-annex data copy
#
if is_annex_repo:
if data != "nothing":
log_progress(
lgr.info, pbar_id, "Transfer data",
label="Transfer data to '{}'".format(target), update=2, total=4)
yield from _transfer_data(
repo,
ds,
target,
content,
data,
force,
jobs,
res_kwargs.copy(),
got_path_arg=got_path_arg,
)
else:
lgr.debug("Data transfer to '%s' disabled by argument", target)
else:
lgr.debug("No data transfer: %s is not a git annex repository", repo)
if not target_is_git_remote or not refspecs2push:
# there is nothing that we need to push or sync with on the git-side
# of things with this remote
lgr.debug('No git-remote or no refspecs found that need to be pushed')
# TODO ensure progress bar is ended properly
return
log_progress(
lgr.info, pbar_id, "Update availability information",
label="Update availability for '{}'".format(target), update=3, total=4)
# TODO fetch is only needed if anything was actually transferred. Collect this
# info and make the following conditional on it
# after file transfer the remote might have different commits to
# the annex branch. They have to be merged locally, otherwise a
# push of it further down will fail
_sync_remote_annex_branch(repo, target, is_annex_repo)
# and push all relevant branches, plus the git-annex branch to announce
# local availability info too
yield from _push_refspecs(
repo,
target,
refspecs2push,
force_git_push,
res_kwargs.copy(),
)
|
54,061 | def test_jumping_knowledge():
num_nodes, channels, num_layers = 100, 17, 5
xs = list([torch.randn(num_nodes, channels) for _ in range(num_layers)])
model = JumpingKnowledge('cat')
assert model.__repr__() == 'JumpingKnowledge(cat)'
out = model(xs)
assert out.size() == (num_nodes, channels * num_layers)
if is_full_test():
jit = torch.jit.script(model)
assert torch.allclose(jit(xs), out)
model = JumpingKnowledge('max')
assert model.__repr__() == 'JumpingKnowledge(max)'
out = model(xs)
assert out.size() == (num_nodes, channels)
if is_full_test():
jit = torch.jit.script(model)
assert torch.allclose(jit(xs), out)
model = JumpingKnowledge('lstm', channels, num_layers)
assert model.__repr__() == 'JumpingKnowledge(lstm, Channels: ' +\
f'{channels}, Layers: {num_layers})'
out = model(xs)
assert out.size() == (num_nodes, channels)
if is_full_test():
jit = torch.jit.script(model)
assert torch.allclose(jit(xs), out)
| def test_jumping_knowledge():
num_nodes, channels, num_layers = 100, 17, 5
xs = list([torch.randn(num_nodes, channels) for _ in range(num_layers)])
model = JumpingKnowledge('cat')
assert model.__repr__() == 'JumpingKnowledge(cat)'
out = model(xs)
assert out.size() == (num_nodes, channels * num_layers)
if is_full_test():
jit = torch.jit.script(model)
assert torch.allclose(jit(xs), out)
model = JumpingKnowledge('max')
assert model.__repr__() == 'JumpingKnowledge(max)'
out = model(xs)
assert out.size() == (num_nodes, channels)
if is_full_test():
jit = torch.jit.script(model)
assert torch.allclose(jit(xs), out)
model = JumpingKnowledge('lstm', channels, num_layers)
assert model.__repr__() == (f'JumpingKnowledge(lstm, channels='
f'{channels}, layers={num_layers})')
out = model(xs)
assert out.size() == (num_nodes, channels)
if is_full_test():
jit = torch.jit.script(model)
assert torch.allclose(jit(xs), out)
|
43,920 | def primitive_norm(l, alpha):
r"""Compute the normalization constant for a primitive Gaussian function.
A Gaussian function is defined as
.. math::
G = x^l y^m z^n e^{-\alpha r^2},
where :math:`l = (l, m, n)` defines the angular momentum quantum numbers, :math:`\alpha`
is the exponent and :math:`r = (x, y, z)` determines the center of the function. The
normalization constant for this function is computed as
.. math::
N(l, \alpha) = (\frac{2\alpha}{\pi})^{3/4} \frac{(4 \alpha)^{(l_x + l_y + l_z)/2}}
{(2\l_x-1)!! (2\l_y-1)!! (2\l_z-1)!!)^{1/2}}.
Args:
l (tuple[int]): angular momentum quantum numbers of the basis function
alpha (array[float]): exponent of the primitive Gaussian function
Returns:
n (array[float]): normalization coefficient
**Example**
>>> l = (0, 0, 0)
>>> alpha = np.array([3.425250914])
>>> n = gaussian_norm(l, alpha)
>>> print(n)
array([1.79444183])
"""
lx, ly, lz = l
n = (
(2 * alpha / np.pi) ** 0.75
* (4 * alpha) ** (sum(l) / 2)
/ anp.sqrt(fac2(2 * lx - 1) * fac2(2 * ly - 1) * fac2(2 * lz - 1))
)
return n
| def primitive_norm(l, alpha):
r"""Compute the normalization constant for a primitive Gaussian function.
A Gaussian function is defined as
.. math::
G = x^l y^m z^n e^{-\alpha r^2},
where :math:`l = (l, m, n)` defines the angular momentum quantum numbers, :math:`\alpha`
is the exponent. The
normalization constant for this function is computed as
.. math::
N(l, \alpha) = (\frac{2\alpha}{\pi})^{3/4} \frac{(4 \alpha)^{(l_x + l_y + l_z)/2}}
{(2\l_x-1)!! (2\l_y-1)!! (2\l_z-1)!!)^{1/2}}.
Args:
l (tuple[int]): angular momentum quantum numbers of the basis function
alpha (array[float]): exponent of the primitive Gaussian function
Returns:
n (array[float]): normalization coefficient
**Example**
>>> l = (0, 0, 0)
>>> alpha = np.array([3.425250914])
>>> n = gaussian_norm(l, alpha)
>>> print(n)
array([1.79444183])
"""
lx, ly, lz = l
n = (
(2 * alpha / np.pi) ** 0.75
* (4 * alpha) ** (sum(l) / 2)
/ anp.sqrt(fac2(2 * lx - 1) * fac2(2 * ly - 1) * fac2(2 * lz - 1))
)
return n
|
45,556 | def update_auth(c, config):
"""
Set auth related configuration from YAML config file.
As an example, this function should update the following TLJH auth
configuration:
```yaml
auth:
type: oauthenticator.github.GitHubOAuthenticator
GitHubOAuthenticator:
client_id: "..."
client_secret: "..."
oauth_callback_url: "..."
ArbitraryKey:
arbitrary_key: "..."
arbitrary_key_with_none_value:
```
by applying the following configuration:
```python
c.JupyterHub.authenticator_class = "oauthenticator.github.GitHubOAuthenticator"
c.GitHubOAuthenticator.client_id = "..."
c.GitHubOAuthenticator.client_secret = "..."
c.GitHubOAuthenticator.oauth_callback_url = "..."
c.ArbitraryKey.arbitrary_key = "..."
```
Note that "auth.type" and "auth.ArbitraryKey.arbitrary_key_with_none_value"
are treated a bit differently. auth.type will always map to
c.JupyterHub.authenticator_class and any configured value being None won't
be set.
"""
tljh_auth_config = config['auth']
c.JupyterHub.authenticator_class = tljh_auth_config['type']
for auth_key, auth_value in tljh_auth_config.items():
if auth_key == "type":
continue
traitlet_class_name = auth_key
traitlet_class_config = auth_value
traitlet_class_instance = getattr(c, traitlet_class_name)
for config_name, config_value in traitlet_class_config.items():
set_if_not_none(traitlet_class_instance, config_name, config_value)
| def update_auth(c, config):
"""
Set auth related configuration from YAML config file.
As an example, this function should update the following TLJH auth
configuration:
```yaml
auth:
type: oauthenticator.github.GitHubOAuthenticator
GitHubOAuthenticator:
client_id: "..."
client_secret: "..."
oauth_callback_url: "..."
ArbitraryKey:
arbitrary_key: "..."
arbitrary_key_with_none_value:
```
by applying the following configuration:
```python
c.JupyterHub.authenticator_class = "oauthenticator.github.GitHubOAuthenticator"
c.GitHubOAuthenticator.client_id = "..."
c.GitHubOAuthenticator.client_secret = "..."
c.GitHubOAuthenticator.oauth_callback_url = "..."
c.ArbitraryKey.arbitrary_key = "..."
```
Note that "auth.type" and "auth.ArbitraryKey.arbitrary_key_with_none_value"
are treated a bit differently. auth.type will always map to
c.JupyterHub.authenticator_class and any configured value being None won't
be set.
"""
tljh_auth_config = config['auth']
c.JupyterHub.authenticator_class = tljh_auth_config['type']
for auth_key, auth_value in tljh_auth_config.items():
if not (auth_key[0] == auth_key[0].upper() and isinstance(auth_value, dict)):
continue
traitlet_class_name = auth_key
traitlet_class_config = auth_value
traitlet_class_instance = getattr(c, traitlet_class_name)
for config_name, config_value in traitlet_class_config.items():
set_if_not_none(traitlet_class_instance, config_name, config_value)
|
36,634 | def install(domain, localedir=None, names=None):
t = translation(domain, localedir, fallback=True)
t.install(names)
| def install(domain, localedir=None, *, names=None):
t = translation(domain, localedir, fallback=True)
t.install(names)
|
56,667 | def sync_completed_sponsored_books():
from internetarchive import search_items
params = {'page': 1, 'rows': 1000, 'scope': 'all'}
fields = ['identifier', 'openlibrary_edition']
q = 'collection:openlibraryscanningteam AND collection:inlibrary'
# XXX Note: This `search_items` query requires the `ia` tool (the
# one installed via virtualenv) to be configured with (scope:all)
# privileged s3 keys.
config = {'general': {'secure': False}}
items = search_items(q, fields=fields, params=params, config=config)
books = web.ctx.site.get_many([
'/books/%s' % i.get('openlibrary_edition') for i in items
])
unsynced = [b for b in books if not b.ocaid]
ocaid_lookup = dict(('/books/%s' % i.get('openlibrary_edition'), i.get('identifier')) for i in items)
for u in unsynced:
u.ocaid = ocaid_lookup[u.key]
print('saving: ' + u.ocaid)
# TODO: Perform save
# web.ctx.blah[u.key] = u ?
return unsynced
| def sync_completed_sponsored_books():
from internetarchive import search_items
# XXX Note: This `search_items` query requires the `ia` tool (the
# one installed via virtualenv) to be configured with (scope:all)
# privileged s3 keys.
items = search_items(
q='collection:openlibraryscanningteam AND collection:inlibrary',
fields=['identifier', 'openlibrary_edition'],
params={'page': 1, 'rows': 1000, 'scope': 'all'},
config={'general': {'secure': False}}
)
books = web.ctx.site.get_many([
'/books/%s' % i.get('openlibrary_edition') for i in items
])
unsynced = [b for b in books if not b.ocaid]
ocaid_lookup = dict(('/books/%s' % i.get('openlibrary_edition'), i.get('identifier')) for i in items)
for u in unsynced:
u.ocaid = ocaid_lookup[u.key]
print('saving: ' + u.ocaid)
# TODO: Perform save
# web.ctx.blah[u.key] = u ?
return unsynced
|
40,609 | def _test_model(model: Chainer, metrics_functions: List[Metric],
iterator: DataLearningIterator, batch_size=-1, data_type='valid',
start_time: float=None, show_examples=False) -> Dict[str, Union[int, OrderedDict, str]]:
if start_time is None:
start_time = time.time()
expected_outputs = list(set().union(model.out_params, *[m.inputs for m in metrics_functions]))
if not iterator.data[data_type]:
log.warning(f'Could not log examples for {data_type}, assuming it\'s empty')
return {'metrics': None}
outputs = {out: [] for out in expected_outputs}
examples = 0
for x, y_true in iterator.gen_batches(batch_size, data_type, shuffle=False):
examples += len(x)
y_predicted = list(model.compute(list(x), list(y_true), targets=expected_outputs))
if len(expected_outputs) == 1:
y_predicted = [y_predicted]
for out, val in zip(outputs.values(), y_predicted):
out += list(val)
metrics = [(m.name, m.fn(*[outputs[i] for i in m.inputs])) for m in metrics_functions]
report = {
'eval_examples_count': examples,
'metrics': prettify_metrics(metrics),
'time_spent': str(datetime.timedelta(seconds=round(time.time() - start_time + 0.5)))
}
if show_examples:
try:
y_predicted = zip(*[y_predicted_group
for out_name, y_predicted_group in zip(expected_outputs, y_predicted)
if out_name in model.out_params])
if len(model.out_params) == 1:
y_predicted = [y_predicted_item[0] for y_predicted_item in y_predicted]
report['examples'] = [{
'x': x_item,
'y_predicted': y_predicted_item,
'y_true': y_true_item
} for x_item, y_predicted_item, y_true_item in zip(x, y_predicted, y_true)]
except NameError:
log.warning(f'Could not log examples for {data_type}, assuming it\'s empty')
return report
| def _test_model(model: Chainer, metrics_functions: List[Metric],
iterator: DataLearningIterator, batch_size=-1, data_type='valid',
start_time: float=None, show_examples=False) -> Dict[str, Union[int, OrderedDict, str]]:
if start_time is None:
start_time = time.time()
expected_outputs = list(set().union(model.out_params, *[m.inputs for m in metrics_functions]))
if not iterator.data[data_type]:
log.warning(f'Could not log examples for {data_type}, assuming it\'s empty')
return {'eval_examples_count': 0, 'metrics': None, 'time_spent': str(datetime.timedelta(seconds=0))}
outputs = {out: [] for out in expected_outputs}
examples = 0
for x, y_true in iterator.gen_batches(batch_size, data_type, shuffle=False):
examples += len(x)
y_predicted = list(model.compute(list(x), list(y_true), targets=expected_outputs))
if len(expected_outputs) == 1:
y_predicted = [y_predicted]
for out, val in zip(outputs.values(), y_predicted):
out += list(val)
metrics = [(m.name, m.fn(*[outputs[i] for i in m.inputs])) for m in metrics_functions]
report = {
'eval_examples_count': examples,
'metrics': prettify_metrics(metrics),
'time_spent': str(datetime.timedelta(seconds=round(time.time() - start_time + 0.5)))
}
if show_examples:
try:
y_predicted = zip(*[y_predicted_group
for out_name, y_predicted_group in zip(expected_outputs, y_predicted)
if out_name in model.out_params])
if len(model.out_params) == 1:
y_predicted = [y_predicted_item[0] for y_predicted_item in y_predicted]
report['examples'] = [{
'x': x_item,
'y_predicted': y_predicted_item,
'y_true': y_true_item
} for x_item, y_predicted_item, y_true_item in zip(x, y_predicted, y_true)]
except NameError:
log.warning(f'Could not log examples for {data_type}, assuming it\'s empty')
return report
|
2,375 | def randomized_svd(
M,
n_components,
*,
n_oversamples=10,
n_iter="auto",
power_iteration_normalizer="auto",
transpose="auto",
flip_sign=True,
random_state="warn",
lapack_driver="gesdd"
):
"""Computes a truncated randomized SVD.
This method solves the fixed-rank approximation problem described in the
Halko et al paper (problem (1.5), p5).
Parameters
----------
M : {ndarray, sparse matrix}
Matrix to decompose.
n_components : int
Number of singular values and vectors to extract.
n_oversamples : int, default=10
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples. Smaller
number can improve speed but can negatively impact the quality of
approximation of singular vectors and singular values. Users might wish
to increase this parameter up to `2*k - n_components` where k is the
effective rank, for large matrices, noisy problems, matrices with
slowly decaying spectrums, or to increase precision accuracy. See Halko
et al (pages 5, 23 and 26).
n_iter : int or 'auto', default='auto'
Number of power iterations. It can be used to deal with very noisy
problems. When 'auto', it is set to 4, unless `n_components` is small
(< .1 * min(X.shape)) in which case `n_iter` is set to 7.
This improves precision with few components. Note that in general
users should rather increase `n_oversamples` before increasing `n_iter`
as the principle of the randomized method is to avoid usage of these
more costly power iterations steps. When `n_components` is equal
or greater to the effective matrix rank and the spectrum does not
present a slow decay, `n_iter=0` or `1` should even work fine in theory
(see Halko et al paper, page 9).
.. versionchanged:: 0.18
power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter` <= 2 and switches to LU otherwise.
.. versionadded:: 0.18
transpose : bool or 'auto', default='auto'
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
implementation of randomized SVD tend to be a little faster in that
case.
.. versionchanged:: 0.18
flip_sign : bool, default=True
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state : int, RandomState instance or None, default='warn'
The seed of the pseudo random number generator to use when
shuffling the data, i.e. getting the random vectors to initialize
the algorithm. Pass an int for reproducible results across multiple
function calls. See :term:`Glossary <random_state>`.
.. versionchanged:: 1.2
The previous behavior (`random_state=0`) is deprecated, and
from v1.2 the default value will be `random_state=None`. Set
the value of `random_state` explicitly to suppress the deprecation
warning.
lapack_driver : str, {'gesdd', 'gesvd'}, default='gesdd'
Whether to use the more efficient divide-and-conquer approach ('gesdd')
, or more general rectangular approach ('gesvd') to compute the SVD of
the matrix 'B', which is the projection of the 'M' into a the low
dimensional subspace, described by Halko et. al.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components. In order to
obtain further speed up, `n_iter` can be set <=2 (at the cost of
loss of precision). To increase the precision it is recommended to
increase `n_oversamples`, up to `2*k-n_components` where k is the
effective rank. Usually, `n_components` is chosen to be greater than k
so increasing `n_oversamples` up to `n_components` should be enough.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions (Algorithm 4.3)
Halko, et al., 2009 https://arxiv.org/abs/0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
* An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
if isinstance(M, (sparse.lil_matrix, sparse.dok_matrix)):
warnings.warn(
"Calculating SVD of a {} is expensive. "
"csr_matrix is more efficient.".format(type(M).__name__),
sparse.SparseEfficiencyWarning,
)
if random_state == "warn":
warnings.warn(
"If 'random_state' is not supplied, the current default "
"is to use 0 as a fixed seed. This will change to "
"None in version 1.2 leading to non-deterministic results "
"that better reflect nature of the randomized_svd solver. "
"If you want to silence this warning, set 'random_state' "
"to an integer seed or to None explicitly depending "
"if you want your code to be deterministic or not.",
FutureWarning,
)
random_state = 0
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if n_iter == "auto":
# Checks if the number of iterations is explicitly specified
# Adjust n_iter. 7 was found a good compromise for PCA. See #5299
n_iter = 7 if n_components < 0.1 * min(M.shape) else 4
if transpose == "auto":
transpose = n_samples < n_features
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(
M,
size=n_random,
n_iter=n_iter,
power_iteration_normalizer=power_iteration_normalizer,
random_state=random_state,
)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, Vt = linalg.svd(
B, full_matrices=False, lapack_driver=lapack_driver
)
del B
U = np.dot(Q, Uhat)
if flip_sign:
if not transpose:
U, Vt = svd_flip(U, Vt)
else:
# In case of transpose u_based_decision=false
# to actually flip based on u and not v.
U, Vt = svd_flip(U, Vt, u_based_decision=False)
if transpose:
# transpose back the results according to the input convention
return Vt[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], Vt[:n_components, :]
| def randomized_svd(
M,
n_components,
*,
n_oversamples=10,
n_iter="auto",
power_iteration_normalizer="auto",
transpose="auto",
flip_sign=True,
random_state="warn",
lapack_driver="gesdd"
):
"""Computes a truncated randomized SVD.
This method solves the fixed-rank approximation problem described in the
Halko et al paper (problem (1.5), p5).
Parameters
----------
M : {ndarray, sparse matrix}
Matrix to decompose.
n_components : int
Number of singular values and vectors to extract.
n_oversamples : int, default=10
Additional number of random vectors to sample the range of M so as
to ensure proper conditioning. The total number of random vectors
used to find the range of M is n_components + n_oversamples. Smaller
number can improve speed but can negatively impact the quality of
approximation of singular vectors and singular values. Users might wish
to increase this parameter up to `2*k - n_components` where k is the
effective rank, for large matrices, noisy problems, matrices with
slowly decaying spectrums, or to increase precision accuracy. See Halko
et al (pages 5, 23 and 26).
n_iter : int or 'auto', default='auto'
Number of power iterations. It can be used to deal with very noisy
problems. When 'auto', it is set to 4, unless `n_components` is small
(< .1 * min(X.shape)) in which case `n_iter` is set to 7.
This improves precision with few components. Note that in general
users should rather increase `n_oversamples` before increasing `n_iter`
as the principle of the randomized method is to avoid usage of these
more costly power iterations steps. When `n_components` is equal
or greater to the effective matrix rank and the spectrum does not
present a slow decay, `n_iter=0` or `1` should even work fine in theory
(see Halko et al paper, page 9).
.. versionchanged:: 0.18
power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
Whether the power iterations are normalized with step-by-step
QR factorization (the slowest but most accurate), 'none'
(the fastest but numerically unstable when `n_iter` is large, e.g.
typically 5 or larger), or 'LU' factorization (numerically stable
but can lose slightly in accuracy). The 'auto' mode applies no
normalization if `n_iter` <= 2 and switches to LU otherwise.
.. versionadded:: 0.18
transpose : bool or 'auto', default='auto'
Whether the algorithm should be applied to M.T instead of M. The
result should approximately be the same. The 'auto' mode will
trigger the transposition if M.shape[1] > M.shape[0] since this
implementation of randomized SVD tend to be a little faster in that
case.
.. versionchanged:: 0.18
flip_sign : bool, default=True
The output of a singular value decomposition is only unique up to a
permutation of the signs of the singular vectors. If `flip_sign` is
set to `True`, the sign ambiguity is resolved by making the largest
loadings for each component in the left singular vectors positive.
random_state : int, RandomState instance or None, default='warn'
The seed of the pseudo random number generator to use when
shuffling the data, i.e. getting the random vectors to initialize
the algorithm. Pass an int for reproducible results across multiple
function calls. See :term:`Glossary <random_state>`.
.. versionchanged:: 1.2
The previous behavior (`random_state=0`) is deprecated, and
from v1.2 the default value will be `random_state=None`. Set
the value of `random_state` explicitly to suppress the deprecation
warning.
lapack_driver : {"gesdd", "gesvd"}, default="gesdd"
Whether to use the more efficient divide-and-conquer approach ('gesdd')
, or more general rectangular approach ('gesvd') to compute the SVD of
the matrix 'B', which is the projection of the 'M' into a the low
dimensional subspace, described by Halko et. al.
Notes
-----
This algorithm finds a (usually very good) approximate truncated
singular value decomposition using randomization to speed up the
computations. It is particularly fast on large matrices on which
you wish to extract only a small number of components. In order to
obtain further speed up, `n_iter` can be set <=2 (at the cost of
loss of precision). To increase the precision it is recommended to
increase `n_oversamples`, up to `2*k-n_components` where k is the
effective rank. Usually, `n_components` is chosen to be greater than k
so increasing `n_oversamples` up to `n_components` should be enough.
References
----------
* Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions (Algorithm 4.3)
Halko, et al., 2009 https://arxiv.org/abs/0909.4061
* A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert
* An implementation of a randomized algorithm for principal component
analysis
A. Szlam et al. 2014
"""
if isinstance(M, (sparse.lil_matrix, sparse.dok_matrix)):
warnings.warn(
"Calculating SVD of a {} is expensive. "
"csr_matrix is more efficient.".format(type(M).__name__),
sparse.SparseEfficiencyWarning,
)
if random_state == "warn":
warnings.warn(
"If 'random_state' is not supplied, the current default "
"is to use 0 as a fixed seed. This will change to "
"None in version 1.2 leading to non-deterministic results "
"that better reflect nature of the randomized_svd solver. "
"If you want to silence this warning, set 'random_state' "
"to an integer seed or to None explicitly depending "
"if you want your code to be deterministic or not.",
FutureWarning,
)
random_state = 0
random_state = check_random_state(random_state)
n_random = n_components + n_oversamples
n_samples, n_features = M.shape
if n_iter == "auto":
# Checks if the number of iterations is explicitly specified
# Adjust n_iter. 7 was found a good compromise for PCA. See #5299
n_iter = 7 if n_components < 0.1 * min(M.shape) else 4
if transpose == "auto":
transpose = n_samples < n_features
if transpose:
# this implementation is a bit faster with smaller shape[1]
M = M.T
Q = randomized_range_finder(
M,
size=n_random,
n_iter=n_iter,
power_iteration_normalizer=power_iteration_normalizer,
random_state=random_state,
)
# project M to the (k + p) dimensional space using the basis vectors
B = safe_sparse_dot(Q.T, M)
# compute the SVD on the thin matrix: (k + p) wide
Uhat, s, Vt = linalg.svd(
B, full_matrices=False, lapack_driver=lapack_driver
)
del B
U = np.dot(Q, Uhat)
if flip_sign:
if not transpose:
U, Vt = svd_flip(U, Vt)
else:
# In case of transpose u_based_decision=false
# to actually flip based on u and not v.
U, Vt = svd_flip(U, Vt, u_based_decision=False)
if transpose:
# transpose back the results according to the input convention
return Vt[:n_components, :].T, s[:n_components], U[:, :n_components].T
else:
return U[:, :n_components], s[:n_components], Vt[:n_components, :]
|
56,733 | def generate_samples(generator, *args, **kwargs):
"""Generate samples from the distribution of a random variable.
Parameters
----------
generator: function
Function to generate the random samples. The function is
expected take parameters for generating samples and
a keyword argument ``size`` which determines the shape
of the samples.
The args and kwargs (stripped of the keywords below) will be
passed to the generator function.
keyword arguments
~~~~~~~~~~~~~~~~~
dist_shape: int or tuple of int
The shape of the random variable (i.e., the shape attribute).
size: int or tuple of int
The required shape of the samples.
broadcast_shape: tuple of int or None
The shape resulting from the broadcasting of the parameters.
If not specified it will be inferred from the shape of the
parameters. This may be required when the parameter shape
does not determine the shape of a single sample, for example,
the shape of the probabilities in the Categorical distribution.
not_broadcast_kwargs: dict or None
Key word argument dictionary to provide to the random generator, which
must not be broadcasted with the rest of the args and kwargs.
Any remaining args and kwargs are passed on to the generator function.
"""
dist_shape = kwargs.pop('dist_shape', ())
one_d = _is_one_d(dist_shape)
size = kwargs.pop('size', None)
broadcast_shape = kwargs.pop('broadcast_shape', None)
not_broadcast_kwargs = kwargs.pop('not_broadcast_kwargs', None)
if not_broadcast_kwargs is None:
not_broadcast_kwargs = dict()
# Parse out raw input parameters for the generator
args = tuple(p[0] if isinstance(p, tuple) else p for p in args)
for key in kwargs:
p = kwargs[key]
kwargs[key] = p[0] if isinstance(p, tuple) else p
# Convert size and dist_shape to tuples
size_tup = to_tuple(size)
dist_shape = to_tuple(dist_shape)
if dist_shape[:len(size_tup)] == size_tup:
# dist_shape is prepended with size_tup. This is not a consequence
# of the parameters being drawn size_tup times! By chance, the
# distribution's shape has its first elements equal to size_tup.
# This means that we must prepend the size_tup to dist_shape, and
# check if that broadcasts well with the parameters
_dist_shape = size_tup + dist_shape
else:
_dist_shape = dist_shape
if broadcast_shape is None:
# If broadcast_shape is not explicitly provided, it is inferred as the
# broadcasted shape of the input parameter and dist_shape, taking into
# account the potential size prefix
inputs = args + tuple(kwargs.values())
broadcast_shape = broadcast_dist_samples_shape(
[np.asarray(i).shape for i in inputs] + [_dist_shape],
size=size_tup
)
# We do this instead of broadcast_distribution_samples to avoid
# creating a dummy array with dist_shape in memory
inputs = get_broadcastable_dist_samples(
inputs,
size=size_tup,
must_bcast_with=broadcast_shape,
)
# We modify the arguments with their broadcasted counterparts
args = tuple(inputs[:len(args)])
for offset, key in enumerate(kwargs):
kwargs[key] = inputs[len(args) + offset]
# Update kwargs with the keyword arguments that were not broadcasted
kwargs.update(not_broadcast_kwargs)
# We ensure that broadcast_shape is a tuple
broadcast_shape = to_tuple(broadcast_shape)
try:
dist_bcast_shape = broadcast_dist_samples_shape(
[_dist_shape, broadcast_shape],
size=size,
)
except (ValueError, TypeError):
raise TypeError('''Attempted to generate values with incompatible shapes:
size: {size}
size_tup: {size_tup}
broadcast_shape[:len(size_tup)] == size_tup: {size_prepended}
dist_shape: {dist_shape}
broadcast_shape: {broadcast_shape}
'''.format(size=size,
size_tup=size_tup,
dist_shape=dist_shape,
broadcast_shape=broadcast_shape,
size_prepended=broadcast_shape[:len(size_tup)] == size_tup)
)
if dist_bcast_shape[:len(size_tup)] == size_tup:
samples = generator(size=dist_bcast_shape, *args, **kwargs)
else:
samples = generator(size=size_tup + dist_bcast_shape, *args, **kwargs)
samples = np.asarray(samples)
# reshape samples here
if samples.ndim > 0 and samples.shape[0] == 1 and size_tup == (1,):
if (len(samples.shape) > len(dist_shape) and
samples.shape[-len(dist_shape):] == dist_shape[-len(dist_shape):]
):
samples = samples.reshape(samples.shape[1:])
if (one_d and samples.ndim > 0 and samples.shape[-1] == 1
and (samples.shape != size_tup or
size_tup == tuple() or
size_tup == (1,))
):
samples = samples.reshape(samples.shape[:-1])
print("samples:",samples)
return np.asarray(samples)
| def generate_samples(generator, *args, **kwargs):
"""Generate samples from the distribution of a random variable.
Parameters
----------
generator: function
Function to generate the random samples. The function is
expected take parameters for generating samples and
a keyword argument ``size`` which determines the shape
of the samples.
The args and kwargs (stripped of the keywords below) will be
passed to the generator function.
keyword arguments
~~~~~~~~~~~~~~~~~
dist_shape: int or tuple of int
The shape of the random variable (i.e., the shape attribute).
size: int or tuple of int
The required shape of the samples.
broadcast_shape: tuple of int or None
The shape resulting from the broadcasting of the parameters.
If not specified it will be inferred from the shape of the
parameters. This may be required when the parameter shape
does not determine the shape of a single sample, for example,
the shape of the probabilities in the Categorical distribution.
not_broadcast_kwargs: dict or None
Key word argument dictionary to provide to the random generator, which
must not be broadcasted with the rest of the args and kwargs.
Any remaining args and kwargs are passed on to the generator function.
"""
dist_shape = kwargs.pop('dist_shape', ())
one_d = _is_one_d(dist_shape)
size = kwargs.pop('size', None)
broadcast_shape = kwargs.pop('broadcast_shape', None)
not_broadcast_kwargs = kwargs.pop('not_broadcast_kwargs', None)
if not_broadcast_kwargs is None:
not_broadcast_kwargs = dict()
# Parse out raw input parameters for the generator
args = tuple(p[0] if isinstance(p, tuple) else p for p in args)
for key in kwargs:
p = kwargs[key]
kwargs[key] = p[0] if isinstance(p, tuple) else p
# Convert size and dist_shape to tuples
size_tup = to_tuple(size)
dist_shape = to_tuple(dist_shape)
if dist_shape[:len(size_tup)] == size_tup:
# dist_shape is prepended with size_tup. This is not a consequence
# of the parameters being drawn size_tup times! By chance, the
# distribution's shape has its first elements equal to size_tup.
# This means that we must prepend the size_tup to dist_shape, and
# check if that broadcasts well with the parameters
_dist_shape = size_tup + dist_shape
else:
_dist_shape = dist_shape
if broadcast_shape is None:
# If broadcast_shape is not explicitly provided, it is inferred as the
# broadcasted shape of the input parameter and dist_shape, taking into
# account the potential size prefix
inputs = args + tuple(kwargs.values())
broadcast_shape = broadcast_dist_samples_shape(
[np.asarray(i).shape for i in inputs] + [_dist_shape],
size=size_tup
)
# We do this instead of broadcast_distribution_samples to avoid
# creating a dummy array with dist_shape in memory
inputs = get_broadcastable_dist_samples(
inputs,
size=size_tup,
must_bcast_with=broadcast_shape,
)
# We modify the arguments with their broadcasted counterparts
args = tuple(inputs[:len(args)])
for offset, key in enumerate(kwargs):
kwargs[key] = inputs[len(args) + offset]
# Update kwargs with the keyword arguments that were not broadcasted
kwargs.update(not_broadcast_kwargs)
# We ensure that broadcast_shape is a tuple
broadcast_shape = to_tuple(broadcast_shape)
try:
dist_bcast_shape = broadcast_dist_samples_shape(
[_dist_shape, broadcast_shape],
size=size,
)
except (ValueError, TypeError):
raise TypeError('''Attempted to generate values with incompatible shapes:
size: {size}
size_tup: {size_tup}
broadcast_shape[:len(size_tup)] == size_tup: {size_prepended}
dist_shape: {dist_shape}
broadcast_shape: {broadcast_shape}
'''.format(size=size,
size_tup=size_tup,
dist_shape=dist_shape,
broadcast_shape=broadcast_shape,
size_prepended=broadcast_shape[:len(size_tup)] == size_tup)
)
if dist_bcast_shape[:len(size_tup)] == size_tup:
samples = generator(size=dist_bcast_shape, *args, **kwargs)
else:
samples = generator(size=size_tup + dist_bcast_shape, *args, **kwargs)
samples = np.asarray(samples)
# reshape samples here
if samples.ndim > 0 and samples.shape[0] == 1 and size_tup == (1,):
if (len(samples.shape) > len(dist_shape) and
samples.shape[-len(dist_shape):] == dist_shape[-len(dist_shape):]
):
samples = samples.reshape(samples.shape[1:])
if (one_d and samples.ndim > 0 and samples.shape[-1] == 1
and (samples.shape != size_tup or
size_tup == tuple() or
size_tup == (1,))
):
samples = samples.reshape(samples.shape[:-1])
return np.asarray(samples)
|
14,677 | def load_data(hass, url=None, filepath=None, username=None, password=None,
authentication=None, num_retries=5, verify_ssl=None):
"""Load data into ByteIO/File container from a source."""
try:
if url is not None:
# Load data from URL
params = {"timeout": 15}
if username is not None and password is not None:
if authentication == HTTP_DIGEST_AUTHENTICATION:
params["auth"] = HTTPDigestAuth(username, password)
else:
params["auth"] = HTTPBasicAuth(username, password)
if verify_ssl is not None:
params["verify"] = verify_ssl
retry_num = 0
while retry_num < num_retries:
req = requests.get(url, **params)
if not req.ok:
_LOGGER.warning("Status code %s (retry #%s) loading %s",
req.status_code, retry_num + 1, url)
else:
data = io.BytesIO(req.content)
if data.read():
data.seek(0)
data.name = url
return data
_LOGGER.warning("Empty data (retry #%s) in %s)",
retry_num + 1, url)
retry_num += 1
_LOGGER.warning("Can't load data in %s after %s retries",
url, retry_num)
elif filepath is not None:
if hass.config.is_allowed_path(filepath):
return open(filepath, "rb")
_LOGGER.warning("'%s' are not secure to load data from!", filepath)
else:
_LOGGER.warning("Can't load data. No data found in params!")
except (OSError, TypeError) as error:
_LOGGER.error("Can't load data into ByteIO: %s", error)
return None
| def load_data(hass, url=None, filepath=None, username=None, password=None,
authentication=None, num_retries=5, verify_ssl=None):
"""Load data into ByteIO/File container from a source."""
try:
if url is not None:
# Load data from URL
params = {"timeout": 15}
if username is not None and password is not None:
if authentication == HTTP_DIGEST_AUTHENTICATION:
params["auth"] = HTTPDigestAuth(username, password)
else:
params["auth"] = HTTPBasicAuth(username, password)
if verify_ssl:
params["verify"] = verify_ssl
retry_num = 0
while retry_num < num_retries:
req = requests.get(url, **params)
if not req.ok:
_LOGGER.warning("Status code %s (retry #%s) loading %s",
req.status_code, retry_num + 1, url)
else:
data = io.BytesIO(req.content)
if data.read():
data.seek(0)
data.name = url
return data
_LOGGER.warning("Empty data (retry #%s) in %s)",
retry_num + 1, url)
retry_num += 1
_LOGGER.warning("Can't load data in %s after %s retries",
url, retry_num)
elif filepath is not None:
if hass.config.is_allowed_path(filepath):
return open(filepath, "rb")
_LOGGER.warning("'%s' are not secure to load data from!", filepath)
else:
_LOGGER.warning("Can't load data. No data found in params!")
except (OSError, TypeError) as error:
_LOGGER.error("Can't load data into ByteIO: %s", error)
return None
|
57,608 | def fetch_exchange(zone_key1='CR', zone_key2='PA', session=None,
target_datetime=None, logger=logging.getLogger(__name__)) -> dict:
"""
Requests the last known power exchange (in MW) between two countries.
"""
if target_datetime:
raise NotImplementedError(
'This parser is not yet able to parse past dates')
sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))
r = session or requests.session()
url = EXCHANGE_URL
response = r.get(url)
assert response.status_code == 200
df = pd.read_html(response.text)[0]
# A positive value on website indicates a flow from country specified to PA.
net_flow_cr = round(float(df[4][1]) + float(df[4][3]) + float(df[4][5]) + float(df[1][8]) + float(df[1][10]), 2)
net_flow_ni = round(float(df[4][8]) + float(df[4][10]) + float(df[4][13]) + float(df[4][15]), 2)
net_flow_hn = round(float(df[1][13]) + float(df[1][15]) + float(df[1][18]) + float(df[1][20]) + float(df[1][23]), 2)
net_flow_sv = round(float(df[4][18]) + float(df[4][20]) + float(df[1][26]) + float(df[1][28]), 2)
net_flow_gt = round(float(df[4][23]) + float(df[4][26]) + float(df[4][28]) + float(df[1][31]), 2)
net_flows = {
"CR->PA": net_flow_cr, # Costa Rica to Panama
"GT->PA": net_flow_gt, # Guatemala to Panama
"HN->PA": net_flow_hn, # Honduras to Panama
"NI->PA": net_flow_ni, # Nicaragua to Panama
"PA->SV": net_flow_sv, # Panama to El Salvador
}
# Invert sign of flows to account for correct flow direction
net_flows["PA->SV"] = -1 * net_flows["PA->SV"]
if sorted_zone_keys not in net_flows:
raise NotImplementedError(
'This exchange pair is not implemented: {}'.format(sorted_zone_keys))
data = {
'datetime': arrow.now(TIMEZONE).datetime,
'netFlow': net_flows[sorted_zone_keys],
'sortedZoneKeys': sorted_zone_keys,
'source': url
}
return data
| def fetch_exchange(zone_key1='CR', zone_key2='PA', session=None,
target_datetime=None, logger=logging.getLogger(__name__)) -> dict:
"""
Requests the last known power exchange (in MW) between two countries.
"""
if target_datetime:
raise NotImplementedError("This parser is not yet able to parse past dates")
sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))
r = session or requests.session()
url = EXCHANGE_URL
response = r.get(url)
assert response.status_code == 200
df = pd.read_html(response.text)[0]
# A positive value on website indicates a flow from country specified to PA.
net_flow_cr = round(float(df[4][1]) + float(df[4][3]) + float(df[4][5]) + float(df[1][8]) + float(df[1][10]), 2)
net_flow_ni = round(float(df[4][8]) + float(df[4][10]) + float(df[4][13]) + float(df[4][15]), 2)
net_flow_hn = round(float(df[1][13]) + float(df[1][15]) + float(df[1][18]) + float(df[1][20]) + float(df[1][23]), 2)
net_flow_sv = round(float(df[4][18]) + float(df[4][20]) + float(df[1][26]) + float(df[1][28]), 2)
net_flow_gt = round(float(df[4][23]) + float(df[4][26]) + float(df[4][28]) + float(df[1][31]), 2)
net_flows = {
"CR->PA": net_flow_cr, # Costa Rica to Panama
"GT->PA": net_flow_gt, # Guatemala to Panama
"HN->PA": net_flow_hn, # Honduras to Panama
"NI->PA": net_flow_ni, # Nicaragua to Panama
"PA->SV": net_flow_sv, # Panama to El Salvador
}
# Invert sign of flows to account for correct flow direction
net_flows["PA->SV"] = -1 * net_flows["PA->SV"]
if sorted_zone_keys not in net_flows:
raise NotImplementedError(
'This exchange pair is not implemented: {}'.format(sorted_zone_keys))
data = {
'datetime': arrow.now(TIMEZONE).datetime,
'netFlow': net_flows[sorted_zone_keys],
'sortedZoneKeys': sorted_zone_keys,
'source': url
}
return data
|
46,205 | def imread(filename: str):
"""custom imaplementation of imread to avoid skimage dependecy"""
ext = os.path.splitext(filename)[1]
if ext in [".tif", "tiff", ".lsm"]:
import tifffile
image = tifffile.imread(filename)
else:
import imageio
image = imageio.imread(filename)
if not hasattr(image, 'ndim'):
return image
if image.ndim > 2:
if image.shape[-1] not in (3, 4) and image.shape[-3] in (3, 4):
image = np.swapaxes(image, -1, -3)
image = np.swapaxes(image, -2, -3)
return image
| def imread(filename: str):
"""custom imaplementation of imread to avoid skimage dependecy"""
ext = os.path.splitext(filename)[1]
if ext in [".tif", ".tiff", ".lsm"]:
import tifffile
image = tifffile.imread(filename)
else:
import imageio
image = imageio.imread(filename)
if not hasattr(image, 'ndim'):
return image
if image.ndim > 2:
if image.shape[-1] not in (3, 4) and image.shape[-3] in (3, 4):
image = np.swapaxes(image, -1, -3)
image = np.swapaxes(image, -2, -3)
return image
|
15,136 | def test_deprecated_with_replacement_key(caplog, schema):
"""
Test deprecation behaves correctly when only a replacement key is provided.
Expected behavior:
- Outputs the appropriate deprecation warning if key is detected
- Processes schema moving the value from key to replacement_key
- Processes schema changing nothing if only replacement_key provided
- No warning if only replacement_key provided
- No warning or difference in output if neither key nor
replacement_key are provided
"""
deprecated_schema = vol.All(
cv.deprecated("mars", replacement_key="jupiter"), schema
)
test_data = {"mars": True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 1
assert (
"The 'mars' option is deprecated, " "please replace it with 'jupiter'"
) in caplog.text
assert {"jupiter": True} == output
caplog.clear()
assert len(caplog.records) == 0
test_data = {"jupiter": True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 0
assert test_data == output
test_data = {"venus": True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 0
assert test_data == output
| def test_deprecated_with_replacement_key(caplog, schema):
"""
Test deprecation behaves correctly when only a replacement key is provided.
Expected behavior:
- Outputs the appropriate deprecation warning if key is detected
- Processes schema moving the value from key to replacement_key
- Processes schema changing nothing if only replacement_key provided
- No warning if only replacement_key provided
- No warning or difference in output if neither key nor
replacement_key are provided
"""
deprecated_schema = vol.All(
cv.deprecated("mars", replacement_key="jupiter"), schema
)
test_data = {"mars": True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 1
assert (
"The 'mars' option is deprecated, please replace it with 'jupiter'"
) in caplog.text
assert {"jupiter": True} == output
caplog.clear()
assert len(caplog.records) == 0
test_data = {"jupiter": True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 0
assert test_data == output
test_data = {"venus": True}
output = deprecated_schema(test_data.copy())
assert len(caplog.records) == 0
assert test_data == output
|
4,284 | def _make_rename_map(chs):
orig_ch_names = [c['ch_name'] for c in chs]
ch_names = orig_ch_names.copy()
_unique_channel_names(ch_names, max_length=15, verbose='error')
rename_map = dict()
if orig_ch_names != ch_names:
rename_map.update(zip(orig_ch_names, ch_names))
return rename_map
| def _make_ch_names_mapping(chs):
orig_ch_names = [c['ch_name'] for c in chs]
ch_names = orig_ch_names.copy()
_unique_channel_names(ch_names, max_length=15, verbose='error')
rename_map = dict()
if orig_ch_names != ch_names:
rename_map.update(zip(orig_ch_names, ch_names))
return rename_map
|
30,676 | def update_server_configuration(client, server_configuration, error_msg):
"""updates server configuration
Args:
client (demisto_client): The configured client to use.
server_configuration (dict): The server configuration to be added
error_msg (str): The error message
Returns:
response_data: The response data
status_code: The response status code
"""
system_conf_response = demisto_client.generic_request_func(
self=client,
path='/system/config',
method='GET'
)
system_conf = ast.literal_eval(system_conf_response[0]).get('sysConf', {})
system_conf.update(server_configuration)
data = {
'data': system_conf,
'version': -1
}
response_data, status_code, _ = demisto_client.generic_request_func(self=client, path='/system/config',
method='POST', body=data)
try:
result_object = ast.literal_eval(response_data)
except ValueError as err:
print_error('failed to parse response from demisto. response is {}.\nError:\n{}'.format(response_data, err))
return
if status_code >= 300 or status_code < 200:
message = result_object.get('message', '')
msg = error_msg + str(status_code) + '\n' + message
print_error(msg)
# client.api_client.pool.close()
return response_data, status_code, _
| def update_server_configuration(client, server_configuration, error_msg):
"""updates server configuration
Args:
client (demisto_client): The configured client to use.
server_configuration (dict): The server configuration to be added
error_msg (str): The error message
Returns:
response_data: The response data
status_code: The response status code
"""
system_conf_response = demisto_client.generic_request_func(
self=client,
path='/system/config',
method='GET'
)
system_conf = ast.literal_eval(system_conf_response[0]).get('sysConf', {})
system_conf.update(server_configuration)
data = {
'data': system_conf,
'version': -1
}
response_data, status_code, _ = demisto_client.generic_request_func(self=client, path='/system/config',
method='POST', body=data)
try:
result_object = ast.literal_eval(response_data)
except ValueError as err:
print_error('failed to parse response from demisto. response is {}.\nError:\n{}'.format(response_data, err))
return
if status_code >= 300 or status_code < 200:
message = result_object.get('message', '')
msg = f'{error_msg} {status_code}\n{message}'
print_error(msg)
# client.api_client.pool.close()
return response_data, status_code, _
|
26,188 | def call_auth_api(integrationToken):
# Get API key
try:
response = requests.get(AUTH_API_URL + '?token=' + integrationToken)
except requests.exceptions.Timeout:
raise CheckException('Failed to get API key by timeout')
except Exception as e:
raise CheckException('Failed to get API key by {}'.format(e))
if response.status_code != 200:
raise ConfigurationError('Failed to get API key. status_code={}'.format(response.status_code))
try:
tokenJson = json.loads(response.text)
token = tokenJson['accessToken']
except Exception as e:
raise CheckException('Failed to get API key by {}'.format(e))
return token
| def call_auth_api(integrationToken):
# Get API key
try:
response = self.http.get(AUTH_API_URL + '?token=' + integrationToken)
except requests.exceptions.Timeout:
raise CheckException('Failed to get API key by timeout')
except Exception as e:
raise CheckException('Failed to get API key by {}'.format(e))
if response.status_code != 200:
raise ConfigurationError('Failed to get API key. status_code={}'.format(response.status_code))
try:
tokenJson = json.loads(response.text)
token = tokenJson['accessToken']
except Exception as e:
raise CheckException('Failed to get API key by {}'.format(e))
return token
|
58,558 | def connected_context_num():
global _lock, _all_contexts
with _lock:
return len(_all_contexts)
| def num_connected_contexts():
"""Return the number of client connections active."""
global _lock, _all_contexts
with _lock:
return len(_all_contexts)
|
40,255 | def main():
parser = create_parser(
description=HELP_TEXT, logfilename='gvm-pyshell.log')
parser.add_protocol_argument()
parser.add_argument(
'-i', '--interactive', action='store_true', default=False,
help='Start an interactive Python shell')
parser.add_argument(
'scriptname', nargs='?', metavar="SCRIPT",
help='Path to script to be preloaded (example: myscript.gmp)')
parser.add_argument(
'scriptargs', nargs='*', metavar="ARG",
help='Arguments for preloaded script')
args = parser.parse_args()
if 'socket' in args.connection_type and args.sockpath:
print('The --sockpath parameter has been deprecated. Please use '
'--socketpath instead', file=sys.stderr)
connection = create_connection(**vars(args))
transform = EtreeCheckCommandTransform()
global_vars = {
'help': Help(),
'__version__': __version__,
'__api_version__': __api_version__,
}
username = None
password = None
if args.protocol == PROTOCOL_OSP:
protocol = Osp(connection, transform=transform)
global_vars['osp'] = protocol
global_vars['__name__'] = '__osp__'
else:
protocol = Gmp(connection, transform=transform)
global_vars['gmp'] = protocol
global_vars['__name__'] = '__gmp__'
if args.gmp_username:
(username, password) = authenticate(
protocol, username=args.gmp_username,
password=args.gmp_password)
shell_args = Namespace(
username=username, password=password)
global_vars['args'] = shell_args
with_script = args.scriptname and len(args.scriptname) > 0
if with_script:
argv = [os.path.abspath(args.scriptname), *args.scriptargs]
shell_args.argv = argv
# for backwards compatibility we add script here
shell_args.script = argv
no_script_no_interactive = not args.interactive and not with_script
script_and_interactive = args.interactive and with_script
only_interactive = not with_script and args.interactive
only_script = not args.interactive and with_script
if only_interactive or no_script_no_interactive:
enter_interactive_mode(global_vars)
if script_and_interactive or only_script:
if only_script:
print(
'Using gvm-pyshell for running scripts only is deprecated. '
'Please use gvm-script instead',
file=sys.stderr,
)
script_name = args.scriptname
run_script(script_name, global_vars)
if not only_script:
enter_interactive_mode(global_vars)
protocol.disconnect()
| def main():
parser = create_parser(
description=HELP_TEXT, logfilename='gvm-pyshell.log')
parser.add_protocol_argument()
parser.add_argument(
'-i', '--interactive', action='store_true', default=False,
help='Start an interactive Python shell')
parser.add_argument(
'scriptname', nargs='?', metavar="SCRIPT",
help='Path to script to be preloaded (example: myscript.gmp)')
parser.add_argument(
'scriptargs', nargs='*', metavar="ARG",
help='Arguments for preloaded script')
args = parser.parse_args()
if 'socket' in args.connection_type and args.sockpath:
print('The --sockpath parameter has been deprecated. Please use '
'--socketpath instead', file=sys.stderr)
connection = create_connection(**vars(args))
transform = EtreeCheckCommandTransform()
global_vars = {
'help': Help(),
'__version__': __version__,
'__api_version__': __api_version__,
}
username = None
password = None
if args.protocol == PROTOCOL_OSP:
protocol = Osp(connection, transform=transform)
global_vars['osp'] = protocol
global_vars['__name__'] = '__osp__'
else:
protocol = Gmp(connection, transform=transform)
global_vars['gmp'] = protocol
global_vars['__name__'] = '__gmp__'
if args.gmp_username:
(username, password) = authenticate(
protocol, username=args.gmp_username,
password=args.gmp_password)
shell_args = Namespace(
username=username, password=password)
global_vars['args'] = shell_args
with_script = args.scriptname and len(args.scriptname) > 0
if with_script:
argv = [os.path.abspath(args.scriptname), *args.scriptargs]
shell_args.argv = argv
# for backwards compatibility we add script here
shell_args.script = argv
no_script_no_interactive = not args.interactive and not with_script
script_and_interactive = args.interactive and with_script
only_interactive = not with_script and args.interactive
only_script = not args.interactive and with_script
if only_interactive or no_script_no_interactive:
enter_interactive_mode(global_vars)
if script_and_interactive or only_script:
if only_script:
print(
'Using gvm-pyshell for running scripts is deprecated. '
'Please use gvm-script instead',
file=sys.stderr,
)
script_name = args.scriptname
run_script(script_name, global_vars)
if not only_script:
enter_interactive_mode(global_vars)
protocol.disconnect()
|
7,913 | def test_external_mesh(cpp_driver):
### Materials ###
materials = openmc.Materials()
fuel_mat = openmc.Material(name="fuel")
fuel_mat.add_nuclide("U235", 1.0)
fuel_mat.set_density('g/cc', 4.5)
materials.append(fuel_mat)
zirc_mat = openmc.Material(name="zircaloy")
zirc_mat.add_element("Zr", 1.0)
zirc_mat.set_density("g/cc", 5.77)
materials.append(zirc_mat)
water_mat = openmc.Material(name="water")
water_mat.add_nuclide("H1", 2.0)
water_mat.add_nuclide("O16", 1.0)
water_mat.set_density("atom/b-cm", 0.07416)
materials.append(water_mat)
materials.export_to_xml()
### Geometry ###
fuel_min_x = openmc.XPlane(-5.0, name="minimum x")
fuel_max_x = openmc.XPlane(5.0, name="maximum x")
fuel_min_y = openmc.YPlane(-5.0, name="minimum y")
fuel_max_y = openmc.YPlane(5.0, name="maximum y")
fuel_min_z = openmc.ZPlane(-5.0, name="minimum z")
fuel_max_z = openmc.ZPlane(5.0, name="maximum z")
fuel_cell = openmc.Cell(name="fuel")
fuel_cell.region = +fuel_min_x & -fuel_max_x & \
+fuel_min_y & -fuel_max_y & \
+fuel_min_z & -fuel_max_z
fuel_cell.fill = fuel_mat
clad_min_x = openmc.XPlane(-6.0, name="minimum x")
clad_max_x = openmc.XPlane(6.0, name="maximum x")
clad_min_y = openmc.YPlane(-6.0, name="minimum y")
clad_max_y = openmc.YPlane(6.0, name="maximum y")
clad_min_z = openmc.ZPlane(-6.0, name="minimum z")
clad_max_z = openmc.ZPlane(6.0, name="maximum z")
clad_cell = openmc.Cell(name="clad")
clad_cell.region = (-fuel_min_x | +fuel_max_x |
-fuel_min_y | +fuel_max_y |
-fuel_min_z | +fuel_max_z) & \
(+clad_min_x & -clad_max_x &
+clad_min_y & -clad_max_y &
+clad_min_z & -clad_max_z)
clad_cell.fill = zirc_mat
bounds = (10, 10, 10)
water_min_x = openmc.XPlane(x0=-bounds[0],
name="minimum x",
boundary_type='vacuum')
water_max_x = openmc.XPlane(x0=bounds[0],
name="maximum x",
boundary_type='vacuum')
water_min_y = openmc.YPlane(y0=-bounds[1],
name="minimum y",
boundary_type='vacuum')
water_max_y = openmc.YPlane(y0=bounds[1],
name="maximum y",
boundary_type='vacuum')
water_min_z = openmc.ZPlane(z0=-bounds[2],
name="minimum z",
boundary_type='vacuum')
water_max_z = openmc.ZPlane(z0=bounds[2],
name="maximum z",
boundary_type='vacuum')
water_cell = openmc.Cell(name="water")
water_cell.region = (-clad_min_x | +clad_max_x |
-clad_min_y | +clad_max_y |
-clad_min_z | +clad_max_z) & \
(+water_min_x & -water_max_x &
+water_min_y & -water_max_y &
+water_min_z & -water_max_z)
water_cell.fill = water_mat
# create a containing universe
geometry = openmc.Geometry([fuel_cell, clad_cell, water_cell])
### Tallies ###
# Meshes
mesh_filename = "test_mesh_tets.h5m"
# Create a normal unstructured mesh to compare to
uscd_mesh = openmc.UnstructuredMesh(mesh_filename,'moab')
# Create filters
uscd_filter = openmc.MeshFilter(mesh=uscd_mesh)
# Create tallies
tallies = openmc.Tallies()
uscd_tally = openmc.Tally(name="unstructured mesh tally")
uscd_tally.filters = [uscd_filter]
uscd_tally.scores = ['flux']
uscd_tally.estimator = 'tracklength'
tallies.append(uscd_tally)
### Settings ###
settings = openmc.Settings()
settings.run_mode = 'fixed source'
settings.particles = 100
settings.batches = 10
# Source setup
r = openmc.stats.Uniform(a=0.0, b=0.0)
theta = openmc.stats.Discrete(x=[0.0], p=[1.0])
phi = openmc.stats.Discrete(x=[0.0], p=[1.0])
space = openmc.stats.SphericalIndependent(r, theta, phi)
angle = openmc.stats.Monodirectional((-1.0, 0.0, 0.0))
energy = openmc.stats.Discrete(x=[15.e+06], p=[1.0])
source = openmc.Source(space=space, energy=energy, angle=angle)
settings.source = source
model = openmc.model.Model(geometry=geometry,
materials=materials,
tallies=tallies,
settings=settings)
harness = ExternalMoabTest(cpp_driver,
'statepoint.10.h5',
model)
# Run open MC and check results
harness.main()
| def test_external_mesh(cpp_driver):
### Materials ###
materials = openmc.Materials()
fuel_mat = openmc.Material(name="fuel")
fuel_mat.add_nuclide("U235", 1.0)
fuel_mat.set_density('g/cc', 4.5)
materials.append(fuel_mat)
zirc_mat = openmc.Material(name="zircaloy")
zirc_mat.add_element("Zr", 1.0)
zirc_mat.set_density("g/cc", 5.77)
materials.append(zirc_mat)
water_mat = openmc.Material(name="water")
water_mat.add_nuclide("H1", 2.0)
water_mat.add_nuclide("O16", 1.0)
water_mat.set_density("atom/b-cm", 0.07416)
materials.append(water_mat)
materials.export_to_xml()
### Geometry ###
fuel_min_x = openmc.XPlane(-5.0, name="minimum x")
fuel_max_x = openmc.XPlane(5.0, name="maximum x")
fuel_min_y = openmc.YPlane(-5.0, name="minimum y")
fuel_max_y = openmc.YPlane(5.0, name="maximum y")
fuel_min_z = openmc.ZPlane(-5.0, name="minimum z")
fuel_max_z = openmc.ZPlane(5.0, name="maximum z")
fuel_cell = openmc.Cell(name="fuel")
fuel_cell.region = +fuel_min_x & -fuel_max_x & \
+fuel_min_y & -fuel_max_y & \
+fuel_min_z & -fuel_max_z
fuel_cell.fill = fuel_mat
clad_min_x = openmc.XPlane(-6.0, name="minimum x")
clad_max_x = openmc.XPlane(6.0, name="maximum x")
clad_min_y = openmc.YPlane(-6.0, name="minimum y")
clad_max_y = openmc.YPlane(6.0, name="maximum y")
clad_min_z = openmc.ZPlane(-6.0, name="minimum z")
clad_max_z = openmc.ZPlane(6.0, name="maximum z")
clad_cell = openmc.Cell(name="clad")
clad_cell.region = (-fuel_min_x | +fuel_max_x |
-fuel_min_y | +fuel_max_y |
-fuel_min_z | +fuel_max_z) & \
(+clad_min_x & -clad_max_x &
+clad_min_y & -clad_max_y &
+clad_min_z & -clad_max_z)
clad_cell.fill = zirc_mat
bounds = (10, 10, 10)
water_min_x = openmc.XPlane(x0=-bounds[0],
name="minimum x",
boundary_type='vacuum')
water_max_x = openmc.XPlane(x0=bounds[0],
name="maximum x",
boundary_type='vacuum')
water_min_y = openmc.YPlane(y0=-bounds[1],
name="minimum y",
boundary_type='vacuum')
water_max_y = openmc.YPlane(y0=bounds[1],
name="maximum y",
boundary_type='vacuum')
water_min_z = openmc.ZPlane(z0=-bounds[2],
name="minimum z",
boundary_type='vacuum')
water_max_z = openmc.ZPlane(z0=bounds[2],
name="maximum z",
boundary_type='vacuum')
water_cell = openmc.Cell(name="water")
water_cell.region = (-clad_min_x | +clad_max_x |
-clad_min_y | +clad_max_y |
-clad_min_z | +clad_max_z) & \
(+water_min_x & -water_max_x &
+water_min_y & -water_max_y &
+water_min_z & -water_max_z)
water_cell.fill = water_mat
# create a containing universe
geometry = openmc.Geometry([fuel_cell, clad_cell, water_cell])
### Tallies ###
# Meshes
mesh_filename = "test_mesh_tets.h5m"
# Create a normal unstructured mesh to compare to
uscd_mesh = openmc.UnstructuredMesh(mesh_filename,'moab')
# Create filters
uscd_filter = openmc.MeshFilter(mesh=uscd_mesh)
# Create tallies
tallies = openmc.Tallies()
uscd_tally = openmc.Tally(name="unstructured mesh tally")
uscd_tally.filters = [uscd_filter]
uscd_tally.scores = ['flux']
uscd_tally.estimator = 'tracklength'
tallies.append(uscd_tally)
### Settings ###
settings = openmc.Settings()
settings.run_mode = 'fixed source'
settings.particles = 100
settings.batches = 10
# Source setup
space = openmc.stats.Point()
angle = openmc.stats.Monodirectional((-1.0, 0.0, 0.0))
energy = openmc.stats.Discrete(x=[15.e+06], p=[1.0])
source = openmc.Source(space=space, energy=energy, angle=angle)
settings.source = source
model = openmc.model.Model(geometry=geometry,
materials=materials,
tallies=tallies,
settings=settings)
harness = ExternalMoabTest(cpp_driver,
'statepoint.10.h5',
model)
# Run open MC and check results
harness.main()
|
1,091 | def generate_boutiques_descriptor(
module, interface_name, container_image, container_type, container_index=None,
verbose=False, save=False, save_path=None, author=None, ignore_inputs=None, tags=None):
'''
Returns a JSON string containing a JSON Boutiques description of a Nipype interface.
Arguments:
* module: module where the Nipype interface is declared.
* interface_name: name of Nipype interface.
* container_image: name of the container image where the tool is installed
* container_type: type of container image (Docker or Singularity)
* container_index: optional index where the image is available
* verbose: print information messages
* save: True if you want to save descriptor to a file
* save_path: file path for the saved descriptor (defaults to name of the interface in current directory)
* author: author of the tool (required for publishing)
* ignore_inputs: list of interface inputs to not include in the descriptor
* tags: JSON object containing tags to include in the descriptor, e.g. "{/"key1/": /"value1/"}"
(note: the tags 'domain:neuroinformatics' and 'interface-type:nipype' are included by default)
'''
if not module:
raise Exception("Undefined module.")
# Retrieves Nipype interface
if isinstance(module, (str, bytes)):
import_module(module)
module_name = str(module)
module = sys.modules[module]
else:
module_name = str(module.__name__)
interface = getattr(module, interface_name)()
inputs = interface.input_spec()
outputs = interface.output_spec()
# Tool description
tool_desc = {}
tool_desc['name'] = interface_name
tool_desc[
'command-line'] = interface_name + " "
tool_desc['author'] = "Nipype (interface)"
if author is not None:
tool_desc['author'] = tool_desc['author'] + ", " + author + " (tool)"
tool_desc[
'description'] = interface_name + ", as implemented in Nipype (module: " + module_name + ", interface: " + interface_name + ")."
tool_desc['inputs'] = []
tool_desc['output-files'] = []
tool_desc['groups'] = []
tool_desc['tool-version'] = interface.version if interface.version is not None else "1.0.0"
tool_desc['schema-version'] = '0.5'
if container_image:
tool_desc['container-image'] = {}
tool_desc['container-image']['image'] = container_image
tool_desc['container-image']['type'] = container_type
if container_index:
tool_desc['container-image']['index'] = container_index
# Generates tool inputs
for name, spec in sorted(interface.inputs.traits(transient=None).items()):
inp = get_boutiques_input(inputs, interface, name, spec, verbose, ignore_inputs=ignore_inputs)
# Handle compound inputs (inputs that can be of multiple types and are mutually exclusive)
if inp is None:
continue
if isinstance(inp, list):
mutex_group_members = []
tool_desc['command-line'] += inp[0]['value-key'] + " "
for i in inp:
tool_desc['inputs'].append(i)
mutex_group_members.append(i['id'])
if verbose:
print("-> Adding input " + i['name'])
# Put inputs into a mutually exclusive group
tool_desc['groups'].append({'id': inp[0]['id'] + "_group",
'name': inp[0]['name'] + " group",
'members': mutex_group_members,
'mutually-exclusive': True})
else:
tool_desc['inputs'].append(inp)
tool_desc['command-line'] += inp['value-key'] + " "
if verbose:
print("-> Adding input " + inp['name'])
# Generates input groups
tool_desc['groups'] += get_boutiques_groups(interface.inputs.traits(transient=None).items())
if len(tool_desc['groups']) == 0:
del tool_desc['groups']
# Generates tool outputs
generate_tool_outputs(outputs, interface, tool_desc, verbose, True)
# Generate outputs with various different inputs to try to generate
# as many output values as possible
custom_inputs = generate_custom_inputs(tool_desc['inputs'])
for input_dict in custom_inputs:
interface = getattr(module, interface_name)(**input_dict)
outputs = interface.output_spec()
generate_tool_outputs(outputs, interface, tool_desc, verbose, False)
# Fill in all missing output paths
for output in tool_desc['output-files']:
if output['path-template'] == "":
fill_in_missing_output_path(output, output['name'], tool_desc['inputs'])
# Add tags
desc_tags = {
'domain': 'neuroinformatics',
'source': 'nipype-interface'
}
if tags is not None:
tags_dict = json.loads(tags)
for k, v in tags_dict.items():
if k in desc_tags:
if not isinstance(desc_tags[k], list):
desc_tags[k] = [desc_tags[k]]
desc_tags[k].append(v)
else:
desc_tags[k] = v
tool_desc['tags'] = desc_tags
# Check for positional arguments and reorder command line args if necessary
tool_desc['command-line'] = reorder_cmd_line_args(tool_desc['command-line'], interface, ignore_inputs)
# Remove the extra space at the end of the command line
tool_desc['command-line'] = tool_desc['command-line'].strip()
# Save descriptor to a file
if save:
path = save_path if save_path is not None else os.path.join(os.getcwd(), interface_name + '.json')
with open(path, 'w') as outfile:
json.dump(tool_desc, outfile, indent=4, separators=(',', ': '))
if verbose:
print("-> Descriptor saved to file " + outfile.name)
print("NOTE: Descriptors produced by this script may not entirely conform to the Nipype interface "
"specs. Please check that the descriptor is correct before using it.")
return json.dumps(tool_desc, indent=4, separators=(',', ': '))
| def generate_boutiques_descriptor(
module, interface_name, container_image, container_type, container_index=None,
verbose=False, save=False, save_path=None, author=None, ignore_inputs=None, tags=None):
'''
Returns a JSON string containing a JSON Boutiques description of a Nipype interface.
Arguments:
* module: module where the Nipype interface is declared.
* interface_name: name of Nipype interface.
* container_image: name of the container image where the tool is installed
* container_type: type of container image (Docker or Singularity)
* container_index: optional index where the image is available
* verbose: print information messages
* save: True if you want to save descriptor to a file
* save_path: file path for the saved descriptor (defaults to name of the interface in current directory)
* author: author of the tool (required for publishing)
* ignore_inputs: list of interface inputs to not include in the descriptor
* tags: JSON object containing tags to include in the descriptor, e.g. "{/"key1/": /"value1/"}"
(note: the tags 'domain:neuroinformatics' and 'interface-type:nipype' are included by default)
'''
if not module:
raise Exception("Undefined module.")
# Retrieves Nipype interface
if isinstance(module, (str, bytes)):
import_module(module)
module_name = str(module)
module = sys.modules[module]
else:
module_name = str(module.__name__)
interface = getattr(module, interface_name)()
inputs = interface.input_spec()
outputs = interface.output_spec()
# Tool description
tool_desc = {}
tool_desc['name'] = interface_name
tool_desc[
'command-line'] = interface_name + " "
tool_desc['author'] = "Nipype (interface)"
if author is not None:
tool_desc['author'] = tool_desc['author'] + ", " + author + " (tool)"
tool_desc[
'description'] = interface_name + ", as implemented in Nipype (module: " + module_name + ", interface: " + interface_name + ")."
tool_desc['inputs'] = []
tool_desc['output-files'] = []
tool_desc['groups'] = []
tool_desc['tool-version'] = interface.version if interface.version is not None else "1.0.0"
tool_desc['schema-version'] = '0.5'
if container_image:
tool_desc['container-image'] = {}
tool_desc['container-image']['image'] = container_image
tool_desc['container-image']['type'] = container_type
if container_index:
tool_desc['container-image']['index'] = container_index
# Generates tool inputs
for name, spec in sorted(interface.inputs.traits(transient=None).items()):
inp = get_boutiques_input(inputs, interface, name, spec, verbose, ignore_inputs=ignore_inputs)
# Handle compound inputs (inputs that can be of multiple types and are mutually exclusive)
if inp is None:
continue
if isinstance(inp, list):
mutex_group_members = []
tool_desc['command-line'] += inp[0]['value-key'] + " "
for i in inp:
tool_desc['inputs'].append(i)
mutex_group_members.append(i['id'])
if verbose:
print("-> Adding input " + i['name'])
# Put inputs into a mutually exclusive group
tool_desc['groups'].append({'id': inp[0]['id'] + "_group",
'name': inp[0]['name'] + " group",
'members': mutex_group_members,
'mutually-exclusive': True})
else:
tool_desc['inputs'].append(inp)
tool_desc['command-line'] += inp['value-key'] + " "
if verbose:
print("-> Adding input " + inp['name'])
# Generates input groups
tool_desc['groups'] += get_boutiques_groups(interface.inputs.traits(transient=None).items())
if len(tool_desc['groups']) == 0:
del tool_desc['groups']
# Generates tool outputs
generate_tool_outputs(outputs, interface, tool_desc, verbose, True)
# Generate outputs with various different inputs to try to generate
# as many output values as possible
custom_inputs = generate_custom_inputs(tool_desc['inputs'])
for input_dict in custom_inputs:
interface = getattr(module, interface_name)(**input_dict)
outputs = interface.output_spec()
generate_tool_outputs(outputs, interface, tool_desc, verbose, False)
# Fill in all missing output paths
for output in tool_desc['output-files']:
if output['path-template'] == "":
fill_in_missing_output_path(output, output['name'], tool_desc['inputs'])
# Add tags
desc_tags = {
'domain': 'neuroinformatics',
'source': 'nipype-interface'
}
if tags is not None:
tags_dict = json.loads(tags)
for k, v in tags_dict.items():
if k in desc_tags:
if not isinstance(desc_tags[k], list):
desc_tags[k] = [desc_tags[k]]
desc_tags[k].append(v)
else:
desc_tags[k] = v
tool_desc['tags'] = desc_tags
# Check for positional arguments and reorder command line args if necessary
tool_desc['command-line'] = reorder_cmd_line_args(tool_desc['command-line'], interface, ignore_inputs)
# Remove the extra space at the end of the command line
tool_desc['command-line'] = tool_desc['command-line'].strip()
# Save descriptor to a file
if save:
path = save_path or os.path.join(os.getcwd(), interface_name + '.json')
with open(path, 'w') as outfile:
json.dump(tool_desc, outfile, indent=4, separators=(',', ': '))
if verbose:
print("-> Descriptor saved to file " + outfile.name)
print("NOTE: Descriptors produced by this script may not entirely conform to the Nipype interface "
"specs. Please check that the descriptor is correct before using it.")
return json.dumps(tool_desc, indent=4, separators=(',', ': '))
|
33,967 | def _batch_args_kwargs(
list_of_flatten_args: List[List[Any]],
) -> Tuple[Tuple[Any], Dict[Any, Any]]:
"""Batch a list of flatten args and returns regular args and kwargs"""
# Ray's flatten arg format is a list with alternating key and values
# e.g. args=(1, 2), kwargs={"key": "val"} got turned into
# [None, 1, None, 2, "key", "val"]
arg_lengths = {len(args) for args in list_of_flatten_args}
assert (
len(arg_lengths) == 1
), "All batch requests should have the same number of parameters."
arg_length = arg_lengths.pop()
batched_flatten_args = []
for idx in range(arg_length):
if idx % 2 == 0:
batched_flatten_args.append(list_of_flatten_args[0][idx])
else:
batched_flatten_args.append([item[idx] for item in list_of_flatten_args])
return recover_args(batched_flatten_args)
| def _batch_args_kwargs(
list_of_flattened_args: List[List[Any]],
) -> Tuple[Tuple[Any], Dict[Any, Any]]:
"""Batch a list of flatten args and returns regular args and kwargs"""
# Ray's flatten arg format is a list with alternating key and values
# e.g. args=(1, 2), kwargs={"key": "val"} got turned into
# [None, 1, None, 2, "key", "val"]
arg_lengths = {len(args) for args in list_of_flatten_args}
assert (
len(arg_lengths) == 1
), "All batch requests should have the same number of parameters."
arg_length = arg_lengths.pop()
batched_flatten_args = []
for idx in range(arg_length):
if idx % 2 == 0:
batched_flatten_args.append(list_of_flatten_args[0][idx])
else:
batched_flatten_args.append([item[idx] for item in list_of_flatten_args])
return recover_args(batched_flatten_args)
|
29,900 | def start_prometheus_exporter_sidecar() -> None:
port = os.environ.get("BASEPLATE_SIDECAR_ADMIN_PORT")
endpoint = Endpoint("0.0.0.0:" + port)
start_prometheus_exporter(endpoint)
| def start_prometheus_exporter_for_sidecar() -> None:
port = os.environ.get("BASEPLATE_SIDECAR_ADMIN_PORT")
endpoint = Endpoint("0.0.0.0:" + port)
start_prometheus_exporter(endpoint)
|
9,510 | def main():
module = AnsibleModule(
argument_spec=dict(
host=dict(type='str', required=True),
login=dict(type='str', default='Administrator'),
password=dict(type='str', default='admin', no_log=True),
ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
),
supports_check_mode=True,
)
if not HAS_HPILO:
module.fail_json(msg='The hpilo python module is required')
host = module.params['host']
login = module.params['login']
password = module.params['password']
ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))
ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)
facts = {
'module_hw': True,
}
# TODO: Count number of CPUs, DIMMs and total memory
data = ilo.get_host_data()
for entry in data:
if 'type' not in entry:
continue
elif entry['type'] == 0: # BIOS Information
facts['hw_bios_version'] = entry['Family']
facts['hw_bios_date'] = entry['Date']
elif entry['type'] == 1: # System Information
facts['hw_uuid'] = entry['UUID']
facts['hw_system_serial'] = entry['Serial Number'].rstrip()
facts['hw_product_name'] = entry['Product Name']
facts['hw_product_uuid'] = entry['cUUID']
elif entry['type'] == 209: # Embedded NIC MAC Assignment
if 'fields' in entry:
for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
if name.startswith('Port'):
try:
factname = 'hw_eth' + str(int(value) - 1)
except Exception:
factname = 'hw_eth_ilo'
elif name.startswith('MAC'):
facts[factname] = {
'macaddress': value.replace('-', ':'),
'macaddress_dash': value
}
else:
(factname, entry_facts) = parse_flat_interface(entry, 'hw_eth_ilo')
facts[factname] = entry_facts
elif entry['type'] == 209: # HPQ NIC iSCSI MAC Info
for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
if name.startswith('Port'):
try:
factname = 'hw_iscsi' + str(int(value) - 1)
except Exception:
factname = 'hw_iscsi_ilo'
elif name.startswith('MAC'):
facts[factname] = {
'macaddress': value.replace('-', ':'),
'macaddress_dash': value
}
elif entry['type'] == 233: # Embedded NIC MAC Assignment (Alternate data format)
(factname, entry_facts) = parse_flat_interface(entry, 'hw_eth_ilo')
facts[factname] = entry_facts
# Collect health (RAM/CPU data)
health = ilo.get_embedded_health()
facts['hw_health'] = health
memory_details_summary = health.get('memory', {}).get('memory_details_summary')
# RAM as reported by iLO 2.10 on ProLiant BL460c Gen8
if memory_details_summary:
facts['hw_memory_details_summary'] = memory_details_summary
facts['hw_memory_total'] = 0
for cpu, details in memory_details_summary.items():
cpu_total_memory_size = details.get('total_memory_size')
if cpu_total_memory_size:
ram = re.search(r'(\d+)\s+(\w+)', cpu_total_memory_size)
if ram:
if ram.group(2) == 'GB':
facts['hw_memory_total'] = facts['hw_memory_total'] + int(ram.group(1))
# reformat into a text friendly format
facts['hw_memory_total'] = "{0} GB".format(facts['hw_memory_total'])
power = ilo.get_host_power_status()
facts['hw_power_status'] = power
module.exit_json(ansible_facts=facts)
| def main():
module = AnsibleModule(
argument_spec=dict(
host=dict(type='str', required=True),
login=dict(type='str', default='Administrator'),
password=dict(type='str', default='admin', no_log=True),
ssl_version=dict(type='str', default='TLSv1', choices=['SSLv3', 'SSLv23', 'TLSv1', 'TLSv1_1', 'TLSv1_2']),
),
supports_check_mode=True,
)
if not HAS_HPILO:
module.fail_json(msg='The hpilo python module is required')
host = module.params['host']
login = module.params['login']
password = module.params['password']
ssl_version = getattr(hpilo.ssl, 'PROTOCOL_' + module.params.get('ssl_version').upper().replace('V', 'v'))
ilo = hpilo.Ilo(host, login=login, password=password, ssl_version=ssl_version)
facts = {
'module_hw': True,
}
# TODO: Count number of CPUs, DIMMs and total memory
data = ilo.get_host_data()
for entry in data:
if 'type' not in entry:
continue
elif entry['type'] == 0: # BIOS Information
facts['hw_bios_version'] = entry['Family']
facts['hw_bios_date'] = entry['Date']
elif entry['type'] == 1: # System Information
facts['hw_uuid'] = entry['UUID']
facts['hw_system_serial'] = entry['Serial Number'].rstrip()
facts['hw_product_name'] = entry['Product Name']
facts['hw_product_uuid'] = entry['cUUID']
elif entry['type'] == 209: # Embedded NIC MAC Assignment
if 'fields' in entry:
for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
if name.startswith('Port'):
try:
factname = 'hw_eth' + str(int(value) - 1)
except Exception:
factname = 'hw_eth_ilo'
elif name.startswith('MAC'):
facts[factname] = {
'macaddress': value.replace('-', ':'),
'macaddress_dash': value
}
else:
(factname, entry_facts) = parse_flat_interface(entry, 'hw_eth_ilo')
facts[factname] = entry_facts
elif entry['type'] == 209: # HPQ NIC iSCSI MAC Info
for (name, value) in [(e['name'], e['value']) for e in entry['fields']]:
if name.startswith('Port'):
try:
factname = 'hw_iscsi' + str(int(value) - 1)
except Exception:
factname = 'hw_iscsi_ilo'
elif name.startswith('MAC'):
facts[factname] = {
'macaddress': value.replace('-', ':'),
'macaddress_dash': value
}
elif entry['type'] == 233: # Embedded NIC MAC Assignment (Alternate data format)
(factname, entry_facts) = parse_flat_interface(entry, 'hw_eth_ilo')
facts[factname] = entry_facts
# Collect health (RAM/CPU data)
health = ilo.get_embedded_health()
facts['hw_health'] = health
memory_details_summary = health.get('memory', {}).get('memory_details_summary')
# RAM as reported by iLO 2.10 on ProLiant BL460c Gen8
if memory_details_summary:
facts['hw_memory_details_summary'] = memory_details_summary
facts['hw_memory_total'] = 0
for cpu, details in memory_details_summary.items():
cpu_total_memory_size = details.get('total_memory_size')
if cpu_total_memory_size:
ram = re.search(r'(\d+)\s+(\w+)', cpu_total_memory_size)
if ram:
if ram.group(2) == 'GB':
facts['hw_memory_total'] = facts['hw_memory_total'] + int(ram.group(1))
# reformat into a text friendly format
facts['hw_memory_total'] = "{0} GB".format(facts['hw_memory_total'])
facts['hw_power_status'] = ilo.get_host_power_status()
facts['hw_power_status'] = power
module.exit_json(ansible_facts=facts)
|
13,523 | def parse_value_name_collumn(value_name, value, signal_size, float_factory):
mini = maxi = offset = None
value_table = dict()
if ".." in value_name:
(mini, maxi) = value_name.strip().split("..", 2)
mini = float_factory(mini)
maxi = float_factory(maxi)
offset = mini
elif value_name.__len__() > 0:
if value.strip().__len__() > 0:
# Value Table
value = int(float(value))
value_table[value] = value_name
maxi = pow(2, signal_size) - 1
maxi = float_factory(maxi)
mini = 0
offset = 1
return mini, maxi, offset, value_table
| def parse_value_name_column(value_name, value, signal_size, float_factory):
mini = maxi = offset = None
value_table = dict()
if ".." in value_name:
(mini, maxi) = value_name.strip().split("..", 2)
mini = float_factory(mini)
maxi = float_factory(maxi)
offset = mini
elif value_name.__len__() > 0:
if value.strip().__len__() > 0:
# Value Table
value = int(float(value))
value_table[value] = value_name
maxi = pow(2, signal_size) - 1
maxi = float_factory(maxi)
mini = 0
offset = 1
return mini, maxi, offset, value_table
|
34,056 | def record_extra_usage_tag(key: str, value: str):
"""Record extra kv usage tag.
If the key already exists, the value will be overwritten.
Caller should make sure the uniqueness of the key to avoid conflicts.
It will make a synchronous call to the internal kv store.
"""
if _recorded_extra_usage_tags.get(key) == value:
return
_recorded_extra_usage_tags[key] = value
if not _internal_kv_initialized():
# This happens if the record is before ray.init
return
_put_extra_usage_tag(key, value)
| def record_extra_usage_tag(key: str, value: str):
"""Record extra kv usage tag.
If the key already exists, the value will be overwritten.
Caller should make sure the uniqueness of the key to avoid conflicts.
It will make a synchronous call to the internal kv store if the tag is updated.
"""
if _recorded_extra_usage_tags.get(key) == value:
return
_recorded_extra_usage_tags[key] = value
if not _internal_kv_initialized():
# This happens if the record is before ray.init
return
_put_extra_usage_tag(key, value)
|
45,307 | def train(
params: Dict,
dtrain: DMatrix,
*args,
evals=(),
num_actors: Optional[int] = None,
evals_result: Optional[Dict] = None,
**kwargs,
):
"""Run distributed training of XGBoost model.
During work it evenly distributes `dtrain` between workers according
to IP addresses partitions (in case of not evenly distribution of `dtrain`
is possible by IPs, part of partitions will be re-distributed between nodes),
runs xgb.train on each worker for subset of `dtrain` and reduce training results
of each worker using Rabit Context.
Parameters
----------
params : dict
Booster params.
dtrain : modin.experimental.DMatrix
Data to be trained against.
*args : iterable
Other parameters for `xgboost.train`.
evals: list of pairs (modin.experimental.DMatrix, string), optional. Default is empty
List of validation sets for which metrics will evaluated during training.
Validation metrics will help us track the performance of the model.
num_actors : int, optional. Default is None
Number of actors for training. If it's None, this value will be
computed automatically.
evals_result : dict, optional. Default is None
Dict to store evaluation results in.
**kwargs : dict
Other parameters are the same as `xgboost.train`.
Returns
-------
modin.experimental.xgboost.Booster
A trained booster.
"""
LOGGER.info("Training started")
if Engine.get() == "Ray":
from .xgboost_ray import _train
else:
raise ValueError("Current version supports only Ray engine.")
assert isinstance(
dtrain, DMatrix
), f"Type of `dtrain` is {type(dtrain)}, but expected {DMatrix}."
result = _train(dtrain, num_actors, params, *args, evals=evals, **kwargs)
if isinstance(evals_result, dict):
evals_result.update(result["history"])
LOGGER.info("Training finished")
return Booster(model_file=result["booster"])
| def train(
params: Dict,
dtrain: DMatrix,
*args,
evals=(),
num_actors: Optional[int] = None,
evals_result: Optional[Dict] = None,
**kwargs,
):
"""Run distributed training of XGBoost model.
During work it evenly distributes `dtrain` between workers according
to IP addresses partitions (in case of not evenly distribution of `dtrain`
is possible by IPs, part of partitions will be re-distributed between nodes),
runs xgb.train on each worker for subset of `dtrain` and reduce training results
of each worker using Rabit Context.
Parameters
----------
params : dict
Booster params.
dtrain : modin.experimental.DMatrix
Data to be trained against.
*args : iterable
Other parameters for `xgboost.train`.
evals: list of pairs (modin.experimental.xgboost.DMatrix, str), optional. Default is empty
List of validation sets for which metrics will evaluated during training.
Validation metrics will help us track the performance of the model.
num_actors : int, optional. Default is None
Number of actors for training. If it's None, this value will be
computed automatically.
evals_result : dict, optional. Default is None
Dict to store evaluation results in.
**kwargs : dict
Other parameters are the same as `xgboost.train`.
Returns
-------
modin.experimental.xgboost.Booster
A trained booster.
"""
LOGGER.info("Training started")
if Engine.get() == "Ray":
from .xgboost_ray import _train
else:
raise ValueError("Current version supports only Ray engine.")
assert isinstance(
dtrain, DMatrix
), f"Type of `dtrain` is {type(dtrain)}, but expected {DMatrix}."
result = _train(dtrain, num_actors, params, *args, evals=evals, **kwargs)
if isinstance(evals_result, dict):
evals_result.update(result["history"])
LOGGER.info("Training finished")
return Booster(model_file=result["booster"])
|
30,589 | def parse_url(item: str) -> str:
""" Parse url if in url form to valid EDL form - withouth http / https
Args:
item(str): Item to parse.
Returns:
str: parsed item, if URL returned without http / https
Examples:
>>> parse_url('http://google.com')
'google.com'
>>> parse_url('https://google.com')
'google.com'
>>> parse_url('https://google.com/hello_world')
'google.com/hello_world'
>>> parse_url('not url')
'google.com/hello_world'
"""
try:
url_obj: ParseResult = urlparse(item)
return url_obj.netloc + url_obj.path
except ValueError:
return item
| def parse_url(item: str) -> str:
""" Parse url if in url form to valid EDL form - without http / https
Args:
item(str): Item to parse.
Returns:
str: parsed item, if URL returned without http / https
Examples:
>>> parse_url('http://google.com')
'google.com'
>>> parse_url('https://google.com')
'google.com'
>>> parse_url('https://google.com/hello_world')
'google.com/hello_world'
>>> parse_url('not url')
'google.com/hello_world'
"""
try:
url_obj: ParseResult = urlparse(item)
return url_obj.netloc + url_obj.path
except ValueError:
return item
|
17,194 | def _convert_states(states: dict[str, Any]) -> dict[str, Any]:
"""Convert state definitions to State objects."""
result = {}
for entity_id, info in states.items():
entity_id = cv.entity_id(entity_id)
if isinstance(info, dict):
entity_attrs = info.copy()
state = entity_attrs.pop(ATTR_STATE, None)
attributes = entity_attrs
else:
state = info
attributes = {}
# YAML translates 'on' to a boolean
# http://yaml.org/type/bool.html
if isinstance(state, bool):
state = STATE_ON if state else STATE_OFF
elif not isinstance(state, str):
raise vol.Invalid(f"State for {entity_id} should be a string")
result[entity_id] = State(entity_id, state, attributes)
return result
| def _convert_states(states: dict[str, Any]) -> dict[str, State]:
"""Convert state definitions to State objects."""
result = {}
for entity_id, info in states.items():
entity_id = cv.entity_id(entity_id)
if isinstance(info, dict):
entity_attrs = info.copy()
state = entity_attrs.pop(ATTR_STATE, None)
attributes = entity_attrs
else:
state = info
attributes = {}
# YAML translates 'on' to a boolean
# http://yaml.org/type/bool.html
if isinstance(state, bool):
state = STATE_ON if state else STATE_OFF
elif not isinstance(state, str):
raise vol.Invalid(f"State for {entity_id} should be a string")
result[entity_id] = State(entity_id, state, attributes)
return result
|
30,339 | def get_group_tags():
"""
Retrieve the Tags for a Group
"""
group_type = demisto.args().get('group_type')
group_id = int(demisto.args().get('group_id'))
contents = []
context_entries = []
response = get_group_tags_request(group_type, group_id)
data = response.get('data', {}).get('tag', [])
if response.get('status') == 'Success':
for tags in data:
contents.append({
'Name': tags.get('name')
})
context_entries.append({
'ID': group_id,
'Name': tags.get('name')
})
else:
return_error(response.get('message'))
context = {
'TC.Group(val.Name && val.Name === obj.Name)': context_entries
}
return_outputs(
tableToMarkdown('Group tags', contents, removeNull=True),
context
)
| def get_group_tags():
"""
Retrieve the Tags for a Group
"""
group_type = demisto.args().get('group_type')
group_id = int(demisto.args().get('group_id'))
contents = []
context_entries = []
response = get_group_tags_request(group_type, group_id)
data = response.get('data', {}).get('tag', [])
if response.get('status') == 'Success':
for tags in data:
contents.append({
'Name': tags.get('name')
})
context_entries.append({
'ID': group_id,
'Name': tags.get('name')
})
else:
return_error(response.get('message'))
context = {
'TC.Group(val.Name && val.Name === obj.Name)': context_entries
}
return_outputs(
tableToMarkdown('ThreatConnect Group Tags', contents, removeNull=True),
context
)
|
24,517 | def main(
input_d, output_d, reject_d, extension=None, fheight=500, fwidth=500, facePercent=50, no_resize=False,
):
"""Crops folder of images to the desired height and width if a
face is found.
If `input_d == output_d` or `output_d is None`, overwrites all files
where the biggest face was found.
Parameters:
-----------
- `input_d`: `str`
* Directory to crop images from.
- `output_d`: `str`
* Directory where cropped images are placed.
- `reject_d`: `str`
* Directory where images that cannot be cropped are placed.
- `fheight`: `int`, default=`500`
* Height (px) to which to crop the image.
- `fwidth`: `int`, default=`500`
* Width (px) to which to crop the image.
- `facePercent`: `int`, default=`50`
* Percentage of face from height.
- `extension` : `str`
* Image extension to save at output.
Side Effects:
-------------
- Creates image files in output directory.
Type Signature:
---------------
`str, str, (int), (int) -> None`
"""
reject_count = 0
output_count = 0
input_files = [
os.path.join(input_d, f)
for f in os.listdir(input_d)
if any(f.endswith(t) for t in INPUT_FILETYPES)
]
if output_d is None:
output_d = input_d
if reject_d is None and output_d is None:
reject_d = input_d
if reject_d is None:
reject_d = output_d
# Guard against calling the function directly
input_count = len(input_files)
assert input_count > 0
# Main loop
"""
Check if the no resize flag is provided or not.
"""
if no_resize is True:
cropper = Cropper(width=fwidth, height=fheight, face_percent=facePercent, no_resize=True)
else:
cropper = Cropper(width=fwidth, height=fheight, face_percent=facePercent)
for input_filename in input_files:
basename = os.path.basename(input_filename)
if extension:
basename_noext = os.path.splitext(basename)[0]
output_filename = os.path.join(output_d, basename_noext + "." + extension)
else:
output_filename = os.path.join(output_d, basename)
reject_filename = os.path.join(reject_d, basename)
image = None
# Attempt the crop
try:
image = cropper.crop(input_filename)
except ImageReadError:
print("Read error: {}".format(input_filename))
continue
# Did the crop produce an invalid image?
if isinstance(image, type(None)):
reject(input_filename, reject_filename)
print("No face detected: {}".format(reject_filename))
reject_count += 1
else:
output(input_filename, output_filename, image)
print("Face detected: {}".format(output_filename))
output_count += 1
# Stop and print status
print(
f"{input_count} : Input files, {output_count} : Faces Cropped, {reject_count} : Rejected"
)
| def main(
input_d, output_d, reject_d, extension=None, fheight=500, fwidth=500, facePercent=50, no_resize=False,
):
"""Crops folder of images to the desired height and width if a
face is found.
If `input_d == output_d` or `output_d is None`, overwrites all files
where the biggest face was found.
Parameters:
-----------
- `input_d`: `str`
* Directory to crop images from.
- `output_d`: `str`
* Directory where cropped images are placed.
- `reject_d`: `str`
* Directory where images that cannot be cropped are placed.
- `fheight`: `int`, default=`500`
* Height (px) to which to crop the image.
- `fwidth`: `int`, default=`500`
* Width (px) to which to crop the image.
- `facePercent`: `int`, default=`50`
* Percentage of face from height.
- `extension` : `str`
* Image extension to save at output.
Side Effects:
-------------
- Creates image files in output directory.
Type Signature:
---------------
`str, str, (int), (int) -> None`
"""
reject_count = 0
output_count = 0
input_files = [
os.path.join(input_d, f)
for f in os.listdir(input_d)
if any(f.endswith(t) for t in INPUT_FILETYPES)
]
if output_d is None:
output_d = input_d
if reject_d is None and output_d is None:
reject_d = input_d
if reject_d is None:
reject_d = output_d
# Guard against calling the function directly
input_count = len(input_files)
assert input_count > 0
# Main loop
"""
Check if the no resize flag is provided or not.
"""
cropper = Cropper(width=fwidth, height=fheight, face_percent=facePercent, no_resize=no_resize)
for input_filename in input_files:
basename = os.path.basename(input_filename)
if extension:
basename_noext = os.path.splitext(basename)[0]
output_filename = os.path.join(output_d, basename_noext + "." + extension)
else:
output_filename = os.path.join(output_d, basename)
reject_filename = os.path.join(reject_d, basename)
image = None
# Attempt the crop
try:
image = cropper.crop(input_filename)
except ImageReadError:
print("Read error: {}".format(input_filename))
continue
# Did the crop produce an invalid image?
if isinstance(image, type(None)):
reject(input_filename, reject_filename)
print("No face detected: {}".format(reject_filename))
reject_count += 1
else:
output(input_filename, output_filename, image)
print("Face detected: {}".format(output_filename))
output_count += 1
# Stop and print status
print(
f"{input_count} : Input files, {output_count} : Faces Cropped, {reject_count} : Rejected"
)
|
56,668 | def get_sponsored_books():
"""Performs the `ia` query to fetch sponsored books from archive.org"""
from internetarchive import search_items
params = {'page': 1, 'rows': 1000, 'scope': 'all'}
fields = ['identifier','est_book_price','est_scan_price', 'scan_price',
'book_price', 'repub_state', 'imagecount', 'title', 'donor',
'openlibrary_edition', 'publicdate', 'collection', 'isbn']
q = 'collection:openlibraryscanningteam'
# XXX Note: This `search_items` query requires the `ia` tool (the
# one installed via virtualenv) to be configured with (scope:all)
# privileged s3 keys.
config = {'general': {'secure': False}}
return [item for item in search_items(q, fields=fields, params=params, config=config) if
not (item.get('repub_state') == '-1' and item.get('donor') in ("@alvin_wellington", "@gojust538"))
]
| def get_sponsored_books():
"""Performs the `ia` query to fetch sponsored books from archive.org"""
from internetarchive import search_items
params = {'page': 1, 'rows': 1000, 'scope': 'all'}
fields = ['identifier','est_book_price','est_scan_price', 'scan_price',
'book_price', 'repub_state', 'imagecount', 'title', 'donor',
'openlibrary_edition', 'publicdate', 'collection', 'isbn']
q = 'collection:openlibraryscanningteam'
# XXX Note: This `search_items` query requires the `ia` tool (the
# one installed via virtualenv) to be configured with (scope:all)
# privileged s3 keys.
config = {'general': {'secure': False}}
return [item for item in search_items(q, fields=fields, params=params, config=config) if
not (item.get('repub_state') == '-1' and item.get('donor') in ("@alvin_wellington", "@gojust538"))
]
|
35,101 | def test_load_late_bound_consts_with_no_late_bound_consts():
"""Check that load_late_bound_consts handles a model with no late bound consts."""
target = tvm.target.Target("llvm")
const_data = np.random.rand(1).astype("float64")
x = relay.var("x", shape=(1,), dtype="float64")
const = relay.const(const_data, dtype="float64")
func = relay.Function([x], relay.op.add(x, const))
mod = tvm.IRModule.from_expr(func)
vm_exec = vm.compile(mod, target=target)
temp = utils.tempdir()
path_consts = temp.relpath("consts")
path_dso = temp.relpath("lib.so")
# Ensure const_data is not above the byte threshold for a late-bound const.
byte_limit = len(const_data.tobytes()) + 1
vm_exec.move_late_bound_consts(path_consts, byte_limit=byte_limit)
vm_exec.mod.export_library(path_dso)
mod = runtime.load_module(path_dso)
mod["load_late_bound_consts"](path_consts)
| def test_load_late_bound_consts_with_no_late_bound_consts():
"""Check that load_late_bound_consts handles a model with no late bound consts."""
target = tvm.target.Target("llvm")
const_data = np.random.rand(1).astype("float64")
x = relay.var("x", shape=(1,), dtype="float64")
const = relay.const(const_data, dtype="float64")
func = relay.Function([x], relay.op.add(x, const))
mod = tvm.IRModule.from_expr(func)
vm_exec = vm.compile(mod, target=target)
temp = utils.tempdir()
path_consts = temp.relpath("consts")
path_dso = temp.relpath("lib.so")
# Ensure const_data is below the byte threshold for a late-bound const.
byte_limit = len(const_data.tobytes()) + 1
vm_exec.move_late_bound_consts(path_consts, byte_limit=byte_limit)
vm_exec.mod.export_library(path_dso)
mod = runtime.load_module(path_dso)
mod["load_late_bound_consts"](path_consts)
|
37,813 | def execute_cmd(
docker: DockerContainer,
cmd_str: str,
before_build: bool,
target_arch: str,
env: Optional[Dict[str, str]] = None,
) -> None:
invalid_cmd = False
pip_install_env_create = True
tmpdirpath = ""
assert env is not None
target_arch_env = TargetArchEnvUtil(env.get("CROSS_ROOT"), target_arch)
cmds = [cmd.strip().replace("\t", " ") for cmd in cmd_str.split("&&")]
# Copy install_deps.sh script from container's tmp to host machine tmp and use it
if not os.path.isfile(target_arch_env.tmp + "/install_deps.sh"):
docker.call(
[
"cp",
target_arch_env.tmp + "/install_deps.sh",
target_arch_env.host_machine_tmp_in_container,
]
)
for cmd in cmds:
if cmd.startswith("yum "):
# Install the dependencies into the emulated docker container and
# Copy back the installed files into host machine
print(
"\nRunning cmd: '"
+ cmd
+ "' in target's native container '"
+ native_docker_images[target_arch]
+ "' and copy the artifacts into the toolchain\n"
)
subprocess.run(
[
"docker",
"run",
"--rm",
"--volume=/:/host", # ignored on CircleCI
native_docker_images[target_arch],
"bash",
"-c",
target_arch_env.host_machine_tmp_in_container
+ '/install_deps.sh "'
+ cmd
+ '"',
],
check=True,
)
# The instaleld dependencies are in /tmp/install_deps on host machine.
# Copy them into the toolchain
dir_list = os.listdir(target_arch_env.host_machine_deps_usr_out_container)
for dir in dir_list:
docker.call(
[
"cp",
"-rf",
target_arch_env.host_machine_deps_usr_in_container + "/" + dir,
target_arch_env.toolchain_deps,
]
)
elif cmd.startswith("pip ") or cmd.startswith("python ") or cmd.startswith("python3 "):
if pip_install_env_create is True and before_build is True:
tmpdirpath = docker.call(["mktemp", "-d"], capture_output=True).strip()
# Adding temp directory in PATH
env_path_var = env.get("PATH")
env_path_var = f"{tmpdirpath}:{env_path_var}"
temp_dict = {"PATH": env_path_var}
env.update(temp_dict)
build_pip = docker.call(
["which", "build-pip"], env=env, capture_output=True
).strip()
build_pybin = build_pip[: build_pip.rindex("/")]
docker.call(["ln", "-s", build_pip, tmpdirpath + "/pip"], env=env)
docker.call(
["ln", "-s", build_pybin + "/build-pip3", tmpdirpath + "/pip3"], env=env
)
docker.call(
["ln", "-s", build_pybin + "/build-python", tmpdirpath + "/python"], env=env
)
docker.call(
["ln", "-s", build_pybin + "/build-python3", tmpdirpath + "/python3"], env=env
)
pip_install_env_create = False
docker.call(["sh", "-c", cmd], env=env)
else:
print(
"During cross compilation, in wheel build phase, only pip/python/yum related commands are allowed"
)
invalid_cmd = True
break
docker.call(["rm", "-rf", tmpdirpath])
if invalid_cmd is True:
sys.exit(1)
| def execute_cmd(
docker: DockerContainer,
cmd_str: str,
before_build: bool,
target_arch: str,
env: Optional[Dict[str, str]] = None,
) -> None:
invalid_cmd = False
pip_install_env_create = True
tmpdirpath = ""
assert env is not None
target_arch_env = TargetArchEnvUtil(env.get("CROSS_ROOT"), target_arch)
cmds = [cmd.strip().replace("\t", " ") for cmd in cmd_str.split("&&")]
# Copy install_deps.sh script from container's tmp to host machine tmp and use it
if not os.path.isfile(target_arch_env.tmp + "/install_deps.sh"):
docker.call(
[
"cp",
target_arch_env.tmp + "/install_deps.sh",
target_arch_env.host_machine_tmp_in_container,
]
)
for cmd in cmds:
if cmd.startswith("yum "):
# Install the dependencies into the emulated docker container and
# Copy back the installed files into host machine
print(
"\nRunning cmd: '"
+ cmd
+ "' in target's native container '"
+ native_docker_images[target_arch]
+ "' and copy the artifacts into the toolchain\n"
)
subprocess.run(
[
"docker",
"run",
"--rm",
"--volume=/:/host", # ignored on CircleCI
native_docker_images[target_arch],
"bash",
"-c",
target_arch_env.host_machine_tmp_in_container
+ '/install_deps.sh "'
+ cmd
+ '"',
],
check=True,
)
# The instaleld dependencies are in /tmp/install_deps on host machine.
# Copy them into the toolchain
dir_list = os.listdir(target_arch_env.host_machine_deps_usr_out_container)
for dir in dir_list:
docker.call(
[
"cp",
"-rf",
f"{target_arch_env.host_machine_deps_usr_in_container}/{dir}",
target_arch_env.toolchain_deps,
]
)
elif cmd.startswith("pip ") or cmd.startswith("python ") or cmd.startswith("python3 "):
if pip_install_env_create is True and before_build is True:
tmpdirpath = docker.call(["mktemp", "-d"], capture_output=True).strip()
# Adding temp directory in PATH
env_path_var = env.get("PATH")
env_path_var = f"{tmpdirpath}:{env_path_var}"
temp_dict = {"PATH": env_path_var}
env.update(temp_dict)
build_pip = docker.call(
["which", "build-pip"], env=env, capture_output=True
).strip()
build_pybin = build_pip[: build_pip.rindex("/")]
docker.call(["ln", "-s", build_pip, tmpdirpath + "/pip"], env=env)
docker.call(
["ln", "-s", build_pybin + "/build-pip3", tmpdirpath + "/pip3"], env=env
)
docker.call(
["ln", "-s", build_pybin + "/build-python", tmpdirpath + "/python"], env=env
)
docker.call(
["ln", "-s", build_pybin + "/build-python3", tmpdirpath + "/python3"], env=env
)
pip_install_env_create = False
docker.call(["sh", "-c", cmd], env=env)
else:
print(
"During cross compilation, in wheel build phase, only pip/python/yum related commands are allowed"
)
invalid_cmd = True
break
docker.call(["rm", "-rf", tmpdirpath])
if invalid_cmd is True:
sys.exit(1)
|
48,783 | def _run_task_by_executor(args, dag, ti):
"""
Sends the task to the executor for execution. This can result in the task being started by another host
if the executor implementation does
"""
pickle_id = None
if args.ship_dag:
try:
# Running remotely, so pickling the DAG
with create_session() as session:
pickle = DagPickle(dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
# TODO: This should be written to a log
print(f'Pickled dag {dag} as pickle_id: {pickle_id}')
except Exception as e:
print('Could not pickle the DAG')
print(e)
raise e
executor = ExecutorLoader.get_default_executor()
executor.start()
print("Sending to executor.")
executor.queue_task_instance(
ti,
mark_success=args.mark_success,
pickle_id=pickle_id,
ignore_all_deps=args.ignore_all_dependencies,
ignore_depends_on_past=args.ignore_depends_on_past,
ignore_task_deps=args.ignore_dependencies,
ignore_ti_state=args.force,
pool=args.pool,
)
executor.heartbeat()
executor.end()
| def _run_task_by_executor(args, dag, ti):
"""
Sends the task to the executor for execution. This can result in the task being started by another host
if the executor implementation does
"""
pickle_id = None
if args.ship_dag:
try:
# Running remotely, so pickling the DAG
with create_session() as session:
pickle = DagPickle(dag)
session.add(pickle)
pickle_id = pickle.id
# TODO: This should be written to a log
print(f'Pickled dag {dag} as pickle_id: {pickle_id}')
except Exception as e:
print('Could not pickle the DAG')
print(e)
raise e
executor = ExecutorLoader.get_default_executor()
executor.start()
print("Sending to executor.")
executor.queue_task_instance(
ti,
mark_success=args.mark_success,
pickle_id=pickle_id,
ignore_all_deps=args.ignore_all_dependencies,
ignore_depends_on_past=args.ignore_depends_on_past,
ignore_task_deps=args.ignore_dependencies,
ignore_ti_state=args.force,
pool=args.pool,
)
executor.heartbeat()
executor.end()
|
33,986 | def _inject_ray_to_conda_site(
conda_path, logger: Optional[logging.Logger] = default_logger
):
"""Write the current Ray site package directory to a new site"""
if _WIN32:
python_binary = os.path.join(conda_path, "python")
else:
python_binary = os.path.join(conda_path, "bin/python")
site_packages_path = (
subprocess.check_output(
[
python_binary,
"-c",
"import sysconfig; print(sysconfig.get_paths()['purelib'])",
]
)
.decode()
.strip()
)
ray_path = sysconfig.get_paths()["purelib"]
logger.warning(
f"Injecting {ray_path} to environment site-packages {site_packages_path} "
"because _inject_current_ray flag is on."
)
maybe_ray_dir = os.path.join(site_packages_path, "ray")
if os.path.isdir(maybe_ray_dir):
logger.warning(f"Replacing existing ray installation with {ray_path}")
shutil.rmtree(maybe_ray_dir)
# See usage of *.pth file at
# https://docs.python.org/3/library/site.html
with open(os.path.join(site_packages_path, "ray_shared.pth"), "w") as f:
f.write(ray_path)
if _resolve_current_ray_path() != ray_path:
# a `pip install .` ?
f.write("\n")
f.write(_resolve_current_ray_path())
| def _inject_ray_to_conda_site(
conda_path, logger: Optional[logging.Logger] = default_logger
):
"""Write the current Ray site package directory to a new site"""
if _WIN32:
python_binary = os.path.join(conda_path, "python")
else:
python_binary = os.path.join(conda_path, "bin/python")
site_packages_path = (
subprocess.check_output(
[
python_binary,
"-c",
"import sysconfig; print(sysconfig.get_paths()['purelib'])",
]
)
.decode()
.strip()
)
ray_path = sysconfig.get_paths()["purelib"]
logger.warning(
f"Injecting {ray_path} to environment site-packages {site_packages_path} "
"because _inject_current_ray flag is on."
)
maybe_ray_dir = os.path.join(site_packages_path, "ray")
if os.path.isdir(maybe_ray_dir):
logger.warning(f"Replacing existing ray installation with {ray_path}")
shutil.rmtree(maybe_ray_dir)
# See usage of *.pth file at
# https://docs.python.org/3/library/site.html
with open(os.path.join(site_packages_path, "ray_shared.pth"), "w") as f:
f.write(ray_path)
if _resolve_current_ray_path() != ray_path:
# a `pip install -e .` hits this path
f.write("\n")
f.write(_resolve_current_ray_path())
|
57,373 | def _update_yaml_with_extracted_metadata(
config_data: Dict[str, Any],
parts_config: project_loader.PartsConfig,
prime_dir: str,
) -> extractors.ExtractedMetadata:
if "adopt-info" in config_data:
part_name = config_data["adopt-info"]
part = parts_config.get_part(part_name)
if not part:
raise meta_errors.AdoptedPartMissingError(part_name)
pull_state = part.get_pull_state()
build_state = part.get_build_state()
stage_state = part.get_stage_state()
prime_state = part.get_prime_state()
# Get the metadata from the pull step first.
metadata = pull_state.extracted_metadata["metadata"]
# Now update it using the metadata from the build step (i.e. the data
# from the build step takes precedence over the pull step).
metadata.update(build_state.extracted_metadata["metadata"])
# Now make sure any scriptlet data are taken into account. Later steps
# take precedence, and scriptlet data (even in earlier steps) take
# precedence over extracted data.
metadata.update(pull_state.scriptlet_metadata)
metadata.update(build_state.scriptlet_metadata)
metadata.update(stage_state.scriptlet_metadata)
metadata.update(prime_state.scriptlet_metadata)
if not metadata:
# If we didn't end up with any metadata, let's ensure this part was
# actually supposed to parse info. If not, let's try to be very
# clear about what's happening, here. We do this after checking for
# metadata because metadata could be supplied by scriptlets, too.
if "parse-info" not in config_data["parts"][part_name]:
raise meta_errors.AdoptedPartNotParsingInfo(part_name)
_adopt_info(config_data, metadata, prime_dir)
return metadata
return None
| def _update_yaml_with_extracted_metadata(
config_data: Dict[str, Any],
parts_config: project_loader.PartsConfig,
prime_dir: str,
) -> extractors.ExtractedMetadata:
if "adopt-info" not in config_data:
return None
part_name = config_data["adopt-info"]
part = parts_config.get_part(part_name)
if not part:
raise meta_errors.AdoptedPartMissingError(part_name)
pull_state = part.get_pull_state()
build_state = part.get_build_state()
stage_state = part.get_stage_state()
prime_state = part.get_prime_state()
# Get the metadata from the pull step first.
metadata = pull_state.extracted_metadata["metadata"]
# Now update it using the metadata from the build step (i.e. the data
# from the build step takes precedence over the pull step).
metadata.update(build_state.extracted_metadata["metadata"])
# Now make sure any scriptlet data are taken into account. Later steps
# take precedence, and scriptlet data (even in earlier steps) take
# precedence over extracted data.
metadata.update(pull_state.scriptlet_metadata)
metadata.update(build_state.scriptlet_metadata)
metadata.update(stage_state.scriptlet_metadata)
metadata.update(prime_state.scriptlet_metadata)
if not metadata:
# If we didn't end up with any metadata, let's ensure this part was
# actually supposed to parse info. If not, let's try to be very
# clear about what's happening, here. We do this after checking for
# metadata because metadata could be supplied by scriptlets, too.
if "parse-info" not in config_data["parts"][part_name]:
raise meta_errors.AdoptedPartNotParsingInfo(part_name)
_adopt_info(config_data, metadata, prime_dir)
return metadata
return None
|
44,839 | def _add_code_to_system_path(code_path):
sys.path = [code_path] + _get_code_dirs(code_path) + sys.path
# delete code modules that are already in sys.modules
# so they will get reloaded anew from the correct code path
# othterwise python will use the alredy loaded modules
modules = []
for path in [code_path] + _get_code_dirs(code_path):
modules_py = glob.glob(join(path, "*.py"))
modules += [ basename(f)[:-3] for f in modules_py if isfile(f) and not f.endswith('__init__.py')]
for module in modules:
if module in sys.modules:
sys.modules.pop(module)
| def _add_code_to_system_path(code_path):
sys.path = [code_path] + _get_code_dirs(code_path) + sys.path
# delete code modules that are already in sys.modules
# so they will get reloaded anew from the correct code path
# otherwise python will use the already loaded modules
modules = []
for path in [code_path] + _get_code_dirs(code_path):
modules_py = glob.glob(join(path, "*.py"))
modules += [ basename(f)[:-3] for f in modules_py if isfile(f) and not f.endswith('__init__.py')]
for module in modules:
if module in sys.modules:
sys.modules.pop(module)
|
4,214 | def test_read_write_info(tmpdir):
"""Test IO of info."""
info = read_info(raw_fname)
temp_file = str(tmpdir.join('info.fif'))
# check for bug `#1198`
info['dev_head_t']['trans'] = np.eye(4)
t1 = info['dev_head_t']['trans']
write_info(temp_file, info)
info2 = read_info(temp_file)
t2 = info2['dev_head_t']['trans']
assert (len(info['chs']) == len(info2['chs']))
assert_array_equal(t1, t2)
# proc_history (e.g., GH#1875)
creator = u'é'
info = read_info(chpi_fname)
info['proc_history'][0]['creator'] = creator
info['hpi_meas'][0]['creator'] = creator
info['subject_info']['his_id'] = creator
info['subject_info']['weight'] = 11.1
info['subject_info']['height'] = 2.3
if info['gantry_angle'] is None: # future testing data may include it
info['gantry_angle'] = 0. # Elekta supine position
gantry_angle = info['gantry_angle']
meas_id = info['meas_id']
write_info(temp_file, info)
info = read_info(temp_file)
assert info['proc_history'][0]['creator'] == creator
assert info['hpi_meas'][0]['creator'] == creator
assert info['subject_info']['his_id'] == creator
assert info['gantry_angle'] == gantry_angle
assert info['subject_info']['height'] == 2.3
assert info['subject_info']['weight'] == 11.1
for key in ['secs', 'usecs', 'version']:
assert info['meas_id'][key] == meas_id[key]
assert_array_equal(info['meas_id']['machid'], meas_id['machid'])
# Test that writing twice produces the same file
m1 = hashlib.md5()
with open(temp_file, 'rb') as fid:
m1.update(fid.read())
m1 = m1.hexdigest()
temp_file_2 = tmpdir.join('info2.fif')
assert temp_file_2 != temp_file
write_info(temp_file_2, info)
m2 = hashlib.md5()
with open(str(temp_file_2), 'rb') as fid:
m2.update(fid.read())
m2 = m2.hexdigest()
assert m1 == m2
# check for bug 7067
info = read_info(raw_fname)
info['meas_date'] = None
tmp_fname_3 = tmpdir.join('info3.fif')
write_info(tmp_fname_3, info)
info2 = read_info(tmp_fname_3)
assert info2['meas_date'] is None
| def test_read_write_info(tmpdir):
"""Test IO of info."""
info = read_info(raw_fname)
temp_file = str(tmpdir.join('info.fif'))
# check for bug `#1198`
info['dev_head_t']['trans'] = np.eye(4)
t1 = info['dev_head_t']['trans']
write_info(temp_file, info)
info2 = read_info(temp_file)
t2 = info2['dev_head_t']['trans']
assert (len(info['chs']) == len(info2['chs']))
assert_array_equal(t1, t2)
# proc_history (e.g., GH#1875)
creator = u'é'
info = read_info(chpi_fname)
info['proc_history'][0]['creator'] = creator
info['hpi_meas'][0]['creator'] = creator
info['subject_info']['his_id'] = creator
info['subject_info']['weight'] = 11.1
info['subject_info']['height'] = 2.3
if info['gantry_angle'] is None: # future testing data may include it
info['gantry_angle'] = 0. # Elekta supine position
gantry_angle = info['gantry_angle']
meas_id = info['meas_id']
write_info(temp_file, info)
info = read_info(temp_file)
assert info['proc_history'][0]['creator'] == creator
assert info['hpi_meas'][0]['creator'] == creator
assert info['subject_info']['his_id'] == creator
assert info['gantry_angle'] == gantry_angle
assert info['subject_info']['height'] == 2.3
assert info['subject_info']['weight'] == 11.1
for key in ['secs', 'usecs', 'version']:
assert info['meas_id'][key] == meas_id[key]
assert_array_equal(info['meas_id']['machid'], meas_id['machid'])
# Test that writing twice produces the same file
m1 = hashlib.md5()
with open(temp_file, 'rb') as fid:
m1.update(fid.read())
m1 = m1.hexdigest()
temp_file_2 = tmpdir.join('info2.fif')
assert temp_file_2 != temp_file
write_info(temp_file_2, info)
m2 = hashlib.md5()
with open(str(temp_file_2), 'rb') as fid:
m2.update(fid.read())
m2 = m2.hexdigest()
assert m1 == m2
# check for bug #7067
info = read_info(raw_fname)
info['meas_date'] = None
tmp_fname_3 = tmpdir.join('info3.fif')
write_info(tmp_fname_3, info)
info2 = read_info(tmp_fname_3)
assert info2['meas_date'] is None
|
39,411 | def get_cmap_safe(cmap):
"""Fetch a colormap by name from matplotlib, colorcet, or cmocean."""
try:
from matplotlib.cm import get_cmap
except ImportError:
raise ImportError('The use of custom colormaps requires the installation of matplotlib')
if isinstance(cmap, str):
# check if this colormap has been mapped between ipygany
if cmap in IPYGANY_MAP:
cmap = IPYGANY_MAP[cmap]
# Try colorcet first
try:
import colorcet
cmap = colorcet.cm[cmap]
except (ImportError, KeyError):
pass
else:
return cmap
# Try cmocean second
try:
import cmocean
cmap = getattr(cmocean.cm, cmap)
except (ImportError, AttributeError):
pass
else:
return cmap
# Else use Matplotlib
cmap = get_cmap(cmap)
elif isinstance(cmap, list):
for item in cmap:
if not isinstance(item, str):
raise TypeError('When inputting a list as a cmap, each item should be a string.')
try:
from matplotlib.colors import ListedColormap
except ImportError:
raise ImportError('Listed colormaps require the installation of matplotlib')
cmap = ListedColormap(cmap)
return cmap
| def get_cmap_safe(cmap):
"""Fetch a colormap by name from matplotlib, colorcet, or cmocean."""
try:
from matplotlib.cm import get_cmap
except ImportError:
raise ImportError('The use of custom colormaps requires the installation of matplotlib')
if isinstance(cmap, str):
# check if this colormap has been mapped between ipygany
if cmap in IPYGANY_MAP:
cmap = IPYGANY_MAP[cmap]
# Try colorcet first
try:
import colorcet
cmap = colorcet.cm[cmap]
except (ImportError, KeyError):
pass
else:
return cmap
# Try cmocean second
try:
import cmocean
cmap = getattr(cmocean.cm, cmap)
except (ImportError, AttributeError):
pass
else:
return cmap
# Else use Matplotlib
cmap = get_cmap(cmap)
elif isinstance(cmap, list):
for item in cmap:
if not isinstance(item, str):
raise TypeError('When inputting a list as a cmap, each item should be a string.')
try:
from matplotlib.colors import ListedColormap
except ImportError: # pragma: no cover
raise ImportError('Listed colormaps require the installation of matplotlib')
cmap = ListedColormap(cmap)
return cmap
|
32,240 | def reboot(topology: Topology, hostid: str) -> RestartSystemCommandResult:
"""
Reboot the given host.
:param topology: `Topology` instance !no-auto-argument
:param hostid: ID of host (serial or hostname) to reboot
"""
result: RestartSystemCommandResult = UniversalCommand.reboot(topology, hostid=hostid)
return result
| def reboot(topology: Topology, hostid: str) -> RestartSystemCommandResult:
"""
Reboot the given host.
:param topology: `Topology` instance !no-auto-argument
:param hostid: ID of host (serial or hostname) to reboot
"""
return UniversalCommand.reboot(topology, hostid=hostid)
|
7,529 | def sigma_clipped_stats(data, mask=None, mask_value=None, sigma=3.0,
sigma_lower=None, sigma_upper=None, maxiters=5,
cenfunc='median', stdfunc='std', std_ddof=0,
axis=None, grow=False):
"""
Calculate sigma-clipped statistics on the provided data.
Parameters
----------
data : array_like or `~numpy.ma.MaskedArray`
Data array or object that can be converted to an array.
mask : `numpy.ndarray` (bool), optional
A boolean mask with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Masked pixels are excluded when computing the statistics.
mask_value : float, optional
A data value (e.g., ``0.0``) that is ignored when computing the
statistics. ``mask_value`` will be masked in addition to any
input ``mask``.
sigma : float, optional
The number of standard deviations to use for both the lower and
upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is
3.
sigma_lower : float or `None`, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or `None`, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or `None`, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute the
center value for the clipping. If set to ``'median'`` or
``'mean'`` then having the optional `bottleneck`_ package
installed will result in the best performance. If using a
callable function/object and the ``axis`` keyword is used, then
it must be callable that can ignore NaNs (e.g., `numpy.nanmean`)
and has an ``axis`` keyword to return an array with axis
dimension(s) removed. The default is ``'median'``.
.. _bottleneck: https://github.com/pydata/bottleneck
stdfunc : {'std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If set to ``'std'``
then having the optional `bottleneck`_ package installed will
result in the best performance. If using a callable
function/object and the ``axis`` keyword is used, then it must
be callable that can ignore NaNs (e.g., `numpy.nanstd`) and has
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
std_ddof : int, optional
The delta degrees of freedom for the standard deviation
calculation. The divisor used in the calculation is ``N -
std_ddof``, where ``N`` represents the number of elements. The
default is 0.
axis : `None` or int or tuple of int, optional
The axis or axes along which to sigma clip the data. If `None`,
then the flattened data will be used. ``axis`` is passed
to the ``cenfunc`` and ``stdfunc``. The default is `None`.
grow : float or `False`, optional
Radius within which to mask the neighbouring pixels of those that
fall outwith the clipping limits (only applied along `axis`, if
specified). A value of 1 will mask the nearest pixels in a cross
pattern around each deviant pixel, while 1.5 will also reject the
nearest diagonal neighbours and so on.
Returns
-------
mean, median, stddev : float
The mean, median, and standard deviation of the sigma-clipped
data.
See Also
--------
SigmaClip, sigma_clip
"""
if mask is not None:
data = np.ma.MaskedArray(data, mask)
if mask_value is not None:
data = np.ma.masked_values(data, mask_value)
if isinstance(data, np.ma.MaskedArray) and data.mask.all():
return np.ma.masked, np.ma.masked, np.ma.masked
sigclip = SigmaClip(sigma=sigma, sigma_lower=sigma_lower,
sigma_upper=sigma_upper, maxiters=maxiters,
cenfunc=cenfunc, stdfunc=stdfunc, grow=grow)
data_clipped = sigclip(data, axis=axis, masked=False, return_bounds=False,
copy=False)
if HAS_BOTTLENECK:
mean = _nanmean(data_clipped, axis=axis)
median = _nanmedian(data_clipped, axis=axis)
std = _nanstd(data_clipped, ddof=std_ddof, axis=axis)
else: # pragma: no cover
mean = np.nanmean(data_clipped, axis=axis)
median = np.nanmedian(data_clipped, axis=axis)
std = np.nanstd(data_clipped, ddof=std_ddof, axis=axis)
return mean, median, std
| def sigma_clipped_stats(data, mask=None, mask_value=None, sigma=3.0,
sigma_lower=None, sigma_upper=None, maxiters=5,
cenfunc='median', stdfunc='std', std_ddof=0,
axis=None, grow=False):
"""
Calculate sigma-clipped statistics on the provided data.
Parameters
----------
data : array_like or `~numpy.ma.MaskedArray`
Data array or object that can be converted to an array.
mask : `numpy.ndarray` (bool), optional
A boolean mask with the same shape as ``data``, where a `True`
value indicates the corresponding element of ``data`` is masked.
Masked pixels are excluded when computing the statistics.
mask_value : float, optional
A data value (e.g., ``0.0``) that is ignored when computing the
statistics. ``mask_value`` will be masked in addition to any
input ``mask``.
sigma : float, optional
The number of standard deviations to use for both the lower and
upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. The default is
3.
sigma_lower : float or `None`, optional
The number of standard deviations to use as the lower bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
sigma_upper : float or `None`, optional
The number of standard deviations to use as the upper bound for
the clipping limit. If `None` then the value of ``sigma`` is
used. The default is `None`.
maxiters : int or `None`, optional
The maximum number of sigma-clipping iterations to perform or
`None` to clip until convergence is achieved (i.e., iterate
until the last iteration clips nothing). If convergence is
achieved prior to ``maxiters`` iterations, the clipping
iterations will stop. The default is 5.
cenfunc : {'median', 'mean'} or callable, optional
The statistic or callable function/object used to compute the
center value for the clipping. If set to ``'median'`` or
``'mean'`` then having the optional `bottleneck`_ package
installed will result in the best performance. If using a
callable function/object and the ``axis`` keyword is used, then
it must be callable that can ignore NaNs (e.g., `numpy.nanmean`)
and has an ``axis`` keyword to return an array with axis
dimension(s) removed. The default is ``'median'``.
.. _bottleneck: https://github.com/pydata/bottleneck
stdfunc : {'std'} or callable, optional
The statistic or callable function/object used to compute the
standard deviation about the center value. If set to ``'std'``
then having the optional `bottleneck`_ package installed will
result in the best performance. If using a callable
function/object and the ``axis`` keyword is used, then it must
be callable that can ignore NaNs (e.g., `numpy.nanstd`) and has
an ``axis`` keyword to return an array with axis dimension(s)
removed. The default is ``'std'``.
std_ddof : int, optional
The delta degrees of freedom for the standard deviation
calculation. The divisor used in the calculation is ``N -
std_ddof``, where ``N`` represents the number of elements. The
default is 0.
axis : `None` or int or tuple of int, optional
The axis or axes along which to sigma clip the data. If `None`,
then the flattened data will be used. ``axis`` is passed
to the ``cenfunc`` and ``stdfunc``. The default is `None`.
grow : float or `False`, optional
Radius within which to mask the neighbouring pixels of those that
fall outwith the clipping limits (only applied along ``axis``, if
specified). A value of 1 will mask the nearest pixels in a cross
pattern around each deviant pixel, while 1.5 will also reject the
nearest diagonal neighbours and so on.
Returns
-------
mean, median, stddev : float
The mean, median, and standard deviation of the sigma-clipped
data.
See Also
--------
SigmaClip, sigma_clip
"""
if mask is not None:
data = np.ma.MaskedArray(data, mask)
if mask_value is not None:
data = np.ma.masked_values(data, mask_value)
if isinstance(data, np.ma.MaskedArray) and data.mask.all():
return np.ma.masked, np.ma.masked, np.ma.masked
sigclip = SigmaClip(sigma=sigma, sigma_lower=sigma_lower,
sigma_upper=sigma_upper, maxiters=maxiters,
cenfunc=cenfunc, stdfunc=stdfunc, grow=grow)
data_clipped = sigclip(data, axis=axis, masked=False, return_bounds=False,
copy=False)
if HAS_BOTTLENECK:
mean = _nanmean(data_clipped, axis=axis)
median = _nanmedian(data_clipped, axis=axis)
std = _nanstd(data_clipped, ddof=std_ddof, axis=axis)
else: # pragma: no cover
mean = np.nanmean(data_clipped, axis=axis)
median = np.nanmedian(data_clipped, axis=axis)
std = np.nanstd(data_clipped, ddof=std_ddof, axis=axis)
return mean, median, std
|
50,677 | def _load_audio_single(file_path, return_pts_based_timestamps=False):
try:
container = av.open(str(file_path))
stream = next(iter(container.streams.audio))
logger.debug("Loaded audiostream: %s" % stream)
except (av.AVError, StopIteration):
return None
ts_path = file_path.with_name(file_path.stem + "_timestamps.npy")
try:
timestamps = np.load(ts_path)
except IOError:
return None
start = timestamps[0]
packet_pts = np.array(
[p.pts for p in container.demux(stream) if p is not None and p.pts is not None],
)
if return_pts_based_timestamps:
timestamps = start + packet_pts * stream.time_base
# pts seeking requires Python ints; convert after `packet_pts * stream.time_base`
# to leverage numpy element-wise function application
packet_pts = packet_pts.tolist()
try:
container.seek(0)
except av.AVError as err:
logger.debug(f"{err}")
return None
return LoadedAudio(container, stream, timestamps, packet_pts)
| def _load_audio_single(file_path, return_pts_based_timestamps=False):
try:
container = av.open(str(file_path))
stream = next(iter(container.streams.audio))
logger.debug("Loaded audiostream: %s" % stream)
except (av.AVError, StopIteration):
return None
ts_path = file_path.with_name(file_path.stem + "_timestamps.npy")
try:
timestamps = np.load(ts_path)
except IOError:
return None
start = timestamps[0]
packet_pts = np.array(
[p.pts for p in container.demux(stream) if p is not None and p.pts is not None],
)
if return_pts_based_timestamps:
timestamps = start + packet_pts * stream.time_base
# pts seeking requires primitive Python integers and does not accept numpy int types;
# `.tolist()` converts numpy integers to primitive Python integers; do conversion after
# `packet_pts * stream.time_base` to leverage numpy element-wise function application
packet_pts = packet_pts.tolist()
try:
container.seek(0)
except av.AVError as err:
logger.debug(f"{err}")
return None
return LoadedAudio(container, stream, timestamps, packet_pts)
|
32,099 | def status_get_command(args, is_polling=False):
request_ids_for_polling = {}
request_ids = argToList(args.get('request_ids'))
timeout = args.get('timeout')
timeout_duration = args.get('timeout_duration')
timeout = timeout and int(timeout)
responses = []
files_output = []
file_standard_context = []
sha256 = "" # uses for the polling, if not empty indicates that the status is ready
for request_id in request_ids:
response = status_get_cmd(request_id, timeout, timeout_duration)
responses.append(response)
resources: dict = response.get('resources', {})
for host_id, resource in resources.items():
errors = resource.get('errors', [])
if errors:
error_message = errors[0].get('message', '')
if not error_message:
error_message = f'Could not get command\n{errors}'
return_error(error_message)
files_output.append({
'ID': resource.get('id'),
'TaskID': resource.get('cloud_request_id'),
'CreatedAt': resource.get('created_at'),
'DeletedAt': resource.get('deleted_at'),
'UpdatedAt': resource.get('updated_at'),
'Name': resource.get('name'),
'Size': resource.get('size'),
'SHA256': resource.get('sha256')
})
file_standard_context.append({
'Name': resource.get('name'),
'SHA256': resource.get('sha256'),
'Size': resource.get('size'),
})
sha256 = resource.get('sha256', '')
request_ids_for_polling[host_id] = {'SHA256': sha256, 'RequestID': request_id}
if is_polling:
args['SHA256'] = sha256
return request_ids_for_polling, args
human_readable = tableToMarkdown('CrowdStrike Falcon files', files_output)
entry_context = {
'CrowdStrike.File(val.ID === obj.ID || val.TaskID === obj.TaskID)': files_output,
outputPaths['file']: file_standard_context
}
if len(responses) == 1:
return create_entry_object(contents=responses[0], ec=entry_context, hr=human_readable)
else:
return create_entry_object(contents=response, ec=entry_context, hr=human_readable)
| def status_get_command(args, is_polling=False):
request_ids_for_polling = {}
request_ids = argToList(args.get('request_ids'))
timeout = args.get('timeout')
timeout_duration = args.get('timeout_duration')
timeout = timeout and int(timeout)
responses = []
files_output = []
file_standard_context = []
sha256 = "" # Used for the polling. When this isn't empty it indicates that the status is "ready".
for request_id in request_ids:
response = status_get_cmd(request_id, timeout, timeout_duration)
responses.append(response)
resources: dict = response.get('resources', {})
for host_id, resource in resources.items():
errors = resource.get('errors', [])
if errors:
error_message = errors[0].get('message', '')
if not error_message:
error_message = f'Could not get command\n{errors}'
return_error(error_message)
files_output.append({
'ID': resource.get('id'),
'TaskID': resource.get('cloud_request_id'),
'CreatedAt': resource.get('created_at'),
'DeletedAt': resource.get('deleted_at'),
'UpdatedAt': resource.get('updated_at'),
'Name': resource.get('name'),
'Size': resource.get('size'),
'SHA256': resource.get('sha256')
})
file_standard_context.append({
'Name': resource.get('name'),
'SHA256': resource.get('sha256'),
'Size': resource.get('size'),
})
sha256 = resource.get('sha256', '')
request_ids_for_polling[host_id] = {'SHA256': sha256, 'RequestID': request_id}
if is_polling:
args['SHA256'] = sha256
return request_ids_for_polling, args
human_readable = tableToMarkdown('CrowdStrike Falcon files', files_output)
entry_context = {
'CrowdStrike.File(val.ID === obj.ID || val.TaskID === obj.TaskID)': files_output,
outputPaths['file']: file_standard_context
}
if len(responses) == 1:
return create_entry_object(contents=responses[0], ec=entry_context, hr=human_readable)
else:
return create_entry_object(contents=response, ec=entry_context, hr=human_readable)
|
52,227 | def _prepare_certificate_signing_request(domain, key_file, output_folder):
from OpenSSL import crypto # lazy loading this module for performance reasons
# Init a request
csr = crypto.X509Req()
# Set the domain
csr.get_subject().CN = domain
# Include xmpp-upload subdomain in subject alternate names
subdomain="xmpp-upload." + domain
try:
_dns_ip_match_public_ip(get_public_ip(), subdomain)
csr.add_extensions([crypto.X509Extension("subjectAltName", False, "DNS:" + subdomain)])
except YunohostError:
logger.warning(m18n.n('certmanager_warning_subdomain_dns_record', subdomain=subdomain, domain=domain))
# Set the key
with open(key_file, 'rt') as f:
key = crypto.load_privatekey(crypto.FILETYPE_PEM, f.read())
csr.set_pubkey(key)
# Sign the request
csr.sign(key, "sha256")
# Save the request in tmp folder
csr_file = output_folder + domain + ".csr"
logger.debug("Saving to %s.", csr_file)
with open(csr_file, "w") as f:
f.write(crypto.dump_certificate_request(crypto.FILETYPE_PEM, csr))
| def _prepare_certificate_signing_request(domain, key_file, output_folder):
from OpenSSL import crypto # lazy loading this module for performance reasons
# Init a request
csr = crypto.X509Req()
# Set the domain
csr.get_subject().CN = domain
from yunohost.domain import domain_list
# For "parent" domains, include xmpp-upload subdomain in subject alternate names
if domain in domain_list(exclude_subdomains=True)["domains"]:
subdomain="xmpp-upload." + domain
try:
_dns_ip_match_public_ip(get_public_ip(), subdomain)
csr.add_extensions([crypto.X509Extension("subjectAltName", False, "DNS:" + subdomain)])
except YunohostError:
logger.warning(m18n.n('certmanager_warning_subdomain_dns_record', subdomain=subdomain, domain=domain))
# Set the key
with open(key_file, 'rt') as f:
key = crypto.load_privatekey(crypto.FILETYPE_PEM, f.read())
csr.set_pubkey(key)
# Sign the request
csr.sign(key, "sha256")
# Save the request in tmp folder
csr_file = output_folder + domain + ".csr"
logger.debug("Saving to %s.", csr_file)
with open(csr_file, "w") as f:
f.write(crypto.dump_certificate_request(crypto.FILETYPE_PEM, csr))
|
42,984 | def apply_twomode_gate(mat, state, pure, modes, n, trunc, gate="BSgate"):
"""Applies a two-mode gate to a state
Applies the two-mode gate to the state using custom tensor contractions and
the numba compiler for faster application.
Args:
mat (ndarray): The BS operator to be applied to the state
state (ndarray): The state that the BS is applied to
pure (bool): If the state is pure or mixed
modes (list[int]): A list of modes to which the BS is applied
n (int): The total number of modes
trunc (int): The Hilbert space truncation/cutoff
gate (str): the gate which should be called (BSgate, S2gate)
Returns:
ndarray: State where the two-mode operation has been applied
"""
if pure:
t1 = modes[0]
t2 = modes[1]
# put the ket-values in front to be operated on in the apply function
switch_list_1 = np.arange(n)
switch_list_2 = np.arange(n)
switch_list_1[[0, t1]] = switch_list_1[[t1, 0]]
switch_list_2[[1, t2]] = switch_list_2[[t2, 1]]
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
if gate == "BSgate":
state = _apply_BS(mat, state, trunc)
elif gate == "S2gate":
state = _apply_S2(mat, state, trunc)
else:
raise NotImplementedError
state = state.transpose(switch_list_2)
ret = state.transpose(switch_list_1)
else:
t1 = 2 * modes[0]
t2 = 2 * modes[1]
# put the ket-values in front to be operated on in the apply function
switch_list_1 = np.arange(2 * n)
switch_list_2 = np.arange(2 * n)
switch_list_1[[0, 1, t1, t1+1]] = switch_list_1[[t1, t1+1, 0, 1]]
switch_list_2[[0, 1, t2, t2+1]] = switch_list_2[[t2, t2+1, 0, 1]]
# put bra-values to the left, and ket-values to the right (ignoring values not operated on)
transpose_list = np.arange(2 * n)
transpose_list[[t1+1, t2]] = transpose_list[[t2, t1+1]]
state = state.transpose(transpose_list)
state = state.transpose(switch_list_1)
if gate == "BSgate":
state = _apply_BS(mat, state, trunc)
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
state = _apply_BS(mat.conj(), state, trunc)
elif gate == "S2gate":
state = _apply_S2(mat, state, trunc)
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
state = _apply_S2(mat.conj(), state, trunc)
else:
raise NotImplementedError
state = state.transpose(switch_list_2)
ret = state.transpose(transpose_list)
return ret
| def apply_twomode_gate(mat, state, pure, modes, n, trunc, gate="BSgate"):
"""Applies a two-mode gate to a state.
Applies the two-mode gate to the state using custom tensor contractions and
the numba compiler for faster application.
Args:
mat (ndarray): The BS operator to be applied to the state
state (ndarray): The state that the BS is applied to
pure (bool): If the state is pure or mixed
modes (list[int]): A list of modes to which the BS is applied
n (int): The total number of modes
trunc (int): The Hilbert space truncation/cutoff
gate (str): the gate which should be called (BSgate, S2gate)
Returns:
ndarray: State where the two-mode operation has been applied
"""
if pure:
t1 = modes[0]
t2 = modes[1]
# put the ket-values in front to be operated on in the apply function
switch_list_1 = np.arange(n)
switch_list_2 = np.arange(n)
switch_list_1[[0, t1]] = switch_list_1[[t1, 0]]
switch_list_2[[1, t2]] = switch_list_2[[t2, 1]]
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
if gate == "BSgate":
state = _apply_BS(mat, state, trunc)
elif gate == "S2gate":
state = _apply_S2(mat, state, trunc)
else:
raise NotImplementedError
state = state.transpose(switch_list_2)
ret = state.transpose(switch_list_1)
else:
t1 = 2 * modes[0]
t2 = 2 * modes[1]
# put the ket-values in front to be operated on in the apply function
switch_list_1 = np.arange(2 * n)
switch_list_2 = np.arange(2 * n)
switch_list_1[[0, 1, t1, t1+1]] = switch_list_1[[t1, t1+1, 0, 1]]
switch_list_2[[0, 1, t2, t2+1]] = switch_list_2[[t2, t2+1, 0, 1]]
# put bra-values to the left, and ket-values to the right (ignoring values not operated on)
transpose_list = np.arange(2 * n)
transpose_list[[t1+1, t2]] = transpose_list[[t2, t1+1]]
state = state.transpose(transpose_list)
state = state.transpose(switch_list_1)
if gate == "BSgate":
state = _apply_BS(mat, state, trunc)
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
state = _apply_BS(mat.conj(), state, trunc)
elif gate == "S2gate":
state = _apply_S2(mat, state, trunc)
state = state.transpose(switch_list_1)
state = state.transpose(switch_list_2)
state = _apply_S2(mat.conj(), state, trunc)
else:
raise NotImplementedError
state = state.transpose(switch_list_2)
ret = state.transpose(transpose_list)
return ret
|
59,186 | def dec(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wraps(func)(wrapper) | def dec(func):
def wrapper(*args, **kwargs):
return func(*args, **kwargs)
return wraps(func)(wrapper)
|
49,609 | def add_path_column(df, column_name, path, dtype):
if column_name in df.columns:
raise ValueError(
f"Files already contain the column name: '{column_name}', so the path "
"column cannot use this name. Please set `include_path_column` to a "
"`include_path_column` to a unique name."
)
return df.assign(**{column_name: pd.Series([path] * len(df), dtype=dtype)})
| def add_path_column(df, column_name, path, dtype):
if column_name in df.columns:
raise ValueError(
f"Files already contain the column name: '{column_name}', so the path "
"column cannot use this name. Please set `include_path_column` to a "
"unique name."
)
return df.assign(**{column_name: pd.Series([path] * len(df), dtype=dtype)})
|
23,112 | def set_partitions_pre(s, divisions, ascending=True):
try:
if ascending:
partitions = divisions.searchsorted(s, side="right") - 1
else:
partitions = -divisions.searchsorted(s, side="right") % (len(divisions) - 1)
except TypeError:
# When `searchsorted` fails with `TypeError`, it may be
# caused by nulls in `s`. Try again with the null-values
# explicitly mapped to the first partition.
partitions = np.empty(len(s), dtype="int32")
partitions[s.isna()] = 0
not_null = s.notna()
if ascending:
partitions[not_null] = divisions.searchsorted(s[not_null], side="right") - 1
else:
partitions[not_null] = -divisions.searchsorted(
s[not_null], side="right"
) % (len(divisions) - 1)
partitions[(s >= divisions.iloc[-1]).values] = (
len(divisions) - 2 if ascending else 0
)
return partitions
| def set_partitions_pre(s, divisions, ascending=True):
try:
if ascending:
partitions = divisions.searchsorted(s, side="right") - 1
else:
partitions = len(divisions) - divisions.searchsorted(s, side="right") - 1
except TypeError:
# When `searchsorted` fails with `TypeError`, it may be
# caused by nulls in `s`. Try again with the null-values
# explicitly mapped to the first partition.
partitions = np.empty(len(s), dtype="int32")
partitions[s.isna()] = 0
not_null = s.notna()
if ascending:
partitions[not_null] = divisions.searchsorted(s[not_null], side="right") - 1
else:
partitions[not_null] = -divisions.searchsorted(
s[not_null], side="right"
) % (len(divisions) - 1)
partitions[(s >= divisions.iloc[-1]).values] = (
len(divisions) - 2 if ascending else 0
)
return partitions
|
4,177 | def _check_full_data(inst, full_data):
"""Check whether data is represented as kernel or not."""
if full_data:
assert isinstance(inst._kernel, type(None))
assert isinstance(inst._sens_data, type(None))
assert isinstance(inst._data, np.ndarray)
else:
assert isinstance(inst._kernel, np.ndarray)
assert isinstance(inst._sens_data, np.ndarray)
assert isinstance(inst._data, type(None))
assert not inst._kernel_removed
| def _check_full_data(inst, full_data):
"""Check whether data is represented as kernel or not."""
if full_data:
assert inst._kernel is None
assert isinstance(inst._sens_data, type(None))
assert isinstance(inst._data, np.ndarray)
else:
assert isinstance(inst._kernel, np.ndarray)
assert isinstance(inst._sens_data, np.ndarray)
assert isinstance(inst._data, type(None))
assert not inst._kernel_removed
|
7,894 | def _vectfit_xs(energy, ce_xs, mts, rtol=1e-3, atol=1e-5, orders=None,
n_vf_iter=30, log=False, path_out=None, **kwargs):
r"""Convert point-wise cross section to multipole data via Vector Fitting.
Parameters
----------
energy : np.ndarray
Energy array
ce_xs : np.ndarray
Point-wise cross sections to be fitted
mts : Iterable of int
Reaction list
rtol : float, optional
Relative error tolerance
atol : float, optional
Absolute error tolerance
orders : Iterable of int, optional
A list of orders (number of poles) to be searched
n_vf_iter : int, optional
Number of maximum VF iterations
log : bool or int, optional
Whether to print running logs
path_out : str, optional
Path to save the figures
**kwargs
Additional keyword arguments
Returns
-------
Tuple
(poles, residues)
"""
# import vectfit package: https://github.com/liangjg/vectfit
import vectfit as vf
ne = energy.size
nmt = len(mts)
if ce_xs.shape != (nmt, ne):
raise ValueError('Inconsistent cross section data.')
# construct test data: interpolate xs with finer grids
N_FINER = 10
ne_test = (ne-1)*N_FINER + 1
test_energy = np.interp(np.arange(ne_test),
np.arange(ne_test, step=N_FINER), energy)
test_energy[[0, -1]] = energy[[0, -1]] # avoid numerical issue
test_xs_ref = np.zeros((nmt, ne_test))
for i in range(nmt):
test_xs_ref[i] = np.interp(test_energy, energy, ce_xs[i])
if log:
print("Energy: {:.3e} to {:.3e} eV ({} points)".format(
energy[0], energy[-1], ne))
# inputs
f = ce_xs * energy # sigma*E
s = np.sqrt(energy) # sqrt(E)
test_s = np.sqrt(test_energy)
weight = 1.0/f
# very small cross sections can lead to huge weights, which will harm the
# fitting accuracy
MIN_CROSS_SECTION = 1e-7
for i in range(nmt):
if np.all(ce_xs[i]<=MIN_CROSS_SECTION):
weight[i] = 1.0
elif np.any(ce_xs[i]<=MIN_CROSS_SECTION):
weight[i, ce_xs[i]<=MIN_CROSS_SECTION] = \
max(weight[i, ce_xs[i]>MIN_CROSS_SECTION])
# detect peaks (resonances) and determine VF order search range
peaks, _ = find_peaks(ce_xs[0]+ce_xs[1])
n_peaks = peaks.size
if orders is not None:
# make sure orders are even integers
orders = list(set([int(i/2)*2 for i in orders if i>=2]))
else:
lowest_order = max(2, 2*n_peaks)
highest_order = max(200, 4*n_peaks)
orders = list(range(lowest_order, highest_order+1, 2))
if log:
print("Found {} peaks".format(n_peaks))
print("Fitting orders from {} to {}".format(orders[0], orders[-1]))
# perform VF with increasing orders
found_ideal = False
n_discarded = 0 # for accelation, number of discarded searches
best_quality = best_ratio = -np.inf
for i, order in enumerate(orders):
if log:
print("Order={}({}/{})".format(order, i, len(orders)))
# initial guessed poles
poles = np.linspace(s[0], s[-1], order//2)
poles = poles + poles*0.01j
poles = np.sort(np.append(poles, np.conj(poles)))
found_better = False
# fitting iteration
for i_vf in range(n_vf_iter):
if log >= DETAILED_LOGGING:
print("VF iteration {}/{}".format(i_vf+1, n_vf_iter))
# call vf
try:
poles, residues, cf, f_fit, rms = vf.vectfit(f, s, poles, weight)
except:
break
# convert real pole to conjugate pairs
n_real_poles = 0
new_poles = []
for p in poles:
p_r, p_i = np.real(p), np.imag(p)
if (s[0] <= p_r <= s[-1]) and p_i == 0.:
new_poles += [p_r+p_r*0.01j, p_r-p_r*0.01j]
n_real_poles += 1
else:
new_poles += [p]
new_poles = np.array(new_poles)
# re-calculate residues if poles changed
if n_real_poles > 0:
if log >= DETAILED_LOGGING:
print(" # real poles: {}".format(n_real_poles))
new_poles, residues, cf, f_fit, rms = \
vf.vectfit(f, s, new_poles, weight, skip_pole=True)
# assess the result on test grid
test_xs = vf.evaluate(test_s, new_poles, residues) / test_energy
abserr = np.abs(test_xs - test_xs_ref)
relerr = abserr / test_xs_ref
if np.any(np.isnan(abserr)):
maxre, ratio, ratio2 = np.inf, -np.inf, -np.inf
elif np.all(abserr <= atol):
maxre, ratio, ratio2 = 0., 1., 1.
else:
maxre = np.max(relerr[abserr > atol])
ratio = np.sum((relerr<rtol) | (abserr<atol)) / relerr.size
ratio2 = np.sum((relerr<10*rtol) | (abserr<atol)) / relerr.size
quality = ratio + ratio2 - min(0.1*maxre, 1) - 0.001*new_poles.size
if np.any(test_xs < -atol):
quality = -np.inf
if log >= DETAILED_LOGGING:
print(" # poles: {}".format(new_poles.size))
print(" Max relative error: {:.3f}%".format(maxre*100))
print(" Satisfaction: {:.1f}%, {:.1f}%".format(ratio*100, ratio2*100))
print(" Quality: {:.2f}".format(quality))
if quality > best_quality:
if log >= DETAILED_LOGGING:
print(" Best by far!")
found_better = True
best_quality, best_ratio = quality, ratio
best_poles, best_residues = new_poles, residues
best_test_xs, best_relerr = test_xs, relerr
if best_ratio >= 1.0:
if log:
print("Found ideal results. Stop!")
found_ideal = True
break
else:
if log >= DETAILED_LOGGING:
print(" Discarded!")
if found_ideal:
break
# acceleration
if found_better:
n_discarded = 0
else:
if order > max(2*n_peaks, 50) and best_ratio > 0.7:
n_discarded += 1
if n_discarded >= 10 or (n_discarded >= 5 and best_ratio > 0.9):
if log >= DETAILED_LOGGING:
print("Couldn't get better results. Stop!")
break
# merge conjugate poles
real_idx = []
conj_idx = []
found_conj = False
for i, p in enumerate(best_poles):
if found_conj:
found_conj = False
continue
if np.imag(p) == 0.:
real_idx.append(i)
else:
if i < best_poles.size and np.conj(p) == best_poles[i+1]:
found_conj = True
conj_idx.append(i)
else:
raise RuntimeError("Complex poles are not conjugate!")
if log:
print("Found {} real poles and {} conjugate complex pairs.".format(
len(real_idx), len(conj_idx)))
mp_poles = best_poles[real_idx+conj_idx]
mp_residues = np.concatenate((best_residues[:, real_idx],
best_residues[:, conj_idx]*2), axis=1)/1j
if log:
print("Final number of poles: {}".format(mp_poles.size))
if path_out:
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
if not os.path.exists(path_out):
os.makedirs(path_out)
for i, mt in enumerate(mts):
fig, ax1 = plt.subplots()
lns1 = ax1.loglog(test_energy, test_xs_ref[i], 'g', label="ACE xs")
lns2 = ax1.loglog(test_energy, best_test_xs[i], 'b', label="VF xs")
ax2 = ax1.twinx()
lns3 = ax2.loglog(test_energy, best_relerr[i], 'r',
label="Relative error", alpha=0.5)
lns = lns1 + lns2 + lns3
labels = [l.get_label() for l in lns]
ax1.legend(lns, labels, loc='best')
ax1.set_xlabel('energy (eV)')
ax1.set_ylabel('cross section (b)', color='b')
ax1.tick_params('y', colors='b')
ax2.set_ylabel('relative error', color='r')
ax2.tick_params('y', colors='r')
plt.title("MT {} vectfitted with {} poles".format(mt, mp_poles.size))
fig.tight_layout()
fig_file = os.path.join(path_out, "{:.0f}-{:.0f}_MT{}.png".format(
energy[0], energy[-1], mt))
plt.savefig(fig_file)
plt.close()
if log:
print("Saved figure: {}".format(fig_file))
return (mp_poles, mp_residues)
| def _vectfit_xs(energy, ce_xs, mts, rtol=1e-3, atol=1e-5, orders=None,
n_vf_iter=30, log=False, path_out=None, **kwargs):
r"""Convert point-wise cross section to multipole data via Vector Fitting.
Parameters
----------
energy : np.ndarray
Energy array
ce_xs : np.ndarray
Point-wise cross sections to be fitted
mts : Iterable of int
Reaction list
rtol : float, optional
Relative error tolerance
atol : float, optional
Absolute error tolerance
orders : Iterable of int, optional
A list of orders (number of poles) to be searched
n_vf_iter : int, optional
Number of maximum VF iterations
log : bool or int, optional
Whether to print running logs
path_out : str, optional
Path to save the figures
**kwargs
Additional keyword arguments
Returns
-------
Tuple
(poles, residues)
"""
# import vectfit package: https://github.com/liangjg/vectfit
import vectfit as vf
ne = energy.size
nmt = len(mts)
if ce_xs.shape != (nmt, ne):
raise ValueError('Inconsistent cross section data.')
# construct test data: interpolate xs with finer grids
N_FINER = 10
ne_test = (ne-1)*N_FINER + 1
test_energy = np.interp(np.arange(ne_test),
np.arange(ne_test, step=N_FINER), energy)
test_energy[[0, -1]] = energy[[0, -1]] # avoid numerical issue
test_xs_ref = np.zeros((nmt, ne_test))
for i in range(nmt):
test_xs_ref[i] = np.interp(test_energy, energy, ce_xs[i])
if log:
print("Energy: {:.3e} to {:.3e} eV ({} points)".format(
energy[0], energy[-1], ne))
# inputs
f = ce_xs * energy # sigma*E
s = np.sqrt(energy) # sqrt(E)
test_s = np.sqrt(test_energy)
weight = 1.0/f
# very small cross sections can lead to huge weights, which will harm the
# fitting accuracy
MIN_CROSS_SECTION = 1e-7
for i in range(nmt):
if np.all(ce_xs[i]<=MIN_CROSS_SECTION):
weight[i] = 1.0
elif np.any(ce_xs[i]<=MIN_CROSS_SECTION):
weight[i, ce_xs[i]<=MIN_CROSS_SECTION] = \
max(weight[i, ce_xs[i]>MIN_CROSS_SECTION])
# detect peaks (resonances) and determine VF order search range
peaks, _ = find_peaks(ce_xs[0] + ce_xs[1])
n_peaks = peaks.size
if orders is not None:
# make sure orders are even integers
orders = list(set([int(i/2)*2 for i in orders if i>=2]))
else:
lowest_order = max(2, 2*n_peaks)
highest_order = max(200, 4*n_peaks)
orders = list(range(lowest_order, highest_order+1, 2))
if log:
print("Found {} peaks".format(n_peaks))
print("Fitting orders from {} to {}".format(orders[0], orders[-1]))
# perform VF with increasing orders
found_ideal = False
n_discarded = 0 # for accelation, number of discarded searches
best_quality = best_ratio = -np.inf
for i, order in enumerate(orders):
if log:
print("Order={}({}/{})".format(order, i, len(orders)))
# initial guessed poles
poles = np.linspace(s[0], s[-1], order//2)
poles = poles + poles*0.01j
poles = np.sort(np.append(poles, np.conj(poles)))
found_better = False
# fitting iteration
for i_vf in range(n_vf_iter):
if log >= DETAILED_LOGGING:
print("VF iteration {}/{}".format(i_vf+1, n_vf_iter))
# call vf
try:
poles, residues, cf, f_fit, rms = vf.vectfit(f, s, poles, weight)
except:
break
# convert real pole to conjugate pairs
n_real_poles = 0
new_poles = []
for p in poles:
p_r, p_i = np.real(p), np.imag(p)
if (s[0] <= p_r <= s[-1]) and p_i == 0.:
new_poles += [p_r+p_r*0.01j, p_r-p_r*0.01j]
n_real_poles += 1
else:
new_poles += [p]
new_poles = np.array(new_poles)
# re-calculate residues if poles changed
if n_real_poles > 0:
if log >= DETAILED_LOGGING:
print(" # real poles: {}".format(n_real_poles))
new_poles, residues, cf, f_fit, rms = \
vf.vectfit(f, s, new_poles, weight, skip_pole=True)
# assess the result on test grid
test_xs = vf.evaluate(test_s, new_poles, residues) / test_energy
abserr = np.abs(test_xs - test_xs_ref)
relerr = abserr / test_xs_ref
if np.any(np.isnan(abserr)):
maxre, ratio, ratio2 = np.inf, -np.inf, -np.inf
elif np.all(abserr <= atol):
maxre, ratio, ratio2 = 0., 1., 1.
else:
maxre = np.max(relerr[abserr > atol])
ratio = np.sum((relerr<rtol) | (abserr<atol)) / relerr.size
ratio2 = np.sum((relerr<10*rtol) | (abserr<atol)) / relerr.size
quality = ratio + ratio2 - min(0.1*maxre, 1) - 0.001*new_poles.size
if np.any(test_xs < -atol):
quality = -np.inf
if log >= DETAILED_LOGGING:
print(" # poles: {}".format(new_poles.size))
print(" Max relative error: {:.3f}%".format(maxre*100))
print(" Satisfaction: {:.1f}%, {:.1f}%".format(ratio*100, ratio2*100))
print(" Quality: {:.2f}".format(quality))
if quality > best_quality:
if log >= DETAILED_LOGGING:
print(" Best by far!")
found_better = True
best_quality, best_ratio = quality, ratio
best_poles, best_residues = new_poles, residues
best_test_xs, best_relerr = test_xs, relerr
if best_ratio >= 1.0:
if log:
print("Found ideal results. Stop!")
found_ideal = True
break
else:
if log >= DETAILED_LOGGING:
print(" Discarded!")
if found_ideal:
break
# acceleration
if found_better:
n_discarded = 0
else:
if order > max(2*n_peaks, 50) and best_ratio > 0.7:
n_discarded += 1
if n_discarded >= 10 or (n_discarded >= 5 and best_ratio > 0.9):
if log >= DETAILED_LOGGING:
print("Couldn't get better results. Stop!")
break
# merge conjugate poles
real_idx = []
conj_idx = []
found_conj = False
for i, p in enumerate(best_poles):
if found_conj:
found_conj = False
continue
if np.imag(p) == 0.:
real_idx.append(i)
else:
if i < best_poles.size and np.conj(p) == best_poles[i+1]:
found_conj = True
conj_idx.append(i)
else:
raise RuntimeError("Complex poles are not conjugate!")
if log:
print("Found {} real poles and {} conjugate complex pairs.".format(
len(real_idx), len(conj_idx)))
mp_poles = best_poles[real_idx+conj_idx]
mp_residues = np.concatenate((best_residues[:, real_idx],
best_residues[:, conj_idx]*2), axis=1)/1j
if log:
print("Final number of poles: {}".format(mp_poles.size))
if path_out:
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
if not os.path.exists(path_out):
os.makedirs(path_out)
for i, mt in enumerate(mts):
fig, ax1 = plt.subplots()
lns1 = ax1.loglog(test_energy, test_xs_ref[i], 'g', label="ACE xs")
lns2 = ax1.loglog(test_energy, best_test_xs[i], 'b', label="VF xs")
ax2 = ax1.twinx()
lns3 = ax2.loglog(test_energy, best_relerr[i], 'r',
label="Relative error", alpha=0.5)
lns = lns1 + lns2 + lns3
labels = [l.get_label() for l in lns]
ax1.legend(lns, labels, loc='best')
ax1.set_xlabel('energy (eV)')
ax1.set_ylabel('cross section (b)', color='b')
ax1.tick_params('y', colors='b')
ax2.set_ylabel('relative error', color='r')
ax2.tick_params('y', colors='r')
plt.title("MT {} vectfitted with {} poles".format(mt, mp_poles.size))
fig.tight_layout()
fig_file = os.path.join(path_out, "{:.0f}-{:.0f}_MT{}.png".format(
energy[0], energy[-1], mt))
plt.savefig(fig_file)
plt.close()
if log:
print("Saved figure: {}".format(fig_file))
return (mp_poles, mp_residues)
|
43,567 | def DisplacementEmbedding(features, wires, method='amplitude', c=0.1):
r"""Encodes :math:`N` features into the displacement amplitudes :math:`r` or phases :math:`\phi` of :math:`M` modes,
where :math:`N\leq M`.
The mathematical definition of the displacement gate is given by the operator
.. math::
D(\alpha) = \exp(r (e^{i\phi}\ad -e^{-i\phi}\a)),
where :math:`\a` and :math:`\ad` are the bosonic creation and annihilation operators.
``features`` has to be an array of at most ``len(wires)`` floats. If there are fewer entries in
``features`` than wires, the circuit does not apply the remaining displacement gates.
Args:
features (array): Array of features of size (N,)
wires (Sequence[int]): sequence of mode indices that the template acts on
Keyword Args:
method (str): ``'phase'`` encodes the input into the phase of single-mode displacement, while
``'amplitude'`` uses the amplitude
c (float): value of the phase of all displacement gates if ``execution='amplitude'``, or
the amplitude of all displacement gates if ``execution='phase'``
Raises:
ValueError: if `features` or `wires` is invalid or if `method` is unknown
"""
if not isinstance(wires, Iterable):
raise ValueError("Wires needs to be a list of wires that the embedding uses; got {}.".format(wires))
if len(wires) < len(features):
raise ValueError("Number of features to embed cannot be larger than number of wires, which is {}; "
"got {}.".format(len(wires), len(features)))
for idx, f in enumerate(features):
if method == 'amplitude':
Displacement(f, c, wires=wires[idx])
elif method == 'phase':
Displacement(c, f, wires=wires[idx])
else:
raise ValueError("Execution method '{}' not known. Has to be 'phase' or 'amplitude'.".format(method))
| def DisplacementEmbedding(features, wires, method='amplitude', c=0.1):
r"""Encodes :math:`N` features into the displacement amplitudes :math:`r` or phases :math:`\phi` of :math:`M` modes,
where :math:`N\leq M`.
The mathematical definition of the displacement gate is given by the operator
.. math::
D(\alpha) = \exp(r (e^{i\phi}\ad -e^{-i\phi}\a)),
where :math:`\a` and :math:`\ad` are the bosonic creation and annihilation operators.
``features`` has to be an array of at most ``len(wires)`` floats. If there are fewer entries in
``features`` than wires, the circuit does not apply the remaining displacement gates.
Args:
features (array): Array of features of size (N,)
wires (Sequence[int]): sequence of mode indices that the template acts on
Keyword Args:
method (str): ``'phase'`` encodes the input into the phase of single-mode displacement, while
``'amplitude'`` uses the amplitude
c (float): value of the phase of all displacement gates if ``execution='amplitude'``, or
the amplitude of all displacement gates if ``execution='phase'``
Raises:
ValueError: if ``features`` or ``wires`` is invalid or if ``method`` is unknown
"""
if not isinstance(wires, Iterable):
raise ValueError("Wires needs to be a list of wires that the embedding uses; got {}.".format(wires))
if len(wires) < len(features):
raise ValueError("Number of features to embed cannot be larger than number of wires, which is {}; "
"got {}.".format(len(wires), len(features)))
for idx, f in enumerate(features):
if method == 'amplitude':
Displacement(f, c, wires=wires[idx])
elif method == 'phase':
Displacement(c, f, wires=wires[idx])
else:
raise ValueError("Execution method '{}' not known. Has to be 'phase' or 'amplitude'.".format(method))
|
55,471 | def run_deployment(
deployment_name: str,
max_polls: int = 60,
poll_interval: float = 5,
parameters: dict = None,
):
"""
Runs a deployment immediately.
This function will block until the deployment run enters a terminal state or until
the polling duration has been exceeded.
"""
with _minimal_client() as client:
body = {"parameters": parameters}
flow_run_res = client.post(
f"/deployments/name/{deployment_name}/schedule_flow_run",
json=body,
)
flow_run_id = flow_run_res.json()
for poll in range(max_polls):
time.sleep(poll_interval)
try:
flow_run = client.get(f"/flow_runs/{flow_run_id}")
flow_state = flow_run.json()["state"]["type"]
except KeyError:
raise MissingFlowRunError("Error polling flow run")
if flow_state in TERMINAL_STATE_STRINGS:
return flow_state
raise DeploymentTimeout(
f"Deployment run did not terminate and is in the {flow_state} state"
)
| def run_deployment(
deployment_name: str,
max_polls: int = 60,
poll_interval: float = 5,
parameters: dict = None,
):
"""
Runs a deployment immediately.
This function will block until the deployment run enters a terminal state or until
the polling duration has been exceeded. If max_polls is not a positive integer,
flow run is triggered without waiting for completion (fire-and-forget).
"""
with _minimal_client() as client:
body = {"parameters": parameters}
flow_run_res = client.post(
f"/deployments/name/{deployment_name}/schedule_flow_run",
json=body,
)
flow_run_id = flow_run_res.json()
if max_polls <= 0:
pass
else:
for poll in range(max_polls):
time.sleep(poll_interval)
try:
flow_run = client.get(f"/flow_runs/{flow_run_id}")
flow_state = flow_run.json()["state"]["type"]
except KeyError:
raise MissingFlowRunError("Error polling flow run")
if flow_state in TERMINAL_STATE_STRINGS:
return flow_state
raise DeploymentTimeout(
f"Deployment run did not terminate and is in the {flow_state} state"
)
|
54,262 | def get_editor() -> Optional[List[str]]:
editor_line = os.environ.get('VISUAL') or os.environ.get('EDITOR')
if editor_line:
return editor_line.split(' ')
for editor in ('vim', 'nano', 'mcedit', 'edit', 'emacs', 'e3', 'atom', 'adie', 'dedit', 'gedit', 'jedit', 'kate', 'kwrite', 'leafpad', 'mousepad', 'notepadqq', 'pluma', 'code', 'xed', 'kak', 'nvim', 'nvim-qt', 'geany'):
path = shutil.which(editor)
if path:
return [path, ]
return None
| def get_editor() -> Optional[List[str]]:
editor_line = os.environ.get('VISUAL') or os.environ.get('EDITOR')
if editor_line:
return editor_line.split(' ')
for editor in (
'vim', 'nano', 'mcedit', 'edit', 'emacs',
'e3', 'atom', 'adie', 'dedit', 'gedit', 'jedit', 'kate', 'kwrite', 'leafpad', 'mousepad',
'notepadqq', 'pluma', 'code', 'xed', 'kak', 'nvim', 'nvim-qt', 'geany',
):
path = shutil.which(editor)
if path:
return [path, ]
return None
|
30,808 | def update_user_command(client, args):
"""
Update user using PUT to Envoy API , if Connection to the service is successful.
Args: demisto command line argument
client: Envoy
Returns:
success : success=True, id, email, login as username, details, active status
fail : success=False, id, login as username, errorCod, errorMessage, details
"""
old_scim = verify_and_load_scim_data(args.get('oldScim'))
new_scim = verify_and_load_scim_data(args.get('newScim'))
format_pre_text = 'Update'
return process_update_command(client, args, old_scim, new_scim, format_pre_text)
| def update_user_command(client, args):
"""
Update user using PUT to Envoy API , if Connection to the service is successful.
Args: demisto command line argument
client: Envoy
Returns:
success : success=True, id, email, login as username, details, active status
fail : success=False, id, login as username, errorCod, errorMessage, details
"""
old_scim = verify_and_load_scim_data(args.get('oldScim'))
new_scim = verify_and_load_scim_data(args.get('newScim'))
command_name = 'Update'
return process_update_command(client, args, old_scim, new_scim, format_pre_text)
|
31,435 | def main():
try:
args = demisto.args()
pwd_generation_script = args.get("pwdGenerationScript")
username = args.get("sAMAccountName")
user_email = args.get("email")
display_name = args.get("displayname")
to_email = args.get("to_email")
# Generate a random password
outputs = demisto.executeCommand(pwd_generation_script, {})
password_dict = demisto.get(outputs[0], 'Contents')
password = password_dict.get("NEW_PASSWORD")
# set a new password
ad_create_user_arguments = {
'username': username,
'password': 'Test123!@#', # password,
'attribute-name': 'pwdLastSet',
'attribute-value': -1
}
flow_worked = True
password_outputs = demisto.executeCommand("ad-set-new-password", ad_create_user_arguments)
if is_error(password_outputs):
flow_worked = False
return_results(password_outputs)
update_outputs = demisto.executeCommand("ad-update-user", ad_create_user_arguments)
if is_error(update_outputs):
flow_worked = False
return_results(update_outputs)
if flow_worked:
send_email(display_name, username, user_email, password, to_email)
return_results("User was enabled and a password was set.")
else:
return_results("Some commands failed, please check the errors.")
except Exception as e:
return_error(str(e))
| def main():
try:
args = demisto.args()
pwd_generation_script = args.get("pwdGenerationScript")
username = args.get("sAMAccountName")
user_email = args.get("email")
display_name = args.get("displayname")
to_email = args.get("to_email")
# Generate a random password
outputs = demisto.executeCommand(pwd_generation_script, {})
password_dict = demisto.get(outputs[0], 'Contents')
password = password_dict.get("NEW_PASSWORD")
# set a new password
ad_create_user_arguments = {
'username': username,
'password': password,
'attribute-name': 'pwdLastSet',
'attribute-value': -1
}
flow_worked = True
password_outputs = demisto.executeCommand("ad-set-new-password", ad_create_user_arguments)
if is_error(password_outputs):
flow_worked = False
return_results(password_outputs)
update_outputs = demisto.executeCommand("ad-update-user", ad_create_user_arguments)
if is_error(update_outputs):
flow_worked = False
return_results(update_outputs)
if flow_worked:
send_email(display_name, username, user_email, password, to_email)
return_results("User was enabled and a password was set.")
else:
return_results("Some commands failed, please check the errors.")
except Exception as e:
return_error(str(e))
|
17,459 | def test_dataset_groupby():
data = Dataset(
{"z": (["x", "y"], np.random.randn(3, 5))},
{"x": ("x", list("abc")), "c": ("x", [0, 1, 0]), "y": range(5)},
)
groupby = data.groupby("x")
assert len(groupby) == 3
expected_groups = {"a": 0, "b": 1, "c": 2}
assert groupby.groups == expected_groups
expected_items = [
("a", data.isel(x=0)),
("b", data.isel(x=1)),
("c", data.isel(x=2)),
]
for actual, expected in zip(groupby, expected_items):
assert actual[0] == expected[0]
assert_equal(actual[1], expected[1])
def identity(x):
return x
for k in ["x", "c", "y"]:
actual = data.groupby(k, squeeze=False).map(identity)
assert_equal(data, actual)
| def test_groupby_dataset():
data = Dataset(
{"z": (["x", "y"], np.random.randn(3, 5))},
{"x": ("x", list("abc")), "c": ("x", [0, 1, 0]), "y": range(5)},
)
groupby = data.groupby("x")
assert len(groupby) == 3
expected_groups = {"a": 0, "b": 1, "c": 2}
assert groupby.groups == expected_groups
expected_items = [
("a", data.isel(x=0)),
("b", data.isel(x=1)),
("c", data.isel(x=2)),
]
for actual, expected in zip(groupby, expected_items):
assert actual[0] == expected[0]
assert_equal(actual[1], expected[1])
def identity(x):
return x
for k in ["x", "c", "y"]:
actual = data.groupby(k, squeeze=False).map(identity)
assert_equal(data, actual)
|
52,276 | def get_parser():
# Initialize the parser
parser = argparse.ArgumentParser(
description=(
"This program segments automatically the spinal cord on T1- and T2-weighted images, for any field of view. "
"You must provide the type of contrast, the image as well as the output folder path. The segmentation "
"follows the spinal cord centerline, which is provided by an automatic tool: Optic. The initialization of "
"the segmentation is made on the median slice of the centerline, and can be ajusted using the -init "
"parameter. The initial radius of the tubular mesh that will be propagated should be adapted to size of "
"the spinal cord on the initial propagation slice. \n"
"\n"
"Primary output is the binary mask of the spinal cord segmentation. This method must provide VTK "
"triangular mesh of the segmentation (option -mesh). Spinal cord centerline is available as a binary image "
"(-centerline-binary) or a text file with coordinates in world referential (-centerline-coord).\n"
"\n"
"Cross-sectional areas along the spinal cord can be available (-cross). Several tips on segmentation "
"correction can be found on the 'Correcting sct_propseg' page in the Tutorials section of the "
"documentation.\n"
"\n"
"If the segmentation fails at some location (e.g. due to poor contrast between spinal cord and CSF), edit "
"your anatomical image (e.g. with fslview) and manually enhance the contrast by adding bright values "
"around the spinal cord for T2-weighted images (dark values for T1-weighted). Then, launch the "
"segmentation again.\n"
"\n"
"References:\n"
" - De Leener B, Kadoury S, Cohen-Adad J. Robust, accurate and fast automatic segmentation of the spinal "
"cord. Neuroimage 98, 2014. pp 528-536. DOI: 10.1016/j.neuroimage.2014.04.051\n"
" - De Leener B, Cohen-Adad J, Kadoury S. Automatic segmentation of the spinal cord and spinal canal "
"coupled with vertebral labeling. Medical Imaging, IEEE Transactions on (in press). "
"DOI: 10.1109/TMI.2015.2437192"
),
formatter_class=SmartFormatter,
add_help=None,
prog=os.path.basename(__file__).strip(".py")
)
mandatory = parser.add_argument_group("\nMANDATORY ARGUMENTS")
mandatory.add_argument(
'-i',
metavar=Metavar.file,
required=True,
help="Input image. Example: ti.nii.gz"
)
mandatory.add_argument(
'-c',
choices=['t1', 't2', 't2s', 'dwi'],
required=True,
help="Type of image contrast. If your contrast is not in the available options (t1, t2, t2s, dwi), use "
"t1 (cord bright / CSF dark) or t2 (cord dark / CSF bright)"
)
optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit."
)
optional.add_argument(
'-ofolder',
metavar=Metavar.folder,
action=ActionCreateFolder,
help="Output folder."
)
optional.add_argument(
'-down',
metavar=Metavar.int,
type=int,
help="Down limit of the propagation. Default is 0."
)
optional.add_argument(
'-up',
metavar=Metavar.int,
type=int,
help="Up limit of the propagation. Default is the highest slice of the image."
)
optional.add_argument(
'-r',
metavar=Metavar.int,
type=int,
choices=[0, 1],
default=1,
help="Whether to remove temporary files. 0 = no, 1 = yes"
)
optional.add_argument(
'-v',
choices=['0', '1'],
default='1',
help="Verbose. 1: display on, 0: display off (default)"
)
optional.add_argument(
'-mesh',
action="store_true",
help="Output: mesh of the spinal cord segmentation"
)
optional.add_argument(
'-centerline-binary',
action="store_true",
help="Output: centerline as a binary image."
)
optional.add_argument(
'-CSF',
action="store_true",
help="Output: CSF segmentation."
)
optional.add_argument(
'-centerline-coord',
action="store_true",
help="Output: centerline in world coordinates."
)
optional.add_argument(
'-cross',
action="store_true",
help="Output: cross-sectional areas."
)
optional.add_argument(
'-init-tube',
action="store_true",
help="Output: initial tubular meshes."
)
optional.add_argument(
'-low-resolution-mesh',
action="store_true",
help="Output: low-resolution mesh."
)
optional.add_argument(
'-init-centerline',
metavar=Metavar.file,
help="R|Filename of centerline to use for the propagation. Use format .txt or .nii; see file structure in "
"documentation.\n"
"Replace filename by 'viewer' to use interactive viewer for providing centerline. Example: "
"-init-centerline viewer"
)
optional.add_argument(
'-init',
metavar=Metavar.float,
type=float,
help="Axial slice where the propagation starts, default is middle axial slice."
)
optional.add_argument(
'-init-mask',
metavar=Metavar.file,
help="R|Mask containing three center of the spinal cord, used to initiate the propagation.\n"
"Replace filename by 'viewer' to use interactive viewer for providing mask. Example: -init-mask viewer"
)
optional.add_argument(
'-mask-correction',
metavar=Metavar.file,
help="mask containing binary pixels at edges of the spinal cord on which the segmentation algorithm will be "
"forced to register the surface. Can be used in case of poor/missing contrast between spinal cord and "
"CSF or in the presence of artefacts/pathologies."
)
optional.add_argument(
'-rescale',
metavar=Metavar.float,
type=float,
default=1.0,
help="Rescale the image (only the header, not the data) in order to enable segmentation on spinal cords with "
"dimensions different than that of humans (e.g., mice, rats, elephants, etc.). For example, if the "
"spinal cord is 2x smaller than that of human, then use -rescale 2"
)
optional.add_argument(
'-radius',
metavar=Metavar.float,
type=float,
help="Approximate radius (in mm) of the spinal cord. Default is 4."
)
optional.add_argument(
'-nbiter',
metavar=Metavar.int,
type=int,
help="Stop condition (affects only the Z propogation): number of iteration for the propagation for both "
"direction. Default is 200."
)
optional.add_argument(
'-max-area',
metavar=Metavar.float,
type=float,
help="[mm^2], stop condition (affects only the Z propogation): maximum cross-sectional area. Default is 120."
)
optional.add_argument(
'-max-deformation',
metavar=Metavar.float,
type=float,
help="[mm], stop condition (affects only the Z propogation): maximum deformation per iteration. Default is "
"2.5"
)
optional.add_argument(
'-min-contrast',
metavar=Metavar.float,
type=float,
help="[intensity value], stop condition (affects only the Z propogation): minimum local SC/CSF contrast, "
"default is 50"
)
optional.add_argument(
'-d',
metavar=Metavar.float,
type=float,
help="trade-off between distance of most promising point (d is high) and feature strength (d is low), "
"default depend on the contrast. Range of values from 0 to 50. 15-25 values show good results. Default "
"is 10."
)
optional.add_argument(
'-distance-search',
metavar=Metavar.float,
type=float,
help="maximum distance of optimal points computation along the surface normals. Range of values from 0 to 30. "
"Default is 15"
)
optional.add_argument(
'-alpha',
metavar=Metavar.float,
type=float,
help="Trade-off between internal (alpha is high) and external (alpha is low) forces. Range of values from 0 "
"to 50. Default is 25."
)
optional.add_argument(
'-qc',
metavar=Metavar.folder,
action=ActionCreateFolder,
help="The path where the quality control generated content will be saved."
)
optional.add_argument(
'-qc-dataset',
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the dataset the process was run on."
)
optional.add_argument(
'-qc-subject',
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the subject the process was run on."
)
optional.add_argument(
'-correct-seg',
metavar=Metavar.int,
type=int,
choices=[0, 1],
default=1,
help="Enable (1) or disable (0) the algorithm that checks and correct the output segmentation. More "
"specifically, the algorithm checks if the segmentation is consistent with the centerline provided by "
"isct_propseg."
)
optional.add_argument(
'-igt',
metavar=Metavar.file,
help="File name of ground-truth segmentation."
)
return parser
| def get_parser():
# Initialize the parser
parser = argparse.ArgumentParser(
description=(
"This program segments automatically the spinal cord on T1- and T2-weighted images, for any field of view. "
"You must provide the type of contrast, the image as well as the output folder path. The segmentation "
"follows the spinal cord centerline, which is provided by an automatic tool: Optic. The initialization of "
"the segmentation is made on the median slice of the centerline, and can be ajusted using the -init "
"parameter. The initial radius of the tubular mesh that will be propagated should be adapted to size of "
"the spinal cord on the initial propagation slice. \n"
"\n"
"Primary output is the binary mask of the spinal cord segmentation. This method must provide VTK "
"triangular mesh of the segmentation (option -mesh). Spinal cord centerline is available as a binary image "
"(-centerline-binary) or a text file with coordinates in world referential (-centerline-coord).\n"
"\n"
"Cross-sectional areas along the spinal cord can be available (-cross). Several tips on segmentation "
"correction can be found on the 'Correcting sct_propseg' page in the Tutorials section of the "
"documentation.\n"
"\n"
"If the segmentation fails at some location (e.g. due to poor contrast between spinal cord and CSF), edit "
"your anatomical image (e.g. with fslview) and manually enhance the contrast by adding bright values "
"around the spinal cord for T2-weighted images (dark values for T1-weighted). Then, launch the "
"segmentation again.\n"
"\n"
"References:\n"
" - [De Leener B, Kadoury S, Cohen-Adad J. Robust, accurate and fast automatic segmentation of the spinal "
"cord. Neuroimage 98, 2014. pp 528-536. DOI: 10.1016/j.neuroimage.2014.04.051](https://pubmed.ncbi.nlm.nih.gov/24780696/)\n"
" - De Leener B, Cohen-Adad J, Kadoury S. Automatic segmentation of the spinal cord and spinal canal "
"coupled with vertebral labeling. Medical Imaging, IEEE Transactions on (in press). "
"DOI: 10.1109/TMI.2015.2437192"
),
formatter_class=SmartFormatter,
add_help=None,
prog=os.path.basename(__file__).strip(".py")
)
mandatory = parser.add_argument_group("\nMANDATORY ARGUMENTS")
mandatory.add_argument(
'-i',
metavar=Metavar.file,
required=True,
help="Input image. Example: ti.nii.gz"
)
mandatory.add_argument(
'-c',
choices=['t1', 't2', 't2s', 'dwi'],
required=True,
help="Type of image contrast. If your contrast is not in the available options (t1, t2, t2s, dwi), use "
"t1 (cord bright / CSF dark) or t2 (cord dark / CSF bright)"
)
optional = parser.add_argument_group("\nOPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit."
)
optional.add_argument(
'-ofolder',
metavar=Metavar.folder,
action=ActionCreateFolder,
help="Output folder."
)
optional.add_argument(
'-down',
metavar=Metavar.int,
type=int,
help="Down limit of the propagation. Default is 0."
)
optional.add_argument(
'-up',
metavar=Metavar.int,
type=int,
help="Up limit of the propagation. Default is the highest slice of the image."
)
optional.add_argument(
'-r',
metavar=Metavar.int,
type=int,
choices=[0, 1],
default=1,
help="Whether to remove temporary files. 0 = no, 1 = yes"
)
optional.add_argument(
'-v',
choices=['0', '1'],
default='1',
help="Verbose. 1: display on, 0: display off (default)"
)
optional.add_argument(
'-mesh',
action="store_true",
help="Output: mesh of the spinal cord segmentation"
)
optional.add_argument(
'-centerline-binary',
action="store_true",
help="Output: centerline as a binary image."
)
optional.add_argument(
'-CSF',
action="store_true",
help="Output: CSF segmentation."
)
optional.add_argument(
'-centerline-coord',
action="store_true",
help="Output: centerline in world coordinates."
)
optional.add_argument(
'-cross',
action="store_true",
help="Output: cross-sectional areas."
)
optional.add_argument(
'-init-tube',
action="store_true",
help="Output: initial tubular meshes."
)
optional.add_argument(
'-low-resolution-mesh',
action="store_true",
help="Output: low-resolution mesh."
)
optional.add_argument(
'-init-centerline',
metavar=Metavar.file,
help="R|Filename of centerline to use for the propagation. Use format .txt or .nii; see file structure in "
"documentation.\n"
"Replace filename by 'viewer' to use interactive viewer for providing centerline. Example: "
"-init-centerline viewer"
)
optional.add_argument(
'-init',
metavar=Metavar.float,
type=float,
help="Axial slice where the propagation starts, default is middle axial slice."
)
optional.add_argument(
'-init-mask',
metavar=Metavar.file,
help="R|Mask containing three center of the spinal cord, used to initiate the propagation.\n"
"Replace filename by 'viewer' to use interactive viewer for providing mask. Example: -init-mask viewer"
)
optional.add_argument(
'-mask-correction',
metavar=Metavar.file,
help="mask containing binary pixels at edges of the spinal cord on which the segmentation algorithm will be "
"forced to register the surface. Can be used in case of poor/missing contrast between spinal cord and "
"CSF or in the presence of artefacts/pathologies."
)
optional.add_argument(
'-rescale',
metavar=Metavar.float,
type=float,
default=1.0,
help="Rescale the image (only the header, not the data) in order to enable segmentation on spinal cords with "
"dimensions different than that of humans (e.g., mice, rats, elephants, etc.). For example, if the "
"spinal cord is 2x smaller than that of human, then use -rescale 2"
)
optional.add_argument(
'-radius',
metavar=Metavar.float,
type=float,
help="Approximate radius (in mm) of the spinal cord. Default is 4."
)
optional.add_argument(
'-nbiter',
metavar=Metavar.int,
type=int,
help="Stop condition (affects only the Z propogation): number of iteration for the propagation for both "
"direction. Default is 200."
)
optional.add_argument(
'-max-area',
metavar=Metavar.float,
type=float,
help="[mm^2], stop condition (affects only the Z propogation): maximum cross-sectional area. Default is 120."
)
optional.add_argument(
'-max-deformation',
metavar=Metavar.float,
type=float,
help="[mm], stop condition (affects only the Z propogation): maximum deformation per iteration. Default is "
"2.5"
)
optional.add_argument(
'-min-contrast',
metavar=Metavar.float,
type=float,
help="[intensity value], stop condition (affects only the Z propogation): minimum local SC/CSF contrast, "
"default is 50"
)
optional.add_argument(
'-d',
metavar=Metavar.float,
type=float,
help="trade-off between distance of most promising point (d is high) and feature strength (d is low), "
"default depend on the contrast. Range of values from 0 to 50. 15-25 values show good results. Default "
"is 10."
)
optional.add_argument(
'-distance-search',
metavar=Metavar.float,
type=float,
help="maximum distance of optimal points computation along the surface normals. Range of values from 0 to 30. "
"Default is 15"
)
optional.add_argument(
'-alpha',
metavar=Metavar.float,
type=float,
help="Trade-off between internal (alpha is high) and external (alpha is low) forces. Range of values from 0 "
"to 50. Default is 25."
)
optional.add_argument(
'-qc',
metavar=Metavar.folder,
action=ActionCreateFolder,
help="The path where the quality control generated content will be saved."
)
optional.add_argument(
'-qc-dataset',
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the dataset the process was run on."
)
optional.add_argument(
'-qc-subject',
metavar=Metavar.str,
help="If provided, this string will be mentioned in the QC report as the subject the process was run on."
)
optional.add_argument(
'-correct-seg',
metavar=Metavar.int,
type=int,
choices=[0, 1],
default=1,
help="Enable (1) or disable (0) the algorithm that checks and correct the output segmentation. More "
"specifically, the algorithm checks if the segmentation is consistent with the centerline provided by "
"isct_propseg."
)
optional.add_argument(
'-igt',
metavar=Metavar.file,
help="File name of ground-truth segmentation."
)
return parser
|
20,537 | def func_median(data, mask=None, map_clusters=None):
"""
Compute weighted median.
Code inspired from: https://gist.github.com/tinybike/d9ff1dad515b66cc0d87
:param data: nd-array: input data
:param mask: (n+1)d-array: input mask
:param map_clusters: not used
:return:
"""
# Check if mask has an additional dimension (in case it is a label). If so, select the first label
if mask.ndim == data.ndim + 1:
mask = mask[..., 0]
data, mask = data.reshape(-1), mask.reshape(-1)
s_data, s_mask = map(np.array, zip(*sorted(zip(data, mask))))
midpoint = 0.5 * sum(s_mask)
if any(mask > midpoint):
w_median = (data[mask == np.max(mask)])[0]
else:
cs_mask = np.cumsum(s_mask)
idx = np.where(cs_mask <= midpoint)[0][-1]
if cs_mask[idx] == midpoint:
w_median = np.mean(s_data[idx:idx + 2])
else:
w_median = s_data[idx + 1]
return w_median, None
| def func_median(data, mask=None, map_clusters=None):
"""
Compute weighted median.
Code inspired from: https://gist.github.com/tinybike/d9ff1dad515b66cc0d87
:param data: nd-array: input data
:param mask: (n+1)d-array: input mask
:param map_clusters: not used
:return:
"""
# Check if mask has an additional dimension (in case it is a label). If so, select the first label
if mask.ndim == data.ndim + 1:
mask = mask[..., 0]
data, mask = data.reshape(-1), mask.reshape(-1)
w_median = wquantiles.median(data=data, weights=mask)
return w_median, None
|
6,412 | def invoice_appointment(appointment_doc):
automate_invoicing = frappe.db.get_single_value('Healthcare Settings', 'automate_appointment_invoicing')
appointment_invoiced = frappe.db.get_value('Patient Appointment', appointment_doc.name, 'invoiced')
enable_free_follow_ups = frappe.db.get_single_value('Healthcare Settings', 'enable_free_follow_ups')
if enable_free_follow_ups:
fee_validity = check_fee_validity(appointment_doc)
if fee_validity.status == 'Completed':
fee_validity = None
elif not fee_validity:
if frappe.db.exists('Fee Validity Reference', {'appointment': appointment_doc.name}):
return
if check_is_new_patient(appointment_doc.patient, appointment_doc.name):
return
else:
fee_validity = None
if automate_invoicing and not appointment_invoiced and not fee_validity:
sales_invoice = frappe.new_doc('Sales Invoice')
sales_invoice.customer = frappe.get_value('Patient', appointment_doc.patient, 'customer')
sales_invoice.appointment = appointment_doc.name
sales_invoice.due_date = getdate()
sales_invoice.is_pos = True
sales_invoice.company = appointment_doc.company
sales_invoice.debit_to = get_receivable_account(appointment_doc.company)
item = sales_invoice.append('items', {})
item = get_appointment_item(appointment_doc, item)
payment = sales_invoice.append('payments', {})
payment.mode_of_payment = appointment_doc.mode_of_payment
payment.amount = appointment_doc.paid_amount
sales_invoice.set_missing_values(for_validate=True)
sales_invoice.save(ignore_permissions=True)
sales_invoice.submit()
frappe.msgprint(_('Sales Invoice {0} created as paid'.format(sales_invoice.name)), alert=True)
frappe.db.set_value('Patient Appointment', appointment_doc.name, 'invoiced', 1)
frappe.db.set_value('Patient Appointment', appointment_doc.name, 'ref_sales_invoice', sales_invoice.name)
| def invoice_appointment(appointment_doc):
automate_invoicing = frappe.db.get_single_value('Healthcare Settings', 'automate_appointment_invoicing')
appointment_invoiced = frappe.db.get_value('Patient Appointment', appointment_doc.name, 'invoiced')
enable_free_follow_ups = frappe.db.get_single_value('Healthcare Settings', 'enable_free_follow_ups')
if enable_free_follow_ups:
fee_validity = check_fee_validity(appointment_doc)
if fee_validity.status == 'Completed':
fee_validity = None
elif not fee_validity:
if frappe.db.exists('Fee Validity Reference', {'appointment': appointment_doc.name}):
return
if check_is_new_patient(appointment_doc.patient, appointment_doc.name):
return
else:
fee_validity = None
if automate_invoicing and not appointment_invoiced and not fee_validity:
sales_invoice = frappe.new_doc('Sales Invoice')
sales_invoice.customer = frappe.get_value('Patient', appointment_doc.patient, 'customer')
sales_invoice.appointment = appointment_doc.name
sales_invoice.due_date = getdate()
sales_invoice.is_pos = 1
sales_invoice.company = appointment_doc.company
sales_invoice.debit_to = get_receivable_account(appointment_doc.company)
item = sales_invoice.append('items', {})
item = get_appointment_item(appointment_doc, item)
payment = sales_invoice.append('payments', {})
payment.mode_of_payment = appointment_doc.mode_of_payment
payment.amount = appointment_doc.paid_amount
sales_invoice.set_missing_values(for_validate=True)
sales_invoice.save(ignore_permissions=True)
sales_invoice.submit()
frappe.msgprint(_('Sales Invoice {0} created as paid'.format(sales_invoice.name)), alert=True)
frappe.db.set_value('Patient Appointment', appointment_doc.name, 'invoiced', 1)
frappe.db.set_value('Patient Appointment', appointment_doc.name, 'ref_sales_invoice', sales_invoice.name)
|
40,544 | def get_custom_locations_oid(cmd, cl_oid):
try:
sp_graph_client = get_graph_client_service_principals(cmd.cli_ctx)
sub_filters = []
sub_filters.append("displayName eq '{}'".format("Custom Locations RP"))
result = list(sp_graph_client.list(filter=(' and '.join(sub_filters))))
if len(result) != 0:
if cl_oid is not None and cl_oid != result[0].object_id:
logger.warning("The 'Custom-locations' OID passed is different from the actual OID({}) of the Custom Locations RP app. Proceeding with the correct one...".format(result[0].object_id))
return result[0].object_id # Using the fetched OID
if cl_oid is None:
logger.warning("Unable to fetch oid of 'custom-locations' app. Proceeding without enabling the feature.")
telemetry.set_exception(exception='Unable to fetch oid of custom locations app.', fault_type=consts.Custom_Locations_OID_Fetch_Fault_Type,
summary='Unable to fetch oid for custom locations app.')
return ""
else:
return cl_oid
except Exception as e:
log_string = "Unable to fetch oid of 'custom-locations' app. "
telemetry.set_exception(exception=e, fault_type=consts.Custom_Locations_OID_Fetch_Fault_Type,
summary='Unable to fetch oid for custom locations app.')
if cl_oid:
log_string += "Proceeding with the OID passed to enable the 'custom-locations' feature."
logger.warning(log_string)
return cl_oid
log_string += "Proceeding without enabling the feature. " + str(e)
logger.warning(log_string)
return ""
| def get_custom_locations_oid(cmd, cl_oid):
try:
sp_graph_client = get_graph_client_service_principals(cmd.cli_ctx)
sub_filters = []
sub_filters.append("displayName eq '{}'".format("Custom Locations RP"))
result = list(sp_graph_client.list(filter=(' and '.join(sub_filters))))
if len(result) != 0:
if cl_oid is not None and cl_oid != result[0].object_id:
logger.warning("The 'Custom-locations' OID passed is different from the actual OID({}) of the Custom Locations RP app. Proceeding with the correct one...".format(result[0].object_id))
return result[0].object_id # Using the fetched OID
if cl_oid is None:
logger.warning("Unable to fetch oid of 'custom-locations' app. Proceeding without enabling the feature.")
telemetry.set_exception(exception='Unable to fetch oid of custom locations app.', fault_type=consts.Custom_Locations_OID_Fetch_Fault_Type,
summary='Unable to fetch oid for custom locations app.')
return ""
else:
return cl_oid
except Exception as e:
log_string = "Unable to fetch oid of 'custom-locations' app. "
telemetry.set_exception(exception=e, fault_type=consts.Custom_Locations_OID_Fetch_Fault_Type,
summary='Unable to fetch oid for custom locations app.')
if cl_oid:
log_string += "Proceeding with the OID passed to enable the 'custom-locations' feature."
logger.warning(log_string)
return cl_oid
log_string += "Unable to enable the 'custom-locations' feature." + str(e)
logger.warning(log_string)
return ""
|
31,523 | def get_report_command(args: dict):
"""
pingcastle-get-report command: Returns the last report sent by PingCastle
Args:
args (dict): A dict object containing the arguments for this command
"""
delete_report = args.get('delete_report') == 'Yes'
context = demisto.getIntegrationContext()
report = context.get('report')
if report is None:
return 'No report available'
if delete_report:
context.pop('report')
demisto.setIntegrationContext(context)
return CommandResults(
outputs_prefix='PingCastle.Report',
outputs={'report': report},
raw_response=report
)
| def get_report_command(args: dict):
"""
pingcastle-get-report command: Returns the last report sent by PingCastle
Args:
args (dict): A dict object containing the arguments for this command
"""
delete_report = args.get('delete_report') == 'Yes'
context = get_integration_context()
report = context.get('report')
if report is None:
return 'No report available'
if delete_report:
context.pop('report')
demisto.setIntegrationContext(context)
return CommandResults(
outputs_prefix='PingCastle.Report',
outputs={'report': report},
raw_response=report
)
|
1,358 | def plot_dendrogram(model, **kwargs):
# Create linkage matrix and then plot the dendrogram
# create the counts of samples in each node
counts = np.zeros(model.children_.shape[0])
n_samples = len(model.labels_)
for i, merge in enumerate(model.children_):
current_count = 0
for j in [0, 1]:
if merge[j] < n_samples:
current_count += 1
else:
current_count += counts[merge[j] - n_samples]
counts[i] = current_count
linkage_matrix = np.column_stack([model.children_, model.distances_,
counts]).astype(float)
# Plot the corresponding dendrogram
dendrogram(linkage_matrix, **kwargs)
| def plot_dendrogram(model, **kwargs):
# Create linkage matrix and then plot the dendrogram
# create the counts of samples under each node
counts = np.zeros(model.children_.shape[0])
n_samples = len(model.labels_)
for i, merge in enumerate(model.children_):
current_count = 0
for j in [0, 1]:
if merge[j] < n_samples:
current_count += 1
else:
current_count += counts[merge[j] - n_samples]
counts[i] = current_count
linkage_matrix = np.column_stack([model.children_, model.distances_,
counts]).astype(float)
# Plot the corresponding dendrogram
dendrogram(linkage_matrix, **kwargs)
|
27,876 | def n_step_gru(
n_layers, dropout_ratio, hx, ws, bs, xs, **kwargs):
"""n_step_gru(n_layers, dropout_ratio, hx, ws, bs, xs)
Stacked Uni-directional Gated Recurrent Unit function.
This function calculates stacked Uni-directional GRU with sequences.
This function gets an initial hidden state :math:`h_0`, an input
sequence :math:`x`, weight matrices :math:`W`, and bias vectors :math:`b`.
This function calculates hidden states :math:`h_t` for each time :math:`t`
from input :math:`x_t`.
.. math::
r_t &= \\sigma(W_0 x_t + W_3 h_{t-1} + b_0 + b_3) \\\\
z_t &= \\sigma(W_1 x_t + W_4 h_{t-1} + b_1 + b_4) \\\\
h'_t &= \\tanh(W_2 x_t + b_2 + r_t \\cdot (W_5 h_{t-1} + b_5)) \\\\
h_t &= (1 - z_t) \\cdot h'_t + z_t \\cdot h_{t-1}
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Six weight matrices and six bias vectors are
required for each layers. So, when :math:`S` layers exists, you need to
prepare :math:`6S` weight matrices and :math:`6S` bias vectors.
If the number of layers ``n_layers`` is greather than :math:`1`, input
of ``k``-th layer is hidden state ``h_t`` of ``k-1``-th layer.
Note that all input variables except first layer may have different shape
from the first layer.
Args:
n_layers(int): Number of layers.
dropout_ratio(float): Dropout ratio.
hx (:class:`~chainer.Variable`):
Variable holding stacked hidden states.
Its shape is ``(S, B, N)`` where ``S`` is number of layers and is
equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
dimension of hidden units.
ws (list of list of :class:`~chainer.Variable`): Weight matrices.
``ws[i]`` represents weights for i-th layer.
Each ``ws[i]`` is a list containing six matrices.
``ws[i][j]`` is corresponding with ``W_j`` in the equation.
Only ``ws[0][j]`` where ``0 <= j < 3`` is ``(I, N)`` shape as they
are multiplied with input variables. All other matrices has
``(N, N)`` shape.
bs (list of list of :class:`~chainer.Variable`): Bias vectors.
``bs[i]`` represnents biases for i-th layer.
Each ``bs[i]`` is a list containing six vectors.
``bs[i][j]`` is corresponding with ``b_j`` in the equation.
Shape of each matrix is ``(N,)`` where ``N`` is dimension of
hidden units.
xs (list of :class:`~chainer.Variable`):
A list of :class:`~chainer.Variable`
holding input values. Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is
mini-batch size for time ``t``, and ``I`` is size of input units.
Note that this function supports variable length sequences.
When sequneces has different lengths, sort sequences in descending
order by length, and transpose the sorted sequence.
:func:`~chainer.functions.transpose_sequence` transpose a list
of :func:`~chainer.Variable` holding sequence.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
Returns:
tuple: This function returns a tuple containing three elements,
``hy`` and ``ys``.
- ``hy`` is an updated hidden states whose shape is same as ``hx``.
- ``ys`` is a list of :class:`~chainer.Variable` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
mini-batch size for time ``t``, and ``N`` is size of hidden
units. Note that ``B_t`` is the same value as ``xs[t]``.
"""
return n_step_gru_base(n_layers, dropout_ratio, hx, ws, bs, xs,
use_bi_direction=False, **kwargs)
| def n_step_gru(
n_layers, dropout_ratio, hx, ws, bs, xs, **kwargs):
"""n_step_gru(n_layers, dropout_ratio, hx, ws, bs, xs)
Stacked Uni-directional Gated Recurrent Unit function.
This function calculates stacked Uni-directional GRU with sequences.
This function gets an initial hidden state :math:`h_0`, an input
sequence :math:`x`, weight matrices :math:`W`, and bias vectors :math:`b`.
This function calculates hidden states :math:`h_t` for each time :math:`t`
from input :math:`x_t`.
.. math::
r_t &= \\sigma(W_0 x_t + W_3 h_{t-1} + b_0 + b_3) \\\\
z_t &= \\sigma(W_1 x_t + W_4 h_{t-1} + b_1 + b_4) \\\\
h'_t &= \\tanh(W_2 x_t + b_2 + r_t \\cdot (W_5 h_{t-1} + b_5)) \\\\
h_t &= (1 - z_t) \\cdot h'_t + z_t \\cdot h_{t-1}
As the function accepts a sequence, it calculates :math:`h_t` for all
:math:`t` with one call. Six weight matrices and six bias vectors are
required for each layers. So, when :math:`S` layers exists, you need to
prepare :math:`6S` weight matrices and :math:`6S` bias vectors.
If the number of layers ``n_layers`` is greather than :math:`1`, input
of ``k``-th layer is hidden state ``h_t`` of ``k-1``-th layer.
Note that all input variables except first layer may have different shape
from the first layer.
Args:
n_layers(int): Number of layers.
dropout_ratio(float): Dropout ratio.
hx (~chainer.Variable):
Variable holding stacked hidden states.
Its shape is ``(S, B, N)`` where ``S`` is number of layers and is
equal to ``n_layers``, ``B`` is mini-batch size, and ``N`` is
dimension of hidden units.
ws (list of list of :class:`~chainer.Variable`): Weight matrices.
``ws[i]`` represents weights for i-th layer.
Each ``ws[i]`` is a list containing six matrices.
``ws[i][j]`` is corresponding with ``W_j`` in the equation.
Only ``ws[0][j]`` where ``0 <= j < 3`` is ``(I, N)`` shape as they
are multiplied with input variables. All other matrices has
``(N, N)`` shape.
bs (list of list of :class:`~chainer.Variable`): Bias vectors.
``bs[i]`` represnents biases for i-th layer.
Each ``bs[i]`` is a list containing six vectors.
``bs[i][j]`` is corresponding with ``b_j`` in the equation.
Shape of each matrix is ``(N,)`` where ``N`` is dimension of
hidden units.
xs (list of :class:`~chainer.Variable`):
A list of :class:`~chainer.Variable`
holding input values. Each element ``xs[t]`` holds input value
for time ``t``. Its shape is ``(B_t, I)``, where ``B_t`` is
mini-batch size for time ``t``, and ``I`` is size of input units.
Note that this function supports variable length sequences.
When sequneces has different lengths, sort sequences in descending
order by length, and transpose the sorted sequence.
:func:`~chainer.functions.transpose_sequence` transpose a list
of :func:`~chainer.Variable` holding sequence.
So ``xs`` needs to satisfy
``xs[t].shape[0] >= xs[t + 1].shape[0]``.
Returns:
tuple: This function returns a tuple containing three elements,
``hy`` and ``ys``.
- ``hy`` is an updated hidden states whose shape is same as ``hx``.
- ``ys`` is a list of :class:`~chainer.Variable` . Each element
``ys[t]`` holds hidden states of the last layer corresponding
to an input ``xs[t]``. Its shape is ``(B_t, N)`` where ``B_t`` is
mini-batch size for time ``t``, and ``N`` is size of hidden
units. Note that ``B_t`` is the same value as ``xs[t]``.
"""
return n_step_gru_base(n_layers, dropout_ratio, hx, ws, bs, xs,
use_bi_direction=False, **kwargs)
|
24,867 | def do_somtest_finds_short_name_exceptionething():
"""Do something.
Raises:
~fake_package.exceptions.BadError: When something bad happened.
"""
raise BadError("A bad thing happened.")
| def test_finds_short_name_exception():
"""Do something.
Raises:
~fake_package.exceptions.BadError: When something bad happened.
"""
raise BadError("A bad thing happened.")
|
47,474 | def main():
args = parse_args()
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
# If we're using tracking, we also need to initialize it here and it will pick up all supported trackers in the environment
if args.with_tracking:
accelerator = Accelerator(log_with="all")
else:
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.push_to_hub:
if args.hub_model_id is None:
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
repo = Repository(args.output_dir, clone_from=repo_name)
elif args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
accelerator.wait_for_everyone()
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
args.dataset_name,
args.dataset_config_name,
split=f"train[:{args.validation_split_percentage}%]",
)
raw_datasets["train"] = load_dataset(
args.dataset_name,
args.dataset_config_name,
split=f"train[{args.validation_split_percentage}%:]",
)
else:
data_files = {}
dataset_args = {}
if args.train_file is not None:
data_files["train"] = args.train_file
if args.validation_file is not None:
data_files["validation"] = args.validation_file
extension = args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
dataset_args["keep_linebreaks"] = not args.no_keep_linebreaks
raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args)
# If no validation data is there, validation_split_percentage will be used to divide the dataset.
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
extension,
data_files=data_files,
split=f"train[:{args.validation_split_percentage}%]",
**dataset_args,
)
raw_datasets["train"] = load_dataset(
extension,
data_files=data_files,
split=f"train[{args.validation_split_percentage}%:]",
**dataset_args,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if args.config_name:
config = AutoConfig.from_pretrained(args.config_name)
elif args.model_name_or_path:
config = AutoConfig.from_pretrained(args.model_name_or_path)
else:
config = CONFIG_MAPPING[args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer)
elif args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if args.model_name_or_path:
model = AutoModelForCausalLM.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForCausalLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
column_names = raw_datasets["train"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
def tokenize_function(examples):
return tokenizer(examples[text_column_name])
with accelerator.main_process_first():
tokenized_datasets = raw_datasets.map(
tokenize_function,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on dataset",
)
if args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --block_size xxx."
)
block_size = 1024
else:
if args.block_size > tokenizer.model_max_length:
logger.warning(
f"The block_size passed ({args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(args.block_size, tokenizer.model_max_length)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
# for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
# to preprocess.
#
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
with accelerator.main_process_first():
lm_datasets = tokenized_datasets.map(
group_texts,
batched=True,
num_proc=args.preprocessing_num_workers,
load_from_cache_file=not args.overwrite_cache,
desc=f"Grouping texts in chunks of {block_size}",
)
train_dataset = lm_datasets["train"]
eval_dataset = lm_datasets["validation"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# DataLoaders creation:
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size
)
eval_dataloader = DataLoader(
eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size
)
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
# On TPU, the tie weights in our model have been disconnected, so we need to restore the ties.
if accelerator.distributed_type == DistributedType.TPU:
model.tie_weights()
# Note -> the training dataloader needs to be prepared before we grab its length below (cause its length will be
# shorter in multiprocess)
# Scheduler and math around the number of training steps.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
else:
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.max_train_steps,
)
# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# Figure out how many steps we should save the Accelerator states
if hasattr(args.checkpointing_steps, "isdigit"):
checkpointing_steps = args.checkpointing_steps
if args.checkpointing_steps.isdigit():
checkpointing_steps = int(args.checkpointing_steps)
else:
checkpointing_steps = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
accelerator.init_trackers("clm_no_trainer", args)
# Train!
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
completed_steps = 0
# Potentially load in the weights and states from a previous save
state_restored = True
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
accelerator.load_state(args.resume_from_checkpoint)
resume_step = None
else:
# Get the most recent checkpoint
dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
dirs.sort(key=os.path.getctime)
path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
if "epoch" in path.name:
num_epochs -= int(path.name.replace("epoch_", ""))
else:
resume_step = int(path.name.replace("step_", ""))
num_epochs -= resume_step // len(train_dataloader)
resume_step = (num_epochs * len(train_dataloader)) - resume_step
state_restored = False
for epoch in range(args.num_train_epochs):
model.train()
if args.with_tracking:
total_loss = 0
for step, batch in enumerate(train_dataloader):
# We need to skip steps until we reach the resumed step
if args.resume_from_checkpoint and epoch == 0 and step < resume_step:
continue
outputs = model(**batch)
loss = outputs.loss
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
loss = loss / args.gradient_accumulation_steps
accelerator.backward(loss)
if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
completed_steps += 1
if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
accelerator.save_state(f"step_{completed_steps}")
if completed_steps >= args.max_train_steps:
break
model.eval()
losses = []
for step, batch in enumerate(eval_dataloader):
with torch.no_grad():
outputs = model(**batch)
loss = outputs.loss
losses.append(accelerator.gather(loss.repeat(args.per_device_eval_batch_size)))
losses = torch.cat(losses)
losses = losses[: len(eval_dataset)]
try:
perplexity = math.exp(torch.mean(losses))
except OverflowError:
perplexity = float("inf")
logger.info(f"epoch {epoch}: perplexity: {perplexity}")
if args.with_tracking:
accelerator.log(
{
"perplexity": perplexity,
"train_loss": total_loss,
"epoch": epoch,
},
step=completed_steps,
)
if args.push_to_hub and epoch < args.num_train_epochs - 1:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
repo.push_to_hub(
commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True
)
if args.checkpointing_steps == "epoch":
accelerator.save_state(f"epoch_{epoch}")
if args.output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
if args.push_to_hub:
repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
| def main():
args = parse_args()
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
# If we're using tracking, we also need to initialize it here and it will pick up all supported trackers in the environment
accelerator = Accelerator(log_with="all") if args.with_tracking else Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.push_to_hub:
if args.hub_model_id is None:
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
repo = Repository(args.output_dir, clone_from=repo_name)
elif args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
accelerator.wait_for_everyone()
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
args.dataset_name,
args.dataset_config_name,
split=f"train[:{args.validation_split_percentage}%]",
)
raw_datasets["train"] = load_dataset(
args.dataset_name,
args.dataset_config_name,
split=f"train[{args.validation_split_percentage}%:]",
)
else:
data_files = {}
dataset_args = {}
if args.train_file is not None:
data_files["train"] = args.train_file
if args.validation_file is not None:
data_files["validation"] = args.validation_file
extension = args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
dataset_args["keep_linebreaks"] = not args.no_keep_linebreaks
raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args)
# If no validation data is there, validation_split_percentage will be used to divide the dataset.
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
extension,
data_files=data_files,
split=f"train[:{args.validation_split_percentage}%]",
**dataset_args,
)
raw_datasets["train"] = load_dataset(
extension,
data_files=data_files,
split=f"train[{args.validation_split_percentage}%:]",
**dataset_args,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if args.config_name:
config = AutoConfig.from_pretrained(args.config_name)
elif args.model_name_or_path:
config = AutoConfig.from_pretrained(args.model_name_or_path)
else:
config = CONFIG_MAPPING[args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=not args.use_slow_tokenizer)
elif args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, use_fast=not args.use_slow_tokenizer)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if args.model_name_or_path:
model = AutoModelForCausalLM.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
)
else:
logger.info("Training new model from scratch")
model = AutoModelForCausalLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
column_names = raw_datasets["train"].column_names
text_column_name = "text" if "text" in column_names else column_names[0]
def tokenize_function(examples):
return tokenizer(examples[text_column_name])
with accelerator.main_process_first():
tokenized_datasets = raw_datasets.map(
tokenize_function,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on dataset",
)
if args.block_size is None:
block_size = tokenizer.model_max_length
if block_size > 1024:
logger.warning(
f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). "
"Picking 1024 instead. You can change that default value by passing --block_size xxx."
)
block_size = 1024
else:
if args.block_size > tokenizer.model_max_length:
logger.warning(
f"The block_size passed ({args.block_size}) is larger than the maximum length for the model"
f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
)
block_size = min(args.block_size, tokenizer.model_max_length)
# Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
def group_texts(examples):
# Concatenate all texts.
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
total_length = len(concatenated_examples[list(examples.keys())[0]])
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
# customize this part to your needs.
if total_length >= block_size:
total_length = (total_length // block_size) * block_size
# Split by chunks of max_len.
result = {
k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
for k, t in concatenated_examples.items()
}
result["labels"] = result["input_ids"].copy()
return result
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
# for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
# to preprocess.
#
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
with accelerator.main_process_first():
lm_datasets = tokenized_datasets.map(
group_texts,
batched=True,
num_proc=args.preprocessing_num_workers,
load_from_cache_file=not args.overwrite_cache,
desc=f"Grouping texts in chunks of {block_size}",
)
train_dataset = lm_datasets["train"]
eval_dataset = lm_datasets["validation"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# DataLoaders creation:
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size
)
eval_dataloader = DataLoader(
eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size
)
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
# On TPU, the tie weights in our model have been disconnected, so we need to restore the ties.
if accelerator.distributed_type == DistributedType.TPU:
model.tie_weights()
# Note -> the training dataloader needs to be prepared before we grab its length below (cause its length will be
# shorter in multiprocess)
# Scheduler and math around the number of training steps.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
else:
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.max_train_steps,
)
# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# Figure out how many steps we should save the Accelerator states
if hasattr(args.checkpointing_steps, "isdigit"):
checkpointing_steps = args.checkpointing_steps
if args.checkpointing_steps.isdigit():
checkpointing_steps = int(args.checkpointing_steps)
else:
checkpointing_steps = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
accelerator.init_trackers("clm_no_trainer", args)
# Train!
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
completed_steps = 0
# Potentially load in the weights and states from a previous save
state_restored = True
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
accelerator.load_state(args.resume_from_checkpoint)
resume_step = None
else:
# Get the most recent checkpoint
dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
dirs.sort(key=os.path.getctime)
path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
if "epoch" in path.name:
num_epochs -= int(path.name.replace("epoch_", ""))
else:
resume_step = int(path.name.replace("step_", ""))
num_epochs -= resume_step // len(train_dataloader)
resume_step = (num_epochs * len(train_dataloader)) - resume_step
state_restored = False
for epoch in range(args.num_train_epochs):
model.train()
if args.with_tracking:
total_loss = 0
for step, batch in enumerate(train_dataloader):
# We need to skip steps until we reach the resumed step
if args.resume_from_checkpoint and epoch == 0 and step < resume_step:
continue
outputs = model(**batch)
loss = outputs.loss
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
loss = loss / args.gradient_accumulation_steps
accelerator.backward(loss)
if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
completed_steps += 1
if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
accelerator.save_state(f"step_{completed_steps}")
if completed_steps >= args.max_train_steps:
break
model.eval()
losses = []
for step, batch in enumerate(eval_dataloader):
with torch.no_grad():
outputs = model(**batch)
loss = outputs.loss
losses.append(accelerator.gather(loss.repeat(args.per_device_eval_batch_size)))
losses = torch.cat(losses)
losses = losses[: len(eval_dataset)]
try:
perplexity = math.exp(torch.mean(losses))
except OverflowError:
perplexity = float("inf")
logger.info(f"epoch {epoch}: perplexity: {perplexity}")
if args.with_tracking:
accelerator.log(
{
"perplexity": perplexity,
"train_loss": total_loss,
"epoch": epoch,
},
step=completed_steps,
)
if args.push_to_hub and epoch < args.num_train_epochs - 1:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
repo.push_to_hub(
commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True
)
if args.checkpointing_steps == "epoch":
accelerator.save_state(f"epoch_{epoch}")
if args.output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(args.output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
if args.push_to_hub:
repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
|
12,470 | def _determine_eq_order(ctx: 'mypy.plugin.ClassDefContext') -> bool:
"""
Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
value of order.
"""
cmp = _get_decorator_optional_bool_argument(ctx, 'cmp')
eq = _get_decorator_optional_bool_argument(ctx, 'eq')
order = _get_decorator_optional_bool_argument(ctx, 'order')
if cmp is not None and any((eq is not None, order is not None)):
ctx.api.fail('Don\'t mix "%s" with "%s" and "%s"' % (cmp, eq, order), ctx.reason)
# cmp takes precedence due to bw-compatibility.
if cmp is not None:
return cmp
# If left None, equality is on and ordering mirrors equality.
if eq is None:
eq = True
if order is None:
order = eq
if eq is False and order is True:
ctx.api.fail('eq must be "True" if order is "True"', ctx.reason)
return order
| def _determine_eq_order(ctx: 'mypy.plugin.ClassDefContext') -> bool:
"""
Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
value of order.
"""
cmp = _get_decorator_optional_bool_argument(ctx, 'cmp')
eq = _get_decorator_optional_bool_argument(ctx, 'eq')
order = _get_decorator_optional_bool_argument(ctx, 'order')
if cmp is not None and any((eq is not None, order is not None)):
ctx.api.fail('Don\'t mix "cmp" with "eq" and "order"', ctx.reason)
# cmp takes precedence due to bw-compatibility.
if cmp is not None:
return cmp
# If left None, equality is on and ordering mirrors equality.
if eq is None:
eq = True
if order is None:
order = eq
if eq is False and order is True:
ctx.api.fail('eq must be "True" if order is "True"', ctx.reason)
return order
|
172 | def upgrade():
"""Add a new Bool column to bugs table, set default to False."""
op.add_column('bugs', sa.Column('private', sa.Boolean(), nullable=True))
op.execute("""UPDATE bugs SET private = FALSE""")
op.alter_column('bugs', 'private', nullable=False, server_default='false')
| def upgrade():
"""Add a new Bool column to bugs table, set default to False."""
op.add_column('bugs', sa.Column('private', sa.Boolean(), nullable=True))
op.execute("""UPDATE bugs SET private = FALSE""")
op.alter_column('bugs', 'private', nullable=False)
|
57,660 | def list_workers_command(client: Client, args: dict) -> CommandResults:
count = int(args.get('count', "1"))
page = int(args.get('page', "1"))
num_of_managers = int(args.get('managers', "3"))
employee_id = args.get('employee_id')
raw_json_response, workers_data = client.list_workers(page, count, employee_id)
workers_context = create_worker_context(workers_data, num_of_managers)
workers_readable = tableToMarkdown('Workers', workers_context, removeNull=True, headers=HEADERS)
return CommandResults(
readable_output=workers_readable,
outputs_prefix='Workday.Worker',
outputs_key_field='Worker_ID',
outputs=workers_context,
raw_response=raw_json_response)
| def list_workers_command(client: Client, args: dict) -> CommandResults:
count = int(args.get('count', '1'))
page = int(args.get('page', '1'))
num_of_managers = int(args.get('managers', '3'))
employee_id = args.get('employee_id')
raw_json_response, workers_data = client.list_workers(page, count, employee_id)
workers_context = create_worker_context(workers_data, num_of_managers)
workers_readable = tableToMarkdown('Workers', workers_context, removeNull=True, headers=HEADERS)
return CommandResults(
readable_output=workers_readable,
outputs_prefix='Workday.Worker',
outputs_key_field='Worker_ID',
outputs=workers_context,
raw_response=raw_json_response)
|
59,523 | def _write_instruction(file_obj, instruction_tuple, custom_instructions, index_map):
gate_class_name = instruction_tuple[0].__class__.__name__
if (
(
not hasattr(library, gate_class_name)
and not hasattr(circuit_mod, gate_class_name)
and not hasattr(extensions, gate_class_name)
and not hasattr(quantum_initializer, gate_class_name)
and not hasattr(controlflow, gate_class_name)
)
or gate_class_name == "Gate"
or gate_class_name == "Instruction"
or isinstance(instruction_tuple[0], library.BlueprintCircuit)
):
if instruction_tuple[0].name not in custom_instructions:
custom_instructions[instruction_tuple[0].name] = instruction_tuple[0]
gate_class_name = instruction_tuple[0].name
elif isinstance(instruction_tuple[0], library.PauliEvolutionGate):
gate_class_name = r"###PauliEvolutionGate_" + str(uuid.uuid4())
custom_instructions[gate_class_name] = instruction_tuple[0]
has_condition = False
condition_register = b""
condition_value = 0
if instruction_tuple[0].condition:
has_condition = True
if isinstance(instruction_tuple[0].condition[0], Clbit):
bit_index = index_map["c"][instruction_tuple[0].condition[0]]
condition_register = b"\x00" + str(bit_index).encode("utf8")
condition_value = int(instruction_tuple[0].condition[1])
else:
condition_register = instruction_tuple[0].condition[0].name.encode("utf8")
condition_value = instruction_tuple[0].condition[1]
gate_class_name = gate_class_name.encode("utf8")
label = getattr(instruction_tuple[0], "label")
if label:
label_raw = label.encode("utf8")
else:
label_raw = b""
instruction_raw = struct.pack(
INSTRUCTION_PACK,
len(gate_class_name),
len(label_raw),
len(instruction_tuple[0].params),
instruction_tuple[0].num_qubits,
instruction_tuple[0].num_clbits,
has_condition,
len(condition_register),
condition_value,
)
file_obj.write(instruction_raw)
file_obj.write(gate_class_name)
file_obj.write(label_raw)
file_obj.write(condition_register)
# Encode instruciton args
for qbit in instruction_tuple[1]:
instruction_arg_raw = struct.pack(INSTRUCTION_ARG_PACK, b"q", index_map["q"][qbit])
file_obj.write(instruction_arg_raw)
for clbit in instruction_tuple[2]:
instruction_arg_raw = struct.pack(INSTRUCTION_ARG_PACK, b"c", index_map["c"][clbit])
file_obj.write(instruction_arg_raw)
# Encode instruction params
for param in instruction_tuple[0].params:
container = io.BytesIO()
if isinstance(param, int):
type_key = "i"
data = struct.pack("<q", param)
size = struct.calcsize("<q")
elif isinstance(param, float):
type_key = "f"
data = struct.pack("<d", param)
size = struct.calcsize("<d")
elif isinstance(param, str):
type_key = "s"
data = param.encode("utf8")
size = len(data)
elif isinstance(param, ParameterVectorElement):
type_key = "v"
_write_parameter_vec(container, param)
container.seek(0)
data = container.read()
size = len(data)
elif isinstance(param, Parameter):
type_key = "p"
_write_parameter(container, param)
container.seek(0)
data = container.read()
size = len(data)
elif isinstance(param, ParameterExpression):
type_key = "e"
_write_parameter_expression(container, param)
container.seek(0)
data = container.read()
size = len(data)
elif isinstance(param, complex):
type_key = "c"
data = struct.pack(COMPLEX_PACK, param.real, param.imag)
size = struct.calcsize(COMPLEX_PACK)
elif isinstance(param, (np.integer, np.floating, np.ndarray, np.complexfloating)):
type_key = "n"
np.save(container, param)
container.seek(0)
data = container.read()
size = len(data)
elif param is None:
type_key = "z"
data = b""
size = len(data)
elif isinstance(param, QuantumCircuit):
type_key = "q"
_write_circuit(container, param)
data = container.getvalue()
size = len(data)
elif isinstance(param, range):
type_key = "r"
data = struct.pack(RANGE_PACK, param.start, param.stop, param.step)
size = len(data)
# Tuples are arbitrary containers of python objects and can have anythin
# as a value, qpy doesn't support this as a type currently. But in the
# ForLoopOp the indexset parameter is a tuple of only intergers
# as the consumed integer iterator to use as loop iteration values.
# Serialize this as a numpy array of integers and use a unique type
# code to indicate the array needs to be case to a tuple on
# deserialization
elif gate_class_name == b"ForLoopOp" and isinstance(param, tuple):
type_key = "t"
output_array = np.array(param, dtype=int)
np.save(container, output_array)
data = container.getvalue()
size = len(data)
else:
raise TypeError(
f"Invalid parameter type {instruction_tuple[0]} for gate {type(param)},"
)
instruction_param_raw = struct.pack(INSTRUCTION_PARAM_PACK, type_key.encode("utf8"), size)
file_obj.write(instruction_param_raw)
file_obj.write(data)
container.close()
| def _write_instruction(file_obj, instruction_tuple, custom_instructions, index_map):
gate_class_name = instruction_tuple[0].__class__.__name__
if (
(
not hasattr(library, gate_class_name)
and not hasattr(circuit_mod, gate_class_name)
and not hasattr(extensions, gate_class_name)
and not hasattr(quantum_initializer, gate_class_name)
and not hasattr(controlflow, gate_class_name)
)
or gate_class_name == "Gate"
or gate_class_name == "Instruction"
or isinstance(instruction_tuple[0], library.BlueprintCircuit)
):
if instruction_tuple[0].name not in custom_instructions:
custom_instructions[instruction_tuple[0].name] = instruction_tuple[0]
gate_class_name = instruction_tuple[0].name
elif isinstance(instruction_tuple[0], library.PauliEvolutionGate):
gate_class_name = r"###PauliEvolutionGate_" + str(uuid.uuid4())
custom_instructions[gate_class_name] = instruction_tuple[0]
has_condition = False
condition_register = b""
condition_value = 0
if instruction_tuple[0].condition:
has_condition = True
if isinstance(instruction_tuple[0].condition[0], Clbit):
bit_index = index_map["c"][instruction_tuple[0].condition[0]]
condition_register = b"\x00" + str(bit_index).encode("utf8")
condition_value = int(instruction_tuple[0].condition[1])
else:
condition_register = instruction_tuple[0].condition[0].name.encode("utf8")
condition_value = instruction_tuple[0].condition[1]
gate_class_name = gate_class_name.encode("utf8")
label = getattr(instruction_tuple[0], "label")
if label:
label_raw = label.encode("utf8")
else:
label_raw = b""
instruction_raw = struct.pack(
INSTRUCTION_PACK,
len(gate_class_name),
len(label_raw),
len(instruction_tuple[0].params),
instruction_tuple[0].num_qubits,
instruction_tuple[0].num_clbits,
has_condition,
len(condition_register),
condition_value,
)
file_obj.write(instruction_raw)
file_obj.write(gate_class_name)
file_obj.write(label_raw)
file_obj.write(condition_register)
# Encode instruciton args
for qbit in instruction_tuple[1]:
instruction_arg_raw = struct.pack(INSTRUCTION_ARG_PACK, b"q", index_map["q"][qbit])
file_obj.write(instruction_arg_raw)
for clbit in instruction_tuple[2]:
instruction_arg_raw = struct.pack(INSTRUCTION_ARG_PACK, b"c", index_map["c"][clbit])
file_obj.write(instruction_arg_raw)
# Encode instruction params
for param in instruction_tuple[0].params:
container = io.BytesIO()
if isinstance(param, int):
type_key = "i"
data = struct.pack("<q", param)
size = struct.calcsize("<q")
elif isinstance(param, float):
type_key = "f"
data = struct.pack("<d", param)
size = struct.calcsize("<d")
elif isinstance(param, str):
type_key = "s"
data = param.encode("utf8")
size = len(data)
elif isinstance(param, ParameterVectorElement):
type_key = "v"
_write_parameter_vec(container, param)
container.seek(0)
data = container.read()
size = len(data)
elif isinstance(param, Parameter):
type_key = "p"
_write_parameter(container, param)
container.seek(0)
data = container.read()
size = len(data)
elif isinstance(param, ParameterExpression):
type_key = "e"
_write_parameter_expression(container, param)
container.seek(0)
data = container.read()
size = len(data)
elif isinstance(param, complex):
type_key = "c"
data = struct.pack(COMPLEX_PACK, param.real, param.imag)
size = struct.calcsize(COMPLEX_PACK)
elif isinstance(param, (np.integer, np.floating, np.ndarray, np.complexfloating)):
type_key = "n"
np.save(container, param)
container.seek(0)
data = container.read()
size = len(data)
elif param is None:
type_key = "z"
data = b""
size = len(data)
elif isinstance(param, QuantumCircuit):
type_key = "q"
_write_circuit(container, param)
data = container.getvalue()
size = len(data)
elif isinstance(param, range):
type_key = "r"
data = struct.pack(RANGE_PACK, param.start, param.stop, param.step)
size = len(data)
# Tuples are arbitrary containers of python objects and can have anything
# as a value, qpy doesn't support this as a type currently. But in the
# ForLoopOp the indexset parameter is a tuple of only integers
# as the consumed integer iterator to use as loop iteration values.
# Serialize this as a numpy array of integers and use a unique type
# code to indicate the array needs to be case to a tuple on
# deserialization
elif gate_class_name == b"ForLoopOp" and isinstance(param, tuple):
type_key = "t"
output_array = np.array(param, dtype=int)
np.save(container, output_array)
data = container.getvalue()
size = len(data)
else:
raise TypeError(
f"Invalid parameter type {instruction_tuple[0]} for gate {type(param)},"
)
instruction_param_raw = struct.pack(INSTRUCTION_PARAM_PACK, type_key.encode("utf8"), size)
file_obj.write(instruction_param_raw)
file_obj.write(data)
container.close()
|
35,385 | def process_measurement_list(lines, lists=None, m2w=None, pcrval=None):
errs = [0, 0, 0, 0]
runninghash = START_HASH
found_pcr = (pcrval == None)
if lists is not None:
lists = ast.literal_eval(lists)
whitelist = lists['whitelist']
exclude_list = lists['exclude']
else:
whitelist = None
is_valid, compiled_regex, err_msg = common.valid_exclude_list(exclude_list)
if not is_valid:
# This should not happen as the exclude list has already been validated
# by the verifier before acceping it. This is a safety net just in case.
err_msg += " Exclude list will be ignored."
logger.error(err_msg)
for line in lines:
line = line.strip()
tokens = line.split(None, 4)
if line == '':
continue
if len(tokens) != 5:
logger.error("invalid measurement list file line: -%s-" % (line))
return None
# print tokens
#pcr = tokens[0]
template_hash = codecs.decode(tokens[1], 'hex')
mode = tokens[2]
if mode == "ima-ng":
filedata = tokens[3]
ftokens = filedata.split(":")
filedata_algo = str(ftokens[0])
filedata_hash = codecs.decode(ftokens[1], 'hex')
path = str(tokens[4])
# this is some IMA weirdness
if template_hash == START_HASH:
template_hash = FF_HASH
else:
# verify template hash. yep this is terrible
fmt = "<I%dsBB%dsI%dsB" % (
len(filedata_algo), len(filedata_hash), len(path))
# +2 for the : and the null terminator, and +1 on path for null terminator
tohash = struct.pack(fmt, len(filedata_hash)+len(filedata_algo)+2, filedata_algo.encode(
'utf-8'), ord(':'), ord('\0'), filedata_hash, len(path)+1, path.encode("utf-8"), ord('\0'))
expected_template_hash = hashlib.sha1(tohash).digest()
if expected_template_hash != template_hash:
errs[0] += 1
logger.warning("template hash for file %s does not match %s != %s" % (path, codecs.encode(
expected_template_hash, 'hex').decode('utf-8'), codecs.encode(template_hash, 'hex').decode('utf-8')))
elif mode == 'ima':
filedata_hash = codecs.decode(tokens[3], "hex")
path = str(tokens[4])
# this is some IMA weirdness
if template_hash == START_HASH:
template_hash = FF_HASH
else:
# verify template hash. yep this is terrible
# name needs to be null padded out to MAX len. +1 is for the null terminator of the string itself
fmt = "<%ds%ds%ds" % (len(filedata_hash), len(
path), TCG_EVENT_NAME_LEN_MAX-len(path)+1)
tohash = struct.pack(fmt, filedata_hash, path.encode(
"utf-8"), bytearray(TCG_EVENT_NAME_LEN_MAX-len(path)+1))
expected_template_hash = hashlib.sha1(tohash).digest()
if expected_template_hash != template_hash:
errs[0] += 1
logger.warning("template hash for file %s does not match %s != %s" % (path, codecs.encode(
expected_template_hash, 'hex').decode('utf-8'), codecs.encode(template_hash, 'hex').decode('utf-8')))
else:
raise Exception("unsupported ima template mode: %s" % mode)
# update hash
runninghash = hashlib.sha1(runninghash+template_hash).digest()
if not found_pcr:
found_pcr = \
(codecs.encode(runninghash, 'hex').decode('utf-8') == pcrval)
# write out the new hash
if m2w is not None:
m2w.write("%s %s\n" % (codecs.encode(
filedata_hash, 'hex').decode('utf-8'), path))
if whitelist is not None:
# just skip if it is a weird overwritten path
if template_hash == FF_HASH:
# print "excluding ffhash %s"%path
continue
# determine if path matches any exclusion list items
if compiled_regex is not None and compiled_regex.match(path):
logger.debug("IMA: ignoring excluded path %s" % path)
continue
accept_list = whitelist.get(path, None)
accept_list = accept_list
if accept_list is None:
logger.warning("File not found in whitelist: %s" % (path))
errs[1] += 1
continue
# print('codecs.encode', codecs.encode(filedata_hash, 'hex').decode('utf-8'))
# print('accept_list:', accept_list)
if codecs.encode(filedata_hash, 'hex').decode('utf-8') not in accept_list:
logger.warning("Hashes for file %s don't match %s not in %s" % (
path, codecs.encode(filedata_hash, 'hex').decode('utf-8'), accept_list))
errs[2] += 1
continue
errs[3] += 1
# check PCR value has been found
if not found_pcr:
logger.error(
"IMA measurement list expected pcr value %s does not match TPM PCR %s"
%(codecs.encode(runninghash, 'hex').decode('utf-8'), pcrval))
return None
# clobber the retval if there were IMA file errors
if sum(errs[:3]) > 0:
logger.error(
"IMA ERRORS: template-hash %d fnf %d hash %d good %d" % tuple(errs))
return None
return codecs.encode(runninghash, 'hex').decode('utf-8')
| def process_measurement_list(lines, lists=None, m2w=None, pcrval=None):
errs = [0, 0, 0, 0]
runninghash = START_HASH
found_pcr = (pcrval == None)
if lists is not None:
lists = ast.literal_eval(lists)
whitelist = lists['whitelist']
exclude_list = lists['exclude']
else:
whitelist = None
is_valid, compiled_regex, err_msg = common.valid_exclude_list(exclude_list)
if not is_valid:
# This should not happen as the exclude list has already been validated
# by the verifier before acceping it. This is a safety net just in case.
err_msg += " Exclude list will be ignored."
logger.error(err_msg)
for line in lines:
line = line.strip()
tokens = line.split(None, 4)
if line == '':
continue
if len(tokens) != 5:
logger.error("invalid measurement list file line: -%s-" % (line))
return None
# print tokens
#pcr = tokens[0]
template_hash = codecs.decode(tokens[1], 'hex')
mode = tokens[2]
if mode == "ima-ng":
filedata = tokens[3]
ftokens = filedata.split(":")
filedata_algo = str(ftokens[0])
filedata_hash = codecs.decode(ftokens[1], 'hex')
path = str(tokens[4])
# this is some IMA weirdness
if template_hash == START_HASH:
template_hash = FF_HASH
else:
# verify template hash. yep this is terrible
fmt = "<I%dsBB%dsI%dsB" % (
len(filedata_algo), len(filedata_hash), len(path))
# +2 for the : and the null terminator, and +1 on path for null terminator
tohash = struct.pack(fmt, len(filedata_hash)+len(filedata_algo)+2, filedata_algo.encode(
'utf-8'), ord(':'), ord('\0'), filedata_hash, len(path)+1, path.encode("utf-8"), ord('\0'))
expected_template_hash = hashlib.sha1(tohash).digest()
if expected_template_hash != template_hash:
errs[0] += 1
logger.warning("template hash for file %s does not match %s != %s" % (path, codecs.encode(
expected_template_hash, 'hex').decode('utf-8'), codecs.encode(template_hash, 'hex').decode('utf-8')))
elif mode == 'ima':
filedata_hash = codecs.decode(tokens[3], "hex")
path = str(tokens[4])
# this is some IMA weirdness
if template_hash == START_HASH:
template_hash = FF_HASH
else:
# verify template hash. yep this is terrible
# name needs to be null padded out to MAX len. +1 is for the null terminator of the string itself
fmt = "<%ds%ds%ds" % (len(filedata_hash), len(
path), TCG_EVENT_NAME_LEN_MAX-len(path)+1)
tohash = struct.pack(fmt, filedata_hash, path.encode(
"utf-8"), bytearray(TCG_EVENT_NAME_LEN_MAX-len(path)+1))
expected_template_hash = hashlib.sha1(tohash).digest()
if expected_template_hash != template_hash:
errs[0] += 1
logger.warning("template hash for file %s does not match %s != %s" % (path, codecs.encode(
expected_template_hash, 'hex').decode('utf-8'), codecs.encode(template_hash, 'hex').decode('utf-8')))
else:
raise Exception("unsupported ima template mode: %s" % mode)
# update hash
runninghash = hashlib.sha1(runninghash+template_hash).digest()
if not found_pcr:
found_pcr = \
(codecs.encode(runninghash, 'hex').decode('utf-8') == pcrval)
# write out the new hash
if m2w is not None:
m2w.write("%s %s\n" % (codecs.encode(
filedata_hash, 'hex').decode('utf-8'), path))
if whitelist is not None:
# just skip if it is a weird overwritten path
if template_hash == FF_HASH:
# print "excluding ffhash %s"%path
continue
# determine if path matches any exclusion list items
if compiled_regex is not None and compiled_regex.match(path):
logger.debug("IMA: ignoring excluded path %s" % path)
continue
accept_list = whitelist.get(path, None)
accept_list = accept_list
if accept_list is None:
logger.warning("File not found in whitelist: %s" % (path))
errs[1] += 1
continue
# print('codecs.encode', codecs.encode(filedata_hash, 'hex').decode('utf-8'))
# print('accept_list:', accept_list)
if codecs.encode(filedata_hash, 'hex').decode('utf-8') not in accept_list:
logger.warning("Hashes for file %s don't match %s not in %s" % (
path, codecs.encode(filedata_hash, 'hex').decode('utf-8'), accept_list))
errs[2] += 1
continue
errs[3] += 1
# check PCR value has been found
if not found_pcr:
logger.error("IMA measurement list does not match TPM PCR %s" % pcrval)
return None
# clobber the retval if there were IMA file errors
if sum(errs[:3]) > 0:
logger.error(
"IMA ERRORS: template-hash %d fnf %d hash %d good %d" % tuple(errs))
return None
return codecs.encode(runninghash, 'hex').decode('utf-8')
|
31,137 | def url_quota_command():
cmd_url = '/urlCategories/urlQuota'
response = http_request('GET', cmd_url).json()
human_readable = {
'Unique Provisioned URLs': response.get('uniqueUrlsProvisioned'),
'Remaining URLs Quota': response.get('remainingUrlsQuota')
}
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown("Quota Information", human_readable),
'EntryContext': {'Zscaler.Quota': response}
}
return entry
| def url_quota_command():
cmd_url = '/urlCategories/urlQuota'
response = http_request('GET', cmd_url).json()
human_readable = {
'Unique Provisioned URLs': response.get('uniqueUrlsProvisioned'),
'Remaining URLs Quota': response.get('remainingUrlsQuota'),
}
entry = {
'Type': entryTypes['note'],
'Contents': response,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown("Quota Information", human_readable),
'EntryContext': {'Zscaler.Quota': response}
}
return entry
|
57,730 | def get_duo_admin_by_name(name):
duo_administrators = admin_api.get_admins()
duo_administrator = next((admin for admin in duo_administrators if admin['name'] == name), None)
entry = get_entry_for_object(
'Information about admin ' + name, duo_administrator, duo_administrator,
{
'DuoAdmin.AdminDetail(val.name && val.name==obj.name)':
{'details': duo_administrator}
}
)
demisto.results(entry)
| def get_duo_admin_by_name(name):
duo_administrators = admin_api.get_admins()
duo_administrator = next((admin for admin in duo_administrators if admin['name'] == name), None)
entry = get_entry_for_object(
'Information about admin ' + name, duo_administrator, duo_administrator,
{
'DuoAdmin.AdminDetail(val.name && val.name==obj.name)':
{
'name': name,
'details': duo_administrator
}
}
)
demisto.results(entry)
|
21,948 | def lqe(*args, **keywords):
"""lqe(A, G, C, Q, R, [, N])
Linear quadratic estimator design (Kalman filter) for continuous-time
systems. Given the system
.. math::
x &= Ax + Bu + Gw \\\\
y &= Cx + Du + v
with unbiased process noise w and measurement noise v with covariances
.. math:: E{ww'} = Q, E{vv'} = R, E{wv'} = N
The lqe() function computes the observer gain matrix L such that the
stationary (non-time-varying) Kalman filter
.. math:: x_e = A x_e + B u + L(y - C x_e - D u)
produces a state estimate x_e that minimizes the expected squared error
using the sensor measurements y. The noise cross-correlation `N` is
set to zero when omitted.
The function can be called with either 3, 4, 5, or 6 arguments:
* ``L, P, E = lqe(sys, Q, R)``
* ``L, P, E = lqe(sys, Q, R, N)``
* ``L, P, E = lqe(A, G, C, Q, R)``
* ``L, P, E = lqe(A, B, C, Q, R, N)``
where `sys` is an `LTI` object, and `A`, `G`, `C`, `Q`, `R`, and `N` are
2D arrays or matrices of appropriate dimension.
Parameters
----------
A, G, C : 2D array_like
Dynamics, process noise (disturbance), and output matrices
sys : LTI (StateSpace or TransferFunction)
Linear I/O system, with the process noise input taken as the system
input.
Q, R : 2D array_like
Process and sensor noise covariance matrices
N : 2D array, optional
Cross covariance matrix. Not currently implemented.
Returns
-------
L : 2D array (or matrix)
Kalman estimator gain
P : 2D array (or matrix)
Solution to Riccati equation
.. math::
A P + P A^T - (P C^T + G N) R^{-1} (C P + N^T G^T) + G Q G^T = 0
E : 1D array
Eigenvalues of estimator poles eig(A - L C)
Notes
-----
The return type for 2D arrays depends on the default class set for
state space operations. See :func:`~control.use_numpy_matrix`.
Examples
--------
>>> L, P, E = lqe(A, G, C, QN, RN)
>>> L, P, E = lqe(A, G, C, QN, RN, NN)
See Also
--------
lqr
"""
# TODO: incorporate cross-covariance NN, something like this,
# which doesn't work for some reason
# if NN is None:
# NN = np.zeros(QN.size(0),RN.size(1))
# NG = G @ NN
#
# Process the arguments and figure out what inputs we received
#
# Get the system description
if (len(args) < 3):
raise ControlArgument("not enough input arguments")
try:
sys = args[0] # Treat the first argument as a system
if isinstance(sys, LTI):
# Convert LTI system to state space
sys = _convert_to_statespace(sys)
# Extract A, G (assume disturbances come through input), and C
A = np.array(sys.A, ndmin=2, dtype=float)
G = np.array(sys.B, ndmin=2, dtype=float)
C = np.array(sys.C, ndmin=2, dtype=float)
index = 1
except AttributeError:
# Arguments should be A and B matrices
A = np.array(args[0], ndmin=2, dtype=float)
G = np.array(args[1], ndmin=2, dtype=float)
C = np.array(args[2], ndmin=2, dtype=float)
index = 3
# Get the weighting matrices (converting to matrices, if needed)
Q = np.array(args[index], ndmin=2, dtype=float)
R = np.array(args[index+1], ndmin=2, dtype=float)
# Get the cross-covariance matrix, if given
if (len(args) > index + 2):
N = np.array(args[index+2], ndmin=2, dtype=float)
raise ControlNotImplemented("cross-covariance not implemented")
else:
N = np.zeros((Q.shape[0], R.shape[1]))
# Check dimensions for consistency
nstates = A.shape[0]
ninputs = G.shape[1]
noutputs = C.shape[0]
if (A.shape[0] != nstates or A.shape[1] != nstates or
G.shape[0] != nstates or C.shape[1] != nstates):
raise ControlDimension("inconsistent system dimensions")
elif (Q.shape[0] != ninputs or Q.shape[1] != ninputs or
R.shape[0] != noutputs or R.shape[1] != noutputs or
N.shape[0] != ninputs or N.shape[1] != noutputs):
raise ControlDimension("incorrect covariance matrix dimensions")
# LT, P, E = lqr(A.T, C.T, G @ Q @ G.T, R)
# P, E, LT = care(A.T, C.T, G @ Q @ G.T, R)
P, E, LT = care(A.T, C.T, np.dot(np.dot(G, Q), G.T), R)
return _ssmatrix(LT.T), _ssmatrix(P), E
| def lqe(*args, **keywords):
"""lqe(A, G, C, Q, R, [, N])
Linear quadratic estimator design (Kalman filter) for continuous-time
systems. Given the system
.. math::
x &= Ax + Bu + Gw \\\\
y &= Cx + Du + v
with unbiased process noise w and measurement noise v with covariances
.. math:: E{ww'} = Q, E{vv'} = R, E{wv'} = N
The lqe() function computes the observer gain matrix L such that the
stationary (non-time-varying) Kalman filter
.. math:: x_e = A x_e + B u + L(y - C x_e - D u)
produces a state estimate x_e that minimizes the expected squared error
using the sensor measurements y. The noise cross-correlation `N` is
set to zero when omitted.
The function can be called with either 3, 4, 5, or 6 arguments:
* ``L, P, E = lqe(sys, Q, R)``
* ``L, P, E = lqe(sys, Q, R, N)``
* ``L, P, E = lqe(A, G, C, Q, R)``
* ``L, P, E = lqe(A, B, C, Q, R, N)``
where `sys` is an `LTI` object, and `A`, `G`, `C`, `Q`, `R`, and `N` are
2D arrays or matrices of appropriate dimension.
Parameters
----------
A, G, C : 2D array_like
Dynamics, process noise (disturbance), and output matrices
sys : LTI (StateSpace or TransferFunction)
Linear I/O system, with the process noise input taken as the system
input.
Q, R : 2D array_like
Process and sensor noise covariance matrices
N : 2D array, optional
Cross covariance matrix. Not currently implemented.
Returns
-------
L : 2D array (or matrix)
Kalman estimator gain
P : 2D array (or matrix)
Solution to Riccati equation
.. math::
A P + P A^T - (P C^T + G N) R^{-1} (C P + N^T G^T) + G Q G^T = 0
E : 1D array
Eigenvalues of estimator poles eig(A - L C)
Notes
-----
The return type for 2D arrays depends on the default class set for
state space operations. See :func:`~control.use_numpy_matrix`.
Examples
--------
>>> L, P, E = lqe(A, G, C, QN, RN)
>>> L, P, E = lqe(A, G, C, QN, RN, NN)
See Also
--------
lqr
"""
# TODO: incorporate cross-covariance NN, something like this,
# which doesn't work for some reason
# if NN is None:
# NN = np.zeros(QN.size(0),RN.size(1))
# NG = G @ NN
#
# Process the arguments and figure out what inputs we received
#
# Get the system description
if (len(args) < 3):
raise ControlArgument("not enough input arguments")
try:
sys = args[0] # Treat the first argument as a system
if isinstance(sys, LTI):
# Convert LTI system to state space
sys = _convert_to_statespace(sys)
# Extract A, G (assume disturbances come through input), and C
A = np.array(sys.A, ndmin=2, dtype=float)
G = np.array(sys.B, ndmin=2, dtype=float)
C = np.array(sys.C, ndmin=2, dtype=float)
index = 1
except AttributeError:
# Arguments should be A and B matrices
A = np.array(args[0], ndmin=2, dtype=float)
G = np.array(args[1], ndmin=2, dtype=float)
C = np.array(args[2], ndmin=2, dtype=float)
index = 3
# Get the weighting matrices (converting to matrices, if needed)
Q = np.array(args[index], ndmin=2, dtype=float)
R = np.array(args[index+1], ndmin=2, dtype=float)
# Get the cross-covariance matrix, if given
if (len(args) > index + 2):
N = np.array(args[index+2], ndmin=2, dtype=float)
raise ControlNotImplemented("cross-covariance not implemented")
else:
N = np.zeros((Q.shape[0], R.shape[1]))
# Check dimensions for consistency
nstates = A.shape[0]
ninputs = G.shape[1]
noutputs = C.shape[0]
if (A.shape[0] != nstates or A.shape[1] != nstates or
G.shape[0] != nstates or C.shape[1] != nstates):
raise ControlDimension("inconsistent system dimensions")
elif (Q.shape[0] != ninputs or Q.shape[1] != ninputs or
R.shape[0] != noutputs or R.shape[1] != noutputs or
N.shape[0] != ninputs or N.shape[1] != noutputs):
raise ControlDimension("incorrect covariance matrix dimensions")
# L, P, E = lqe(...)
# P, E, LT = care(A.T, C.T, G @ Q @ G.T, R)
P, E, LT = care(A.T, C.T, np.dot(np.dot(G, Q), G.T), R)
return _ssmatrix(LT.T), _ssmatrix(P), E
|
34,183 | def create_dir_for_file(file_path: Text) -> None:
"""Creates any missing parent directories of this files path."""
try:
os.makedirs(os.path.dirname(file_path))
except OSError as e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
| def create_directory_for_file(file_path: Text) -> None:
"""Creates any missing parent directories of this files path."""
try:
os.makedirs(os.path.dirname(file_path))
except OSError as e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
|
40,252 | def meshgrid(x, y, indexing='xy'):
"""Construct coordinate matrices from two coordinate vectors.
Parameters
----------
x : list[float]
The values of the "x axis" of the grid.
y : list[float]
The values of the "y axis" of the grid.
indexing : {'xy', 'ij'}, optional
The indexing strategy determines the structure of the output.
Returns
-------
list[list[float]
The X values of the coordinate grid.
list[list[float]
The Y values of the coordinate grid.
Notes
-----
The output of this function consists of two "matrices", `X` and `Y`.
The structure of the matrices is determined by the choice of `indexing`.
Assuming ``m = len(x)`` and ``n = len(y)``.
If `indexing` is ``'xy'``,
the shape of both matrices is ``(n, m)``,
with `X` containing the elements of `x` in its rows, and `Y` the elements of `y` in its columns.
If `indexing` is ``'ij'``,
the shape of both matrices is ``(m, n)``,
with `X` containing the elements of `x` in its columns, and `Y` the elements of `y` in its rows.
References
----------
This function mimicks the functionality of ``numpy.meshgrid`` [1]_, but in a simpler form.
.. [1] `numpy.meshgrid`.
Available at https://numpy.org/doc/stable/reference/generated/numpy.meshgrid.html
Examples
--------
>>> from compas.utilities import linspace, meshgrid
>>> x = list(linspace(0, 1, 3))
>>> y = list(linspace(0, 1, 2))
>>> X, Y = meshgrid(x, y)
>>> X
[[0.0, 0.5, 1.0], [0.0, 0.5, 1.0]]
>>> Y
[[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]
>>> X, Y = meshgrid(x, y, 'ij')
>>> X
[[0.0, 0.0], [0.5, 0.5], [1.0, 1.0]]
>>> Y
[[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]]
"""
x = list(x)
y = list(y)
if indexing == 'xy':
X = [[x[j] for j in range(len(x))] for i in range(len(y))]
Y = [[y[i] for j in range(len(x))] for i in range(len(y))]
else:
X = [[x[i] for j in range(len(y))] for i in range(len(x))]
Y = [[y[j] for j in range(len(y))] for i in range(len(x))]
return X, Y
| def meshgrid(x, y, indexing='xy'):
"""Construct coordinate matrices from two coordinate vectors.
Parameters
----------
x : list[float]
The values of the "x axis" of the grid.
y : list[float]
The values of the "y axis" of the grid.
indexing : {'xy', 'ij'}, optional
The indexing strategy determines the structure of the output.
Returns
-------
list[list[float]
The X values of the coordinate grid.
list[list[float]]
The Y values of the coordinate grid.
Notes
-----
The output of this function consists of two "matrices", `X` and `Y`.
The structure of the matrices is determined by the choice of `indexing`.
Assuming ``m = len(x)`` and ``n = len(y)``.
If `indexing` is ``'xy'``,
the shape of both matrices is ``(n, m)``,
with `X` containing the elements of `x` in its rows, and `Y` the elements of `y` in its columns.
If `indexing` is ``'ij'``,
the shape of both matrices is ``(m, n)``,
with `X` containing the elements of `x` in its columns, and `Y` the elements of `y` in its rows.
References
----------
This function mimicks the functionality of ``numpy.meshgrid`` [1]_, but in a simpler form.
.. [1] `numpy.meshgrid`.
Available at https://numpy.org/doc/stable/reference/generated/numpy.meshgrid.html
Examples
--------
>>> from compas.utilities import linspace, meshgrid
>>> x = list(linspace(0, 1, 3))
>>> y = list(linspace(0, 1, 2))
>>> X, Y = meshgrid(x, y)
>>> X
[[0.0, 0.5, 1.0], [0.0, 0.5, 1.0]]
>>> Y
[[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]
>>> X, Y = meshgrid(x, y, 'ij')
>>> X
[[0.0, 0.0], [0.5, 0.5], [1.0, 1.0]]
>>> Y
[[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]]
"""
x = list(x)
y = list(y)
if indexing == 'xy':
X = [[x[j] for j in range(len(x))] for i in range(len(y))]
Y = [[y[i] for j in range(len(x))] for i in range(len(y))]
else:
X = [[x[i] for j in range(len(y))] for i in range(len(x))]
Y = [[y[j] for j in range(len(y))] for i in range(len(x))]
return X, Y
|
38,062 | def _load_static_earth_relief(**kwargs):
"""
Load the static_earth_relief file for internal testing.
Returns
-------
data : xarray.DataArray
A grid of Earth relief for internal tests.
"""
fname = which("@static_earth_relief.nc", download="c")
data = xr.open_dataarray(fname)
return data
| def _load_static_earth_relief(**kwargs):
"""
Load the static_earth_relief file for internal testing.
Returns
-------
data : xarray.DataArray
A grid of Earth relief for internal tests.
"""
fname = which("@static_earth_relief.nc", download="c")
return xr.open_dataarray(fname)
|
59,163 | def test_read_visium_counts():
# Test that checks the read_visium function
h5_pth = ROOT / '../visium_data/1.0.0'
spec_genome_v3 = sc.read_visium(h5_pth, genome='GRCh38')
nospec_genome_v3 = sc.read_visium(h5_pth)
assert_anndata_equal(spec_genome_v3, nospec_genome_v3)
| def test_read_visium_counts():
# Test that checks the read_visium function
visium_pth = ROOT / '../visium_data/1.0.0'
spec_genome_v3 = sc.read_visium(visium_pth, genome='GRCh38')
nospec_genome_v3 = sc.read_visium(visium_pth)
assert_anndata_equal(spec_genome_v3, nospec_genome_v3)
|
13,207 | def send_spam_user_email(recipient, deposit=None, community=None):
"""Send email notification to blocked user after spam detection."""
msg = Message(
_("Your Zenodo activity has been automatically marked as Spam."),
sender=current_app.config.get('SUPPORT_EMAIL'),
recipients=[recipient],
)
msg.body = render_template(
"zenodo_spam/email/spam_user_email.tpl",
community=community,
deposit=deposit
)
send_email.delay(msg.__dict__)
| def send_spam_user_email(recipient, deposit=None, community=None):
"""Send email notification to blocked user after spam detection."""
msg = Message(
_("Your Zenodo activity has been automatically marked as Spam."),
sender=current_app.config.get('SUPPORT_EMAIL'),
recipients=[recipient],
)
msg.body = render_template(
"zenodo_spam/email/spam_user_email.tpl",
community=community,
deposit=deposit
)
send_email.delay(msg.__dict__)
|
Subsets and Splits