id (int64, 11 to 59.9k) | original (string, lengths 33 to 150k) | modified (string, lengths 37 to 150k)
---|---|---|
24,512 | def pick_card_member(
config: dict, author: str, team: str, card_assignments: dict, approvers: List[str] = None
) -> Tuple[Any, Any]:
"""Return a member to assign to the created issue.
In practice, it returns one trello user which is not the PR author or an approver, for the given team.
For it to work, you need a `trello_users_$team` table in your ddev configuration,
with keys being github users and values being their corresponding trello IDs (not names).
For example::
[trello_users_integrations]
john = "xxxxxxxxxxxxxxxxxxxxx"
alice = "yyyyyyyyyyyyyyyyyyyy"
"""
if approvers is None:
approvers = []
users = config.get(f'trello_users_{team}')
if not users:
return None, None
if team not in card_assignments:
# initialize map team -> user -> QA cards assigned
team_members = list(users)
random.shuffle(team_members)
card_assignments[team] = dict.fromkeys(team_members, 0)
member = min(
[member for member in card_assignments[team] if member != author and member not in approvers],
key=card_assignments[team].get,
)
card_assignments[team][member] += 1
return member, users[member]
| def pick_card_member(
config: dict, author: str, team: str, card_assignments: dict, approvers: Optional[List[str]] = None
) -> Tuple[Any, Any]:
"""Return a member to assign to the created issue.
In practice, it returns one trello user which is not the PR author or an approver, for the given team.
For it to work, you need a `trello_users_$team` table in your ddev configuration,
with keys being github users and values being their corresponding trello IDs (not names).
For example::
[trello_users_integrations]
john = "xxxxxxxxxxxxxxxxxxxxx"
alice = "yyyyyyyyyyyyyyyyyyyy"
"""
if approvers is None:
approvers = []
users = config.get(f'trello_users_{team}')
if not users:
return None, None
if team not in card_assignments:
# initialize map team -> user -> QA cards assigned
team_members = list(users)
random.shuffle(team_members)
card_assignments[team] = dict.fromkeys(team_members, 0)
member = min(
[member for member in card_assignments[team] if member != author and member not in approvers],
key=card_assignments[team].get,
)
card_assignments[team][member] += 1
return member, users[member]
|
3,305 | def save_userreport(project, report, start_time=None):
if start_time is None:
start_time = timezone.now()
# XXX(dcramer): enforce case insensitivity by coercing this to a lowercase string
report["event_id"] = report["event_id"].lower()
report["project_id"] = project.id
event = eventstore.get_event_by_id(project.id, report["event_id"])
# TODO(dcramer): we should probably create the user if they dont
# exist, and ideally we'd also associate that with the event
euser = find_event_user(report, event)
if euser and not euser.name and report.get("name"):
euser.update(name=report["name"])
if euser:
report["event_user_id"] = euser.id
if event:
# if the event is more than 30 minutes old, we dont allow updates
# as it might be abusive
if event.datetime < start_time - timedelta(minutes=30):
raise Conflict("Feedback for this event cannot be modified.")
report["environment_id"] = event.get_environment().id
report["group_id"] = event.group.id
try:
with transaction.atomic():
report_instance = UserReport.objects.create(**report)
except IntegrityError:
# There was a duplicate, so just overwrite the existing
# row with the new one. The only way this ever happens is
# if someone is messing around with the API, or doing
# something wrong with the SDK, but this behavior is
# more reasonable than just hard erroring and is more
# expected.
existing_report = UserReport.objects.get(
project_id=report["project_id"], event_id=report["event_id"]
)
# if the existing report was submitted more than 5 minutes ago, we dont
# allow updates as it might be abusive (replay attacks)
if existing_report.date_added < timezone.now() - timedelta(minutes=5):
raise Conflict("Feedback for this event cannot be modified.")
existing_report.update(
name=report.get("name", ""),
email=report["email"],
comments=report["comments"],
date_added=timezone.now(),
event_user_id=euser.id if euser else None,
)
report_instance = existing_report
else:
if report_instance.group_id:
report_instance.notify()
user_feedback_received.send(
project=Project.objects.get(id=report_instance.project_id),
group=Group.objects.get(id=report_instance.group_id),
sender=save_userreport,
)
return report_instance
| def save_userreport(project, report, start_time=None):
if start_time is None:
start_time = timezone.now()
# XXX(dcramer): enforce case insensitivity by coercing this to a lowercase string
report["event_id"] = report["event_id"].lower()
report["project_id"] = project.id
event = eventstore.get_event_by_id(project.id, report["event_id"])
# TODO(dcramer): we should probably create the user if they dont
# exist, and ideally we'd also associate that with the event
euser = find_event_user(report, event)
if euser and not euser.name and report.get("name"):
euser.update(name=report["name"])
if euser:
report["event_user_id"] = euser.id
if event:
# if the event is more than 30 minutes old, we dont allow updates
# as it might be abusive
if event.datetime < start_time - timedelta(minutes=30):
raise Conflict("Feedback for this event cannot be modified.")
report["environment_id"] = event.get_environment().id
report["group_id"] = event.group_id
try:
with transaction.atomic():
report_instance = UserReport.objects.create(**report)
except IntegrityError:
# There was a duplicate, so just overwrite the existing
# row with the new one. The only way this ever happens is
# if someone is messing around with the API, or doing
# something wrong with the SDK, but this behavior is
# more reasonable than just hard erroring and is more
# expected.
existing_report = UserReport.objects.get(
project_id=report["project_id"], event_id=report["event_id"]
)
# if the existing report was submitted more than 5 minutes ago, we dont
# allow updates as it might be abusive (replay attacks)
if existing_report.date_added < timezone.now() - timedelta(minutes=5):
raise Conflict("Feedback for this event cannot be modified.")
existing_report.update(
name=report.get("name", ""),
email=report["email"],
comments=report["comments"],
date_added=timezone.now(),
event_user_id=euser.id if euser else None,
)
report_instance = existing_report
else:
if report_instance.group_id:
report_instance.notify()
user_feedback_received.send(
project=Project.objects.get(id=report_instance.project_id),
group=Group.objects.get(id=report_instance.group_id),
sender=save_userreport,
)
return report_instance
|
32,606 | def project_list_command(client: Client, args: Dict) -> CommandResults:
limit = int(args.get('limit', 50))
project_key = args.get('project_key')
page = arg_to_number(args.get('page', 1))
check_args(limit, page)
page_size = min(100, limit)
params = {
'page': page,
'pagelen': page_size
}
response = client.get_project_list_request(params, project_key)
if project_key:
results = [response]
readable_name = f'The information about project {project_key.upper()}'
else:
results = check_pagination(client, response, limit)
readable_name = f'List of the projects in {client.workspace}'
human_readable = []
for value in results:
d = {'Key': value.get('key'),
'Name': value.get('name'),
'Description': value.get('description'),
'IsPrivate': value.get('is_private')}
human_readable.append(d)
headers = ['Key', 'Name', 'Description', 'IsPrivate']
readable_output = tableToMarkdown(
name=readable_name,
t=human_readable,
removeNull=True,
headers=headers
)
return CommandResults(
readable_output=readable_output,
outputs_prefix='Bitbucket.Project',
outputs=results,
raw_response=results
)
| def project_list_command(client: Client, args: Dict) -> CommandResults:
limit = int(args.get('limit', 50))
project_key = args.get('project_key')
page = arg_to_number(args.get('page', 1))
check_args(limit, page)
page_size = min(100, limit)
params = {
'page': page,
'pagelen': page_size
}
response = client.get_project_list_request(params, project_key)
if project_key:
results = [response]
readable_name = f'The information about project {project_key.upper()}'
else:
results = check_pagination(client, response, limit)
readable_name = f'List of projects in {client.workspace}'
human_readable = []
for value in results:
d = {'Key': value.get('key'),
'Name': value.get('name'),
'Description': value.get('description'),
'IsPrivate': value.get('is_private')}
human_readable.append(d)
headers = ['Key', 'Name', 'Description', 'IsPrivate']
readable_output = tableToMarkdown(
name=readable_name,
t=human_readable,
removeNull=True,
headers=headers
)
return CommandResults(
readable_output=readable_output,
outputs_prefix='Bitbucket.Project',
outputs=results,
raw_response=results
)
|
48,731 | def get_provider_info(conn_type_to_hook, connection_types):
"""
Retrieves provider information and adds information to hooks/connection
:param conn_type_to_hook: dictionary of mapping for connections -> hooks
:param connection_types: array of connection types
:return: dictionary metadata about all providers installed
"""
def ignore(_):
pass
def update_or_replace_connection_type(core_connection_types, provider_connection_types):
"""
Updates or replaces all core connection types with those that come from a provider.
If same connection type is found in core types it is replaced with the provider one,
otherwise the provider connection type is appended to the list.
:param core_connection_types: all core connection tepes
:type core_connection_types List[Tuple]
:param provider_connection_types: provider connection types
:type provider_connection_types List[Tuple]
:return: None
"""
for provider_connection_type in provider_connection_types:
for index, core_connection_type in enumerate(connection_types):
if core_connection_type[0] == provider_connection_type[0]:
connection_types[index] = provider_connection_type
break
core_connection_types.append(provider_connection_type)
try:
from airflow import providers
except ImportError:
print("No providers are available!")
return {}
providers_path = providers.__path__
providers_name = providers.__name__
provider_dict = {}
for (_, name, ispkg) in pkgutil.walk_packages(path=providers_path,
prefix=providers_name + ".",
onerror=ignore):
try:
if ispkg:
provider_info_module = importlib.import_module(".provider_info", package=name)
print("Adding provider info for {}".format(name))
conn_type_to_hook.update(provider_info_module.CONN_TYPE_TO_HOOK)
update_or_replace_connection_type(connection_types, provider_info_module.connection_types)
provider_metadata = {
'name': provider_info_module.PROVIDER_NAME,
'version': provider_info_module.PROVIDER_VERSION,
'url': provider_info_module.PROVIDER_URL,
'docs': provider_info_module.PROVIDER_DOCS,
}
provider_dict[provider_info_module.PROVIDER_NAME] = provider_metadata
print(provider_metadata)
except ModuleNotFoundError:
pass
except Exception as e: # noqa pylint: disable=broad-except
print("Provider {} could not be loaded because of {}".format(name, e))
return providers
| def get_provider_info(conn_type_to_hook, connection_types):
"""
Retrieves provider information and adds information to hooks/connection
:param conn_type_to_hook: dictionary of mapping for connections -> hooks
:param connection_types: array of connection types
:return: dictionary metadata about all providers installed
"""
def ignore(_):
pass
def update_or_replace_connection_type(core_connection_types, provider_connection_types):
"""
Updates or replaces all core connection types with those that come from a provider.
If same connection type is found in core types it is replaced with the provider one,
otherwise the provider connection type is appended to the list.
:param core_connection_types: all core connection tepes
:type core_connection_types List[Tuple]
:param provider_connection_types: provider connection types
:type provider_connection_types List[Tuple]
:return: None
"""
for provider_connection_type in provider_connection_types:
for index, core_connection_type in enumerate(connection_types):
if core_connection_type[0] == provider_connection_type[0]:
connection_types[index] = provider_connection_type
break
core_connection_types.append(provider_connection_type)
try:
from airflow import providers
except ImportError:
print("No providers are available!")
return {}
providers_path = providers.__path__
providers_name = providers.__name__
provider_dict = {}
for (_, name, ispkg) in pkgutil.walk_packages(path=providers_path,
prefix=providers_name + ".",
onerror=ignore):
if not ispkg:
continue
try:
provider_info_module = importlib.import_module(".provider_info", package=name)
print("Adding provider info for {}".format(name))
conn_type_to_hook.update(provider_info_module.CONN_TYPE_TO_HOOK)
update_or_replace_connection_type(connection_types, provider_info_module.connection_types)
provider_metadata = {
'name': provider_info_module.PROVIDER_NAME,
'version': provider_info_module.PROVIDER_VERSION,
'url': provider_info_module.PROVIDER_URL,
'docs': provider_info_module.PROVIDER_DOCS,
}
provider_dict[provider_info_module.PROVIDER_NAME] = provider_metadata
print(provider_metadata)
except ModuleNotFoundError:
pass
except Exception as e: # noqa pylint: disable=broad-except
print("Provider {} could not be loaded because of {}".format(name, e))
return providers
|
45,917 | def homography_warp(
patch_src: torch.Tensor,
src_homo_dst: torch.Tensor,
dsize: Tuple[int, int],
mode: str = 'bilinear',
padding_mode: str = 'zeros',
align_corners: bool = False,
normalized_coordinates: bool = True,
normalized_homography: bool = True,
) -> torch.Tensor:
r"""Warp image patches or tensors by normalized 2D homographies.
See :class:`~kornia.geometry.warp.HomographyWarper` for details.
Args:
patch_src: The image or tensor to warp. Should be from source of shape
if homography normalized :math:`(N, C, H, W)`.
if homography not normalized :math:`(B, C, H, W)`
src_homo_dst: The homography or stack of homographies from destination to source of shape
if homography normalized :math:`(N, 3, 3)`
if homography not normalized :math:`(B, 3, 3)`.
dsize:
if homography normalized: The height and width of the image to warp.
if homography not normalized: size of the output image (height, width).
mode: interpolation mode to calculate output values ``'bilinear'`` | ``'nearest'``.
padding_mode: padding mode for outside grid values ``'zeros'`` | ``'border'`` | ``'reflection'``.
align_corners: interpolation flag.
normalized_coordinates: Whether the homography assumes [-1, 1] normalized coordinates or not.
normalized_homography: show is homography normalized.
Return:
Patch sampled at locations from source to destination.
Example_1:
>>> input = torch.rand(1, 3, 32, 32)
>>> homography = torch.eye(3).view(1, 3, 3)
>>> output = homography_warp(input, homography, (32, 32))
Example_2
>>> img = torch.rand(1, 4, 5, 6)
>>> H = torch.eye(3)[None]
>>> out = homography_warp(img, H, (4, 2), align_corners=True, normalized_homography=False)
>>> print(out.shape)
torch.Size([1, 4, 4, 2])
"""
if normalized_homography:
if not src_homo_dst.device == patch_src.device:
raise TypeError(
"Patch and homography must be on the same device. \
Got patch.device: {} src_H_dst.device: {}.".format(
patch_src.device, src_homo_dst.device
)
)
height, width = dsize
grid = create_meshgrid(height, width, normalized_coordinates=normalized_coordinates)
warped_grid = warp_grid(grid, src_homo_dst)
return F.grid_sample(patch_src, warped_grid, mode=mode, padding_mode=padding_mode, align_corners=align_corners)
else:
mode = 'bilinear'
align_corners = True
if not isinstance(patch_src, torch.Tensor):
raise TypeError(f"Input src type is not a torch.Tensor. Got {type(patch_src)}")
if not isinstance(src_homo_dst, torch.Tensor):
raise TypeError(f"Input M type is not a torch.Tensor. Got {type(src_homo_dst)}")
if not len(patch_src.shape) == 4:
raise ValueError(f"Input src must be a BxCxHxW tensor. Got {patch_src.shape}")
if not (len(src_homo_dst.shape) == 3 and src_homo_dst.shape[-2:] == (3, 3)):
raise ValueError(f"Input M must be a Bx3x3 tensor. Got {src_homo_dst.shape}")
B, _, H, W = patch_src.size()
h_out, w_out = dsize
# we normalize the 3x3 transformation matrix and convert to 3x4
dst_norm_trans_src_norm: torch.Tensor = normalize_homography(src_homo_dst, (H, W), (h_out, w_out)) # Bx3x3
src_norm_trans_dst_norm = _torch_inverse_cast(dst_norm_trans_src_norm) # Bx3x3
# this piece of code substitutes F.affine_grid since it does not support 3x3
grid = (
create_meshgrid(h_out, w_out, normalized_coordinates=True, device=patch_src.device).to(patch_src.dtype).repeat(B, 1, 1, 1))
grid = transform_points(src_norm_trans_dst_norm[:, None, None], grid)
return F.grid_sample(patch_src, grid, align_corners=align_corners, mode=mode, padding_mode=padding_mode)
| def homography_warp(
patch_src: torch.Tensor,
src_homo_dst: torch.Tensor,
dsize: Tuple[int, int],
mode: str = 'bilinear',
padding_mode: str = 'zeros',
align_corners: bool = False,
normalized_coordinates: bool = True,
normalized_homography: bool = True,
) -> torch.Tensor:
r"""Warp image patches or tensors by normalized 2D homographies.
See :class:`~kornia.geometry.warp.HomographyWarper` for details.
Args:
patch_src: The image or tensor to warp. Should be from source of shape
if homography normalized :math:`(N, C, H, W)`.
if homography not normalized :math:`(B, C, H, W)`
src_homo_dst: The homography or stack of homographies from destination to source of shape
if homography normalized :math:`(N, 3, 3)`
if homography not normalized :math:`(B, 3, 3)`.
dsize:
if homography normalized: The height and width of the image to warp.
if homography not normalized: size of the output image (height, width).
mode: interpolation mode to calculate output values ``'bilinear'`` | ``'nearest'``.
padding_mode: padding mode for outside grid values ``'zeros'`` | ``'border'`` | ``'reflection'``.
align_corners: interpolation flag.
normalized_coordinates: Whether the homography assumes [-1, 1] normalized coordinates or not.
normalized_homography: show is homography normalized.
Return:
Patch sampled at locations from source to destination.
Example_1:
>>> input = torch.rand(1, 3, 32, 32)
>>> homography = torch.eye(3).view(1, 3, 3)
>>> output = homography_warp(input, homography, (32, 32))
Example_2
>>> img = torch.rand(1, 4, 5, 6)
>>> H = torch.eye(3)[None]
>>> out = homography_warp(img, H, (4, 2), align_corners=True, normalized_homography=False)
>>> print(out.shape)
torch.Size([1, 4, 4, 2])
"""
if normalized_homography:
if not src_homo_dst.device == patch_src.device:
raise TypeError(
"Patch and homography must be on the same device. \
Got patch.device: {} src_H_dst.device: {}.".format(
patch_src.device, src_homo_dst.device
)
)
height, width = dsize
grid = create_meshgrid(height, width, normalized_coordinates=normalized_coordinates, device=patch_src.device, dtype=patch_src.dtype)
warped_grid = warp_grid(grid, src_homo_dst)
return F.grid_sample(patch_src, warped_grid, mode=mode, padding_mode=padding_mode, align_corners=align_corners)
else:
mode = 'bilinear'
align_corners = True
if not isinstance(patch_src, torch.Tensor):
raise TypeError(f"Input src type is not a torch.Tensor. Got {type(patch_src)}")
if not isinstance(src_homo_dst, torch.Tensor):
raise TypeError(f"Input M type is not a torch.Tensor. Got {type(src_homo_dst)}")
if not len(patch_src.shape) == 4:
raise ValueError(f"Input src must be a BxCxHxW tensor. Got {patch_src.shape}")
if not (len(src_homo_dst.shape) == 3 and src_homo_dst.shape[-2:] == (3, 3)):
raise ValueError(f"Input M must be a Bx3x3 tensor. Got {src_homo_dst.shape}")
B, _, H, W = patch_src.size()
h_out, w_out = dsize
# we normalize the 3x3 transformation matrix and convert to 3x4
dst_norm_trans_src_norm: torch.Tensor = normalize_homography(src_homo_dst, (H, W), (h_out, w_out)) # Bx3x3
src_norm_trans_dst_norm = _torch_inverse_cast(dst_norm_trans_src_norm) # Bx3x3
# this piece of code substitutes F.affine_grid since it does not support 3x3
grid = (
create_meshgrid(h_out, w_out, normalized_coordinates=True, device=patch_src.device).to(patch_src.dtype).repeat(B, 1, 1, 1))
grid = transform_points(src_norm_trans_dst_norm[:, None, None], grid)
return F.grid_sample(patch_src, grid, align_corners=align_corners, mode=mode, padding_mode=padding_mode)
|
43,164 | def initialize(ip_config, num_servers=1, num_workers=0,
max_queue_size=MAX_QUEUE_SIZE, net_type='tensorpipe',
num_worker_threads=1):
"""Initialize DGL's distributed module
This function initializes DGL's distributed module. It acts differently in server
or client modes. In the server mode, it runs the server code and never returns.
In the client mode, it builds connections with servers for communication and
creates worker processes for distributed sampling. `num_workers` specifies
the number of sampling worker processes per trainer process.
Users also have to provide the number of server processes on each machine in order
to connect to all the server processes in the cluster of machines correctly.
Parameters
----------
ip_config: str
File path of ip_config file
num_servers : int
The number of server processes on each machine. This argument is deprecated in DGL 0.7.0.
num_workers: int
Number of worker process on each machine. The worker processes are used
for distributed sampling. This argument is deprecated in DGL 0.7.0.
max_queue_size : int
Maximal size (bytes) of client queue buffer (~20 GB on default).
Note that the 20 GB is just an upper-bound and DGL uses zero-copy and
it will not allocate 20GB memory at once.
net_type : str, optional
Networking type. Currently the valid option is ``'socket'`` or ``'tensorpipe'``.
Default: ``'tensorpipe'``
num_worker_threads: int
The number of threads in a worker process.
Note
----
Users have to invoke this API before any DGL's distributed API and framework-specific
distributed API. For example, when used with Pytorch, users have to invoke this function
before Pytorch's `pytorch.distributed.init_process_group`.
"""
if os.environ.get('DGL_ROLE', 'client') == 'server':
from .dist_graph import DistGraphServer
assert os.environ.get('DGL_SERVER_ID') is not None, \
'Please define DGL_SERVER_ID to run DistGraph server'
assert os.environ.get('DGL_IP_CONFIG') is not None, \
'Please define DGL_IP_CONFIG to run DistGraph server'
assert os.environ.get('DGL_NUM_SERVER') is not None, \
'Please define DGL_NUM_SERVER to run DistGraph server'
assert os.environ.get('DGL_NUM_CLIENT') is not None, \
'Please define DGL_NUM_CLIENT to run DistGraph server'
assert os.environ.get('DGL_CONF_PATH') is not None, \
'Please define DGL_CONF_PATH to run DistGraph server'
formats = os.environ.get('DGL_GRAPH_FORMAT', 'csc').split(',')
formats = [f.strip() for f in formats]
rpc.reset()
keep_alive = bool(int(os.environ.get('DGL_KEEP_ALIVE', 0)))
serv = DistGraphServer(int(os.environ.get('DGL_SERVER_ID')),
os.environ.get('DGL_IP_CONFIG'),
int(os.environ.get('DGL_NUM_SERVER')),
int(os.environ.get('DGL_NUM_CLIENT')),
os.environ.get('DGL_CONF_PATH'),
graph_format=formats,
keep_alive=keep_alive,
net_type=net_type)
serv.start()
sys.exit()
else:
if os.environ.get('DGL_NUM_SAMPLER') is not None:
num_workers = int(os.environ.get('DGL_NUM_SAMPLER'))
else:
num_workers = 0
if os.environ.get('DGL_NUM_SERVER') is not None:
num_servers = int(os.environ.get('DGL_NUM_SERVER'))
else:
num_servers = 1
group_id = int(os.environ.get('DGL_GROUP_ID', 0))
rpc.reset()
global SAMPLER_POOL
global NUM_SAMPLER_WORKERS
is_standalone = os.environ.get(
'DGL_DIST_MODE', 'standalone') == 'standalone'
if num_workers > 0 and not is_standalone:
SAMPLER_POOL = CustomPool(num_workers, (ip_config, num_servers, max_queue_size,
net_type, 'sampler', num_worker_threads,
group_id))
else:
SAMPLER_POOL = None
NUM_SAMPLER_WORKERS = num_workers
if not is_standalone:
assert num_servers is not None and num_servers > 0, \
'The number of servers per machine must be specified with a positive number.'
connect_to_server(ip_config, num_servers, max_queue_size, net_type, group_id=group_id)
init_role('default')
init_kvstore(ip_config, num_servers, 'default')
| def initialize(ip_config, num_servers=1, num_workers=0,
max_queue_size=MAX_QUEUE_SIZE, net_type='tensorpipe',
num_worker_threads=1):
"""Initialize DGL's distributed module
This function initializes DGL's distributed module. It acts differently in server
or client modes. In the server mode, it runs the server code and never returns.
In the client mode, it builds connections with servers for communication and
creates worker processes for distributed sampling. `num_workers` specifies
the number of sampling worker processes per trainer process.
Users also have to provide the number of server processes on each machine in order
to connect to all the server processes in the cluster of machines correctly.
Parameters
----------
ip_config: str
File path of ip_config file
num_servers : int
The number of server processes on each machine. This argument is deprecated in DGL 0.7.0.
num_workers: int
Number of worker process on each machine. The worker processes are used
for distributed sampling. This argument is deprecated in DGL 0.7.0.
max_queue_size : int
Maximal size (bytes) of client queue buffer (~20 GB on default).
Note that the 20 GB is just an upper-bound and DGL uses zero-copy and
it will not allocate 20GB memory at once.
net_type : str, optional
Networking type. Valid options are: ``'socket'``, ``'tensorpipe'``.
Default: ``'tensorpipe'``
num_worker_threads: int
The number of threads in a worker process.
Note
----
Users have to invoke this API before any DGL's distributed API and framework-specific
distributed API. For example, when used with Pytorch, users have to invoke this function
before Pytorch's `pytorch.distributed.init_process_group`.
"""
if os.environ.get('DGL_ROLE', 'client') == 'server':
from .dist_graph import DistGraphServer
assert os.environ.get('DGL_SERVER_ID') is not None, \
'Please define DGL_SERVER_ID to run DistGraph server'
assert os.environ.get('DGL_IP_CONFIG') is not None, \
'Please define DGL_IP_CONFIG to run DistGraph server'
assert os.environ.get('DGL_NUM_SERVER') is not None, \
'Please define DGL_NUM_SERVER to run DistGraph server'
assert os.environ.get('DGL_NUM_CLIENT') is not None, \
'Please define DGL_NUM_CLIENT to run DistGraph server'
assert os.environ.get('DGL_CONF_PATH') is not None, \
'Please define DGL_CONF_PATH to run DistGraph server'
formats = os.environ.get('DGL_GRAPH_FORMAT', 'csc').split(',')
formats = [f.strip() for f in formats]
rpc.reset()
keep_alive = bool(int(os.environ.get('DGL_KEEP_ALIVE', 0)))
serv = DistGraphServer(int(os.environ.get('DGL_SERVER_ID')),
os.environ.get('DGL_IP_CONFIG'),
int(os.environ.get('DGL_NUM_SERVER')),
int(os.environ.get('DGL_NUM_CLIENT')),
os.environ.get('DGL_CONF_PATH'),
graph_format=formats,
keep_alive=keep_alive,
net_type=net_type)
serv.start()
sys.exit()
else:
if os.environ.get('DGL_NUM_SAMPLER') is not None:
num_workers = int(os.environ.get('DGL_NUM_SAMPLER'))
else:
num_workers = 0
if os.environ.get('DGL_NUM_SERVER') is not None:
num_servers = int(os.environ.get('DGL_NUM_SERVER'))
else:
num_servers = 1
group_id = int(os.environ.get('DGL_GROUP_ID', 0))
rpc.reset()
global SAMPLER_POOL
global NUM_SAMPLER_WORKERS
is_standalone = os.environ.get(
'DGL_DIST_MODE', 'standalone') == 'standalone'
if num_workers > 0 and not is_standalone:
SAMPLER_POOL = CustomPool(num_workers, (ip_config, num_servers, max_queue_size,
net_type, 'sampler', num_worker_threads,
group_id))
else:
SAMPLER_POOL = None
NUM_SAMPLER_WORKERS = num_workers
if not is_standalone:
assert num_servers is not None and num_servers > 0, \
'The number of servers per machine must be specified with a positive number.'
connect_to_server(ip_config, num_servers, max_queue_size, net_type, group_id=group_id)
init_role('default')
init_kvstore(ip_config, num_servers, 'default')
|
27,726 | def test_files_by_name_are_correct(testdir):
testdir.maketxtfile("foo")
testdir.maketxtfile(custom="foobar")
assert len(testdir.created_files) == 2
| def test_files_by_name_are_correct(testdir):
testdir.maketxtfile("foo")
testdir.maketxtfile(custom="foobar")
assert testdir.created_files == {testdir.tmpdir.join("test_files_by_name_are_correct.py"), testdir.tmpdir.join("custom.py")}
|
39,702 | def main():
module = ForemanSubnetModule(
argument_spec=dict(
updated_name=dict(),
),
foreman_spec=dict(
name=dict(required=True),
description=dict(),
network_type=dict(choices=['IPv4', 'IPv6'], default='IPv4'),
dns_primary=dict(),
dns_secondary=dict(),
domains=dict(type='entity_list'),
gateway=dict(),
network=dict(required=True),
cidr=dict(type='int'),
mask=dict(),
from_ip=dict(flat_name='from'),
to_ip=dict(flat_name='to'),
boot_mode=dict(choices=['DHCP', 'Static'], default='DHCP'),
ipam=dict(choices=['DHCP', 'Internal DB', 'Random DB', 'EUI-64', 'None'], default='DHCP'),
dhcp_proxy=dict(type='entity', flat_name='dhcp_id', resource_type='smart_proxies'),
httpboot_proxy=dict(type='entity', flat_name='httpboot_id', resource_type='smart_proxies'),
tftp_proxy=dict(type='entity', flat_name='tftp_id', resource_type='smart_proxies'),
discovery_proxy=dict(type='entity', flat_name='discovery_id', resource_type='smart_proxies'),
dns_proxy=dict(type='entity', flat_name='dns_id', resource_type='smart_proxies'),
template_proxy=dict(type='entity', flat_name='template_id', resource_type='smart_proxies'),
remote_execution_proxies=dict(type='entity_list', resource_type='smart_proxies'),
vlanid=dict(type='int'),
mtu=dict(type='int'),
),
required_plugins=[('discovery', ['discovery_proxy'])],
)
if not HAS_IPADDRESS:
module.fail_json(msg=missing_required_lib("ipaddress"), exception=IPADDRESS_IMP_ERR)
module_params = module.foreman_params
if not module.desired_absent:
if module_params['network_type'] == 'IPv4':
if 'mask' not in module_params and 'cidr' not in module_params:
module.fail_json('When specifying IPv4 networks, either "mask" or "cidr" is required.')
IPNetwork = ipaddress.IPv4Network
else:
IPNetwork = ipaddress.IPv6Network
if 'mask' in module_params and 'cidr' not in module_params:
module_params['cidr'] = IPNetwork(u'%s/%s' % (module_params['network'], module_params['mask'])).prefixlen
elif 'mask' not in module_params and 'cidr' in module_params:
module_params['mask'] = str(IPNetwork(u'%s/%s' % (module_params['network'], module_params['cidr'])).netmask)
with module.api_connection():
module.run()
| def main():
module = ForemanSubnetModule(
argument_spec=dict(
updated_name=dict(),
),
foreman_spec=dict(
name=dict(required=True),
description=dict(),
network_type=dict(choices=['IPv4', 'IPv6'], default='IPv4'),
dns_primary=dict(),
dns_secondary=dict(),
domains=dict(type='entity_list'),
gateway=dict(),
network=dict(required=True),
cidr=dict(type='int'),
mask=dict(),
from_ip=dict(flat_name='from'),
to_ip=dict(flat_name='to'),
boot_mode=dict(choices=['DHCP', 'Static'], default='DHCP'),
ipam=dict(choices=['DHCP', 'Internal DB', 'Random DB', 'EUI-64', 'None'], default='DHCP'),
dhcp_proxy=dict(type='entity', flat_name='dhcp_id', resource_type='smart_proxies'),
httpboot_proxy=dict(type='entity', flat_name='httpboot_id', resource_type='smart_proxies'),
tftp_proxy=dict(type='entity', flat_name='tftp_id', resource_type='smart_proxies'),
discovery_proxy=dict(type='entity', flat_name='discovery_id', resource_type='smart_proxies'),
dns_proxy=dict(type='entity', flat_name='dns_id', resource_type='smart_proxies'),
template_proxy=dict(type='entity', flat_name='template_id', resource_type='smart_proxies'),
remote_execution_proxies=dict(type='entity_list', resource_type='smart_proxies'),
vlanid=dict(type='int'),
mtu=dict(type='int'),
),
required_plugins=[('discovery', ['discovery_proxy'])],
)
if not HAS_IPADDRESS:
module.fail_json(msg=missing_required_lib("ipaddress"), exception=IPADDRESS_IMP_ERR)
module_params = module.foreman_params
if not module.desired_absent:
if module_params['network_type'] == 'IPv4':
if 'mask' not in module_params and 'cidr' not in module_params:
module.fail_json(msg='When specifying IPv4 networks, either "mask" or "cidr" is required.')
IPNetwork = ipaddress.IPv4Network
else:
IPNetwork = ipaddress.IPv6Network
if 'mask' in module_params and 'cidr' not in module_params:
module_params['cidr'] = IPNetwork(u'%s/%s' % (module_params['network'], module_params['mask'])).prefixlen
elif 'mask' not in module_params and 'cidr' in module_params:
module_params['mask'] = str(IPNetwork(u'%s/%s' % (module_params['network'], module_params['cidr'])).netmask)
with module.api_connection():
module.run()
|
8,871 | def subreddit_sorting(bot, trigger, s, sorting):
if sorting == 'new':
submissions = list(s.new())
elif sorting == 'top':
submissions = list(s.top())
elif sorting == 'hot':
submissions = list(s.hot())
elif sorting == 'controversial':
submissions = list(s.controversial())
elif sorting == 'gilded':
submissions = list(s.gilded())
elif sorting == 'rising':
submissions = list(s.rising())
elif sorting == 'sticky':
try:
submissions = [s.sticky()]
except prawcore.exceptions.NotFound:
bot.say("r/" + s.display_name + " appears to not have a stickied post!")
return
elif sorting == 'random':
submissions = [s.random()] or []
else:
return
if not len(submissions):
bot.say("r/" + s.display_name + ' ' + sorting + " appears to have no items!")
return NOLIMIT
if sorting != 'sticky':
submissions_filter = []
for submission in submissions:
if not submission.stickied:
submissions_filter.append(submission)
submissions = submissions_filter
submission = submissions[0]
link = "https://reddit.com/r/" + s.display_name + "/comments/" + str(submission)
say_post_info(
bot, trigger, re.match(post_url, link).group(1), False, True)
| def subreddit_sorting(bot, trigger, s, sorting):
if sorting == 'new':
submissions = list(s.new())
elif sorting == 'top':
submissions = list(s.top())
elif sorting == 'hot':
submissions = list(s.hot())
elif sorting == 'controversial':
submissions = list(s.controversial())
elif sorting == 'gilded':
submissions = list(s.gilded())
elif sorting == 'rising':
submissions = list(s.rising())
elif sorting == 'sticky':
try:
submissions = [s.sticky()]
except prawcore.exceptions.NotFound:
bot.say("r/" + s.display_name + " appears to not have a stickied post!")
return
elif sorting == 'random':
submissions = [s.random()] or []
else:
raise ValueError("Unknown sorting type '%s'" % sorting)
if not len(submissions):
bot.say("r/" + s.display_name + ' ' + sorting + " appears to have no items!")
return NOLIMIT
if sorting != 'sticky':
submissions_filter = []
for submission in submissions:
if not submission.stickied:
submissions_filter.append(submission)
submissions = submissions_filter
submission = submissions[0]
link = "https://reddit.com/r/" + s.display_name + "/comments/" + str(submission)
say_post_info(
bot, trigger, re.match(post_url, link).group(1), False, True)
|
3,786 | def test_validate_index():
# The indexing tests in the official array API test suite test that the
# array object correctly handles the subset of indices that are required
# by the spec. But the NumPy array API implementation specifically
# disallows any index not required by the spec, via Array._validate_index.
# This test focuses on testing that non-valid indices are correctly
# rejected. See
# https://data-apis.org/array-api/latest/API_specification/indexing.html
# and the docstring of Array._validate_index for the exact indexing
# behavior that should be allowed. This does not test indices that are
# already invalid in NumPy itself because Array will generally just pass
# such indices directly to the underlying np.ndarray.
a = ones((3, 4))
# Out of bounds slices are not allowed
assert_raises(IndexError, lambda: a[:4])
assert_raises(IndexError, lambda: a[:-4])
assert_raises(IndexError, lambda: a[:3:-1])
assert_raises(IndexError, lambda: a[:-5:-1])
assert_raises(IndexError, lambda: a[3:])
assert_raises(IndexError, lambda: a[-4:])
assert_raises(IndexError, lambda: a[3::-1])
assert_raises(IndexError, lambda: a[-4::-1])
assert_raises(IndexError, lambda: a[...,:5])
assert_raises(IndexError, lambda: a[...,:-5])
assert_raises(IndexError, lambda: a[...,:4:-1])
assert_raises(IndexError, lambda: a[...,:-6:-1])
assert_raises(IndexError, lambda: a[...,4:])
assert_raises(IndexError, lambda: a[...,-5:])
assert_raises(IndexError, lambda: a[...,4::-1])
assert_raises(IndexError, lambda: a[...,-5::-1])
# Boolean indices cannot be part of a larger tuple index
assert_raises(IndexError, lambda: a[a[:,0]==1,0])
assert_raises(IndexError, lambda: a[a[:,0]==1,...])
assert_raises(IndexError, lambda: a[..., a[0]==1])
assert_raises(IndexError, lambda: a[[True, True, True]])
assert_raises(IndexError, lambda: a[(True, True, True),])
# Integer array indices are not allowed (except for 0-D)
idx = asarray([[0, 1]])
assert_raises(IndexError, lambda: a[idx])
assert_raises(IndexError, lambda: a[idx,])
assert_raises(IndexError, lambda: a[[0, 1]])
assert_raises(IndexError, lambda: a[(0, 1), (0, 1)])
assert_raises(IndexError, lambda: a[[0, 1]])
assert_raises(IndexError, lambda: a[np.array([[0, 1]])])
# np.newaxis is not allowed
assert_raises(IndexError, lambda: a[None])
assert_raises(IndexError, lambda: a[None, ...])
assert_raises(IndexError, lambda: a[..., None])
# Multiaxis indices must contain exactly as many indices as dimensions
assert_raises(IndexError, lambda: a[()])
assert_raises(IndexError, lambda: a[0,])
| def test_validate_index():
# The indexing tests in the official array API test suite test that the
# array object correctly handles the subset of indices that are required
# by the spec. But the NumPy array API implementation specifically
# disallows any index not required by the spec, via Array._validate_index.
# This test focuses on testing that non-valid indices are correctly
# rejected. See
# https://data-apis.org/array-api/latest/API_specification/indexing.html
# and the docstring of Array._validate_index for the exact indexing
# behavior that should be allowed. This does not test indices that are
# already invalid in NumPy itself because Array will generally just pass
# such indices directly to the underlying np.ndarray.
a = ones((3, 4))
# Out of bounds slices are not allowed
assert_raises(IndexError, lambda: a[:4])
assert_raises(IndexError, lambda: a[:-4])
assert_raises(IndexError, lambda: a[:3:-1])
assert_raises(IndexError, lambda: a[:-5:-1])
assert_raises(IndexError, lambda: a[3:])
assert_raises(IndexError, lambda: a[-4:])
assert_raises(IndexError, lambda: a[3::-1])
assert_raises(IndexError, lambda: a[-4::-1])
assert_raises(IndexError, lambda: a[...,:5])
assert_raises(IndexError, lambda: a[...,:-5])
assert_raises(IndexError, lambda: a[...,:4:-1])
assert_raises(IndexError, lambda: a[...,:-6:-1])
assert_raises(IndexError, lambda: a[...,4:])
assert_raises(IndexError, lambda: a[...,-5:])
assert_raises(IndexError, lambda: a[...,4::-1])
assert_raises(IndexError, lambda: a[...,-5::-1])
# Boolean indices cannot be part of a larger tuple index
assert_raises(IndexError, lambda: a[a[:,0]==1,0])
assert_raises(IndexError, lambda: a[a[:,0]==1,...])
assert_raises(IndexError, lambda: a[..., a[0]==1])
assert_raises(IndexError, lambda: a[[True, True, True]])
assert_raises(IndexError, lambda: a[(True, True, True),])
# Integer array indices are not allowed (except for 0-D)
idx = asarray([[0, 1]])
assert_raises(IndexError, lambda: a[idx])
assert_raises(IndexError, lambda: a[idx,])
assert_raises(IndexError, lambda: a[[0, 1]])
assert_raises(IndexError, lambda: a[(0, 1), (0, 1)])
assert_raises(IndexError, lambda: a[[0, 1]])
assert_raises(IndexError, lambda: a[np.array([[0, 1]])])
# np.newaxis is not allowed
assert_raises(IndexError, lambda: a[None])
assert_raises(IndexError, lambda: a[None, ...])
assert_raises(IndexError, lambda: a[..., None])
# Multiaxis indices must contain exactly as many indices as dimensions
assert_raises(IndexError, lambda: a[()])
assert_raises(IndexError, lambda: a[0])
|
54,336 | def check_multiple_raters(is_train, loader_params):
if any([isinstance(class_suffix, list) for class_suffix in loader_params["target_suffix"]]):
print(
"\nAnnotations from multiple raters will be used during model training, one annotation from one rater "
"randomly selected at each iteration.\n")
if not is_train:
print(
"\nERROR: Please provide only one annotation per class in 'target_suffix' when not training a model.\n")
exit()
| def check_multiple_raters(is_train, loader_params):
if any([isinstance(class_suffix, list) for class_suffix in loader_params["target_suffix"]]):
print(
"\nAnnotations from multiple raters will be used during model training, one annotation from one rater "
"randomly selected at each iteration.\n")
if not is_train:
print(
"\nERROR: Please provide only one annotation per class in 'target_suffix' when not training a model.\n")
exit()
|
30,021 | def rk4(derivs, y0, t):
"""
Integrate 1-D or N-D system of ODEs using 4-th order Runge-Kutta.
This is a toy implementation which may be useful if you find
yourself stranded on a system w/o scipy. Otherwise use
:func:`scipy.integrate`.
Example::
>>> ### 2D system
>>> def derivs(x):
... d1 = x[0] + 2*x[1]
... d2 = -3*x[0] + 4*x[1]
... return (d1, d2)
>>> dt = 0.0005
>>> t = arange(0.0, 2.0, dt)
>>> y0 = (1,2)
>>> yout = rk4(derivs6, y0, t)
If you have access to scipy, you should probably be using the
scipy.integrate tools rather than this function.
This would then require re-adding the time variable to the signature of derivs.
Args:
derivs: the derivative of the system and has the signature ``dy = derivs(yi)``
y0: initial state vector
t: sample times
Returns:
yout: Runge-Kutta approximation of the ODE
"""
try:
Ny = len(y0)
except TypeError:
yout = np.zeros((len(t),), np.float_)
else:
yout = np.zeros((len(t), Ny), np.float_)
yout[0] = y0
for i in np.arange(len(t) - 1):
this = t[i]
dt = t[i + 1] - this
dt2 = dt / 2.0
y0 = yout[i]
k1 = np.asarray(derivs(y0))
k2 = np.asarray(derivs(y0 + dt2 * k1))
k3 = np.asarray(derivs(y0 + dt2 * k2))
k4 = np.asarray(derivs(y0 + dt * k3))
yout[i + 1] = y0 + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)
# We only care about the final timestep and we cleave off action value which will be zero
return yout[-1][:4]
| def rk4(derivs, y0, t):
"""
Integrate 1-D or N-D system of ODEs using 4-th order Runge-Kutta.
This is a toy implementation which may be useful if you find
yourself stranded on a system w/o scipy. Otherwise use
:func:`scipy.integrate`.
Example::
>>> ### 2D system
>>> def derivs(x):
... d1 = x[0] + 2*x[1]
... d2 = -3*x[0] + 4*x[1]
... return (d1, d2)
>>> dt = 0.0005
>>> t = arange(0.0, 2.0, dt)
>>> y0 = (1,2)
>>> yout = rk4(derivs, y0, t)
If you have access to scipy, you should probably be using the
scipy.integrate tools rather than this function.
This would then require re-adding the time variable to the signature of derivs.
Args:
derivs: the derivative of the system and has the signature ``dy = derivs(yi)``
y0: initial state vector
t: sample times
Returns:
yout: Runge-Kutta approximation of the ODE
"""
try:
Ny = len(y0)
except TypeError:
yout = np.zeros((len(t),), np.float_)
else:
yout = np.zeros((len(t), Ny), np.float_)
yout[0] = y0
for i in np.arange(len(t) - 1):
this = t[i]
dt = t[i + 1] - this
dt2 = dt / 2.0
y0 = yout[i]
k1 = np.asarray(derivs(y0))
k2 = np.asarray(derivs(y0 + dt2 * k1))
k3 = np.asarray(derivs(y0 + dt2 * k2))
k4 = np.asarray(derivs(y0 + dt * k3))
yout[i + 1] = y0 + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)
# We only care about the final timestep and we cleave off action value which will be zero
return yout[-1][:4]
|
55,120 | def tape_text(
tape, wire_order=None, show_all_wires=False, decimals=None, max_length=100, tape_offset=None
):
"""Text based diagram for a Quantum Tape.
Args:
tape (QuantumTape): the operations and measurements to draw
Keyword Args:
wire_order (Sequence[Any]): the order (from top to bottom) to print the wires of the circuit
show_all_wires (bool): If True, all wires, including empty wires, are printed.
decimals (int): How many decimal points to include when formatting operation parameters.
Default ``None`` will omit parameters from operation labels.
decimals (Int) : how many decimal points to display in the operation label. If ``None``,
no parameters will be displayed.
max_length (Int) : Maximum length of a individual line. After this length, the diagram will
begin anew beneath the previous lines.
tape_offset (list[Int]): Used to offset numbering when labelling nested tapes. Used internally for
recursive calls.
Returns:
str : String based graphic of the circuit.
**Example:**
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.QFT(wires=(0,1,2))
qml.RX(1.234, wires=0)
qml.RY(1.234, wires=1)
qml.RZ(1.234, wires=2)
qml.Toffoli(wires=(0,1,"aux"))
qml.expval(qml.PauliZ("aux"))
qml.state()
>>> print(draw_text(tape))
0: ─╭QFT──RX─╭C─┤ State
1: ─├QFT──RY─├C─┤ State
2: ─╰QFT──RZ─│──┤ State
aux: ──────────╰X─┤ <Z> State
.. UsageDetails::
**Decimals:**
>>> print(draw_text(tape, decimals=2))
0: ─╭QFT──RX(1.23)─╭C─┤ State
1: ─├QFT──RY(1.23)─├C─┤ State
2: ─╰QFT──RZ(1.23)─│──┤ State
aux: ────────────────╰X─┤ <Z> State
**Max Length:**
The ``max_length`` keyword wraps long circuits
>>> rng = np.random.default_rng(seed=42)
>>> shape = qml.StronglyEntanglingLayers.shape(n_wires=5, n_layers=5)
>>> params = rng.random(shape)
>>> tape2 = qml.StronglyEntanglingLayers(params, wires=range(5)).expand()
>>> print(tape_text(tape2, max_length=60))
0: ──Rot─╭C──────────╭X──Rot─╭C───────╭X──Rot──────╭C────╭X
1: ──Rot─╰X─╭C───────│───Rot─│──╭C────│──╭X────Rot─│──╭C─│─
2: ──Rot────╰X─╭C────│───Rot─╰X─│──╭C─│──│─────Rot─│──│──╰C
3: ──Rot───────╰X─╭C─│───Rot────╰X─│──╰C─│─────Rot─╰X─│────
4: ──Rot──────────╰X─╰C──Rot───────╰X────╰C────Rot────╰X───
───Rot───────────╭C─╭X──Rot──────╭C──────────────╭X─┤
──╭X────Rot──────│──╰C─╭X────Rot─╰X───╭C─────────│──┤
──│────╭X────Rot─│─────╰C───╭X────Rot─╰X───╭C────│──┤
──╰C───│─────Rot─│──────────╰C───╭X────Rot─╰X─╭C─│──┤
───────╰C────Rot─╰X──────────────╰C────Rot────╰X─╰C─┤
**Wire Order:**
>>> print(tape_text(tape, wire_order=["aux", 2, 1, 0]))
aux: ──────────╭X─┤ <Z> State
2: ─╭QFT──RZ─│──┤ State
1: ─├QFT──RY─├C─┤ State
0: ─╰QFT──RX─╰C─┤ State
**Show all wires:**
>>> print(tape_text(tape, wire_order=["a", "b", "aux", 0,1,2], show_all_wires=True))
a: ─────────────┤ State
b: ─────────────┤ State
aux: ──────────╭X─┤ <Z> State
0: ─╭QFT──RX─├C─┤ State
1: ─├QFT──RY─╰C─┤ State
2: ─╰QFT──RZ────┤ State
"""
if tape_offset is None:
tape_offset = [0] # use a list so it's a mutable data structure
tape_cache = []
wire_map = convert_wire_order(
tape.operations + tape.measurements, wire_order=wire_order, show_all_wires=show_all_wires
)
n_wires = len(wire_map)
if n_wires == 0:
return ""
totals = [f"{wire}: " for wire in wire_map]
line_length = max(len(s) for s in totals)
totals = [s.rjust(line_length, " ") for s in totals]
# Used to store lines that are hitting the maximum length
finished_lines = []
layers_list = [
drawable_layers(tape.operations, wire_map=wire_map),
drawable_layers(tape.measurements, wire_map=wire_map),
]
add_list = [_add_op, _add_measurement]
fillers = ["─", " "]
enders = [True, False] # add "─┤" after all operations
for layers, add, filler, ender in zip(layers_list, add_list, fillers, enders):
for layer in layers:
layer_str = [filler] * n_wires
for op in layer:
# Currently can't use `isinstance(op, QuantumTape)` due to circular imports
if hasattr(op, "measurements"): # isa tape
layer_str = _add_grouping_symbols(op, layer_str, wire_map)
label = f"Tape:{tape_offset[0]+len(tape_cache)}"
for w in op.wires:
layer_str[wire_map[w]] += label
tape_cache.append(op)
else:
layer_str = add(op, layer_str, wire_map, decimals)
max_label_len = max(len(s) for s in layer_str)
layer_str = [s.ljust(max_label_len, filler) for s in layer_str]
line_length += max_label_len + 1 # one for the filler character
if line_length > max_length:
# move totals into finished_lines and reset totals
finished_lines += totals
finished_lines[-1] += "\n"
totals = [filler] * n_wires
line_length = 1 + max_label_len
totals = [filler.join([t, s]) for t, s in zip(totals, layer_str)]
if ender:
totals = [s + "─┤" for s in totals]
# Recursively handle nested tapes #
tape_totals = "\n".join(finished_lines + totals)
current_tape_offset = tape_offset[0]
tape_offset[0] += len(tape_cache)
for i, nested_tape in enumerate(tape_cache):
label = f"\nTape:{i+current_tape_offset}"
tape_str = tape_text(
nested_tape, wire_order, show_all_wires, decimals, max_length, tape_offset
)
tape_totals = "\n".join([tape_totals, label, tape_str])
return tape_totals
| def tape_text(
tape, wire_order=None, show_all_wires=False, decimals=None, max_length=100, tape_offset=None
):
"""Text based diagram for a Quantum Tape.
Args:
tape (QuantumTape): the operations and measurements to draw
Keyword Args:
wire_order (Sequence[Any]): the order (from top to bottom) to print the wires of the circuit
show_all_wires (bool): If True, all wires, including empty wires, are printed.
decimals (int): How many decimal points to include when formatting operation parameters.
Default ``None`` will omit parameters from operation labels.
max_length (Int) : Maximum length of a individual line. After this length, the diagram will
begin anew beneath the previous lines.
tape_offset (list[Int]): Used to offset numbering when labelling nested tapes. Used internally for
recursive calls.
Returns:
str : String based graphic of the circuit.
**Example:**
.. code-block:: python
with qml.tape.QuantumTape() as tape:
qml.QFT(wires=(0,1,2))
qml.RX(1.234, wires=0)
qml.RY(1.234, wires=1)
qml.RZ(1.234, wires=2)
qml.Toffoli(wires=(0,1,"aux"))
qml.expval(qml.PauliZ("aux"))
qml.state()
>>> print(draw_text(tape))
0: ─╭QFT──RX─╭C─┤ State
1: ─├QFT──RY─├C─┤ State
2: ─╰QFT──RZ─│──┤ State
aux: ──────────╰X─┤ <Z> State
.. UsageDetails::
**Decimals:**
>>> print(draw_text(tape, decimals=2))
0: ─╭QFT──RX(1.23)─╭C─┤ State
1: ─├QFT──RY(1.23)─├C─┤ State
2: ─╰QFT──RZ(1.23)─│──┤ State
aux: ────────────────╰X─┤ <Z> State
**Max Length:**
The ``max_length`` keyword wraps long circuits
>>> rng = np.random.default_rng(seed=42)
>>> shape = qml.StronglyEntanglingLayers.shape(n_wires=5, n_layers=5)
>>> params = rng.random(shape)
>>> tape2 = qml.StronglyEntanglingLayers(params, wires=range(5)).expand()
>>> print(tape_text(tape2, max_length=60))
0: ──Rot─╭C──────────╭X──Rot─╭C───────╭X──Rot──────╭C────╭X
1: ──Rot─╰X─╭C───────│───Rot─│──╭C────│──╭X────Rot─│──╭C─│─
2: ──Rot────╰X─╭C────│───Rot─╰X─│──╭C─│──│─────Rot─│──│──╰C
3: ──Rot───────╰X─╭C─│───Rot────╰X─│──╰C─│─────Rot─╰X─│────
4: ──Rot──────────╰X─╰C──Rot───────╰X────╰C────Rot────╰X───
───Rot───────────╭C─╭X──Rot──────╭C──────────────╭X─┤
──╭X────Rot──────│──╰C─╭X────Rot─╰X───╭C─────────│──┤
──│────╭X────Rot─│─────╰C───╭X────Rot─╰X───╭C────│──┤
──╰C───│─────Rot─│──────────╰C───╭X────Rot─╰X─╭C─│──┤
───────╰C────Rot─╰X──────────────╰C────Rot────╰X─╰C─┤
**Wire Order:**
>>> print(tape_text(tape, wire_order=["aux", 2, 1, 0]))
aux: ──────────╭X─┤ <Z> State
2: ─╭QFT──RZ─│──┤ State
1: ─├QFT──RY─├C─┤ State
0: ─╰QFT──RX─╰C─┤ State
**Show all wires:**
>>> print(tape_text(tape, wire_order=["a", "b", "aux", 0,1,2], show_all_wires=True))
a: ─────────────┤ State
b: ─────────────┤ State
aux: ──────────╭X─┤ <Z> State
0: ─╭QFT──RX─├C─┤ State
1: ─├QFT──RY─╰C─┤ State
2: ─╰QFT──RZ────┤ State
"""
if tape_offset is None:
tape_offset = [0] # use a list so it's a mutable data structure
tape_cache = []
wire_map = convert_wire_order(
tape.operations + tape.measurements, wire_order=wire_order, show_all_wires=show_all_wires
)
n_wires = len(wire_map)
if n_wires == 0:
return ""
totals = [f"{wire}: " for wire in wire_map]
line_length = max(len(s) for s in totals)
totals = [s.rjust(line_length, " ") for s in totals]
# Used to store lines that are hitting the maximum length
finished_lines = []
layers_list = [
drawable_layers(tape.operations, wire_map=wire_map),
drawable_layers(tape.measurements, wire_map=wire_map),
]
add_list = [_add_op, _add_measurement]
fillers = ["─", " "]
enders = [True, False] # add "─┤" after all operations
for layers, add, filler, ender in zip(layers_list, add_list, fillers, enders):
for layer in layers:
layer_str = [filler] * n_wires
for op in layer:
# Currently can't use `isinstance(op, QuantumTape)` due to circular imports
if hasattr(op, "measurements"): # isa tape
layer_str = _add_grouping_symbols(op, layer_str, wire_map)
label = f"Tape:{tape_offset[0]+len(tape_cache)}"
for w in op.wires:
layer_str[wire_map[w]] += label
tape_cache.append(op)
else:
layer_str = add(op, layer_str, wire_map, decimals)
max_label_len = max(len(s) for s in layer_str)
layer_str = [s.ljust(max_label_len, filler) for s in layer_str]
line_length += max_label_len + 1 # one for the filler character
if line_length > max_length:
# move totals into finished_lines and reset totals
finished_lines += totals
finished_lines[-1] += "\n"
totals = [filler] * n_wires
line_length = 1 + max_label_len
totals = [filler.join([t, s]) for t, s in zip(totals, layer_str)]
if ender:
totals = [s + "─┤" for s in totals]
# Recursively handle nested tapes #
tape_totals = "\n".join(finished_lines + totals)
current_tape_offset = tape_offset[0]
tape_offset[0] += len(tape_cache)
for i, nested_tape in enumerate(tape_cache):
label = f"\nTape:{i+current_tape_offset}"
tape_str = tape_text(
nested_tape, wire_order, show_all_wires, decimals, max_length, tape_offset
)
tape_totals = "\n".join([tape_totals, label, tape_str])
return tape_totals
|
25,177 | def infer_typing_newtype(node, context_itton=None):
"""Infer a typing.TypeVar(...) or typing.NewType(...) call"""
try:
func = next(node.func.infer(context=context_itton))
except (InferenceError, StopIteration) as exc:
raise UseInferenceDefault from exc
if func.qname() != "typing.NewType":
raise UseInferenceDefault
if not node.args:
raise UseInferenceDefault
derived = node.args[0].as_string().strip("'")
base = node.args[1].as_string().strip("'")
node = extract_node(TYPING_NEWTYPE_TEMPLATE.format(derived=derived, base=base))
return node.infer(context=context_itton)
| def infer_typing_newtype(node, context_itton=None):
"""Infer a typing.NewType(...) call"""
try:
func = next(node.func.infer(context=context_itton))
except (InferenceError, StopIteration) as exc:
raise UseInferenceDefault from exc
if func.qname() != "typing.NewType":
raise UseInferenceDefault
if not node.args:
raise UseInferenceDefault
derived = node.args[0].as_string().strip("'")
base = node.args[1].as_string().strip("'")
node = extract_node(TYPING_NEWTYPE_TEMPLATE.format(derived=derived, base=base))
return node.infer(context=context_itton)
|
34,432 | def test_not_importing_conversation_tests_in_project(tmpdir_factory):
root = tmpdir_factory.mktemp("Parent Bot")
config = {"imports": ["bots/Bot A"]}
config_path = str(root / "config.yml")
utils.dump_obj_as_yaml_to_file(config_path, config)
story_data_file = root / "bots" / "Bot A" / "data" / "stories.md"
story_data_file.write("""## story""", ensure=True)
conversation_tests_file = (
root / "bots" / "Bot A" / "tests" / "conversation_tests.md"
)
conversation_tests_file.write(
"""## story test""", ensure=True,
)
selector = MultiProjectImporter(config_path)
# Conversation tests should not be included in story paths
expected = [str(story_data_file)]
actual = selector._story_paths
assert expected == actual
| def test_not_importing_conversation_tests_in_project(tmpdir_factory):
root = tmpdir_factory.mktemp("Parent Bot")
config = {"imports": ["bots/Bot A"]}
config_path = str(root / "config.yml")
utils.dump_obj_as_yaml_to_file(config_path, config)
story_data_file = root / "bots" / "Bot A" / "data" / "stories.md"
story_data_file.write("""## story""", ensure=True)
conversation_tests_file = (
root / "bots" / "Bot A" / DEFAULT_E2E_TESTS_PATH / "conversation_tests.md"
)
conversation_tests_file.write(
"""## story test""", ensure=True,
)
selector = MultiProjectImporter(config_path)
# Conversation tests should not be included in story paths
expected = [str(story_data_file)]
actual = selector._story_paths
assert expected == actual
|
32,310 | def test_module(get_events: DefenderGetEvents) -> str:
"""Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type get_events: ``DefenderGetEvents``
:param get_events: the get_events instance
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str``
"""
message: str = ''
try:
get_events.client.request.params = {'limit': 1}
get_events.run()
message = 'ok'
except DemistoException as e:
if 'Forbidden' in str(e) or 'authenticate' in str(e):
message = AUTH_ERROR_MSG
else:
raise e
return message
| def test_module(get_events: DefenderGetEvents) -> str:
"""Tests API connectivity and authentication'
Returning 'ok' indicates that the integration works like it is supposed to.
Connection to the service is successful.
Raises exceptions if something goes wrong.
:type get_events: ``DefenderGetEvents``
:param get_events: the get_events instance
:return: 'ok' if test passed, anything else will fail the test.
:rtype: ``str``
"""
message: str = ''
try:
get_events.client.request.params = {'limit': 1}
get_events.run()
message = 'ok'
except DemistoException as e:
if 'Forbidden' in str(e) or 'authenticate' in str(e):
message = AUTH_ERROR_MSG
else:
raise
return message
|
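The only change in this pair is re-raising with a bare `raise`. A small self-contained sketch, unrelated to the Defender client, showing that the bare form re-raises the active exception with its original traceback:

import traceback

def inner():
    raise ValueError("boom")

def outer():
    try:
        inner()
    except ValueError:
        # a bare `raise` re-raises the active exception with its original
        # traceback; `raise e` works too but is redundant inside an except block
        raise

try:
    outer()
except ValueError:
    traceback.print_exc()  # the traceback still points into inner()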
39,026 | def timedelta_isoformat(td: datetime.timedelta) -> str:
"""
ISO 8601 encoding for time deltas.
"""
minutes, seconds = divmod(td.seconds, 60)
hours, minutes = divmod(minutes, 60)
return f'{"-" if td.days < 0 else ""}P{abs(td.days)}DT{hours:d}H{minutes:d}M{seconds:d}.{td.microseconds:06d}S'
| def timedelta_isoformat(td: datetime.timedelta) -> str:
"""
ISO 8601 encoding for Python timedelta object.
"""
minutes, seconds = divmod(td.seconds, 60)
hours, minutes = divmod(minutes, 60)
return f'{"-" if td.days < 0 else ""}P{abs(td.days)}DT{hours:d}H{minutes:d}M{seconds:d}.{td.microseconds:06d}S'
|
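A quick standalone check of the encoding above, copying the function so the snippet runs without the surrounding module:

import datetime

def timedelta_isoformat(td: datetime.timedelta) -> str:
    minutes, seconds = divmod(td.seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return f'{"-" if td.days < 0 else ""}P{abs(td.days)}DT{hours:d}H{minutes:d}M{seconds:d}.{td.microseconds:06d}S'

print(timedelta_isoformat(datetime.timedelta(days=1, hours=2, minutes=30, seconds=5, microseconds=123)))
# P1DT2H30M5.000123S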
31,840 | def checkpoint_add_objects_batch_command(client: Client, object_type: str, ipaddress, name):
printable_result = {}
readable_output = ''
ipaddress = argToList(ipaddress, ',')
name = argToList(name, ',')
add_list = []
for ip, n in zip(ipaddress, name):
tmp_dict = {'name': n, 'ip-address': ip}
add_list.append(tmp_dict)
result = current_result = client.add_objects_batch(object_type, add_list)
if result:
printable_result = {'task-id': result.get('task-id')}
readable_output = tableToMarkdown('CheckPoint data for add-objects-batch command:',
printable_result)
command_results = CommandResults(
outputs_prefix='CheckPoint.add_objects_batch',
outputs_key_field='uid',
readable_output=readable_output,
outputs=printable_result,
raw_response=result
)
return command_results
| def checkpoint_add_objects_batch_command(client: Client, object_type: str, ipaddress, name):
context_data = {}
readable_output = ''
ipaddress = argToList(ipaddress, ',')
name = argToList(name, ',')
add_list = []
for ip, n in zip(ipaddress, name):
tmp_dict = {'name': n, 'ip-address': ip}
add_list.append(tmp_dict)
result = current_result = client.add_objects_batch(object_type, add_list)
if result:
        context_data = {'task-id': result.get('task-id')}
        readable_output = tableToMarkdown('CheckPoint data for add-objects-batch command:',
                                          context_data)
    command_results = CommandResults(
        outputs_prefix='CheckPoint.add_objects_batch',
        outputs_key_field='uid',
        readable_output=readable_output,
        outputs=context_data,
raw_response=result
)
return command_results
|
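The batch payload above is built by pairing two comma-separated argument lists. A minimal pure-Python sketch of that pairing (`argToList` is an XSOAR helper, so a plain split stands in for it here):

def build_add_list(ipaddress: str, name: str) -> list:
    ips = [p.strip() for p in ipaddress.split(',') if p.strip()]
    names = [p.strip() for p in name.split(',') if p.strip()]
    # zip silently truncates to the shorter list, mirroring the command's
    # behaviour when the two arguments have different lengths
    return [{'name': n, 'ip-address': ip} for ip, n in zip(ips, names)]

print(build_add_list('1.1.1.1, 2.2.2.2', 'host-a, host-b'))
# [{'name': 'host-a', 'ip-address': '1.1.1.1'}, {'name': 'host-b', 'ip-address': '2.2.2.2'}]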
32,087 | def main(): # pragma: no cover
# """
# Executes an integration command
# """
params = demisto.params()
Client.severity = params.get('severity', '').upper()
Client.query = params.get('query', Client.query)
Client.tag = params.get('feedTags', params.get('tag', Client.tag))
Client.tlp_color = params.get('tlp_color')
client = Client(params)
commands = {
'test-module': module_test,
'xdr-iocs-enable': iocs_command,
'xdr-iocs-disable': iocs_command,
'xdr-iocs-push': tim_insert_jsons,
}
command = demisto.command()
try:
if command == 'fetch-indicators':
fetch_indicators(client, params.get('autoSync', False))
elif command == 'xdr-iocs-set-sync-time':
set_sync_time(demisto.args()['time'])
elif command == 'xdr-iocs-create-sync-file':
get_sync_file()
elif command in commands:
commands[command](client)
elif command == 'xdr-iocs-sync':
xdr_iocs_sync_command(client, demisto.args().get('firstTime') == 'true')
else:
raise NotImplementedError(command)
except Exception as error:
return_error(str(error), error)
| def main(): # pragma: no cover
# """
# Executes an integration command
# """
params = demisto.params()
Client.severity = params.get('severity', '').upper()
Client.query = params.get('query', Client.query)
Client.tag = params.get('feedTags', params.get('tag', Client.tag))
Client.tlp_color = params.get('tlp_color')
client = Client(params)
commands = {
'test-module': module_test,
'xdr-iocs-enable': iocs_command,
'xdr-iocs-disable': iocs_command,
'xdr-iocs-push': tim_insert_jsons,
}
command = demisto.command()
try:
if command == 'fetch-indicators':
fetch_indicators(client, params.get('autoSync', False))
elif command == 'xdr-iocs-set-sync-time':
set_sync_time(demisto.args()['time'])
elif command == 'xdr-iocs-create-sync-file':
get_sync_file()
elif command in commands:
commands[command](client)
elif command == 'xdr-iocs-sync':
xdr_iocs_sync_command(client, demisto.args().get('firstTime') == 'true')
else:
raise NotImplementedError(command)
except Exception as error:
return_error(str(error), error)
|
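Most commands above are routed through a dict of handlers. A minimal sketch of that dispatch pattern, with hypothetical handler names:

def handle_enable(args: dict) -> str:
    return 'enabled'

def handle_disable(args: dict) -> str:
    return 'disabled'

COMMANDS = {
    'enable': handle_enable,
    'disable': handle_disable,
}

def dispatch(command: str, args: dict):
    # unknown commands fail loudly, as in the integration's main()
    if command in COMMANDS:
        return COMMANDS[command](args)
    raise NotImplementedError(command)

print(dispatch('enable', {}))  # enabled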
22,269 | def key_type_token_mapper(key, key_type, token, route_extra, url):
global db_conn
# print 'key %s key_type %s token %s route_extra %s url %s\n' % (key, key_type, token, route_extra, url)
if key and key_type and token:
# sqlite3.ProgrammingError: SQLite objects created in a thread can only be used in that same thread. The object was created in thread id x and this is thread id y.
        # So try up to 2 times
for i in range(2):
# Order by rowid gives us the last row added
try:
row = db_conn.execute("select host, port from %s where key=? and key_type=? and token=? order by rowid desc limit 1" % (DATABASE_TABLE_NAME), (key, key_type, token)).fetchone()
if row:
rval = '%s:%s' % (tuple(row))
return rval.encode()
break
except sqlite3.ProgrammingError:
db_conn = sqlite3.connect(realtime_db_file)
continue
break
return None
| def key_type_token_mapper(key, key_type, token, route_extra, url):
global db_conn
# print 'key %s key_type %s token %s route_extra %s url %s\n' % (key, key_type, token, route_extra, url)
if key and key_type and token:
# sqlite3.ProgrammingError: SQLite objects created in a thread can only be used in that same thread. The object was created in thread id x and this is thread id y.
        # So try up to 2 times
for i in range(2):
# Order by rowid gives us the last row added
try:
row = db_conn.execute("SELECT host, port FROM %s WHERE key=? AND key_type=? AND token=? ORDER BY rowid DESC LIMIT 1" % (DATABASE_TABLE_NAME), (key, key_type, token)).fetchone()
if row:
rval = '%s:%s' % (tuple(row))
return rval.encode()
break
except sqlite3.ProgrammingError:
db_conn = sqlite3.connect(realtime_db_file)
continue
break
return None
|
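The lookup above formats in only the table name and binds the values with `?` placeholders. A self-contained sketch of the same pattern against an in-memory database (table and column names invented for illustration):

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute("CREATE TABLE routes (key TEXT, key_type TEXT, token TEXT, host TEXT, port INTEGER)")
conn.execute("INSERT INTO routes VALUES (?, ?, ?, ?, ?)", ('k1', 'session', 't1', '127.0.0.1', 8080))

# table names cannot be bound as parameters, so they are formatted in;
# user-supplied values always go through `?` placeholders
table = 'routes'
row = conn.execute(
    "SELECT host, port FROM %s WHERE key=? AND key_type=? AND token=? ORDER BY rowid DESC LIMIT 1" % table,
    ('k1', 'session', 't1'),
).fetchone()
print('%s:%s' % tuple(row))  # 127.0.0.1:8080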
17,648 | def _get_flexible_source_candidates_for_submodule(ds, sm):
"""Assemble candidates from where to install a submodule
Even if a URL for submodule is provided explicitly -- first tries urls under
parent's module tracking branch remote.
Additional candidate URLs can be generated based on templates specified as
configuration variables with the pattern
`datalad.get.subdataset-source-candidate-<name>`
where `name` is an arbitrary identifier.
A template string assigned to such a variable can utilize the Python format
mini language and may reference a number of properties that are inferred
from the parent dataset's knowledge about the target subdataset. Properties
include any submodule property specified in the respective .gitmodules
record. For convenience, an existing `datalad-id` record is made available
under the shortened name `id`.
Additionally, the URL of any configured remote that contains the respective
submodule commit is available as `remote-<name>` properties, where `name`
is the configured remote name.
Parameters
----------
ds : Dataset
Parent dataset of to-be-installed subdataset.
sm : dict
Submodule record as produced by `subdatasets()`.
Returns
-------
list of tuples
    Where each tuple consists of a name and a URL. Names are not unique
and either derived from the name of the respective remote, template
configuration variable, or 'origin' for the candidate URL that was
obtained from the .gitmodule record.
"""
# short cuts
ds_repo = ds.repo
sm_url = sm.get('gitmodule_url', None)
sm_path = op.relpath(sm['path'], start=sm['parentds'])
clone_urls = []
# CANDIDATE: tracking remote of the current branch
tracking_remote, tracking_branch = ds_repo.get_tracking_branch()
candidate_remotes = [tracking_remote] if tracking_remote else []
# if we have a remote, let's check the location of that remote
# for the presence of the desired submodule
last_commit = ds_repo.get_last_commit_hexsha(sm_path)
if last_commit:
# CANDIDATE: any remote that has the commit when the submodule was
# last modified
# ideally should also give preference to the remotes which have
# the same branch checked out I guess
candidate_remotes += list(_get_remotes_having_commit(ds_repo, last_commit))
# prepare a dict to generate URL candidates from templates
sm_candidate_props = {
k[10:].replace('datalad-id', 'id'): v
for k, v in sm.items()
if k.startswith('gitmodule_')
}
for remote in unique(candidate_remotes):
remote_url = ds_repo.get_remote_url(remote, push=False)
# Directly on parent's ds url
if remote_url:
# make remotes and their URLs available to template rendering
sm_candidate_props['remoteurl-{}'.format(remote)] = remote_url
# attempt: submodule checkout at parent remote URL
# We might need to quote sm_path portion, e.g. for spaces etc
if isinstance(RI(remote_url), URL):
sm_path_url = urlquote(sm_path)
else:
sm_path_url = sm_path
clone_urls.extend(
('50{}'.format(remote), url)
for url in _get_flexible_source_candidates(
# alternate suffixes are tested by `clone` anyways
sm_path_url, remote_url, alternate_suffix=False)
)
# attempt: provided (configured?) submodule URL
# TODO: consider supporting DataLadRI here? or would confuse
# git and we wouldn't want that (i.e. not allow pure git clone
# --recursive)
if sm_url:
clone_urls.extend(
('60{}'.format(remote), url)
for url in _get_flexible_source_candidates(
sm_url,
remote_url,
alternate_suffix=False)
)
prio_candidate_expr = re.compile('[0-9][0-9].*')
for name, tmpl in [(c[40:], ds_repo.config[c])
for c in ds_repo.config.keys()
if c.startswith(
'datalad.get.subdataset-source-candidate-')]:
url = tmpl.format(**sm_candidate_props)
# we don't want "flexible_source_candidates" here, this is
# configuration that can be made arbitrarily precise from the
# outside. Additional guesswork can only make it slower
clone_urls.append((
# assign a default priority, if a config doesn't have one
name if prio_candidate_expr.match(name) else '70{}'.format(name),
url))
# CANDIDATE: the actual configured gitmodule URL
if sm_url:
clone_urls.extend(
('99local', url)
for url in _get_flexible_source_candidates(
sm_url,
ds.path,
alternate_suffix=False)
# avoid inclusion of submodule location itself
if url != sm['path']
)
# sort all candidates by there label, thereby allowing
# candidate provided by configuration to purposefully
# sort before or after automatically generated configuration
clone_urls = sorted(clone_urls, key=lambda x: x[0])
# take out any duplicate source candidates
# unique() takes out the duplicated at the tail end
clone_urls = unique(clone_urls, lambda x: x[1])
return clone_urls
| def _get_flexible_source_candidates_for_submodule(ds, sm):
"""Assemble candidates from where to install a submodule
Even if a URL for submodule is provided explicitly -- first tries urls under
parent's module tracking branch remote.
Additional candidate URLs can be generated based on templates specified as
configuration variables with the pattern
`datalad.get.subdataset-source-candidate-<name>`
where `name` is an arbitrary identifier.
A template string assigned to such a variable can utilize the Python format
mini language and may reference a number of properties that are inferred
from the parent dataset's knowledge about the target subdataset. Properties
include any submodule property specified in the respective .gitmodules
record. For convenience, an existing `datalad-id` record is made available
under the shortened name `id`.
Additionally, the URL of any configured remote that contains the respective
submodule commit is available as `remote-<name>` properties, where `name`
is the configured remote name.
Parameters
----------
ds : Dataset
Parent dataset of to-be-installed subdataset.
sm : dict
Submodule record as produced by `subdatasets()`.
Returns
-------
list of tuples
    Where each tuple consists of a name and a URL. Names are not unique
and either derived from the name of the respective remote, template
configuration variable, or 'origin' for the candidate URL that was
obtained from the .gitmodule record.
"""
# short cuts
ds_repo = ds.repo
sm_url = sm.get('gitmodule_url', None)
sm_path = op.relpath(sm['path'], start=sm['parentds'])
clone_urls = []
# CANDIDATE: tracking remote of the current branch
tracking_remote, tracking_branch = ds_repo.get_tracking_branch()
candidate_remotes = [tracking_remote] if tracking_remote else []
# if we have a remote, let's check the location of that remote
# for the presence of the desired submodule
last_commit = ds_repo.get_last_commit_hexsha(sm_path)
if last_commit:
# CANDIDATE: any remote that has the commit when the submodule was
# last modified
# ideally should also give preference to the remotes which have
# the same branch checked out I guess
candidate_remotes += list(_get_remotes_having_commit(ds_repo, last_commit))
# prepare a dict to generate URL candidates from templates
sm_candidate_props = {
k[10:].replace('datalad-id', 'id'): v
for k, v in sm.items()
if k.startswith('gitmodule_')
}
for remote in unique(candidate_remotes):
remote_url = ds_repo.get_remote_url(remote, push=False)
# Directly on parent's ds url
if remote_url:
# make remotes and their URLs available to template rendering
sm_candidate_props['remoteurl-{}'.format(remote)] = remote_url
# attempt: submodule checkout at parent remote URL
# We might need to quote sm_path portion, e.g. for spaces etc
if isinstance(RI(remote_url), URL):
sm_path_url = urlquote(sm_path)
else:
sm_path_url = sm_path
clone_urls.extend(
('50{}'.format(remote), url)
for url in _get_flexible_source_candidates(
# alternate suffixes are tested by `clone` anyways
sm_path_url, remote_url, alternate_suffix=False)
)
# attempt: provided (configured?) submodule URL
# TODO: consider supporting DataLadRI here? or would confuse
# git and we wouldn't want that (i.e. not allow pure git clone
# --recursive)
if sm_url:
clone_urls.extend(
('60{}'.format(remote), url)
for url in _get_flexible_source_candidates(
sm_url,
remote_url,
alternate_suffix=False)
)
prio_candidate_expr = re.compile('[0-9][0-9].*')
for name, tmpl in [(c[40:], ds_repo.config[c])
for c in ds_repo.config.keys()
if c.startswith(
'datalad.get.subdataset-source-candidate-')]:
url = tmpl.format(**sm_candidate_props)
# we don't want "flexible_source_candidates" here, this is
# configuration that can be made arbitrarily precise from the
# outside. Additional guesswork can only make it slower
clone_urls.append((
# assign a default priority, if a config doesn't have one
name if prio_candidate_expr.match(name) else '70{}'.format(name),
url))
# CANDIDATE: the actual configured gitmodule URL
if sm_url:
clone_urls.extend(
('99local', url)
for url in _get_flexible_source_candidates(
sm_url,
ds.path,
alternate_suffix=False)
# avoid inclusion of submodule location itself
if url != sm['path']
)
    # sort all candidates by their label, thereby allowing
    # candidate provided by configuration to purposefully
    # sort before or after automatically generated configuration
    clone_urls = sorted(clone_urls, key=lambda x: x[0])
    # take out any duplicate source candidates
    # unique() takes out the duplicates at the tail end
    clone_urls = unique(clone_urls, lambda x: x[1])
return clone_urls
|
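The candidate list above is ordered by a two-digit priority prefix on the label and then de-duplicated by URL, keeping the first (highest-priority) occurrence. A small sketch of that post-processing with made-up candidates:

def dedupe_keep_first(candidates):
    # candidates are (label, url) pairs; sorting by label lets prefixes such as
    # '50origin' or '99local' decide the order, then only the first occurrence
    # of each URL is kept
    seen = set()
    result = []
    for label, url in sorted(candidates, key=lambda c: c[0]):
        if url not in seen:
            seen.add(url)
            result.append((label, url))
    return result

print(dedupe_keep_first([
    ('99local', 'https://example.com/sub'),
    ('50origin', 'https://example.com/sub'),
    ('60origin', 'https://mirror.example.com/sub'),
]))
# [('50origin', 'https://example.com/sub'), ('60origin', 'https://mirror.example.com/sub')]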
35,634 | def create_feature_extractor(
model: nn.Module,
return_nodes: Optional[Union[List[str], Dict[str, str]]] = None,
train_return_nodes: Optional[Union[List[str], Dict[str, str]]] = None,
eval_return_nodes: Optional[Union[List[str], Dict[str, str]]] = None,
tracer_kwargs: Dict = {},
suppress_diff_warning: bool = False) -> fx.GraphModule:
"""
Creates a new graph module that returns intermediate nodes from a given
model as dictionary with user specified keys as strings, and the requested
outputs as values. This is achieved by re-writing the computation graph of
the model via FX to return the desired nodes as outputs. All unused nodes
are removed, together with their corresponding parameters.
Desired output nodes must be specified as a ``.`` seperated
path walking the module hierarchy from top level module down to leaf
operation or leaf module. For more details on the node naming conventions
used here, please see the :ref:`relevant subheading <about-node-names>`
in the `documentation <https://pytorch.org/vision/stable/feature_extraction.html>`_.
Not all models will be FX traceable, although with some massaging they can
be made to cooperate. Here's a (not exhaustive) list of tips:
- If you don't need to trace through a particular, problematic
sub-module, turn it into a "leaf module" by passing a list of
``leaf_modules`` as one of the ``tracer_kwargs`` (see example below).
It will not be traced through, but rather, the resulting graph will
hold a reference to that module's forward method.
- Likewise, you may turn functions into leaf functions by passing a
list of ``autowrap_functions`` as one of the ``tracer_kwargs`` (see
example below).
- Some inbuilt Python functions can be problematic. For instance,
``int`` will raise an error during tracing. You may wrap them in your
own function and then pass that in ``autowrap_functions`` as one of
the ``tracer_kwargs``.
For further information on FX see the
`torch.fx documentation <https://pytorch.org/docs/stable/fx.html>`_.
Args:
model (nn.Module): model on which we will extract the features
return_nodes (list or dict, optional): either a ``List`` or a ``Dict``
containing the names (or partial names - see note above)
of the nodes for which the activations will be returned. If it is
a ``Dict``, the keys are the node names, and the values
are the user-specified keys for the graph module's returned
dictionary. If it is a ``List``, it is treated as a ``Dict`` mapping
node specification strings directly to output names. In the case
that ``train_return_nodes`` and ``eval_return_nodes`` are specified,
this should not be specified.
train_return_nodes (list or dict, optional): similar to
``return_nodes``. This can be used if the return nodes
for train mode are different than those from eval mode.
If this is specified, ``eval_return_nodes`` must also be specified,
and ``return_nodes`` should not be specified.
eval_return_nodes (list or dict, optional): similar to
``return_nodes``. This can be used if the return nodes
for train mode are different than those from eval mode.
If this is specified, ``train_return_nodes`` must also be specified,
and `return_nodes` should not be specified.
        tracer_kwargs (dict, optional): a dictionary of keyword arguments for
            ``NodePathTracer`` (which passes them onto its parent class
`torch.fx.Tracer <https://pytorch.org/docs/stable/fx.html#torch.fx.Tracer>`_).
suppress_diff_warning (bool, optional): whether to suppress a warning
when there are discrepancies between the train and eval version of
the graph. Defaults to False.
Examples::
>>> # Feature extraction with resnet
>>> model = torchvision.models.resnet18()
        >>> # extract layer1 and layer3, giving as names `feat1` and `feat2`
>>> model = create_feature_extractor(
>>> model, {'layer1': 'feat1', 'layer3': 'feat2'})
>>> out = model(torch.rand(1, 3, 224, 224))
>>> print([(k, v.shape) for k, v in out.items()])
>>> [('feat1', torch.Size([1, 64, 56, 56])),
>>> ('feat2', torch.Size([1, 256, 14, 14]))]
>>> # Specifying leaf modules and leaf functions
>>> def leaf_function(x):
>>> # This would raise a TypeError if traced through
>>> return int(x)
>>>
>>> class LeafModule(torch.nn.Module):
>>> def forward(self, x):
>>> # This would raise a TypeError if traced through
>>> int(x.shape[0])
>>> return torch.nn.functional.relu(x + 4)
>>>
>>> class MyModule(torch.nn.Module):
>>> def __init__(self):
>>> super().__init__()
>>> self.conv = torch.nn.Conv2d(3, 1, 3)
>>> self.leaf_module = LeafModule()
>>>
>>> def forward(self, x):
>>> leaf_function(x.shape[0])
>>> x = self.conv(x)
>>> return self.leaf_module(x)
>>>
>>> model = create_feature_extractor(
>>> MyModule(), return_nodes=['leaf_module'],
>>> tracer_kwargs={'leaf_modules': [LeafModule],
>>> 'autowrap_functions': [leaf_function]})
"""
is_training = model.training
assert any(arg is not None for arg in [
return_nodes, train_return_nodes, eval_return_nodes]), (
"Either `return_nodes` or `train_return_nodes` and "
"`eval_return_nodes` together, should be specified")
assert not ((train_return_nodes is None) ^ (eval_return_nodes is None)), \
("If any of `train_return_nodes` and `eval_return_nodes` are "
"specified, then both should be specified")
assert ((return_nodes is None) ^ (train_return_nodes is None)), \
("If `train_return_nodes` and `eval_return_nodes` are specified, "
"then both should be specified")
# Put *_return_nodes into Dict[str, str] format
def to_strdict(n) -> Dict[str, str]:
if isinstance(n, list):
return {str(i): str(i) for i in n}
return {str(k): str(v) for k, v in n.items()}
if train_return_nodes is None:
return_nodes = to_strdict(return_nodes)
train_return_nodes = deepcopy(return_nodes)
eval_return_nodes = deepcopy(return_nodes)
else:
train_return_nodes = to_strdict(train_return_nodes)
eval_return_nodes = to_strdict(eval_return_nodes)
# Repeat the tracing and graph rewriting for train and eval mode
tracers = {}
graphs = {}
mode_return_nodes: Dict[str, Dict[str, str]] = {
'train': train_return_nodes,
'eval': eval_return_nodes
}
for mode in ['train', 'eval']:
if mode == 'train':
model.train()
elif mode == 'eval':
model.eval()
# Instantiate our NodePathTracer and use that to trace the model
tracer = NodePathTracer(**tracer_kwargs)
graph = tracer.trace(model)
name = model.__class__.__name__ if isinstance(
model, nn.Module) else model.__name__
graph_module = fx.GraphModule(tracer.root, graph, name)
available_nodes = list(tracer.node_to_qualname.values())
# FIXME We don't know if we should expect this to happen
assert len(set(available_nodes)) == len(available_nodes), \
"There are duplicate nodes! Please raise an issue https://github.com/pytorch/vision/issues"
# Check that all outputs in return_nodes are present in the model
for query in mode_return_nodes[mode].keys():
# To check if a query is available we need to check that at least
# one of the available names starts with it up to a .
if not any([re.match(rf'^{query}(\.|$)', n) is not None
for n in available_nodes]):
raise ValueError(
f"node: '{query}' is not present in model. Hint: use "
"`get_graph_node_names` to make sure the "
"`return_nodes` you specified are present. It may even "
"be that you need to specify `train_return_nodes` and "
"`eval_return_nodes` separately.")
# Remove existing output nodes (train mode)
orig_output_nodes = []
for n in reversed(graph_module.graph.nodes):
if n.op == 'output':
orig_output_nodes.append(n)
assert len(orig_output_nodes)
for n in orig_output_nodes:
graph_module.graph.erase_node(n)
# Find nodes corresponding to return_nodes and make them into output_nodes
nodes = [n for n in graph_module.graph.nodes]
output_nodes = OrderedDict()
for n in reversed(nodes):
module_qualname = tracer.node_to_qualname.get(n)
if module_qualname is None:
                # NOTE - Known cases where this happens:
# - Node representing creation of a tensor constant - probably
# not interesting as a return node
# - When packing outputs into a named tuple like in InceptionV3
continue
for query in mode_return_nodes[mode]:
depth = query.count('.')
if '.'.join(module_qualname.split('.')[:depth + 1]) == query:
output_nodes[mode_return_nodes[mode][query]] = n
mode_return_nodes[mode].pop(query)
break
output_nodes = OrderedDict(reversed(list(output_nodes.items())))
# And add them in the end of the graph
with graph_module.graph.inserting_after(nodes[-1]):
graph_module.graph.output(output_nodes)
# Remove unused modules / parameters
graph_module.graph.eliminate_dead_code()
graph_module.recompile()
# Keep track of the tracer and graph so we can choose the main one
tracers[mode] = tracer
graphs[mode] = graph
# Warn user if there are any discrepancies between the graphs of the
# train and eval modes
if not suppress_diff_warning:
_warn_graph_differences(tracers['train'], tracers['eval'])
# Build the final graph module
graph_module = DualGraphModule(
model, graphs['train'], graphs['eval'], class_name=name)
# Restore original training mode
model.train(is_training)
graph_module.train(is_training)
return graph_module
| def create_feature_extractor(
model: nn.Module,
return_nodes: Optional[Union[List[str], Dict[str, str]]] = None,
train_return_nodes: Optional[Union[List[str], Dict[str, str]]] = None,
eval_return_nodes: Optional[Union[List[str], Dict[str, str]]] = None,
tracer_kwargs: Dict = {},
suppress_diff_warning: bool = False) -> fx.GraphModule:
"""
Creates a new graph module that returns intermediate nodes from a given
model as dictionary with user specified keys as strings, and the requested
outputs as values. This is achieved by re-writing the computation graph of
the model via FX to return the desired nodes as outputs. All unused nodes
are removed, together with their corresponding parameters.
Desired output nodes must be specified as a ``.`` separated
path walking the module hierarchy from top level module down to leaf
operation or leaf module. For more details on the node naming conventions
used here, please see the :ref:`relevant subheading <about-node-names>`
in the `documentation <https://pytorch.org/vision/stable/feature_extraction.html>`_.
Not all models will be FX traceable, although with some massaging they can
be made to cooperate. Here's a (not exhaustive) list of tips:
- If you don't need to trace through a particular, problematic
sub-module, turn it into a "leaf module" by passing a list of
``leaf_modules`` as one of the ``tracer_kwargs`` (see example below).
It will not be traced through, but rather, the resulting graph will
hold a reference to that module's forward method.
- Likewise, you may turn functions into leaf functions by passing a
list of ``autowrap_functions`` as one of the ``tracer_kwargs`` (see
example below).
- Some inbuilt Python functions can be problematic. For instance,
``int`` will raise an error during tracing. You may wrap them in your
own function and then pass that in ``autowrap_functions`` as one of
the ``tracer_kwargs``.
For further information on FX see the
`torch.fx documentation <https://pytorch.org/docs/stable/fx.html>`_.
Args:
model (nn.Module): model on which we will extract the features
return_nodes (list or dict, optional): either a ``List`` or a ``Dict``
containing the names (or partial names - see note above)
of the nodes for which the activations will be returned. If it is
a ``Dict``, the keys are the node names, and the values
are the user-specified keys for the graph module's returned
dictionary. If it is a ``List``, it is treated as a ``Dict`` mapping
node specification strings directly to output names. In the case
that ``train_return_nodes`` and ``eval_return_nodes`` are specified,
this should not be specified.
train_return_nodes (list or dict, optional): similar to
``return_nodes``. This can be used if the return nodes
for train mode are different than those from eval mode.
If this is specified, ``eval_return_nodes`` must also be specified,
and ``return_nodes`` should not be specified.
eval_return_nodes (list or dict, optional): similar to
``return_nodes``. This can be used if the return nodes
for train mode are different than those from eval mode.
If this is specified, ``train_return_nodes`` must also be specified,
and `return_nodes` should not be specified.
        tracer_kwargs (dict, optional): a dictionary of keyword arguments for
            ``NodePathTracer`` (which passes them onto its parent class
`torch.fx.Tracer <https://pytorch.org/docs/stable/fx.html#torch.fx.Tracer>`_).
suppress_diff_warning (bool, optional): whether to suppress a warning
when there are discrepancies between the train and eval version of
the graph. Defaults to False.
Examples::
>>> # Feature extraction with resnet
>>> model = torchvision.models.resnet18()
        >>> # extract layer1 and layer3, giving as names `feat1` and `feat2`
>>> model = create_feature_extractor(
>>> model, {'layer1': 'feat1', 'layer3': 'feat2'})
>>> out = model(torch.rand(1, 3, 224, 224))
>>> print([(k, v.shape) for k, v in out.items()])
>>> [('feat1', torch.Size([1, 64, 56, 56])),
>>> ('feat2', torch.Size([1, 256, 14, 14]))]
>>> # Specifying leaf modules and leaf functions
>>> def leaf_function(x):
>>> # This would raise a TypeError if traced through
>>> return int(x)
>>>
>>> class LeafModule(torch.nn.Module):
>>> def forward(self, x):
>>> # This would raise a TypeError if traced through
>>> int(x.shape[0])
>>> return torch.nn.functional.relu(x + 4)
>>>
>>> class MyModule(torch.nn.Module):
>>> def __init__(self):
>>> super().__init__()
>>> self.conv = torch.nn.Conv2d(3, 1, 3)
>>> self.leaf_module = LeafModule()
>>>
>>> def forward(self, x):
>>> leaf_function(x.shape[0])
>>> x = self.conv(x)
>>> return self.leaf_module(x)
>>>
>>> model = create_feature_extractor(
>>> MyModule(), return_nodes=['leaf_module'],
>>> tracer_kwargs={'leaf_modules': [LeafModule],
>>> 'autowrap_functions': [leaf_function]})
"""
is_training = model.training
assert any(arg is not None for arg in [
return_nodes, train_return_nodes, eval_return_nodes]), (
"Either `return_nodes` or `train_return_nodes` and "
"`eval_return_nodes` together, should be specified")
assert not ((train_return_nodes is None) ^ (eval_return_nodes is None)), \
("If any of `train_return_nodes` and `eval_return_nodes` are "
"specified, then both should be specified")
assert ((return_nodes is None) ^ (train_return_nodes is None)), \
("If `train_return_nodes` and `eval_return_nodes` are specified, "
"then both should be specified")
# Put *_return_nodes into Dict[str, str] format
def to_strdict(n) -> Dict[str, str]:
if isinstance(n, list):
return {str(i): str(i) for i in n}
return {str(k): str(v) for k, v in n.items()}
if train_return_nodes is None:
return_nodes = to_strdict(return_nodes)
train_return_nodes = deepcopy(return_nodes)
eval_return_nodes = deepcopy(return_nodes)
else:
train_return_nodes = to_strdict(train_return_nodes)
eval_return_nodes = to_strdict(eval_return_nodes)
# Repeat the tracing and graph rewriting for train and eval mode
tracers = {}
graphs = {}
mode_return_nodes: Dict[str, Dict[str, str]] = {
'train': train_return_nodes,
'eval': eval_return_nodes
}
for mode in ['train', 'eval']:
if mode == 'train':
model.train()
elif mode == 'eval':
model.eval()
# Instantiate our NodePathTracer and use that to trace the model
tracer = NodePathTracer(**tracer_kwargs)
graph = tracer.trace(model)
name = model.__class__.__name__ if isinstance(
model, nn.Module) else model.__name__
graph_module = fx.GraphModule(tracer.root, graph, name)
available_nodes = list(tracer.node_to_qualname.values())
# FIXME We don't know if we should expect this to happen
assert len(set(available_nodes)) == len(available_nodes), \
"There are duplicate nodes! Please raise an issue https://github.com/pytorch/vision/issues"
# Check that all outputs in return_nodes are present in the model
for query in mode_return_nodes[mode].keys():
# To check if a query is available we need to check that at least
# one of the available names starts with it up to a .
if not any([re.match(rf'^{query}(\.|$)', n) is not None
for n in available_nodes]):
raise ValueError(
f"node: '{query}' is not present in model. Hint: use "
"`get_graph_node_names` to make sure the "
"`return_nodes` you specified are present. It may even "
"be that you need to specify `train_return_nodes` and "
"`eval_return_nodes` separately.")
# Remove existing output nodes (train mode)
orig_output_nodes = []
for n in reversed(graph_module.graph.nodes):
if n.op == 'output':
orig_output_nodes.append(n)
assert len(orig_output_nodes)
for n in orig_output_nodes:
graph_module.graph.erase_node(n)
# Find nodes corresponding to return_nodes and make them into output_nodes
nodes = [n for n in graph_module.graph.nodes]
output_nodes = OrderedDict()
for n in reversed(nodes):
module_qualname = tracer.node_to_qualname.get(n)
if module_qualname is None:
                # NOTE - Known cases where this happens:
# - Node representing creation of a tensor constant - probably
# not interesting as a return node
# - When packing outputs into a named tuple like in InceptionV3
continue
for query in mode_return_nodes[mode]:
depth = query.count('.')
if '.'.join(module_qualname.split('.')[:depth + 1]) == query:
output_nodes[mode_return_nodes[mode][query]] = n
mode_return_nodes[mode].pop(query)
break
output_nodes = OrderedDict(reversed(list(output_nodes.items())))
# And add them in the end of the graph
with graph_module.graph.inserting_after(nodes[-1]):
graph_module.graph.output(output_nodes)
# Remove unused modules / parameters
graph_module.graph.eliminate_dead_code()
graph_module.recompile()
# Keep track of the tracer and graph so we can choose the main one
tracers[mode] = tracer
graphs[mode] = graph
# Warn user if there are any discrepancies between the graphs of the
# train and eval modes
if not suppress_diff_warning:
_warn_graph_differences(tracers['train'], tracers['eval'])
# Build the final graph module
graph_module = DualGraphModule(
model, graphs['train'], graphs['eval'], class_name=name)
# Restore original training mode
model.train(is_training)
graph_module.train(is_training)
return graph_module
|
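The node matching above truncates a qualified node name to the query's depth before comparing. A standalone sketch of that dotted-prefix match:

def matches(query: str, qualname: str) -> bool:
    # 'layer1' matches 'layer1.0.relu' because only the first
    # len(query.split('.')) components of the qualified name are compared
    depth = query.count('.')
    return '.'.join(qualname.split('.')[:depth + 1]) == query

print(matches('layer1', 'layer1.0.relu'))      # True
print(matches('layer1.0', 'layer1.0.relu'))    # True
print(matches('layer1', 'layer10.0.relu'))     # False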
12,377 | def handle(name, cfg, cloud, log, _args):
"""
Call to handle apk_repos sections in cloud-config file.
@param name: The module name "apk-configure" from cloud.cfg
@param cfg: A nested dict containing the entire cloud config contents.
@param cloud: The L{CloudInit} object in use.
@param log: Pre-initialized Python logger object to use for logging.
@param _args: Any module arguments from cloud.cfg
"""
# If there is no "apk_repos" section in the configuration
# then do nothing.
apk_section = cfg.get('apk_repos')
if not apk_section:
LOG.debug(("Skipping module named %s,"
" no 'apk_repos' section found"), name)
return
validate_cloudconfig_schema(cfg, schema)
# If "preserve_repositories" is explicitly set to True in
# the configuration do nothing.
if util.is_true(apk_section.get('preserve_repositories'), False):
LOG.debug(("Skipping module named %s,"
" 'preserve_repositories' is set"), name)
return
# If there is no "alpine_repo" subsection of "apk_repos" present in the
# configuration then do nothing, as at least "version" is required to
# create valid repositories entries.
alpine_repo = apk_section.get('alpine_repo')
if not alpine_repo:
LOG.debug(("Skipping module named %s,"
" no 'alpine_repo' configuration found"), name)
return
# If there is no "version" value present in configuration then do nothing.
alpine_version = alpine_repo.get('version')
if not alpine_version:
LOG.debug(("Skipping module named %s,"
" 'version' not specified in alpine_repo"), name)
return
local_repo = apk_section.get('local_repo_base_url', '')
_write_repositories_file(alpine_repo, alpine_version, local_repo)
| def handle(name, cfg, cloud, log, _args):
"""
Call to handle apk_repos sections in cloud-config file.
@param name: The module name "apk-configure" from cloud.cfg
@param cfg: A nested dict containing the entire cloud config contents.
@param cloud: The L{CloudInit} object in use.
@param log: Pre-initialized Python logger object to use for logging.
@param _args: Any module arguments from cloud.cfg
"""
# If there is no "apk_repos" section in the configuration
# then do nothing.
apk_section = cfg.get('apk_repos')
if not apk_section:
LOG.debug(("Skipping module named %s,"
" no 'apk_repos' section found"), name)
return
validate_cloudconfig_schema(cfg, schema)
# If "preserve_repositories" is explicitly set to True in
# the configuration do nothing.
if util.get_cfg_option_bool(apk_section, 'preserve_repositories', False):
LOG.debug(("Skipping module named %s,"
" 'preserve_repositories' is set"), name)
return
# If there is no "alpine_repo" subsection of "apk_repos" present in the
# configuration then do nothing, as at least "version" is required to
# create valid repositories entries.
alpine_repo = apk_section.get('alpine_repo')
if not alpine_repo:
LOG.debug(("Skipping module named %s,"
" no 'alpine_repo' configuration found"), name)
return
# If there is no "version" value present in configuration then do nothing.
alpine_version = alpine_repo.get('version')
if not alpine_version:
LOG.debug(("Skipping module named %s,"
" 'version' not specified in alpine_repo"), name)
return
local_repo = apk_section.get('local_repo_base_url', '')
_write_repositories_file(alpine_repo, alpine_version, local_repo)
|
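For reference, a cloud-config fragment exercising the keys this handler reads, written as the Python dict the module would receive. Only keys referenced in the code above are shown, and the version string is an assumed example:

cfg = {
    'apk_repos': {
        'preserve_repositories': False,
        'alpine_repo': {
            'version': 'v3.12',  # assumed example value
        },
        'local_repo_base_url': '',
    },
}

apk_section = cfg.get('apk_repos')
alpine_repo = apk_section.get('alpine_repo')
print(alpine_repo.get('version'))  # v3.12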
55,841 | def compute_sync_committee_proposer_reward(spec, state, committee, committee_bits):
proposer_reward = 0
for index, bit in zip(committee, committee_bits):
if not bit:
continue
inclusion_reward = compute_sync_committee_inclusion_reward(
spec, state, index, committee, committee_bits,
)
proposer_reward_denominator = (spec.WEIGHT_DENOMINATOR - spec.PROPOSER_WEIGHT) * spec.WEIGHT_DENOMINATOR // spec.PROPOSER_WEIGHT
proposer_reward += spec.Gwei((inclusion_reward * spec.WEIGHT_DENOMINATOR) // proposer_reward_denominator)
return proposer_reward
| def compute_sync_committee_proposer_reward(spec, state, committee, committee_bits):
proposer_reward = 0
for index, bit in zip(committee, committee_bits):
if not bit:
continue
inclusion_reward = compute_sync_committee_inclusion_reward(
spec, state, index, committee, committee_bits,
)
proposer_reward_denominator = (
(spec.WEIGHT_DENOMINATOR - spec.PROPOSER_WEIGHT)
* spec.WEIGHT_DENOMINATOR
// spec.PROPOSER_WEIGHT
)
proposer_reward += spec.Gwei((inclusion_reward * spec.WEIGHT_DENOMINATOR) // proposer_reward_denominator)
return proposer_reward
|
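The rewrite above only reformats the denominator. As a numeric sanity check, a sketch using the Altair weight constants as commonly published (PROPOSER_WEIGHT = 8, WEIGHT_DENOMINATOR = 64; treat these as assumptions, the `spec` object is authoritative):

WEIGHT_DENOMINATOR = 64  # assumed Altair value
PROPOSER_WEIGHT = 8      # assumed Altair value

proposer_reward_denominator = (
    (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT)
    * WEIGHT_DENOMINATOR
    // PROPOSER_WEIGHT
)
print(proposer_reward_denominator)  # 448

inclusion_reward = 1_000  # arbitrary Gwei amount for illustration
print(inclusion_reward * WEIGHT_DENOMINATOR // proposer_reward_denominator)  # 142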
34,461 | def _load_from_endpoint_config(endpoint_config: EndpointConfig) -> "LockStore":
"""Given the name of a `LockStore` module tries to retrieve it."""
try:
lock_store_class = common.class_from_module_path(endpoint_config.type)
return lock_store_class(endpoint_config=endpoint_config)
except (AttributeError, ImportError) as e:
raise Exception(
f"Could not find a class based on the module path "
f"'{endpoint_config.type}'. Failed to create a `LockStore` "
f"instance. Error: {e}"
)
| def _load_from_endpoint_config(endpoint_config: EndpointConfig) -> "LockStore":
"""Retrieve a `LockStore` based on its class name."""
try:
lock_store_class = common.class_from_module_path(endpoint_config.type)
return lock_store_class(endpoint_config=endpoint_config)
except (AttributeError, ImportError) as e:
raise Exception(
f"Could not find a class based on the module path "
f"'{endpoint_config.type}'. Failed to create a `LockStore` "
f"instance. Error: {e}"
)
|
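`class_from_module_path` is a Rasa helper; a rough standard-library equivalent, to show what the lookup and the caught ImportError/AttributeError correspond to (the real helper may differ):

import importlib

def class_from_module_path(module_path: str):
    # "package.module.ClassName" -> the ClassName attribute of package.module
    module_name, _, class_name = module_path.rpartition('.')
    module = importlib.import_module(module_name)   # may raise ImportError
    return getattr(module, class_name)              # may raise AttributeError

print(class_from_module_path('collections.OrderedDict'))  # <class 'collections.OrderedDict'>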
51,451 | def _decode_datetime_with_pandas(flat_num_dates, units, calendar):
if calendar not in _STANDARD_CALENDARS:
raise OutOfBoundsDatetime(
"Cannot decode times from a non-standard calendar, {!r}, using "
"pandas.".format(calendar)
)
delta, ref_date = _unpack_netcdf_time_units(units)
delta = _netcdf_to_numpy_timeunit(delta)
try:
ref_date = pd.Timestamp(ref_date)
except ValueError:
# ValueError is raised by pd.Timestamp for non-ISO timestamp
# strings, in which case we fall back to using cftime
raise OutOfBoundsDatetime
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "invalid value encountered", RuntimeWarning)
pd.to_timedelta(flat_num_dates.min(), delta) + ref_date
pd.to_timedelta(flat_num_dates.max(), delta) + ref_date
# To avoid integer overflow when converting to nanosecond units for integer
# dtypes smaller than np.int64 cast all integer-dtype arrays to np.int64
# (GH 2002).
if flat_num_dates.dtype.kind == "i":
flat_num_dates = flat_num_dates.astype(np.int64)
# Cast input ordinals to integers of nanoseconds because pd.to_timedelta
# works much faster when dealing with integers (GH 1399).
flat_num_dates_ns_int = (flat_num_dates * _NS_PER_TIME_DELTA[delta]).astype(
np.int64
)
# Use pd.to_timedelta to safely cast integer values to timedeltas,
# and add those to a Timestamp to safely produce a DatetimeIndex. This
# ensures that we do not encounter integer overflow at any point in the
# process without raising OutOfBoundsDatetime.
return (pd.to_timedelta(flat_num_dates_ns_int, "ns") + ref_date).values
| def _decode_datetime_with_pandas(flat_num_dates, units, calendar):
if calendar not in _STANDARD_CALENDARS:
raise OutOfBoundsDatetime(
"Cannot decode times from a non-standard calendar, {!r}, using "
"pandas.".format(calendar)
)
delta, ref_date = _unpack_netcdf_time_units(units)
delta = _netcdf_to_numpy_timeunit(delta)
try:
ref_date = pd.Timestamp(ref_date)
except ValueError:
# ValueError is raised by pd.Timestamp for non-ISO timestamp
# strings, in which case we fall back to using cftime
raise OutOfBoundsDatetime
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "invalid value encountered", RuntimeWarning)
pd.to_timedelta(flat_num_dates.min(), delta)
pd.to_timedelta(flat_num_dates.max(), delta)
# To avoid integer overflow when converting to nanosecond units for integer
# dtypes smaller than np.int64 cast all integer-dtype arrays to np.int64
# (GH 2002).
if flat_num_dates.dtype.kind == "i":
flat_num_dates = flat_num_dates.astype(np.int64)
# Cast input ordinals to integers of nanoseconds because pd.to_timedelta
# works much faster when dealing with integers (GH 1399).
flat_num_dates_ns_int = (flat_num_dates * _NS_PER_TIME_DELTA[delta]).astype(
np.int64
)
# Use pd.to_timedelta to safely cast integer values to timedeltas,
# and add those to a Timestamp to safely produce a DatetimeIndex. This
# ensures that we do not encounter integer overflow at any point in the
# process without raising OutOfBoundsDatetime.
return (pd.to_timedelta(flat_num_dates_ns_int, "ns") + ref_date).values
|
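A compact standalone version of the overflow-safe decoding path above, with hard-coded "days since 2000-01-01" units instead of parsing a CF units string:

import numpy as np
import pandas as pd

NS_PER_DAY = 24 * 60 * 60 * 1_000_000_000

flat_num_dates = np.array([0, 1, 365], dtype=np.int32)
ref_date = pd.Timestamp("2000-01-01")

# cast to int64 before scaling to nanoseconds to avoid overflow in the
# smaller integer dtype, then let pandas do exact integer arithmetic
num_ns = flat_num_dates.astype(np.int64) * NS_PER_DAY
decoded = pd.to_timedelta(num_ns, "ns") + ref_date
print(decoded.values[:2])
# ['2000-01-01T00:00:00.000000000' '2000-01-02T00:00:00.000000000']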
51,787 | def generate_package_index(cache_prefix):
"""Create the build cache index page.
Creates (or replaces) the "index.json" page at the location given in
cache_prefix. This page contains a link for each binary package (.yaml or
.json) under cache_prefix.
"""
tmpdir = tempfile.mkdtemp()
db_root_dir = os.path.join(tmpdir, 'db_root')
db = spack_db.Database(None, db_dir=db_root_dir,
enable_transaction_locking=False,
record_fields=['spec', 'ref_count', 'in_buildcache'])
try:
file_list = (
entry
for entry in web_util.list_url(cache_prefix)
if entry.endswith('.yaml') or entry.endswith('.json'))
except KeyError as inst:
msg = 'No packages at {0}: {1}'.format(cache_prefix, inst)
tty.warn(msg)
return
except Exception as err:
# If we got some kind of S3 (access denied or other connection
# error), the first non boto-specific class in the exception
# hierarchy is Exception. Just print a warning and return
msg = 'Encountered problem listing packages at {0}: {1}'.format(
cache_prefix, err)
tty.warn(msg)
return
tty.debug('Retrieving spec descriptor files from {0} to build index'.format(
cache_prefix))
for file_path in file_list:
try:
spec_url = url_util.join(cache_prefix, file_path)
tty.debug('fetching {0}'.format(spec_url))
_, _, spec_file = web_util.read_from_url(spec_url)
spec_file_contents = codecs.getreader('utf-8')(spec_file).read()
# Need full spec.json name or this gets confused with index.json.
if spec_url.endswith('spec.json'):
s = Spec.from_json(spec_file_contents)
db.add(s, None)
db.mark(s, 'in_buildcache', True)
elif spec_url.endswith('yaml'):
s = Spec.from_yaml(spec_file_contents)
db.add(s, None)
db.mark(s, 'in_buildcache', True)
except (URLError, web_util.SpackWebError) as url_err:
tty.error('Error reading specfile: {0}'.format(file_path))
tty.error(url_err)
try:
index_json_path = os.path.join(db_root_dir, 'index.json')
with open(index_json_path, 'w') as f:
db._write_to_file(f)
        # Read the index back in and compute its hash
with open(index_json_path) as f:
index_string = f.read()
index_hash = compute_hash(index_string)
# Write the hash out to a local file
index_hash_path = os.path.join(db_root_dir, 'index.json.hash')
with open(index_hash_path, 'w') as f:
f.write(index_hash)
# Push the index itself
web_util.push_to_url(
index_json_path,
url_util.join(cache_prefix, 'index.json'),
keep_original=False,
extra_args={'ContentType': 'application/json'})
# Push the hash
web_util.push_to_url(
index_hash_path,
url_util.join(cache_prefix, 'index.json.hash'),
keep_original=False,
extra_args={'ContentType': 'text/plain'})
except Exception as err:
msg = 'Encountered problem pushing package index to {0}: {1}'.format(
cache_prefix, err)
tty.warn(msg)
finally:
shutil.rmtree(tmpdir)
| def generate_package_index(cache_prefix):
"""Create the build cache index page.
Creates (or replaces) the "index.json" page at the location given in
cache_prefix. This page contains a link for each binary package (.yaml or
.json) under cache_prefix.
"""
tmpdir = tempfile.mkdtemp()
db_root_dir = os.path.join(tmpdir, 'db_root')
db = spack_db.Database(None, db_dir=db_root_dir,
enable_transaction_locking=False,
record_fields=['spec', 'ref_count', 'in_buildcache'])
try:
file_list = (
entry
for entry in web_util.list_url(cache_prefix)
if entry.endswith('.yaml') or entry.endswith('.json'))
except KeyError as inst:
msg = 'No packages at {0}: {1}'.format(cache_prefix, inst)
tty.warn(msg)
return
except Exception as err:
# If we got some kind of S3 (access denied or other connection
# error), the first non boto-specific class in the exception
# hierarchy is Exception. Just print a warning and return
msg = 'Encountered problem listing packages at {0}: {1}'.format(
cache_prefix, err)
tty.warn(msg)
return
tty.debug('Retrieving spec descriptor files from {0} to build index'.format(
cache_prefix))
for file_path in file_list:
try:
spec_url = url_util.join(cache_prefix, file_path)
tty.debug('fetching {0}'.format(spec_url))
_, _, spec_file = web_util.read_from_url(spec_url)
spec_file_contents = codecs.getreader('utf-8')(spec_file).read()
# Need full spec.json name or this gets confused with index.json.
if spec_url.endswith('spec.json'):
s = Spec.from_json(spec_file_contents)
elif spec_url.endswith('yaml'):
s = Spec.from_yaml(spec_file_contents)
db.add(s, None)
db.mark(s, 'in_buildcache', True)
except (URLError, web_util.SpackWebError) as url_err:
tty.error('Error reading specfile: {0}'.format(file_path))
tty.error(url_err)
try:
index_json_path = os.path.join(db_root_dir, 'index.json')
with open(index_json_path, 'w') as f:
db._write_to_file(f)
        # Read the index back in and compute its hash
with open(index_json_path) as f:
index_string = f.read()
index_hash = compute_hash(index_string)
# Write the hash out to a local file
index_hash_path = os.path.join(db_root_dir, 'index.json.hash')
with open(index_hash_path, 'w') as f:
f.write(index_hash)
# Push the index itself
web_util.push_to_url(
index_json_path,
url_util.join(cache_prefix, 'index.json'),
keep_original=False,
extra_args={'ContentType': 'application/json'})
# Push the hash
web_util.push_to_url(
index_hash_path,
url_util.join(cache_prefix, 'index.json.hash'),
keep_original=False,
extra_args={'ContentType': 'text/plain'})
except Exception as err:
msg = 'Encountered problem pushing package index to {0}: {1}'.format(
cache_prefix, err)
tty.warn(msg)
finally:
shutil.rmtree(tmpdir)
|
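`compute_hash` comes from Spack; assuming it is a plain SHA-256 hex digest of the index contents (an assumption, the real helper may differ), the hash step reduces to something like:

import hashlib

def compute_hash(data: str) -> str:
    # assumed: SHA-256 hex digest of the index contents
    return hashlib.sha256(data.encode('utf-8')).hexdigest()

index_string = '{"database": {"installs": {}}}'
print(compute_hash(index_string))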
31,394 | def cisco_stealthwatch_list_tenants_command(client: Client,
tenant_id: str = None) -> CommandResults:
"""List all tenants (called domains on the Stealthwatch API)
Args:
client (Client): Cisco Stealthwatch Client
tenant_id (str): The id of the tenant to retrieve its information
Returns:
CommandResults: Raw response, outputs and readable outputs
"""
if tenant_id:
response = client.get_tenant(tenant_id)
outputs = response.get('data', [])
table = tableToMarkdown(f'Tenant (tenant_id: {tenant_id})', outputs,
headers=['id', 'displayName'], removeNull=True)
command_results = CommandResults(
outputs_prefix='CiscoStealthwatch.Tenant',
outputs_key_field='id',
raw_response=response,
outputs=outputs,
readable_output=table
)
else:
response = client.list_tenants()
outputs = []
for tenant in response.get('data', []):
outputs.append(tenant)
table = tableToMarkdown('Tenants:', outputs, headers=['id', 'displayName'], removeNull=True)
command_results = CommandResults(
outputs_prefix='CiscoStealthwatch.Tenant',
outputs_key_field='id',
raw_response=response,
outputs=outputs,
readable_output=table
)
return command_results
| def cisco_stealthwatch_list_tenants_command(client: Client,
tenant_id: str = None) -> CommandResults:
"""List all tenants (called domains on the Stealthwatch API)
Args:
client (Client): Cisco Stealthwatch Client
tenant_id (str): The id of the tenant to retrieve its information
Returns:
CommandResults: Raw response, outputs and readable outputs
"""
if tenant_id:
response = client.get_tenant(tenant_id)
outputs = response.get('data', [])
table = tableToMarkdown(f'Results for tenant {tenant_id}:', outputs,
headers=['id', 'displayName'], removeNull=True)
command_results = CommandResults(
outputs_prefix='CiscoStealthwatch.Tenant',
outputs_key_field='id',
raw_response=response,
outputs=outputs,
readable_output=table
)
else:
response = client.list_tenants()
outputs = []
for tenant in response.get('data', []):
outputs.append(tenant)
table = tableToMarkdown('Tenants:', outputs, headers=['id', 'displayName'], removeNull=True)
command_results = CommandResults(
outputs_prefix='CiscoStealthwatch.Tenant',
outputs_key_field='id',
raw_response=response,
outputs=outputs,
readable_output=table
)
return command_results
|
35,563 | def fingerprint(logcan, sendcan):
fixed_fingerprint = os.environ.get('FINGERPRINT', "")
skip_fw_query = os.environ.get('SKIP_FW_QUERY', False)
ecu_rx_addrs = set()
if not fixed_fingerprint and not skip_fw_query:
# Vin query only reliably works through OBDII
bus = 1
cached_params = Params().get("CarParamsCache")
if cached_params is not None:
cached_params = car.CarParams.from_bytes(cached_params)
if cached_params.carName == "mock":
cached_params = None
if cached_params is not None and len(cached_params.carFw) > 0 and cached_params.carVin is not VIN_UNKNOWN:
cloudlog.warning("Using cached CarParams")
vin, vin_rx_addr = cached_params.carVin, 0
car_fw = list(cached_params.carFw)
else:
cloudlog.warning("Getting VIN & FW versions")
# Give some time to let sendcan connect to boardd
time.sleep(0.2)
# Sending a message to the functional addresses solves skipped iso-tp frames
send_functional_tester_present(logcan, sendcan, bus)
_, vin_rx_addr, vin = get_vin(logcan, sendcan, bus)
ecu_rx_addrs = get_present_ecus(logcan, sendcan)
car_fw = get_fw_versions_ordered(logcan, sendcan, ecu_rx_addrs)
exact_fw_match, fw_candidates = match_fw_to_car(car_fw)
else:
vin, vin_rx_addr = VIN_UNKNOWN, 0
exact_fw_match, fw_candidates, car_fw = True, set(), []
if not is_valid_vin(vin):
cloudlog.event("Malformed VIN", vin=vin, error=True)
vin = VIN_UNKNOWN
cloudlog.warning("VIN %s", vin)
Params().put("CarVin", vin)
finger = gen_empty_fingerprint()
candidate_cars = {i: all_legacy_fingerprint_cars() for i in [0, 1]} # attempt fingerprint on both bus 0 and 1
frame = 0
frame_fingerprint = 100 # 1s
car_fingerprint = None
done = False
# drain CAN socket so we always get the latest messages
messaging.drain_sock_raw(logcan)
while not done:
a = get_one_can(logcan)
for can in a.can:
# The fingerprint dict is generated for all buses, this way the car interface
# can use it to detect a (valid) multipanda setup and initialize accordingly
if can.src < 128:
if can.src not in finger:
finger[can.src] = {}
finger[can.src][can.address] = len(can.dat)
for b in candidate_cars:
# Ignore extended messages and VIN query response.
if can.src == b and can.address < 0x800 and can.address not in (0x7df, 0x7e0, 0x7e8):
candidate_cars[b] = eliminate_incompatible_cars(can, candidate_cars[b])
# if we only have one car choice and the time since we got our first
# message has elapsed, exit
for b in candidate_cars:
if len(candidate_cars[b]) == 1 and frame > frame_fingerprint:
# fingerprint done
car_fingerprint = candidate_cars[b][0]
# bail if no cars left or we've been waiting for more than 2s
failed = (all(len(cc) == 0 for cc in candidate_cars.values()) and frame > frame_fingerprint) or frame > 200
succeeded = car_fingerprint is not None
done = failed or succeeded
frame += 1
exact_match = True
source = car.CarParams.FingerprintSource.can
# If FW query returns exactly 1 candidate, use it
if len(fw_candidates) == 1:
car_fingerprint = list(fw_candidates)[0]
source = car.CarParams.FingerprintSource.fw
exact_match = exact_fw_match
if fixed_fingerprint:
car_fingerprint = fixed_fingerprint
source = car.CarParams.FingerprintSource.fixed
cloudlog.event("fingerprinted", car_fingerprint=car_fingerprint, source=source, fuzzy=not exact_match,
fw_count=len(car_fw), ecu_responses=list(ecu_rx_addrs), vin_rx_addr=vin_rx_addr, error=True)
return car_fingerprint, finger, vin, car_fw, source, exact_match
| def fingerprint(logcan, sendcan):
fixed_fingerprint = os.environ.get('FINGERPRINT', "")
skip_fw_query = os.environ.get('SKIP_FW_QUERY', False)
ecu_rx_addrs = set()
if not fixed_fingerprint and not skip_fw_query:
# Vin query only reliably works through OBDII
bus = 1
cached_params = Params().get("CarParamsCache")
if cached_params is not None:
cached_params = car.CarParams.from_bytes(cached_params)
if cached_params.carName == "mock":
cached_params = None
if cached_params is not None and len(cached_params.carFw) > 0 and cached_params.carVin is not VIN_UNKNOWN:
cloudlog.warning("Using cached CarParams")
vin, vin_rx_addr = cached_params.carVin, 0
car_fw = list(cached_params.carFw)
else:
cloudlog.warning("Getting VIN & FW versions")
# Allow some time to let sendcan connect to boardd
time.sleep(0.2)
# Sending a message to the functional addresses solves skipped iso-tp frames
send_functional_tester_present(logcan, sendcan, bus)
_, vin_rx_addr, vin = get_vin(logcan, sendcan, bus)
ecu_rx_addrs = get_present_ecus(logcan, sendcan)
car_fw = get_fw_versions_ordered(logcan, sendcan, ecu_rx_addrs)
exact_fw_match, fw_candidates = match_fw_to_car(car_fw)
else:
vin, vin_rx_addr = VIN_UNKNOWN, 0
exact_fw_match, fw_candidates, car_fw = True, set(), []
if not is_valid_vin(vin):
cloudlog.event("Malformed VIN", vin=vin, error=True)
vin = VIN_UNKNOWN
cloudlog.warning("VIN %s", vin)
Params().put("CarVin", vin)
finger = gen_empty_fingerprint()
candidate_cars = {i: all_legacy_fingerprint_cars() for i in [0, 1]} # attempt fingerprint on both bus 0 and 1
frame = 0
frame_fingerprint = 100 # 1s
car_fingerprint = None
done = False
# drain CAN socket so we always get the latest messages
messaging.drain_sock_raw(logcan)
while not done:
a = get_one_can(logcan)
for can in a.can:
# The fingerprint dict is generated for all buses, this way the car interface
# can use it to detect a (valid) multipanda setup and initialize accordingly
if can.src < 128:
if can.src not in finger:
finger[can.src] = {}
finger[can.src][can.address] = len(can.dat)
for b in candidate_cars:
# Ignore extended messages and VIN query response.
if can.src == b and can.address < 0x800 and can.address not in (0x7df, 0x7e0, 0x7e8):
candidate_cars[b] = eliminate_incompatible_cars(can, candidate_cars[b])
# if we only have one car choice and the time since we got our first
# message has elapsed, exit
for b in candidate_cars:
if len(candidate_cars[b]) == 1 and frame > frame_fingerprint:
# fingerprint done
car_fingerprint = candidate_cars[b][0]
# bail if no cars left or we've been waiting for more than 2s
failed = (all(len(cc) == 0 for cc in candidate_cars.values()) and frame > frame_fingerprint) or frame > 200
succeeded = car_fingerprint is not None
done = failed or succeeded
frame += 1
exact_match = True
source = car.CarParams.FingerprintSource.can
# If FW query returns exactly 1 candidate, use it
if len(fw_candidates) == 1:
car_fingerprint = list(fw_candidates)[0]
source = car.CarParams.FingerprintSource.fw
exact_match = exact_fw_match
if fixed_fingerprint:
car_fingerprint = fixed_fingerprint
source = car.CarParams.FingerprintSource.fixed
cloudlog.event("fingerprinted", car_fingerprint=car_fingerprint, source=source, fuzzy=not exact_match,
fw_count=len(car_fw), ecu_responses=list(ecu_rx_addrs), vin_rx_addr=vin_rx_addr, error=True)
return car_fingerprint, finger, vin, car_fw, source, exact_match
|
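The fingerprint accumulated above is just a nested mapping of bus number to {CAN address: payload length}. A tiny sketch of that bookkeeping in isolation:

def add_to_fingerprint(finger: dict, src: int, address: int, dat: bytes) -> None:
    # finger maps bus number -> {CAN address: payload length};
    # sources >= 128 are ignored, as in the loop above
    if src < 128:
        finger.setdefault(src, {})[address] = len(dat)

finger = {}
add_to_fingerprint(finger, 0, 0x1FC, b'\x00' * 8)
add_to_fingerprint(finger, 0, 0x25, b'\x00' * 6)
print(finger)  # {0: {508: 8, 37: 6}}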
52,348 | def finalize(visit_id: int, webdriver: WebDriver,
extension_socket: clientsocket, sleep: int) -> None:
""" Informs the extension that a visit is done """
tab_restart_browser(webdriver)
# This doesn't seem to be instant
# so we should wait a bit before unsetting the visit_id
time.sleep(sleep)
msg = {"action": "Finalize", "visit_id": visit_id}
extension_socket.send(msg)
| def finalize(visit_id: int, webdriver: WebDriver,
extension_socket: clientsocket, sleep: int) -> None:
""" Informs the extension that a visit is done """
tab_restart_browser(webdriver)
# This doesn't immediately stop data saving from the current
# visit so we sleep briefly before unsetting the visit_id.
time.sleep(sleep)
msg = {"action": "Finalize", "visit_id": visit_id}
extension_socket.send(msg)
|
31,878 | def test_dbot_score():
"""
Given:
- response from MalwareBazaar on hash file
When:
- Running a file command
Then:
- creating a CommandResults containing DbotScore and relationship
"""
mock_response = util_load_json('test_data/response_data_dbot_score.json')
result = MalwareBazaar.file_process("094fd325049b8a9cf6d3e5ef2a6d4cc6a567d7d49c35f8bb8dd9e3c6acf3d78d",
{}, mock_response)
indicator: CommonServerPython.Common.File = result.indicator
assert indicator.dbot_score.score == 3
| def test_dbot_score():
"""
Given:
- response from MalwareBazaar on hash file
When:
- Running a file command
Then:
- creating a CommandResults containing DbotScore and relationship
"""
mock_response = util_load_json('test_data/response_data_dbot_score.json')
result = MalwareBazaar.file_process("094fd325049b8a9cf6d3e5ef2a6d4cc6a567d7d49c35f8bb8dd9e3c6acf3d78d",
{}, mock_response)
indicator: Common.File = result.indicator
assert indicator.dbot_score.score == 3
|
52,935 | def parse_args(args: Optional[Sequence[str]] = None) -> dict:
"""Parse atomic-reactor CLI arguments.
:param args: iterable of strings to parse as CLI arguments. By default, sys.argv[1:]
:return: parsed arguments as a dict
"""
parser = argparse.ArgumentParser(prog=PROG, description=DESCRIPTION)
_add_global_args(parser)
# Subcommands (there is only one - 'task')
subcommands = parser.add_subparsers(title="subcommands", metavar="subcommand", required=True)
task_parser = subcommands.add_parser(
"task",
help="run a task",
description="Run a specific task in the container build process.",
)
_add_common_task_args(task_parser)
# The individual tasks
tasks = task_parser.add_subparsers(title="tasks", metavar="task", required=True)
orchestrator = tasks.add_parser(
"orchestrator",
help="orchestrate a build",
description="Orchestrate a binary container build.",
)
orchestrator.set_defaults(func=task.orchestrator)
worker = tasks.add_parser(
"worker",
help="run the worker task",
description="Run the worker task for a binary container build.",
)
worker.set_defaults(func=task.worker)
source_container_build = tasks.add_parser(
"source-container-build",
help="build a source container",
description="Build a source container.",
)
source_container_build.set_defaults(func=task.source_container_build)
source_container_exit = tasks.add_parser(
"source-container-exit",
help="Source container build exit task.",
description="Run plugins while exiting a source container build.",
)
source_container_exit.set_defaults(func=task.source_container_exit)
clone = tasks.add_parser(
"clone",
help="Clone source to build",
description="Source is cloned by this command and used by rest of the build tasks",
)
clone.set_defaults(func=task.clone)
binary_container_prebuild = tasks.add_parser(
"binary-container-prebuild",
help="binary container pre-build step",
description="Execute binary container pre-build steps.",
)
binary_container_prebuild.set_defaults(func=task.binary_container_prebuild)
binary_container_build = tasks.add_parser(
"binary-container-build",
help="build a binary container",
description="Build a binary container.",
)
binary_container_build.set_defaults(func=task.binary_container_build)
binary_container_build.add_argument('--platform', action="store",
help="platform on which to build container")
binary_container_postbuild = tasks.add_parser(
"binary-container-postbuild",
help="binary container post-build step",
description="Execute binary container post-build steps.",
)
binary_container_postbuild.set_defaults(func=task.binary_container_postbuild)
binary_container_exit = tasks.add_parser(
"binary-container-exit",
help="exit a binary container build",
description="Execute binary container exit steps.",
)
binary_container_exit.set_defaults(func=task.binary_container_exit)
return vars(parser.parse_args(args))
| def parse_args(args: Optional[Sequence[str]] = None) -> dict:
"""Parse atomic-reactor CLI arguments.
:param args: iterable of strings to parse as CLI arguments. By default, sys.argv[1:]
:return: parsed arguments as a dict
"""
parser = argparse.ArgumentParser(prog=PROG, description=DESCRIPTION)
_add_global_args(parser)
# Subcommands (there is only one - 'task')
subcommands = parser.add_subparsers(title="subcommands", metavar="subcommand", required=True)
task_parser = subcommands.add_parser(
"task",
help="run a task",
description="Run a specific task in the container build process.",
)
_add_common_task_args(task_parser)
# The individual tasks
tasks = task_parser.add_subparsers(title="tasks", metavar="task", required=True)
orchestrator = tasks.add_parser(
"orchestrator",
help="orchestrate a build",
description="Orchestrate a binary container build.",
)
orchestrator.set_defaults(func=task.orchestrator)
worker = tasks.add_parser(
"worker",
help="run the worker task",
description="Run the worker task for a binary container build.",
)
worker.set_defaults(func=task.worker)
source_container_build = tasks.add_parser(
"source-container-build",
help="build a source container",
description="Build a source container.",
)
source_container_build.set_defaults(func=task.source_container_build)
source_container_exit = tasks.add_parser(
"source-container-exit",
help="exit a source container build",
description="Execute source container exit steps.",
)
source_container_exit.set_defaults(func=task.source_container_exit)
clone = tasks.add_parser(
"clone",
help="Clone source to build",
description="Source is cloned by this command and used by rest of the build tasks",
)
clone.set_defaults(func=task.clone)
binary_container_prebuild = tasks.add_parser(
"binary-container-prebuild",
help="binary container pre-build step",
description="Execute binary container pre-build steps.",
)
binary_container_prebuild.set_defaults(func=task.binary_container_prebuild)
binary_container_build = tasks.add_parser(
"binary-container-build",
help="build a binary container",
description="Build a binary container.",
)
binary_container_build.set_defaults(func=task.binary_container_build)
binary_container_build.add_argument('--platform', action="store",
help="platform on which to build container")
binary_container_postbuild = tasks.add_parser(
"binary-container-postbuild",
help="binary container post-build step",
description="Execute binary container post-build steps.",
)
binary_container_postbuild.set_defaults(func=task.binary_container_postbuild)
binary_container_exit = tasks.add_parser(
"binary-container-exit",
help="exit a binary container build",
description="Execute binary container exit steps.",
)
binary_container_exit.set_defaults(func=task.binary_container_exit)
return vars(parser.parse_args(args))
|
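The parser above leans on two argparse features: nested sub-parsers for the task names, and set_defaults(func=...) so the chosen handler travels inside the parsed dict. A stripped-down, self-contained illustration of that dispatch pattern (the task names and handlers below are placeholders, not atomic-reactor's real ones):

import argparse

def clone_task(**kwargs):
    return "cloning source"

def build_task(**kwargs):
    return f"building for {kwargs.get('platform')}"

parser = argparse.ArgumentParser(prog="demo")
tasks = parser.add_subparsers(title="tasks", metavar="task", required=True)

clone = tasks.add_parser("clone", help="clone source")
clone.set_defaults(func=clone_task)

build = tasks.add_parser("build", help="build a container")
build.set_defaults(func=build_task)
build.add_argument("--platform", action="store")

args = vars(parser.parse_args(["build", "--platform", "x86_64"]))
func = args.pop("func")   # pop the handler and dispatch with the remaining args
print(func(**args))       # -> building for x86_64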
47,093 | def is_training_run_on_sagemaker():
return True if "SAGEMAKER_JOB_NAME" in os.environ and os.environ["DISABLE_TELEMETRY"] is False else False
| def is_training_run_on_sagemaker():
return ("SAGEMAKER_JOB_NAME" in os.environ and not DISABLE_TELEMETRY)
|
28,203 | def tprint(string, dt=1, tag='default'):
""" Print progress of a loop every dt seconds. """
ptime = _tprint_times.get(tag, 0)
if (time.time() - ptime) > dt:
print(string)
_tprint_times[tag] = time.time()
| def tprint(string, dt=1, tag='default'):
"""Print progress of a loop every ``dt`` seconds."""
ptime = _tprint_times.get(tag, 0)
if (time.time() - ptime) > dt:
print(string)
_tprint_times[tag] = time.time()
|
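A short usage sketch for the throttled printer above; it is self-contained, so the module-level _tprint_times cache the function relies on is declared here as well:

import time

_tprint_times = {}  # tag -> timestamp of the last print

def tprint(string, dt=1, tag='default'):
    """Print progress of a loop at most once every ``dt`` seconds."""
    ptime = _tprint_times.get(tag, 0)
    if (time.time() - ptime) > dt:
        print(string)
        _tprint_times[tag] = time.time()

for i in range(300):
    tprint(f"processed {i} items", dt=1, tag="demo")  # prints ~once per second, not 300 times
    time.sleep(0.01)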
7,158 | def autolevel(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Auto-level image using local histogram.
This filter locally stretches the histogram of grey values to cover the
entire range of values from "white" to "black".
Parameters
----------
image : 2-D array (integer, float or boolean)
Input image.
selem : 2-D array (integer, float or boolean)
The neighborhood expressed as a 2-D array of 1's and 0's.
out : 2-D array (integer, float or boolean)
If None, a new array is allocated.
mask : ndarray (integer, float or boolean)
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : 2-D array (same dtype as input image)
Output image.
Examples
--------
>>> from skimage import data
>>> from skimage.morphology import disk
>>> from skimage.filters.rank import autolevel
>>> img = data.camera()
>>> auto = autolevel(img, disk(5))
"""
return _apply_scalar_per_pixel(generic_cy._autolevel, image, selem,
out=out, mask=mask,
shift_x=shift_x, shift_y=shift_y)
| def autolevel(image, selem, out=None, mask=None, shift_x=False, shift_y=False):
"""Auto-level image using local histogram.
This filter locally stretches the histogram of gray values to cover the
entire range of values from "white" to "black".
Parameters
----------
image : 2-D array (integer, float or boolean)
Input image.
selem : 2-D array (integer, float or boolean)
The neighborhood expressed as a 2-D array of 1's and 0's.
out : 2-D array (integer, float or boolean)
If None, a new array is allocated.
mask : ndarray (integer, float or boolean)
Mask array that defines (>0) area of the image included in the local
neighborhood. If None, the complete image is used (default).
shift_x, shift_y : int
Offset added to the structuring element center point. Shift is bounded
to the structuring element sizes (center must be inside the given
structuring element).
Returns
-------
out : 2-D array (same dtype as input image)
Output image.
Examples
--------
>>> from skimage import data
>>> from skimage.morphology import disk
>>> from skimage.filters.rank import autolevel
>>> img = data.camera()
>>> auto = autolevel(img, disk(5))
"""
return _apply_scalar_per_pixel(generic_cy._autolevel, image, selem,
out=out, mask=mask,
shift_x=shift_x, shift_y=shift_y)
|
38,511 | def vertexes_of_convex_domain(A: np.ndarray, b: np.ndarray) -> np.ndarray:
"""Find the vertexes of a convex domain specified as an intersection of half spaces.
The function assumes the domain is defined by inequalities on the form
A * x + b <= 0
For more information, see scipy.spatial functions HalfspaceIntersection.
The function has been tested for 2d and 3d domains.
Error messages from HalfspaceIntersection are quite possibly from from qhull.
These will often reflect errors in the way the matrix A and b are set up
(e.g. sign errors that imply that the inequalities do not form a closed domain).
Parameters
----------
A : np.ndarray, size num_planes x num_dim
Matrix of normal vectors (in rows) for the half planes. Should be oriented
so that A * x + b < 0
b : np.ndarray, size num_planes
Constants used to define inequalities of the half spaces. Should be scaled
so that A * x + b < 0.
"""
b = b.reshape((-1, 1))
# First, find an interior point of the half space. For this we could have used
# the function half_space_interior_point, but that function is heavily geared
# towards 3d domains, so we prefer the simpler option below.
# Find the point that minimizes the distance from all half planes; this should
# be a point in the middle (somehow defined) of the domain.
fun = lambda x: np.linalg.norm(A.dot(x.reshape((-1, 1))) + b)
# Use scipy optimization to find an interior point to the half space.
interior_point = optimize.minimize(fun, np.zeros(A.shape[1])).x
# Set up constraints on the format that scipy.spatial HalfspaceIntersection
# expects
constraints = np.hstack((A, b))
# Get hold of domain (this will call qhull)
domain = HalfspaceIntersection(constraints, interior_point)
# Return intersections in the expected format (thus the transpose)
return domain.intersections.T
| def vertexes_of_convex_domain(A: np.ndarray, b: np.ndarray) -> np.ndarray:
"""Find the vertexes of a convex domain specified as an intersection of half spaces.
The function assumes the domain is defined by inequalities on the form
A * x + b <= 0
For more information, see scipy.spatial functions HalfspaceIntersection.
The function has been tested for 2d and 3d domains.
Error messages from HalfspaceIntersection are quite possibly from qhull.
These will often reflect errors in the way the matrix A and b are set up
(e.g. sign errors that imply that the inequalities do not form a closed domain).
Parameters
----------
A : np.ndarray, size num_planes x num_dim
Matrix of normal vectors (in rows) for the half planes. Should be oriented
so that A * x + b < 0
b : np.ndarray, size num_planes
Constants used to define inequalities of the half spaces. Should be scaled
so that A * x + b < 0.
"""
b = b.reshape((-1, 1))
# First, find an interior point of the half space. For this we could have used
# the function half_space_interior_point, but that function is heavily geared
# towards 3d domains, so we prefer the simpler option below.
# Find the point that minimizes the distance from all half planes; this should
# be a point in the middle (somehow defined) of the domain.
fun = lambda x: np.linalg.norm(A.dot(x.reshape((-1, 1))) + b)
# Use scipy optimization to find an interior point to the half space.
interior_point = optimize.minimize(fun, np.zeros(A.shape[1])).x
# Set up constraints on the format that scipy.spatial HalfspaceIntersection
# expects
constraints = np.hstack((A, b))
# Get hold of domain (this will call qhull)
domain = HalfspaceIntersection(constraints, interior_point)
# Return intersections in the expected format (thus the transpose)
return domain.intersections.T
|
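A quick self-contained check of the A @ x + b <= 0 convention described above, encoding the unit square and calling scipy's HalfspaceIntersection directly (the same steps the function performs, minus the automatic interior-point search):

import numpy as np
from scipy.spatial import HalfspaceIntersection

A = np.array([[-1.0,  0.0],   # -x     <= 0  (x >= 0)
              [ 1.0,  0.0],   #  x - 1 <= 0  (x <= 1)
              [ 0.0, -1.0],   # -y     <= 0  (y >= 0)
              [ 0.0,  1.0]])  #  y - 1 <= 0  (y <= 1)
b = np.array([0.0, -1.0, 0.0, -1.0])

# HalfspaceIntersection wants the stacked form [A | b] plus a strictly interior point.
halfspaces = np.hstack((A, b.reshape(-1, 1)))
interior_point = np.array([0.5, 0.5])
hs = HalfspaceIntersection(halfspaces, interior_point)
print(hs.intersections)  # the four corners of the unit square, one vertex per row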
43,354 | def run_cli(filename, template, rules, regions, override_spec, build_graph, mandatory_rules=None):
"""Process args and run"""
template_obj = Template(filename, template, regions)
customMatches = cfnlint.custom_rules.check_custom_rules('custom_rules.txt', template_obj)
if override_spec:
cfnlint.helpers.override_specs(override_spec)
if build_graph:
template_obj.build_graph()
return customMatches + run_checks(filename, template, rules, regions, mandatory_rules)
| def run_cli(filename, template, rules, regions, override_spec, build_graph, mandatory_rules=None):
"""Process args and run"""
template_obj = Template(filename, template, regions)
custom_matches = cfnlint.custom_rules.check('custom_rules.txt', template_obj)
if override_spec:
cfnlint.helpers.override_specs(override_spec)
if build_graph:
template_obj.build_graph()
return customMatches + run_checks(filename, template, rules, regions, mandatory_rules)
|
766 | def get_fnames(name='small_64D'):
"""Provide filenames of some datasets or other useful parametrisations.
Parameters
----------
name : str
the filename/s of which dataset to return, one of:
'small_64D' small region of interest nifti,bvecs,bvals 64 directions
'small_101D' small region of interest nifti,bvecs,bvals 101 directions
'aniso_vox' volume with anisotropic voxel size as Nifti
'fornix' 300 tracks in Trackvis format (from Pittsburgh
Brain Competition)
'gqi_vectors' the scanner wave vectors needed for a GQI acquisitions
of 101 directions tested on Siemens 3T Trio
'small_25' small ROI (10x8x2) DTI data (b value 2000, 25 directions)
'test_piesno' slice of N=8, K=14 diffusion data
'reg_c' small 2D image used for validating registration
'reg_o' small 2D image used for validation registration
'cb_2' two vectorized cingulum bundles
Returns
-------
fnames : tuple
filenames for dataset
Examples
----------
>>> import numpy as np
>>> from dipy.io.image import load_nifti
>>> from dipy.data import get_fnames
>>> fimg, fbvals, fbvecs = get_fnames('small_101D')
>>> bvals=np.loadtxt(fbvals)
>>> bvecs=np.loadtxt(fbvecs).T
>>> data, affine = load_nifti(fimg)
>>> data.shape == (6, 10, 10, 102)
True
>>> bvals.shape == (102,)
True
>>> bvecs.shape == (102, 3)
True
"""
if name == 'small_64D':
fbvals = pjoin(DATA_DIR, 'small_64D.bval')
fbvecs = pjoin(DATA_DIR, 'small_64D.bvec')
fimg = pjoin(DATA_DIR, 'small_64D.nii')
return fimg, fbvals, fbvecs
if name == '55dir_grad.bvec':
return pjoin(DATA_DIR, '55dir_grad.bvec')
if name == 'small_101D':
fbvals = pjoin(DATA_DIR, 'small_101D.bval')
fbvecs = pjoin(DATA_DIR, 'small_101D.bvec')
fimg = pjoin(DATA_DIR, 'small_101D.nii.gz')
return fimg, fbvals, fbvecs
if name == 'aniso_vox':
return pjoin(DATA_DIR, 'aniso_vox.nii.gz')
if name == 'ascm_test':
return pjoin(DATA_DIR, 'ascm_out_test.nii.gz')
if name == 'fornix':
return pjoin(DATA_DIR, 'tracks300.trk')
if name == 'gqi_vectors':
return pjoin(DATA_DIR, 'ScannerVectors_GQI101.txt')
if name == 'dsi515btable':
return pjoin(DATA_DIR, 'dsi515_b_table.txt')
if name == 'dsi4169btable':
return pjoin(DATA_DIR, 'dsi4169_b_table.txt')
if name == 'grad514':
return pjoin(DATA_DIR, 'grad_514.txt')
if name == "small_25":
fbvals = pjoin(DATA_DIR, 'small_25.bval')
fbvecs = pjoin(DATA_DIR, 'small_25.bvec')
fimg = pjoin(DATA_DIR, 'small_25.nii.gz')
return fimg, fbvals, fbvecs
if name == 'small_25_streamlines':
fstreamlines = pjoin(DATA_DIR, 'EuDX_small_25.trk')
return fstreamlines
if name == "S0_10":
fimg = pjoin(DATA_DIR, 'S0_10slices.nii.gz')
return fimg
if name == "test_piesno":
fimg = pjoin(DATA_DIR, 'test_piesno.nii.gz')
return fimg
if name == "reg_c":
return pjoin(DATA_DIR, 'C.npy')
if name == "reg_o":
return pjoin(DATA_DIR, 'circle.npy')
if name == 'cb_2':
return pjoin(DATA_DIR, 'cb_2.npz')
if name == "t1_coronal_slice":
return pjoin(DATA_DIR, 't1_coronal_slice.npy')
if name == 'scil_b0':
files, folder = fetch_scil_b0()
files = files['datasets_multi-site_all_companies.zip'][2]
files = [pjoin(folder, f) for f in files]
return [f for f in files if os.path.isfile(f)]
if name == 'stanford_hardi':
files, folder = fetch_stanford_hardi()
fraw = pjoin(folder, 'HARDI150.nii.gz')
fbval = pjoin(folder, 'HARDI150.bval')
fbvec = pjoin(folder, 'HARDI150.bvec')
return fraw, fbval, fbvec
if name == 'taiwan_ntu_dsi':
files, folder = fetch_taiwan_ntu_dsi()
fraw = pjoin(folder, 'DSI203.nii.gz')
fbval = pjoin(folder, 'DSI203.bval')
fbvec = pjoin(folder, 'DSI203.bvec')
return fraw, fbval, fbvec
if name == 'sherbrooke_3shell':
files, folder = fetch_sherbrooke_3shell()
fraw = pjoin(folder, 'HARDI193.nii.gz')
fbval = pjoin(folder, 'HARDI193.bval')
fbvec = pjoin(folder, 'HARDI193.bvec')
return fraw, fbval, fbvec
if name == 'isbi2013_2shell':
files, folder = fetch_isbi2013_2shell()
fraw = pjoin(folder, 'phantom64.nii.gz')
fbval = pjoin(folder, 'phantom64.bval')
fbvec = pjoin(folder, 'phantom64.bvec')
return fraw, fbval, fbvec
if name == 'stanford_labels':
files, folder = fetch_stanford_labels()
return pjoin(folder, "aparc-reduced.nii.gz")
if name == 'syn_data':
files, folder = fetch_syn_data()
t1_name = pjoin(folder, 't1.nii.gz')
b0_name = pjoin(folder, 'b0.nii.gz')
return t1_name, b0_name
if name == 'stanford_t1':
files, folder = fetch_stanford_t1()
return pjoin(folder, 't1.nii.gz')
if name == 'stanford_pve_maps':
files, folder = fetch_stanford_pve_maps()
f_pve_csf = pjoin(folder, 'pve_csf.nii.gz')
f_pve_gm = pjoin(folder, 'pve_gm.nii.gz')
f_pve_wm = pjoin(folder, 'pve_wm.nii.gz')
return f_pve_csf, f_pve_gm, f_pve_wm
if name == 'ivim':
files, folder = fetch_ivim()
fraw = pjoin(folder, 'ivim.nii.gz')
fbval = pjoin(folder, 'ivim.bval')
fbvec = pjoin(folder, 'ivim.bvec')
return fraw, fbval, fbvec
if name == 'tissue_data':
files, folder = fetch_tissue_data()
t1_name = pjoin(folder, 'tissue_data', 't1_brain.nii.gz')
t1d_name = pjoin(folder, 'tissue_data', 't1_brain_denoised.nii.gz')
ap_name = pjoin(folder, 'tissue_data', 'power_map.nii.gz')
return t1_name, t1d_name, ap_name
if name == 'cfin_multib':
files, folder = fetch_cfin_multib()
t1_name = pjoin(folder, 'T1.nii')
fraw = pjoin(folder, '__DTI_AX_ep2d_2_5_iso_33d_20141015095334_4.nii')
fbval = pjoin(folder,
'__DTI_AX_ep2d_2_5_iso_33d_20141015095334_4.bval')
fbvec = pjoin(folder,
'__DTI_AX_ep2d_2_5_iso_33d_20141015095334_4.bvec')
return fraw, fbval, fbvec, t1_name
if name == 'target_tractrogram_hcp':
files, folder = fetch_target_tractogram_hcp()
return pjoin(folder, 'target_tractogram_hcp', 'hcp_tractogram',
'streamlines.trk')
if name == 'bundle_atlas_hcp842':
files, folder = fetch_bundle_atlas_hcp842()
return get_bundle_atlas_hcp842()
| def get_fnames(name='small_64D'):
"""Provides full paths to example datasets.
Parameters
----------
name : str
the filename/s of which dataset to return, one of:
'small_64D' small region of interest nifti,bvecs,bvals 64 directions
'small_101D' small region of interest nifti,bvecs,bvals 101 directions
'aniso_vox' volume with anisotropic voxel size as Nifti
'fornix' 300 tracks in Trackvis format (from Pittsburgh
Brain Competition)
'gqi_vectors' the scanner wave vectors needed for a GQI acquisitions
of 101 directions tested on Siemens 3T Trio
'small_25' small ROI (10x8x2) DTI data (b value 2000, 25 directions)
'test_piesno' slice of N=8, K=14 diffusion data
'reg_c' small 2D image used for validating registration
'reg_o' small 2D image used for validation registration
'cb_2' two vectorized cingulum bundles
Returns
-------
fnames : tuple
filenames for dataset
Examples
----------
>>> import numpy as np
>>> from dipy.io.image import load_nifti
>>> from dipy.data import get_fnames
>>> fimg, fbvals, fbvecs = get_fnames('small_101D')
>>> bvals=np.loadtxt(fbvals)
>>> bvecs=np.loadtxt(fbvecs).T
>>> data, affine = load_nifti(fimg)
>>> data.shape == (6, 10, 10, 102)
True
>>> bvals.shape == (102,)
True
>>> bvecs.shape == (102, 3)
True
"""
if name == 'small_64D':
fbvals = pjoin(DATA_DIR, 'small_64D.bval')
fbvecs = pjoin(DATA_DIR, 'small_64D.bvec')
fimg = pjoin(DATA_DIR, 'small_64D.nii')
return fimg, fbvals, fbvecs
if name == '55dir_grad.bvec':
return pjoin(DATA_DIR, '55dir_grad.bvec')
if name == 'small_101D':
fbvals = pjoin(DATA_DIR, 'small_101D.bval')
fbvecs = pjoin(DATA_DIR, 'small_101D.bvec')
fimg = pjoin(DATA_DIR, 'small_101D.nii.gz')
return fimg, fbvals, fbvecs
if name == 'aniso_vox':
return pjoin(DATA_DIR, 'aniso_vox.nii.gz')
if name == 'ascm_test':
return pjoin(DATA_DIR, 'ascm_out_test.nii.gz')
if name == 'fornix':
return pjoin(DATA_DIR, 'tracks300.trk')
if name == 'gqi_vectors':
return pjoin(DATA_DIR, 'ScannerVectors_GQI101.txt')
if name == 'dsi515btable':
return pjoin(DATA_DIR, 'dsi515_b_table.txt')
if name == 'dsi4169btable':
return pjoin(DATA_DIR, 'dsi4169_b_table.txt')
if name == 'grad514':
return pjoin(DATA_DIR, 'grad_514.txt')
if name == "small_25":
fbvals = pjoin(DATA_DIR, 'small_25.bval')
fbvecs = pjoin(DATA_DIR, 'small_25.bvec')
fimg = pjoin(DATA_DIR, 'small_25.nii.gz')
return fimg, fbvals, fbvecs
if name == 'small_25_streamlines':
fstreamlines = pjoin(DATA_DIR, 'EuDX_small_25.trk')
return fstreamlines
if name == "S0_10":
fimg = pjoin(DATA_DIR, 'S0_10slices.nii.gz')
return fimg
if name == "test_piesno":
fimg = pjoin(DATA_DIR, 'test_piesno.nii.gz')
return fimg
if name == "reg_c":
return pjoin(DATA_DIR, 'C.npy')
if name == "reg_o":
return pjoin(DATA_DIR, 'circle.npy')
if name == 'cb_2':
return pjoin(DATA_DIR, 'cb_2.npz')
if name == "t1_coronal_slice":
return pjoin(DATA_DIR, 't1_coronal_slice.npy')
if name == 'scil_b0':
files, folder = fetch_scil_b0()
files = files['datasets_multi-site_all_companies.zip'][2]
files = [pjoin(folder, f) for f in files]
return [f for f in files if os.path.isfile(f)]
if name == 'stanford_hardi':
files, folder = fetch_stanford_hardi()
fraw = pjoin(folder, 'HARDI150.nii.gz')
fbval = pjoin(folder, 'HARDI150.bval')
fbvec = pjoin(folder, 'HARDI150.bvec')
return fraw, fbval, fbvec
if name == 'taiwan_ntu_dsi':
files, folder = fetch_taiwan_ntu_dsi()
fraw = pjoin(folder, 'DSI203.nii.gz')
fbval = pjoin(folder, 'DSI203.bval')
fbvec = pjoin(folder, 'DSI203.bvec')
return fraw, fbval, fbvec
if name == 'sherbrooke_3shell':
files, folder = fetch_sherbrooke_3shell()
fraw = pjoin(folder, 'HARDI193.nii.gz')
fbval = pjoin(folder, 'HARDI193.bval')
fbvec = pjoin(folder, 'HARDI193.bvec')
return fraw, fbval, fbvec
if name == 'isbi2013_2shell':
files, folder = fetch_isbi2013_2shell()
fraw = pjoin(folder, 'phantom64.nii.gz')
fbval = pjoin(folder, 'phantom64.bval')
fbvec = pjoin(folder, 'phantom64.bvec')
return fraw, fbval, fbvec
if name == 'stanford_labels':
files, folder = fetch_stanford_labels()
return pjoin(folder, "aparc-reduced.nii.gz")
if name == 'syn_data':
files, folder = fetch_syn_data()
t1_name = pjoin(folder, 't1.nii.gz')
b0_name = pjoin(folder, 'b0.nii.gz')
return t1_name, b0_name
if name == 'stanford_t1':
files, folder = fetch_stanford_t1()
return pjoin(folder, 't1.nii.gz')
if name == 'stanford_pve_maps':
files, folder = fetch_stanford_pve_maps()
f_pve_csf = pjoin(folder, 'pve_csf.nii.gz')
f_pve_gm = pjoin(folder, 'pve_gm.nii.gz')
f_pve_wm = pjoin(folder, 'pve_wm.nii.gz')
return f_pve_csf, f_pve_gm, f_pve_wm
if name == 'ivim':
files, folder = fetch_ivim()
fraw = pjoin(folder, 'ivim.nii.gz')
fbval = pjoin(folder, 'ivim.bval')
fbvec = pjoin(folder, 'ivim.bvec')
return fraw, fbval, fbvec
if name == 'tissue_data':
files, folder = fetch_tissue_data()
t1_name = pjoin(folder, 'tissue_data', 't1_brain.nii.gz')
t1d_name = pjoin(folder, 'tissue_data', 't1_brain_denoised.nii.gz')
ap_name = pjoin(folder, 'tissue_data', 'power_map.nii.gz')
return t1_name, t1d_name, ap_name
if name == 'cfin_multib':
files, folder = fetch_cfin_multib()
t1_name = pjoin(folder, 'T1.nii')
fraw = pjoin(folder, '__DTI_AX_ep2d_2_5_iso_33d_20141015095334_4.nii')
fbval = pjoin(folder,
'__DTI_AX_ep2d_2_5_iso_33d_20141015095334_4.bval')
fbvec = pjoin(folder,
'__DTI_AX_ep2d_2_5_iso_33d_20141015095334_4.bvec')
return fraw, fbval, fbvec, t1_name
if name == 'target_tractrogram_hcp':
files, folder = fetch_target_tractogram_hcp()
return pjoin(folder, 'target_tractogram_hcp', 'hcp_tractogram',
'streamlines.trk')
if name == 'bundle_atlas_hcp842':
files, folder = fetch_bundle_atlas_hcp842()
return get_bundle_atlas_hcp842()
|
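A brief usage sketch for one of the bundled datasets listed above, mirroring the docstring's own example but with the 'small_25' entry (which, per the code, returns the image, bvals and bvecs paths):

import numpy as np
from dipy.data import get_fnames
from dipy.io.image import load_nifti

fimg, fbvals, fbvecs = get_fnames('small_25')
data, affine = load_nifti(fimg)
bvals = np.loadtxt(fbvals)
bvecs = np.loadtxt(fbvecs).T
print(data.shape, bvals.shape, bvecs.shape)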
16,076 | def process_turn_on_params(
siren: SirenEntity, params: SirenTurnOnServiceParameters
) -> SirenTurnOnServiceParameters:
"""
Process turn_on service params.
Filters out unsupported params and validates the rest.
"""
supported_features = siren.supported_features or 0
if not supported_features & SUPPORT_TONES:
params.pop(ATTR_TONE, None)
elif (tone := params.get(ATTR_TONE)) is not None:
# Raise an exception if the specified tone isn't available
is_tone_dict_value = bool(
isinstance(siren.available_tones, dict)
and tone in siren.available_tones.values()
)
if not (
siren.available_tones
and (tone in siren.available_tones or is_tone_dict_value)
):
raise ValueError(
f"Invalid tone specified for entity {siren.entity_id}: {tone}, "
"check the available_tones attribute for valid tones to pass in"
)
# If available tones is a dict, and the tone provided is a dict value, we need
# to transform it to the corresponding dict key before returning
if is_tone_dict_value:
assert isinstance(siren.available_tones, dict)
params[ATTR_TONE] = next(
key for key, value in siren.available_tones.items() if value == tone
)
if not supported_features & SUPPORT_DURATION:
params.pop(ATTR_DURATION, None)
if not supported_features & SUPPORT_VOLUME_SET:
params.pop(ATTR_VOLUME_LEVEL, None)
return params
| def process_turn_on_params(
siren: SirenEntity, params: SirenTurnOnServiceParameters
) -> SirenTurnOnServiceParameters:
"""
Process turn_on service params.
Filters out unsupported params and validates the rest.
"""
supported_features = siren.supported_features or 0
if not supported_features & SUPPORT_TONES:
params.pop(ATTR_TONE, None)
elif (tone := params.get(ATTR_TONE)) is not None:
# Raise an exception if the specified tone isn't available
is_tone_dict_value = bool(
isinstance(siren.available_tones, dict)
and tone in siren.available_tones.values()
)
if not (
siren.available_tones
or tone not in siren.available_tones or not is_tone_dict_value
):
raise ValueError(
f"Invalid tone specified for entity {siren.entity_id}: {tone}, "
"check the available_tones attribute for valid tones to pass in"
)
# If available tones is a dict, and the tone provided is a dict value, we need
# to transform it to the corresponding dict key before returning
if is_tone_dict_value:
assert isinstance(siren.available_tones, dict)
params[ATTR_TONE] = next(
key for key, value in siren.available_tones.items() if value == tone
)
if not supported_features & SUPPORT_DURATION:
params.pop(ATTR_DURATION, None)
if not supported_features & SUPPORT_VOLUME_SET:
params.pop(ATTR_VOLUME_LEVEL, None)
return params
|
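The tone handling above accepts either a key or a display-name value of available_tones and, when given the latter, translates it back to its key before use. A toy sketch of that translation step (the tone names and keys here are made up):

available_tones = {1: "fire", 2: "burglar"}   # key -> display name
tone = "burglar"                              # caller passed the display name (a dict value)

if tone in available_tones.values():
    # translate the display name back to the key the device expects
    tone = next(key for key, value in available_tones.items() if value == tone)
print(tone)  # -> 2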
29,721 | def _running_process_matches(handle):
"""Check whether the current process is same as of handle's
Parameters
----------
handle: ``pyvnml.nvml.LP_struct_c_nvmlDevice_t``
NVML handle to CUDA device
Returns
-------
out: bool
``True`` if device handle's has a CUDA context on the running process,
or ``False`` otherwise.
"""
init_once()
if hasattr(pynvml, "nvmlDeviceGetComputeRunningProcesses_v2"):
running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses_v2(handle)
else:
running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
for proc in running_processes:
if os.getpid() == proc.pid:
return True
return False
| def _running_process_matches(handle):
"""Check whether the current process is same as of handle's
Parameters
----------
handle: ``pyvnml.nvml.LP_struct_c_nvmlDevice_t``
NVML handle to CUDA device
Returns
-------
out: bool
``True`` if device handle's has a CUDA context on the running process,
or ``False`` otherwise.
"""
init_once()
if hasattr(pynvml, "nvmlDeviceGetComputeRunningProcesses_v2"):
running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses_v2(handle)
else:
running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
return any(os.getpid() == proc.pid for proc in running_processes)
|
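A hedged, self-contained version of the same check for the first visible GPU; it requires an NVIDIA driver plus the pynvml package, and it skips the _v2 fallback the helper above adds:

import os
import pynvml

pynvml.nvmlInit()
handle = pynvml.nvmlDeviceGetHandleByIndex(0)              # first visible GPU
procs = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
print(any(os.getpid() == p.pid for p in procs))            # True only if this PID has a context there
pynvml.nvmlShutdown()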
49,619 | def auto_chunks(chunks, shape, limit, dtype, previous_chunks=None):
"""Determine automatic chunks
This takes in a chunks value that contains ``"auto"`` values in certain
dimensions and replaces those values with concrete dimension sizes that try
to get chunks to be of a certain size in bytes, provided by the ``limit=``
keyword. If multiple dimensions are marked as ``"auto"`` then they will
all respond to meet the desired byte limit, trying to respect the aspect
ratio of their dimensions in ``previous_chunks=``, if given.
Parameters
----------
chunks: Tuple
A tuple of either dimensions or tuples of explicit chunk dimensions
Some entries should be "auto"
shape: Tuple[int]
limit: int, str
The maximum allowable size of a chunk in bytes
previous_chunks: Tuple[Tuple[int]]
See also
--------
normalize_chunks: for full docstring and parameters
"""
if previous_chunks is not None:
previous_chunks = tuple(
c if isinstance(c, tuple) else (c,) for c in previous_chunks
)
chunks = list(chunks)
autos = {i for i, c in enumerate(chunks) if c == "auto"}
if not autos:
return tuple(chunks)
if limit is None:
limit = config.get("array.chunk-size")
if isinstance(limit, str):
limit = parse_bytes(limit)
if dtype is None:
raise TypeError("dtype must be known for auto-chunking")
if dtype.hasobject:
raise NotImplementedError(
"Can not use auto rechunking with object dtype. "
"We are unable to estimate the size in bytes of object data"
)
for x in tuple(chunks) + tuple(shape):
if (
isinstance(x, Number)
and np.isnan(x)
or isinstance(x, tuple)
and np.isnan(x).any()
):
raise ValueError(
"Can not perform automatic rechunking with unknown "
"(nan) chunk sizes.%s" % unknown_chunk_message
)
limit = max(1, limit)
largest_block = np.prod(
[cs if isinstance(cs, Number) else max(cs) for cs in chunks if cs != "auto"]
)
if previous_chunks:
# Base ideal ratio on the median chunk size of the previous chunks
result = {a: np.median(previous_chunks[a]) for a in autos}
ideal_shape = []
for i, s in enumerate(shape):
chunk_frequencies = frequencies(previous_chunks[i])
mode, count = max(chunk_frequencies.items(), key=lambda kv: kv[1])
if mode > 1 and count >= len(previous_chunks[i]) / 2:
ideal_shape.append(mode)
else:
ideal_shape.append(s)
# How much larger or smaller the ideal chunk size is relative to what we have now
multiplier = _compute_multiplier(limit, dtype, largest_block, result)
last_multiplier = 0
last_autos = set()
while (
multiplier != last_multiplier or autos != last_autos
): # while things change
last_multiplier = multiplier # record previous values
last_autos = set(autos) # record previous values
# Expand or contract each of the dimensions appropriately
for a in sorted(autos):
if ideal_shape[a] == 0:
result[a] = 0
continue
proposed = result[a] * multiplier ** (1 / len(autos))
if proposed > shape[a]: # we've hit the shape boundary
autos.remove(a)
largest_block *= shape[a]
chunks[a] = shape[a]
del result[a]
else:
result[a] = round_to(proposed, ideal_shape[a])
# recompute how much multiplier we have left, repeat
multiplier = _compute_multiplier(limit, dtype, largest_block, result)
for k, v in result.items():
chunks[k] = v
return tuple(chunks)
else:
# Check if dtype.itemsize is greater than 0
assert dtype.itemsize > 0, "dtype.itemsize must be > 0"
size = (limit / dtype.itemsize / largest_block) ** (1 / len(autos))
small = [i for i in autos if shape[i] < size]
if small:
for i in small:
chunks[i] = (shape[i],)
return auto_chunks(chunks, shape, limit, dtype)
for i in autos:
chunks[i] = round_to(size, shape[i])
return tuple(chunks)
| def auto_chunks(chunks, shape, limit, dtype, previous_chunks=None):
"""Determine automatic chunks
This takes in a chunks value that contains ``"auto"`` values in certain
dimensions and replaces those values with concrete dimension sizes that try
to get chunks to be of a certain size in bytes, provided by the ``limit=``
keyword. If multiple dimensions are marked as ``"auto"`` then they will
all respond to meet the desired byte limit, trying to respect the aspect
ratio of their dimensions in ``previous_chunks=``, if given.
Parameters
----------
chunks: Tuple
A tuple of either dimensions or tuples of explicit chunk dimensions
Some entries should be "auto"
shape: Tuple[int]
limit: int, str
The maximum allowable size of a chunk in bytes
previous_chunks: Tuple[Tuple[int]]
See also
--------
normalize_chunks: for full docstring and parameters
"""
if previous_chunks is not None:
previous_chunks = tuple(
c if isinstance(c, tuple) else (c,) for c in previous_chunks
)
chunks = list(chunks)
autos = {i for i, c in enumerate(chunks) if c == "auto"}
if not autos:
return tuple(chunks)
if limit is None:
limit = config.get("array.chunk-size")
if isinstance(limit, str):
limit = parse_bytes(limit)
if dtype is None:
raise TypeError("dtype must be known for auto-chunking")
if dtype.hasobject:
raise NotImplementedError(
"Can not use auto rechunking with object dtype. "
"We are unable to estimate the size in bytes of object data"
)
for x in tuple(chunks) + tuple(shape):
if (
isinstance(x, Number)
and np.isnan(x)
or isinstance(x, tuple)
and np.isnan(x).any()
):
raise ValueError(
"Can not perform automatic rechunking with unknown "
"(nan) chunk sizes.%s" % unknown_chunk_message
)
limit = max(1, limit)
largest_block = np.prod(
[cs if isinstance(cs, Number) else max(cs) for cs in chunks if cs != "auto"]
)
if previous_chunks:
# Base ideal ratio on the median chunk size of the previous chunks
result = {a: np.median(previous_chunks[a]) for a in autos}
ideal_shape = []
for i, s in enumerate(shape):
chunk_frequencies = frequencies(previous_chunks[i])
mode, count = max(chunk_frequencies.items(), key=lambda kv: kv[1])
if mode > 1 and count >= len(previous_chunks[i]) / 2:
ideal_shape.append(mode)
else:
ideal_shape.append(s)
# How much larger or smaller the ideal chunk size is relative to what we have now
multiplier = _compute_multiplier(limit, dtype, largest_block, result)
last_multiplier = 0
last_autos = set()
while (
multiplier != last_multiplier or autos != last_autos
): # while things change
last_multiplier = multiplier # record previous values
last_autos = set(autos) # record previous values
# Expand or contract each of the dimensions appropriately
for a in sorted(autos):
if ideal_shape[a] == 0:
result[a] = 0
continue
proposed = result[a] * multiplier ** (1 / len(autos))
if proposed > shape[a]: # we've hit the shape boundary
autos.remove(a)
largest_block *= shape[a]
chunks[a] = shape[a]
del result[a]
else:
result[a] = round_to(proposed, ideal_shape[a])
# recompute how much multiplier we have left, repeat
multiplier = _compute_multiplier(limit, dtype, largest_block, result)
for k, v in result.items():
chunks[k] = v
return tuple(chunks)
else:
# Check if dtype.itemsize is greater than 0
if dtype.itemsize == 0:
raise ValueError(
"auto-chunking with dtype.itemsize == 0 is not supported, please pass in `chunks` explicitly"
)
size = (limit / dtype.itemsize / largest_block) ** (1 / len(autos))
small = [i for i in autos if shape[i] < size]
if small:
for i in small:
chunks[i] = (shape[i],)
return auto_chunks(chunks, shape, limit, dtype)
for i in autos:
chunks[i] = round_to(size, shape[i])
return tuple(chunks)
|
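When previous_chunks is not given, the sizing rule above boils down to one line of arithmetic: chunk_len = (limit / itemsize / largest_block) ** (1 / n_auto_dims). A back-of-the-envelope sketch with assumed numbers (128 MiB target, float64 data, one "auto" dimension next to a fixed chunk of 1000):

limit = 128 * 2**20      # assumed 128 MiB chunk-size target
itemsize = 8             # float64
largest_block = 1000     # product of the non-"auto" chunk lengths
n_auto = 1               # one dimension marked "auto"

chunk_len = (limit / itemsize / largest_block) ** (1 / n_auto)
print(int(chunk_len))                                      # ~16777 rows per chunk
print(int(chunk_len) * largest_block * itemsize / 2**20)   # ~128 MiB per chunk, as intended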
54,063 | def add_random_edge(edge_index, p: float = 0.2, force_undirected: bool = False,
num_nodes: Optional[Union[Tuple[int], int]] = None,
training: bool = True) -> Tuple[Tensor, Tensor]:
r"""Randomly adds edges of ratio :obj:`p` into the existing edges
:obj:`edge_index`.
The method returns (1) the retained :obj:`edge_index`, (2) the added
edge indices.
Args:
edge_index (LongTensor): The edge indices.
p (float, optional): Ratio of added edges to the existing edges.
(default: :obj:`0.2`)
force_undirected (bool, optional): If set to :obj:`True`, will either
drop or keep both edges of an undirected edge.
(default: :obj:`False`)
num_nodes (int, Tuple[int], optional): The overall number of nodes,
*i.e.* :obj:`max_val + 1`, or the number of source and
destination nodes, *i.e.* :obj:`(max_src_val + 1, max_dst_val + 1)`
of :attr:`edge_index`. (default: :obj:`None`)
training (bool, optional): If set to :obj:`False`, this operation is a
no-op. (default: :obj:`True`)
:rtype: (:class:`LongTensor`, :class:`LongTensor`)
Examples:
>>> # Standard case
>>> edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],
... [1, 0, 2, 1, 3, 2]])
>>> edge_index, added_edges = add_random_edge(edge_index, p=0.5)
>>> edge_index
tensor([[0, 1, 1, 2, 2, 3, 2, 1, 3],
[1, 0, 2, 1, 3, 2, 0, 2, 1]])
>>> added_edges
tensor([[2, 1, 3],
[0, 2, 1]])
>>> # The returned graph is kept undirected
>>> edge_index, added_edges = add_random_edge(edge_index, p=0.5,
... force_undirected=True)
>>> edge_index
tensor([[0, 1, 1, 2, 2, 3, 2, 1, 3, 0, 2, 1],
[1, 0, 2, 1, 3, 2, 0, 2, 1, 2, 1, 3]])
>>> added_edges
tensor([[2, 1, 3, 0, 2, 1],
[0, 2, 1, 2, 1, 3]])
>>> # For bipartite graphs
>>> edge_index = torch.tensor([[0, 1, 2, 3, 4, 5],
... [2, 3, 1, 4, 2, 1]])
>>> edge_index, added_edges = add_random_edge(edge_index, p=0.5,
... num_nodes=(6, 5))
>>> edge_index
tensor([[0, 1, 2, 3, 4, 5, 3, 4, 1],
[2, 3, 1, 4, 2, 1, 1, 3, 2]])
>>> added_edges
tensor([[3, 4, 1],
[1, 3, 2]])
"""
if p < 0. or p > 1.:
raise ValueError(f'Ratio of added edges has to be between 0 and 1 '
f'(got {p}')
device = edge_index.device
if not training or p == 0.0:
edge_index_to_add = torch.tensor([], device=device).view(2, 0)
return edge_index, edge_index_to_add
num_nodes = (num_nodes,
num_nodes) if not isinstance(num_nodes, tuple) else num_nodes
num_src_nodes = num_nodes[0] or edge_index[0].max().item() + 1
num_dst_nodes = num_nodes[1] or edge_index[1].max().item() + 1
num_edges_to_add = round(edge_index.size(1) * p)
row = torch.randint(0, num_src_nodes, size=(num_edges_to_add, ))
col = torch.randint(0, num_dst_nodes, size=(num_edges_to_add, ))
if force_undirected:
edge_index_to_add = torch.stack(
[torch.cat([row, col], dim=0),
torch.cat([col, row], dim=0)], dim=0).to(device)
else:
edge_index_to_add = torch.stack([row, col], dim=0).to(device)
edge_index = torch.cat([edge_index, edge_index_to_add], dim=1)
return edge_index, edge_index_to_add
| def add_random_edge(edge_index, p: float = 0.2, force_undirected: bool = False,
num_nodes: Optional[Union[Tuple[int], int]] = None,
training: bool = True) -> Tuple[Tensor, Tensor]:
r"""Randomly adds edges of ratio :obj:`p` into the existing edges
:obj:`edge_index`.
The method returns (1) the retained :obj:`edge_index`, (2) the added
edge indices.
Args:
edge_index (LongTensor): The edge indices.
p (float, optional): Ratio of added edges to the existing edges.
(default: :obj:`0.2`)
force_undirected (bool, optional): If set to :obj:`True`, will either
drop or keep both edges of an undirected edge.
(default: :obj:`False`)
num_nodes (int, Tuple[int], optional): The overall number of nodes,
*i.e.* :obj:`max_val + 1`, or the number of source and
destination nodes, *i.e.* :obj:`(max_src_val + 1, max_dst_val + 1)`
of :attr:`edge_index`. (default: :obj:`None`)
training (bool, optional): If set to :obj:`False`, this operation is a
no-op. (default: :obj:`True`)
:rtype: (:class:`LongTensor`, :class:`LongTensor`)
Examples:
>>> # Standard case
>>> edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],
... [1, 0, 2, 1, 3, 2]])
>>> edge_index, added_edges = add_random_edge(edge_index, p=0.5)
>>> edge_index
tensor([[0, 1, 1, 2, 2, 3, 2, 1, 3],
[1, 0, 2, 1, 3, 2, 0, 2, 1]])
>>> added_edges
tensor([[2, 1, 3],
[0, 2, 1]])
>>> # The returned graph is kept undirected
>>> edge_index, added_edges = add_random_edge(edge_index, p=0.5,
... force_undirected=True)
>>> edge_index
tensor([[0, 1, 1, 2, 2, 3, 2, 1, 3, 0, 2, 1],
[1, 0, 2, 1, 3, 2, 0, 2, 1, 2, 1, 3]])
>>> added_edges
tensor([[2, 1, 3, 0, 2, 1],
[0, 2, 1, 2, 1, 3]])
>>> # For bipartite graphs
>>> edge_index = torch.tensor([[0, 1, 2, 3, 4, 5],
... [2, 3, 1, 4, 2, 1]])
>>> edge_index, added_edges = add_random_edge(edge_index, p=0.5,
... num_nodes=(6, 5))
>>> edge_index
tensor([[0, 1, 2, 3, 4, 5, 3, 4, 1],
[2, 3, 1, 4, 2, 1, 1, 3, 2]])
>>> added_edges
tensor([[3, 4, 1],
[1, 3, 2]])
"""
if p < 0. or p > 1.:
raise ValueError(f'Ratio of added edges has to be between 0 and 1 '
f'(got {p}')
device = edge_index.device
if not training or p == 0.0:
edge_index_to_add = torch.tensor([], device=device).view(2, 0)
return edge_index, edge_index_to_add
num_nodes = (num_nodes,
num_nodes) if not isinstance(num_nodes, tuple) else num_nodes
num_src_nodes = maybe_num_nodes(edge_index, num_nodes[0])
num_dst_nodes = maybe_num_nodes(edge_index, num_nodes[1])
num_edges_to_add = round(edge_index.size(1) * p)
row = torch.randint(0, num_src_nodes, size=(num_edges_to_add, ))
col = torch.randint(0, num_dst_nodes, size=(num_edges_to_add, ))
if force_undirected:
edge_index_to_add = torch.stack(
[torch.cat([row, col], dim=0),
torch.cat([col, row], dim=0)], dim=0).to(device)
else:
edge_index_to_add = torch.stack([row, col], dim=0).to(device)
edge_index = torch.cat([edge_index, edge_index_to_add], dim=1)
return edge_index, edge_index_to_add
|
53,846 | def register_providers():
_register_one_provider("Microsoft.BotService/botServices", "2021-03-01", True)
_register_one_provider("Microsoft.Automation/automationAccounts", "2020-01-13-preview", True)
_register_one_provider('Microsoft.AppConfiguration/configurationStores', '2020-06-01', True)
_register_one_provider("Microsoft.Batch/batchAccounts", "2020-03-01", True)
# "Microsoft.Cache/redisEnterprise", "2021-03-01", True
_register_one_provider('Microsoft.CognitiveServices/accounts', '2021-04-30', True)
_register_one_provider('Microsoft.Compute/diskAccesses', '2020-09-30', True)
_register_one_provider('Microsoft.ContainerRegistry/registries', '2019-12-01-preview', True)
_register_one_provider('Microsoft.DBforMySQL/servers', '2018-06-01', False, '2017-12-01-preview')
_register_one_provider('Microsoft.DBforMariaDB/servers', '2018-06-01', False)
_register_one_provider('Microsoft.DBforPostgreSQL/servers', '2018-06-01', False, '2017-12-01-preview')
_register_one_provider('Microsoft.Devices/IotHubs', '2020-03-01', True)
_register_one_provider('Microsoft.DocumentDB/databaseAccounts', '2019-08-01-preview', False, '2020-03-01')
_register_one_provider('Microsoft.DigitalTwins/digitalTwinsInstances', '2020-12-01', True)
_register_one_provider('Microsoft.EventGrid/topics', '2020-04-01-preview', True)
_register_one_provider('Microsoft.EventGrid/domains', '2020-04-01-preview', True)
_register_one_provider("Microsoft.EventHub/namespaces", "2018-01-01-preview", True)
_register_one_provider("Microsoft.HealthcareApis/services", "2020-03-30", True)
_register_one_provider('microsoft.insights/privateLinkScopes', '2019-10-17-preview', True)
_register_one_provider('Microsoft.KeyVault/managedHSMs', '2021-04-01-preview', True)
_register_one_provider('Microsoft.Keyvault/vaults', '2019-09-01', False)
_register_one_provider("Microsoft.Media/mediaservices", "2020-05-01", True)
# "Microsoft.Migrate/assessmentProjects", "2020-05-01-preview", False
# "Microsoft.Migrate/migrateProjects", "2020-06-01-preview", False
_register_one_provider('Microsoft.Network/applicationGateways', '2020-05-01', True)
# "Microsoft.OffAzure/masterSites", "2020-07-07", False
_register_one_provider("Microsoft.Purview/accounts", "2021-07-01", True)
_register_one_provider('Microsoft.PowerBI/privateLinkServicesForPowerBI', '2020-06-01', False)
_register_one_provider('Microsoft.Search/searchServices', '2020-08-01', True)
_register_one_provider("Microsoft.ServiceBus/namespaces", "2018-01-01-preview", True)
_register_one_provider('Microsoft.SignalRService/signalr', '2020-05-01', False)
_register_one_provider('Microsoft.Sql/servers', '2018-06-01-preview', True)
_register_one_provider('Microsoft.Storage/storageAccounts', '2019-06-01', True)
_register_one_provider("Microsoft.StorageSync/storageSyncServices", "2020-03-01", True)
_register_one_provider("Microsoft.Synapse/workspaces", "2019-06-01-preview", True)
_register_one_provider('Microsoft.Web/sites', '2019-08-01', False)
_register_one_provider("Microsoft.Web/hostingEnvironments", "2020-10-01", True)
| def register_providers():
_register_one_provider("Microsoft.Automation/automationAccounts", "2020-01-13-preview", True)
_register_one_provider('Microsoft.AppConfiguration/configurationStores', '2020-06-01', True)
_register_one_provider("Microsoft.Batch/batchAccounts", "2020-03-01", True)
_register_one_provider("Microsoft.BotService/botServices", "2021-03-01", True)
# "Microsoft.Cache/redisEnterprise", "2021-03-01", True
_register_one_provider('Microsoft.CognitiveServices/accounts', '2021-04-30', True)
_register_one_provider('Microsoft.Compute/diskAccesses', '2020-09-30', True)
_register_one_provider('Microsoft.ContainerRegistry/registries', '2019-12-01-preview', True)
_register_one_provider('Microsoft.DBforMySQL/servers', '2018-06-01', False, '2017-12-01-preview')
_register_one_provider('Microsoft.DBforMariaDB/servers', '2018-06-01', False)
_register_one_provider('Microsoft.DBforPostgreSQL/servers', '2018-06-01', False, '2017-12-01-preview')
_register_one_provider('Microsoft.Devices/IotHubs', '2020-03-01', True)
_register_one_provider('Microsoft.DocumentDB/databaseAccounts', '2019-08-01-preview', False, '2020-03-01')
_register_one_provider('Microsoft.DigitalTwins/digitalTwinsInstances', '2020-12-01', True)
_register_one_provider('Microsoft.EventGrid/topics', '2020-04-01-preview', True)
_register_one_provider('Microsoft.EventGrid/domains', '2020-04-01-preview', True)
_register_one_provider("Microsoft.EventHub/namespaces", "2018-01-01-preview", True)
_register_one_provider("Microsoft.HealthcareApis/services", "2020-03-30", True)
_register_one_provider('microsoft.insights/privateLinkScopes', '2019-10-17-preview', True)
_register_one_provider('Microsoft.KeyVault/managedHSMs', '2021-04-01-preview', True)
_register_one_provider('Microsoft.Keyvault/vaults', '2019-09-01', False)
_register_one_provider("Microsoft.Media/mediaservices", "2020-05-01", True)
# "Microsoft.Migrate/assessmentProjects", "2020-05-01-preview", False
# "Microsoft.Migrate/migrateProjects", "2020-06-01-preview", False
_register_one_provider('Microsoft.Network/applicationGateways', '2020-05-01', True)
# "Microsoft.OffAzure/masterSites", "2020-07-07", False
_register_one_provider("Microsoft.Purview/accounts", "2021-07-01", True)
_register_one_provider('Microsoft.PowerBI/privateLinkServicesForPowerBI', '2020-06-01', False)
_register_one_provider('Microsoft.Search/searchServices', '2020-08-01', True)
_register_one_provider("Microsoft.ServiceBus/namespaces", "2018-01-01-preview", True)
_register_one_provider('Microsoft.SignalRService/signalr', '2020-05-01', False)
_register_one_provider('Microsoft.Sql/servers', '2018-06-01-preview', True)
_register_one_provider('Microsoft.Storage/storageAccounts', '2019-06-01', True)
_register_one_provider("Microsoft.StorageSync/storageSyncServices", "2020-03-01", True)
_register_one_provider("Microsoft.Synapse/workspaces", "2019-06-01-preview", True)
_register_one_provider('Microsoft.Web/sites', '2019-08-01', False)
_register_one_provider("Microsoft.Web/hostingEnvironments", "2020-10-01", True)
|
436 | def choose_best_partition_for_topic(topic, key, shuffle_shards_per_key=5):
"""
Choose the best partition for topic through shuffle-sharding
- Map each key onto :shuffle_shards_per_key: partitions using consistent hashing
- Within those, pick the partition with the shortest backlog
Shuffle-sharding (as proponents put it) has the almost magical effect of simulating
one queue per key, isolating customers to a large extent from each other's workloads.
A :key: value of None is assigned to all partitions, and is thus a way of disabling
shuffle-sharding and going for the shortest of all partitions.
If :shuffle_shards_per_key: or is greater than the number of partitions
then the key is irrelevant and all keys are assigned to all partitions;
this has the same effect as using key=None.
For more on shuffle-sharding see:
https://aws.amazon.com/blogs/architecture/shuffle-sharding-massive-and-magical-fault-isolation/
"""
if topic not in _get_topic_to_pillow_map():
# None means there's no best, use the default
return None
backlog_lengths_by_partition = _get_backlog_lengths_by_partition(topic)
all_partitions = set(backlog_lengths_by_partition.keys())
if not key:
whitelist = all_partitions
else:
# map key to the partitions it's assigned to
whitelist = _n_choose_k_hash(
key=key,
n=max(all_partitions) + 1,
k=shuffle_shards_per_key,
) & all_partitions
_, best_partition = min(
(backlog_length, partition)
for partition, backlog_length in backlog_lengths_by_partition.items()
if partition in whitelist
)
return best_partition
| def choose_best_partition_for_topic(topic, key, shuffle_shards_per_key=5):
"""
Choose the best partition for topic through shuffle-sharding
- Map each key onto :shuffle_shards_per_key: partitions using consistent hashing
- Within those, pick the partition with the shortest backlog
Shuffle-sharding (as proponents put it) has the almost magical effect of simulating
one queue per key, isolating customers to a large extent from each other's workloads.
A :key: value of None is assigned to all partitions, and is thus a way of disabling
shuffle-sharding and going for the shortest of all partitions.
If :shuffle_shards_per_key: is greater than the number of partitions
then the key is irrelevant and all keys are assigned to all partitions;
this has the same effect as using key=None.
For more on shuffle-sharding see:
https://aws.amazon.com/blogs/architecture/shuffle-sharding-massive-and-magical-fault-isolation/
"""
if topic not in _get_topic_to_pillow_map():
# None means there's no best, use the default
return None
backlog_lengths_by_partition = _get_backlog_lengths_by_partition(topic)
all_partitions = set(backlog_lengths_by_partition.keys())
if not key:
whitelist = all_partitions
else:
# map key to the partitions it's assigned to
whitelist = _n_choose_k_hash(
key=key,
n=max(all_partitions) + 1,
k=shuffle_shards_per_key,
) & all_partitions
_, best_partition = min(
(backlog_length, partition)
for partition, backlog_length in backlog_lengths_by_partition.items()
if partition in whitelist
)
return best_partition
|
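A toy, self-contained illustration of the shuffle-sharding pick described in the docstring above. The real _n_choose_k_hash and backlog lookup are not shown in this snippet, so both are stand-ins here:

import hashlib

def toy_n_choose_k_hash(key, n, k):
    # Deterministically map the key to k of the n partitions (a stand-in for
    # the consistent-hashing helper used by the real code).
    ranked = sorted(range(n), key=lambda p: hashlib.md5(f"{key}:{p}".encode()).hexdigest())
    return set(ranked[:k])

backlogs = {0: 40, 1: 5, 2: 90, 3: 12, 4: 7, 5: 61}   # fake backlog lengths per partition
shard = toy_n_choose_k_hash("customer-42", n=len(backlogs), k=3)
best = min(shard, key=backlogs.get)
print(shard, best)  # the same key always lands on the same 3 partitions; pick the shortest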
57,964 | def domain_reputation_command(client: Client, args: Dict[str, Any], default_threshold: int) -> List[CommandResults]:
"""domain command: Returns domain reputation for a list of domains
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['domain']`` list of domains or a single domain
``args['threshold']`` threshold to determine whether a domain is malicious
:type default_threshold: ``int``
:param default_threshold:
default threshold to determine whether a domain is malicious
if threshold is not specified in the XSOAR arguments
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains Domains
:rtype: ``CommandResults``
"""
# INTEGRATION DEVELOPER TIP
# Reputation commands usually support multiple inputs (i.e. arrays), so
# they can be invoked once in XSOAR. In this case the API supports a single
# IP at a time, so we will cycle this for all the members of the array.
# We use argToList(), implemented in CommonServerPython.py to automatically
# return a list of a single element even if the provided input is a scalar.
domains = argToList(args.get('domain'))
if len(domains) == 0:
raise ValueError('domain(s) not specified')
threshold = int(args.get('threshold', default_threshold))
# Initialize an empty list of CommandResults to return,
# each CommandResult will contain context standard for Domain
command_results: List[CommandResults] = []
for domain in domains:
domain_data = client.get_domain_reputation(domain)
domain_data['domain'] = domain
# INTEGRATION DEVELOPER TIP
# We want to convert the dates to ISO8601 as
# Cortex XSOAR customers and integrations use this format by default
if 'creation_date' in domain_data:
domain_data['creation_date'] = parse_domain_date(domain_data['creation_date'])
if 'expiration_date' in domain_data:
domain_data['expiration_date'] = parse_domain_date(domain_data['expiration_date'])
if 'updated_date' in domain_data:
domain_data['updated_date'] = parse_domain_date(domain_data['updated_date'])
# HelloWorld score to XSOAR reputation mapping
# See: https://xsoar.pan.dev/docs/integrations/dbot
# We are using Common.DBotScore as macros to simplify
# the mapping.
score = 0
reputation = int(domain_data.get('score', 0))
if reputation == 0:
score = Common.DBotScore.NONE # unknown
elif reputation >= threshold:
score = Common.DBotScore.BAD # bad
elif reputation >= threshold / 2:
score = Common.DBotScore.SUSPICIOUS # suspicious
else:
score = Common.DBotScore.GOOD # good
# INTEGRATION DEVELOPER TIP
# The context is bigger here than other commands, as it consists in 3
# parts: the vendor-specific context (HelloWorld), the standard-context
# (Domain) and the DBotScore.
# More information:
# https://xsoar.pan.dev/docs/integrations/context-and-outputs
# https://xsoar.pan.dev/docs/integrations/context-standards
# https://xsoar.pan.dev/docs/integrations/dbot
# Also check the sample Design Document
dbot_score = Common.DBotScore(
indicator=domain,
integration_name='HelloWorld',
indicator_type=DBotScoreType.DOMAIN,
score=score,
malicious_description=f'Hello World returned reputation {reputation}'
)
# Create the Domain Standard Context structure using Common.Domain and
# add dbot_score to it.
domain_standard_context = Common.Domain(
domain=domain,
creation_date=domain_data.get('creation_date', None),
expiration_date=domain_data.get('expiration_date', None),
updated_date=domain_data.get('updated_date', None),
organization=domain_data.get('org', None),
name_servers=domain_data.get('name_servers', None),
registrant_name=domain_data.get('name', None),
registrant_country=domain_data.get('country', None),
registrar_name=domain_data.get('registrar', None),
dbot_score=dbot_score
)
# In this case we want to use a custom markdown to specify the table title,
# but otherwise ``CommandResults()`` will call ``tableToMarkdown()``
# automatically
readable_output = tableToMarkdown('Domain', domain_data)
# INTEGRATION DEVELOPER TIP
# The output key will be ``HelloWorld.Domain``, using ``domain`` as the key
# field.
# ``indicator`` is used to provide the context standard (Domain)
command_results.append(CommandResults(
readable_output=readable_output,
outputs_prefix='HelloWorld.Domain',
outputs_key_field='domain',
outputs=domain_data,
indicator=domain_standard_context
))
return command_results
| def domain_reputation_command(client: Client, args: Dict[str, Any], default_threshold: int) -> List[CommandResults]:
"""domain command: Returns domain reputation for a list of domains
:type client: ``Client``
:param Client: HelloWorld client to use
:type args: ``Dict[str, Any]``
:param args:
all command arguments, usually passed from ``demisto.args()``.
``args['domain']`` list of domains or a single domain
``args['threshold']`` threshold to determine whether a domain is malicious
:type default_threshold: ``int``
:param default_threshold:
default threshold to determine whether a domain is malicious
if threshold is not specified in the XSOAR arguments
:return:
A ``CommandResults`` object that is then passed to ``return_results``,
that contains Domains
:rtype: ``CommandResults``
"""
# INTEGRATION DEVELOPER TIP
# Reputation commands usually support multiple inputs (i.e. arrays), so
# they can be invoked once in XSOAR. In this case the API supports a single
# IP at a time, so we will cycle this for all the members of the array.
# We use argToList(), implemented in CommonServerPython.py to automatically
# return a list of a single element even if the provided input is a scalar.
domains = argToList(args.get('domain'))
if not domains:
raise ValueError('domain(s) not specified')
threshold = int(args.get('threshold', default_threshold))
# Initialize an empty list of CommandResults to return,
# each CommandResult will contain context standard for Domain
command_results: List[CommandResults] = []
for domain in domains:
domain_data = client.get_domain_reputation(domain)
domain_data['domain'] = domain
# INTEGRATION DEVELOPER TIP
# We want to convert the dates to ISO8601 as
# Cortex XSOAR customers and integrations use this format by default
if 'creation_date' in domain_data:
domain_data['creation_date'] = parse_domain_date(domain_data['creation_date'])
if 'expiration_date' in domain_data:
domain_data['expiration_date'] = parse_domain_date(domain_data['expiration_date'])
if 'updated_date' in domain_data:
domain_data['updated_date'] = parse_domain_date(domain_data['updated_date'])
# HelloWorld score to XSOAR reputation mapping
# See: https://xsoar.pan.dev/docs/integrations/dbot
# We are using Common.DBotScore as macros to simplify
# the mapping.
score = 0
reputation = int(domain_data.get('score', 0))
if reputation == 0:
score = Common.DBotScore.NONE # unknown
elif reputation >= threshold:
score = Common.DBotScore.BAD # bad
elif reputation >= threshold / 2:
score = Common.DBotScore.SUSPICIOUS # suspicious
else:
score = Common.DBotScore.GOOD # good
# INTEGRATION DEVELOPER TIP
        # The context is bigger here than other commands, as it consists of 3
# parts: the vendor-specific context (HelloWorld), the standard-context
# (Domain) and the DBotScore.
# More information:
# https://xsoar.pan.dev/docs/integrations/context-and-outputs
# https://xsoar.pan.dev/docs/integrations/context-standards
# https://xsoar.pan.dev/docs/integrations/dbot
# Also check the sample Design Document
dbot_score = Common.DBotScore(
indicator=domain,
integration_name='HelloWorld',
indicator_type=DBotScoreType.DOMAIN,
score=score,
malicious_description=f'Hello World returned reputation {reputation}'
)
# Create the Domain Standard Context structure using Common.Domain and
# add dbot_score to it.
domain_standard_context = Common.Domain(
domain=domain,
creation_date=domain_data.get('creation_date', None),
expiration_date=domain_data.get('expiration_date', None),
updated_date=domain_data.get('updated_date', None),
organization=domain_data.get('org', None),
name_servers=domain_data.get('name_servers', None),
registrant_name=domain_data.get('name', None),
registrant_country=domain_data.get('country', None),
registrar_name=domain_data.get('registrar', None),
dbot_score=dbot_score
)
        # In this case we want to use a custom markdown to specify the table title,
# but otherwise ``CommandResults()`` will call ``tableToMarkdown()``
# automatically
readable_output = tableToMarkdown('Domain', domain_data)
# INTEGRATION DEVELOPER TIP
# The output key will be ``HelloWorld.Domain``, using ``domain`` as the key
# field.
# ``indicator`` is used to provide the context standard (Domain)
command_results.append(CommandResults(
readable_output=readable_output,
outputs_prefix='HelloWorld.Domain',
outputs_key_field='domain',
outputs=domain_data,
indicator=domain_standard_context
))
return command_results
|
41,354 | def format_data(df, **kwargs):
"""Convert a `pd.Dataframe` or `pd.Series` to the required format"""
if isinstance(df, pd.Series):
df = df.to_frame()
# Check for R-style year columns, converting where necessary
def convert_r_columns(c):
try:
first = c[0]
second = c[1:]
if first == 'X':
try:
# bingo! was X2015 R-style, return the integer
return int(second)
except:
# nope, not an int, fall down to final return statement
pass
except:
# not a string/iterable/etc, fall down to final return statement
pass
return c
df.columns = map(convert_r_columns, df.columns)
# if `value` is given but not `variable`,
# melt value columns and use column name as `variable`
if 'value' in kwargs and 'variable' not in kwargs:
value = kwargs.pop('value')
value = value if islistable(value) else [value]
_df = df.set_index(list(set(df.columns) - set(value)))
dfs = []
for v in value:
if v not in df.columns:
raise ValueError('column `{}` does not exist!'.format(v))
vdf = _df[v].to_frame().rename(columns={v: 'value'})
vdf['variable'] = v
dfs.append(vdf.reset_index())
df = pd.concat(dfs).reset_index(drop=True)
# otherwise, rename columns or concat to IAMC-style or do a fill-by-value
for col, value in kwargs.items():
if col in df:
raise ValueError('conflict of kwarg with column `{}` in dataframe!'
.format(col))
if isstr(value) and value in df:
df.rename(columns={value: col}, inplace=True)
elif islistable(value) and all([c in df.columns for c in value]):
df[col] = df.apply(lambda x: concat_with_pipe(x, value), axis=1)
df.drop(value, axis=1, inplace=True)
elif isstr(value):
df[col] = value
else:
raise ValueError('invalid argument for casting `{}: {}`'
.format(col, value))
# all lower case
str_cols = [c for c in df.columns if isstr(c)]
df.rename(columns={c: str(c).lower() for c in str_cols}, inplace=True)
if 'notes' in df.columns: # this came from the database
logger().info('Ignoring notes column in dataframe')
df.drop(columns='notes', inplace=True)
col = df.columns[0] # first column has database copyright notice
df = df[~df[col].str.contains('database', case=False)]
if 'scenario' in df.columns and 'model' not in df.columns:
# model and scenario are jammed together in RCP data
scen = df['scenario']
df['model'] = scen.apply(lambda s: s.split('-')[0].strip())
df['scenario'] = scen.apply(
lambda s: '-'.join(s.split('-')[1:]).strip())
# reset the index if meaningful entries are included there
if not list(df.index.names) == [None]:
df.reset_index(inplace=True)
# format columns to lower-case and check that all required columns exist
if not set(IAMC_IDX).issubset(set(df.columns)):
missing = list(set(IAMC_IDX) - set(df.columns))
raise ValueError("missing required columns `{}`!".format(missing))
# check whether data in wide format (IAMC) or long format (`value` column)
if 'value' in df.columns:
# check if time column is given as `year` (int) or `time` (datetime)
cols = df.columns
if 'year' in cols:
time_col = 'year'
elif 'time' in cols:
time_col = 'time'
else:
msg = 'invalid time format, must have either `year` or `time`!'
raise ValueError(msg)
extra_cols = list(set(cols) - set(IAMC_IDX + [time_col, 'value']))
else:
# if in wide format, check if columns are years (int) or datetime
cols = set(df.columns) - set(IAMC_IDX)
year_cols, time_cols, extra_cols = [], [], []
for i in cols:
try:
int(i) # this is a year
year_cols.append(i)
except (ValueError, TypeError):
try:
dateutil.parser.parse(str(i)) # this is datetime
time_cols.append(i)
except ValueError:
extra_cols.append(i) # some other string
if year_cols and not time_cols:
time_col = 'year'
melt_cols = year_cols
elif not year_cols and time_cols:
time_col = 'time'
melt_cols = time_cols
else:
msg = 'invalid column format, must be either years or `datetime`!'
raise ValueError(msg)
df = pd.melt(df, id_vars=IAMC_IDX + extra_cols, var_name=time_col,
value_vars=sorted(melt_cols), value_name='value')
# cast value columns to numeric, drop NaN's, sort data
df['value'] = df['value'].astype('float64')
df.dropna(inplace=True)
# check for duplicates and return sorted data
idx_cols = IAMC_IDX + [time_col] + extra_cols
if any(df[idx_cols].duplicated()):
raise ValueError('duplicate rows in `data`!')
return sort_data(df, idx_cols), time_col, extra_cols
| def format_data(df, **kwargs):
"""Convert a `pd.Dataframe` or `pd.Series` to the required format"""
if isinstance(df, pd.Series):
df = df.to_frame()
# Check for R-style year columns, converting where necessary
def convert_r_columns(c):
try:
first = c[0]
second = c[1:]
if first == 'X':
try:
# bingo! was X2015 R-style, return the integer
return int(second)
except:
# nope, not an int, fall down to final return statement
pass
except:
# not a string/iterable/etc, fall down to final return statement
pass
return c
df.columns = df.columns.map(convert_r_columns)
# if `value` is given but not `variable`,
# melt value columns and use column name as `variable`
if 'value' in kwargs and 'variable' not in kwargs:
value = kwargs.pop('value')
value = value if islistable(value) else [value]
_df = df.set_index(list(set(df.columns) - set(value)))
dfs = []
for v in value:
if v not in df.columns:
raise ValueError('column `{}` does not exist!'.format(v))
vdf = _df[v].to_frame().rename(columns={v: 'value'})
vdf['variable'] = v
dfs.append(vdf.reset_index())
df = pd.concat(dfs).reset_index(drop=True)
# otherwise, rename columns or concat to IAMC-style or do a fill-by-value
for col, value in kwargs.items():
if col in df:
raise ValueError('conflict of kwarg with column `{}` in dataframe!'
.format(col))
if isstr(value) and value in df:
df.rename(columns={value: col}, inplace=True)
elif islistable(value) and all([c in df.columns for c in value]):
df[col] = df.apply(lambda x: concat_with_pipe(x, value), axis=1)
df.drop(value, axis=1, inplace=True)
elif isstr(value):
df[col] = value
else:
raise ValueError('invalid argument for casting `{}: {}`'
.format(col, value))
# all lower case
str_cols = [c for c in df.columns if isstr(c)]
df.rename(columns={c: str(c).lower() for c in str_cols}, inplace=True)
if 'notes' in df.columns: # this came from the database
logger().info('Ignoring notes column in dataframe')
df.drop(columns='notes', inplace=True)
col = df.columns[0] # first column has database copyright notice
df = df[~df[col].str.contains('database', case=False)]
if 'scenario' in df.columns and 'model' not in df.columns:
# model and scenario are jammed together in RCP data
scen = df['scenario']
df['model'] = scen.apply(lambda s: s.split('-')[0].strip())
df['scenario'] = scen.apply(
lambda s: '-'.join(s.split('-')[1:]).strip())
# reset the index if meaningful entries are included there
if not list(df.index.names) == [None]:
df.reset_index(inplace=True)
# format columns to lower-case and check that all required columns exist
if not set(IAMC_IDX).issubset(set(df.columns)):
missing = list(set(IAMC_IDX) - set(df.columns))
raise ValueError("missing required columns `{}`!".format(missing))
# check whether data in wide format (IAMC) or long format (`value` column)
if 'value' in df.columns:
# check if time column is given as `year` (int) or `time` (datetime)
cols = df.columns
if 'year' in cols:
time_col = 'year'
elif 'time' in cols:
time_col = 'time'
else:
msg = 'invalid time format, must have either `year` or `time`!'
raise ValueError(msg)
extra_cols = list(set(cols) - set(IAMC_IDX + [time_col, 'value']))
else:
# if in wide format, check if columns are years (int) or datetime
cols = set(df.columns) - set(IAMC_IDX)
year_cols, time_cols, extra_cols = [], [], []
for i in cols:
try:
int(i) # this is a year
year_cols.append(i)
except (ValueError, TypeError):
try:
dateutil.parser.parse(str(i)) # this is datetime
time_cols.append(i)
except ValueError:
extra_cols.append(i) # some other string
if year_cols and not time_cols:
time_col = 'year'
melt_cols = year_cols
elif not year_cols and time_cols:
time_col = 'time'
melt_cols = time_cols
else:
msg = 'invalid column format, must be either years or `datetime`!'
raise ValueError(msg)
df = pd.melt(df, id_vars=IAMC_IDX + extra_cols, var_name=time_col,
value_vars=sorted(melt_cols), value_name='value')
# cast value columns to numeric, drop NaN's, sort data
df['value'] = df['value'].astype('float64')
df.dropna(inplace=True)
# check for duplicates and return sorted data
idx_cols = IAMC_IDX + [time_col] + extra_cols
if any(df[idx_cols].duplicated()):
raise ValueError('duplicate rows in `data`!')
return sort_data(df, idx_cols), time_col, extra_cols
|
58,278 | def _create_size_placeholder(name, axis_wo_b, tag, batch_dim):
"""
:param str name:
:param int axis_wo_b:
:param Dim tag:
:param Dim|None batch_dim:
"""
from .basic import reuse_name_scope
with reuse_name_scope("extern_data/placeholders/%s" % name, absolute=True):
if batch_dim is not None and batch_dim.batch is not None:
batch = batch_dim.batch
elif tag.batch is not None:
batch = tag.batch
else:
batch = None
dyn_size_ext = Data(
"%s_dim%i_size" % (name, axis_wo_b), dtype=Data.size_dtype,
dim_tags=[batch_dim] if batch_dim else [], batch=batch)
dyn_size = tf_compat.v1.placeholder(
name=dyn_size_ext.name, dtype=dyn_size_ext.dtype, shape=dyn_size_ext.batch_shape)
dyn_size_ext.placeholder = dyn_size
if dyn_size_ext.batch:
tag.set_dyn_size_ext_for_batch_ctx(
batch=dyn_size_ext.batch, ctx=dyn_size_ext.control_flow_ctx, dyn_size_ext=dyn_size_ext)
else:
tag.dyn_size_ext = dyn_size_ext
if batch_dim and batch_dim.batch:
tag.batch = batch_dim.batch
tag.set_tag_on_size_tensor(dyn_size)
| def _create_size_placeholder(name, axis_wo_b, tag, batch_dim):
"""
:param str name:
:param int axis_wo_b:
:param Dim tag:
:param Dim|None batch_dim:
"""
from .basic import reuse_name_scope
with reuse_name_scope("extern_data/placeholders/%s" % name, absolute=True):
if batch_dim is not None and batch_dim.batch is not None:
batch = batch_dim.batch
elif tag.batch is not None:
batch = tag.batch
else:
batch = None
dyn_size_ext = Data(
"%s_dim%i_size" % (name, axis_wo_b), dtype=Data.size_dtype,
dim_tags=[batch_dim] if batch_dim else [],
batch=batch if batch_dim else None)
dyn_size = tf_compat.v1.placeholder(
name=dyn_size_ext.name, dtype=dyn_size_ext.dtype, shape=dyn_size_ext.batch_shape)
dyn_size_ext.placeholder = dyn_size
if dyn_size_ext.batch:
tag.set_dyn_size_ext_for_batch_ctx(
batch=dyn_size_ext.batch, ctx=dyn_size_ext.control_flow_ctx, dyn_size_ext=dyn_size_ext)
else:
tag.dyn_size_ext = dyn_size_ext
if batch_dim and batch_dim.batch:
tag.batch = batch_dim.batch
tag.set_tag_on_size_tensor(dyn_size)
|
5,012 | def _get_aligned_offsets(hd_list, height, align="baseline"):
"""
Vertically align boxes each specified by their ``(height, descent)`` pair.
(For simplicity of the description, the terminology used here assumes a
horizontal layout, but the function works equally for a vertical layout.)
Parameters
----------
hd_list
List of (height, xdescent) of boxes to be aligned.
height : float or None
Intended total height. If None, the maximum of the heights in *hd_list*
is used.
align : {'baseline', 'left', 'top', 'right', 'bottom', 'center'}
The alignment anchor of the boxes.
Returns
-------
height
The total height of the packing (if a value was originally passed in,
it is returned without checking that it is actually large enough).
descent
The descent of the packing.
offsets
The bottom offsets of the boxes.
"""
if height is None:
height = max(h for h, d in hd_list)
_api.check_in_list(
["baseline", "left", "top", "right", "bottom", "center"], align=align)
if align == "baseline":
height_descent = max(h - d for h, d in hd_list)
descent = max(d for h, d in hd_list)
height = height_descent + descent
offsets = [0. for h, d in hd_list]
elif align in ["left", "top"]:
descent = 0.
offsets = [d for h, d in hd_list]
elif align in ["right", "bottom"]:
descent = 0.
offsets = [height - h + d for h, d in hd_list]
elif align == "center":
descent = 0.
offsets = [(height - h) * .5 + d for h, d in hd_list]
return height, descent, offsets
| def _get_aligned_offsets(hd_list, height, align="baseline"):
"""
Align boxes each specified by their ``(height, descent)`` pair.
(For simplicity of the description, the terminology used here assumes a
horizontal layout, but the function works equally for a vertical layout.)
Parameters
----------
hd_list
List of (height, xdescent) of boxes to be aligned.
height : float or None
Intended total height. If None, the maximum of the heights in *hd_list*
is used.
align : {'baseline', 'left', 'top', 'right', 'bottom', 'center'}
The alignment anchor of the boxes.
Returns
-------
height
The total height of the packing (if a value was originally passed in,
it is returned without checking that it is actually large enough).
descent
The descent of the packing.
offsets
The bottom offsets of the boxes.
"""
if height is None:
height = max(h for h, d in hd_list)
_api.check_in_list(
["baseline", "left", "top", "right", "bottom", "center"], align=align)
if align == "baseline":
height_descent = max(h - d for h, d in hd_list)
descent = max(d for h, d in hd_list)
height = height_descent + descent
offsets = [0. for h, d in hd_list]
elif align in ["left", "top"]:
descent = 0.
offsets = [d for h, d in hd_list]
elif align in ["right", "bottom"]:
descent = 0.
offsets = [height - h + d for h, d in hd_list]
elif align == "center":
descent = 0.
offsets = [(height - h) * .5 + d for h, d in hd_list]
return height, descent, offsets
|
41,667 | def make_package(package, version=None):
import yaml
version = ('/' + version) if version else ''
url = f"https://pypi.org/pypi/{package}{version}/json"
with urllib.request.urlopen(url) as fd:
json_content = json.load(fd)
entry = get_sdist_url_entry(json_content)
download_url = entry['url']
sha256 = entry['digests']['sha256']
version = json_content['info']['version']
yaml_content = {
'package': {
'name': package,
'version': version
},
'source': {
'url': download_url,
'sha256': sha256
},
'test': {
'imports': [
package
]
}
}
if not (PACKAGES_ROOT / package).is_dir():
os.makedirs(PACKAGES_ROOT / package)
with open(PACKAGES_ROOT / package / 'meta.yaml', 'w') as fd:
yaml.dump(yaml_content, fd, default_flow_style=False)
| def make_package(package, version=None):
import yaml
version = ('/' + version) if version is not None else ''
url = f"https://pypi.org/pypi/{package}{version}/json"
with urllib.request.urlopen(url) as fd:
json_content = json.load(fd)
entry = get_sdist_url_entry(json_content)
download_url = entry['url']
sha256 = entry['digests']['sha256']
version = json_content['info']['version']
yaml_content = {
'package': {
'name': package,
'version': version
},
'source': {
'url': download_url,
'sha256': sha256
},
'test': {
'imports': [
package
]
}
}
if not (PACKAGES_ROOT / package).is_dir():
os.makedirs(PACKAGES_ROOT / package)
with open(PACKAGES_ROOT / package / 'meta.yaml', 'w') as fd:
yaml.dump(yaml_content, fd, default_flow_style=False)
|
10,808 | def _remove_unneeded_phis(phimap):
"""Remove unneeded PHis from the phimap
"""
all_phis = []
for philist in phimap.values():
all_phis.extend(philist)
unneeded_phis = set()
# Find unneeded PHIs.
for phi in all_phis:
ivs = phi.value.incoming_values
# It's unneeded if the incomings are either undefined or
        # the PHI node itself
if all(iv is ir.UNDEFINED or iv == phi.target for iv in ivs):
unneeded_phis.add(phi)
# Fix up references to unneeded PHIs
for phi in all_phis:
for unneed in unneeded_phis:
if unneed is not phi:
# If the unneeded PHI is in the current phi's incoming values
if unneed.target in phi.value.incoming_values:
# Replace the unneeded PHI with a UNDEFINED
idx = phi.value.incoming_values.index(unneed.target)
phi.value.incoming_values[idx] = ir.UNDEFINED
# Remove unneeded phis
for philist in phimap.values():
for unneeded in unneeded_phis:
if unneeded in philist:
philist.remove(unneeded)
| def _remove_unneeded_phis(phimap):
"""Remove unneeded PHis from the phimap
"""
all_phis = []
for philist in phimap.values():
all_phis.extend(philist)
unneeded_phis = set()
# Find unneeded PHIs.
for phi in all_phis:
ivs = phi.value.incoming_values
# It's unneeded if the incomings are either undefined or
        # the PHI node itself
if all(iv is ir.UNDEFINED or iv == phi.target for iv in ivs):
unneeded_phis.add(phi)
# Fix up references to unneeded PHIs
for phi in all_phis:
for unneed in unneeded_phis:
if unneed is not phi:
# If the unneeded PHI is in the current phi's incoming values
if unneed.target in phi.value.incoming_values:
# Replace the unneeded PHI with an UNDEFINED
idx = phi.value.incoming_values.index(unneed.target)
phi.value.incoming_values[idx] = ir.UNDEFINED
# Remove unneeded phis
for philist in phimap.values():
for unneeded in unneeded_phis:
if unneeded in philist:
philist.remove(unneeded)
|
46,527 | def test_finality_from_genesis_rule_4(state):
test_state = deepcopy(state)
blocks = []
for epoch in range(6):
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, False)
blocks += new_blocks
if epoch == 0:
check_finality(test_state, prev_state, False, False, False)
elif epoch == 1:
check_finality(test_state, prev_state, False, False, False)
elif epoch == 2:
check_finality(test_state, prev_state, True, False, False)
elif epoch >= 3:
# rule 4 of finaliy
check_finality(test_state, prev_state, True, True, True)
assert test_state.finalized_epoch == prev_state.current_justified_epoch
assert test_state.finalized_root == prev_state.current_justified_root
return state, blocks, test_state
| def test_finality_from_genesis_rule_4(state):
test_state = deepcopy(state)
blocks = []
for epoch in range(6):
prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, False)
blocks += new_blocks
if epoch == 0:
check_finality(test_state, prev_state, False, False, False)
elif epoch == 1:
check_finality(test_state, prev_state, False, False, False)
elif epoch == 2:
check_finality(test_state, prev_state, True, False, False)
elif epoch >= 3:
# rule 4 of finality
check_finality(test_state, prev_state, True, True, True)
assert test_state.finalized_epoch == prev_state.current_justified_epoch
assert test_state.finalized_root == prev_state.current_justified_root
return state, blocks, test_state
|
57,944 | def send_mail(args: dict, sg_from_email: str, sg_sender_name: str, sg):
message = Mail() # type: ignore[name-defined]
attach_ids = args.get('AttachIDs')
attach_names = args.get('AttachNames') or ""
if attach_ids:
process_attachments(message, attach_ids, attach_names)
categories = args.get('Categories')
if categories:
categories = categories.split(",")
for category in categories:
message.category = Category(category) # type: ignore[name-defined]
batch_id = args.get('BatchID')
if batch_id:
message.batch_id = BatchId(batch_id) # type: ignore[name-defined]
send_at = args.get('SendAt')
if send_at:
t = dateutil.parser.parse(send_at)
send_time = time.mktime(t.timetuple())
message.send_at = SendAt(int(send_time)) # type: ignore[name-defined]
asm = args.get('Asm')
if asm:
asm = asm if type(asm) is dict else json.loads(asm)
message.asm = Asm(GroupId(asm["group_id"]), GroupsToDisplay(asm["groups_to_display"])) # type: ignore[name-defined]
custom_args = args.get('CustomArgs')
if custom_args:
custom_args = custom_args if type(custom_args) is dict else json.loads(custom_args)
for key in custom_args:
message.custom_arg = CustomArg(key, custom_args[key]) # type: ignore[name-defined]
ip_pool_name = args.get('IPPoolName')
if ip_pool_name:
message.ip_pool_name = IpPoolName(ip_pool_name) # type: ignore[name-defined]
# Mail Tracking settings
tracking_settings = TrackingSettings() # type: ignore[name-defined]
click_tracking = args.get('ClickTracking')
if click_tracking:
click_tracking = click_tracking if type(click_tracking) is dict else json.loads(click_tracking)
is_enable = False if click_tracking["enable"] == 'False' else True
tracking_settings.click_tracking = ClickTracking(is_enable, # type: ignore[name-defined]
click_tracking["enable_text"])
open_tracking = args.get('OpenTracking')
if open_tracking:
open_tracking = open_tracking if type(open_tracking) is dict else json.loads(open_tracking)
is_enable = False if open_tracking["enable"] == 'False' else True
tracking_settings.open_tracking = OpenTracking( # type: ignore[name-defined]
is_enable,
OpenTrackingSubstitutionTag(open_tracking["substitution_tag"])) # type: ignore[name-defined]
sub_tracking = args.get('SubscriptionTracking')
if sub_tracking:
sub_tracking = sub_tracking if type(sub_tracking) is dict else json.loads(sub_tracking)
is_enable = False if sub_tracking["enable"] == 'False' else True
tracking_settings.subscription_tracking = SubscriptionTracking( # type: ignore[name-defined]
is_enable,
SubscriptionText(sub_tracking["text"]), # type: ignore[name-defined]
SubscriptionHtml(sub_tracking["html"]), # type: ignore[name-defined]
SubscriptionSubstitutionTag(sub_tracking["substitution_tag"])) # type: ignore[name-defined]
ganalytics = args.get('GAnalytics')
if ganalytics:
ganalytics = ganalytics if type(ganalytics) is dict else json.loads(ganalytics)
is_enable = False if ganalytics["enable"] == 'False' else True
tracking_settings.ganalytics = Ganalytics( # type: ignore[name-defined]
is_enable,
UtmSource(ganalytics["utm_source"]), # type: ignore[name-defined]
UtmMedium(ganalytics["utm_medium"]), # type: ignore[name-defined]
UtmTerm(ganalytics["utm_term"]), # type: ignore[name-defined]
UtmContent(ganalytics["utm_content"]), # type: ignore[name-defined]
UtmCampaign(ganalytics["utm_campaign"])) # type: ignore[name-defined]
message.tracking_settings = tracking_settings
# Mail Settings
mail_settings = MailSettings() # type: ignore[name-defined]
bcc_mail_set = args.get('BccSettings')
if bcc_mail_set:
bcc_mail_set = bcc_mail_set if type(bcc_mail_set) is dict else json.loads(bcc_mail_set)
is_enable = False if bcc_mail_set["enable"] == 'False' else True
mail_settings.bcc_settings = BccSettings( # type: ignore[name-defined]
is_enable,
BccSettingsEmail(bcc_mail_set["email"])) # type: ignore[name-defined]
footer = args.get('Footer')
if footer:
footer = footer if type(footer) is dict else json.loads(footer)
is_enable = False if footer["enable"] == 'False' else True
mail_settings.footer_settings = FooterSettings( # type: ignore[name-defined]
is_enable,
FooterText(footer["text"]), # type: ignore[name-defined]
FooterHtml(footer["html"])) # type: ignore[name-defined]
spam_check = args.get('SpamCheck')
if spam_check:
spam_check = spam_check if type(spam_check) is dict else json.loads(spam_check)
is_enable = False if spam_check["enable"] == 'False' else True
mail_settings.spam_check = SpamCheck( # type: ignore[name-defined]
is_enable,
SpamThreshold(spam_check["threshold"]), # type: ignore[name-defined]
SpamUrl(spam_check["post_to_url"])) # type: ignore[name-defined]
sandbox_mode = args.get('SandboxMode')
if sandbox_mode:
sandbox_mode = False if sandbox_mode == 'False' else True
mail_settings.sandbox_mode = SandBoxMode(sandbox_mode) # type: ignore[name-defined]
bypass_list_management = args.get('BypassListManagement')
if bypass_list_management:
bypass_list_management = False if bypass_list_management == 'False' else True
mail_settings.bypass_list_management = BypassListManagement(bypass_list_management) # type: ignore[name-defined]
message.mail_settings = mail_settings
headers = args.get('Headers')
if headers:
headers = headers if type(headers) is dict else json.loads(headers)
for key in headers:
message.header = Header(key, headers[key]) # type: ignore[name-defined]
template_id = args.get('TemplateID')
if template_id:
message.template_id = TemplateId(template_id) # type: ignore[name-defined]
subject = args.get('Subject')
message.subject = Subject(subject) # type: ignore[name-defined]
email_body = args.get('HtmlBody')
if email_body:
message.content = Content(MimeType.html, email_body) # type: ignore[name-defined]
raw_body = args.get('RawBody')
if raw_body:
message.content = Content(MimeType.text, raw_body) # type: ignore[name-defined]
reply_to_email = args.get('ReplyTo')
if reply_to_email:
message.reply_to = ReplyTo(reply_to_email, None) # type: ignore[name-defined]
elif reply_to_email == "":
return "Send-email failed: replyTo email is empty, please provide valid email"
message.from_email = From(sg_from_email, sg_sender_name) # type: ignore[name-defined]
to_emails = args.get('ToEmails')
to_emails = to_emails if isinstance(to_emails, list) else to_emails.split(",") # type: ignore[union-attr]
for email in to_emails:
message.to = To(email, None, p=0) # type: ignore[name-defined]
cc_emails = args.get('Cc')
if cc_emails:
cc_emails = cc_emails if isinstance(cc_emails, list) else cc_emails.split(",")
for email in cc_emails:
message.cc = Cc(email, None, p=0) # type: ignore[name-defined]
elif cc_emails == "":
return "Send-email failed: CC list is empty, please provide valid email"
bcc_emails = args.get('Bcc')
if bcc_emails:
bcc_emails = bcc_emails if isinstance(bcc_emails, list) else bcc_emails.split(",")
for email in bcc_emails:
message.bcc = Bcc(email, None, p=0) # type: ignore[name-defined]
elif bcc_emails == "":
return "Send-email failed: BCC list is empty, please provide valid email"
response = sg.send(message)
if response.status_code == 202:
return "Email Sent successfully"
else:
return "Failed to send email " + response.status_code
| def send_mail(args: dict, sg_from_email: str, sg_sender_name: str, sg):
message = Mail() # type: ignore[name-defined]
attach_ids = args.get('AttachIDs')
attach_names = args.get('AttachNames') or ""
if attach_ids:
process_attachments(message, attach_ids, attach_names)
categories = args.get('Categories')
if categories:
categories = categories.split(",")
for category in categories:
message.category = Category(category) # type: ignore[name-defined]
batch_id = args.get('BatchID')
if batch_id:
message.batch_id = BatchId(batch_id) # type: ignore[name-defined]
send_at = args.get('SendAt')
if send_at:
t = dateutil.parser.parse(send_at)
send_time = time.mktime(t.timetuple())
message.send_at = SendAt(int(send_time)) # type: ignore[name-defined]
asm = args.get('Asm')
if asm:
asm = asm if type(asm) is dict else json.loads(asm)
message.asm = Asm(GroupId(asm["group_id"]), GroupsToDisplay(asm["groups_to_display"])) # type: ignore[name-defined]
custom_args = args.get('CustomArgs')
if custom_args:
custom_args = custom_args if type(custom_args) is dict else json.loads(custom_args)
for key in custom_args:
message.custom_arg = CustomArg(key, custom_args[key]) # type: ignore[name-defined]
ip_pool_name = args.get('IPPoolName')
if ip_pool_name:
message.ip_pool_name = IpPoolName(ip_pool_name) # type: ignore[name-defined]
# Mail Tracking settings
tracking_settings = TrackingSettings() # type: ignore[name-defined]
click_tracking = args.get('ClickTracking')
if click_tracking:
click_tracking = click_tracking if type(click_tracking) is dict else json.loads(click_tracking)
is_enable = False if click_tracking["enable"] == 'False' else True
tracking_settings.click_tracking = ClickTracking(is_enable, # type: ignore[name-defined]
click_tracking["enable_text"])
open_tracking = args.get('OpenTracking')
if open_tracking:
open_tracking = open_tracking if type(open_tracking) is dict else json.loads(open_tracking)
is_enable = False if open_tracking["enable"] == 'False' else True
tracking_settings.open_tracking = OpenTracking( # type: ignore[name-defined]
is_enable,
OpenTrackingSubstitutionTag(open_tracking["substitution_tag"])) # type: ignore[name-defined]
sub_tracking = args.get('SubscriptionTracking')
if sub_tracking:
sub_tracking = sub_tracking if type(sub_tracking) is dict else json.loads(sub_tracking)
is_enable = False if sub_tracking["enable"] == 'False' else True
tracking_settings.subscription_tracking = SubscriptionTracking( # type: ignore[name-defined]
is_enable,
SubscriptionText(sub_tracking["text"]), # type: ignore[name-defined]
SubscriptionHtml(sub_tracking["html"]), # type: ignore[name-defined]
SubscriptionSubstitutionTag(sub_tracking["substitution_tag"])) # type: ignore[name-defined]
ganalytics = args.get('GAnalytics')
if ganalytics:
ganalytics = ganalytics if type(ganalytics) is dict else json.loads(ganalytics)
is_enable = False if ganalytics["enable"] == 'False' else True
tracking_settings.ganalytics = Ganalytics( # type: ignore[name-defined]
is_enable,
UtmSource(ganalytics["utm_source"]), # type: ignore[name-defined]
UtmMedium(ganalytics["utm_medium"]), # type: ignore[name-defined]
UtmTerm(ganalytics["utm_term"]), # type: ignore[name-defined]
UtmContent(ganalytics["utm_content"]), # type: ignore[name-defined]
UtmCampaign(ganalytics["utm_campaign"])) # type: ignore[name-defined]
message.tracking_settings = tracking_settings
# Mail Settings
mail_settings = MailSettings() # type: ignore[name-defined]
bcc_mail_set = args.get('BccSettings')
if bcc_mail_set:
bcc_mail_set = bcc_mail_set if type(bcc_mail_set) is dict else json.loads(bcc_mail_set)
is_enable = False if bcc_mail_set["enable"] == 'False' else True
mail_settings.bcc_settings = BccSettings( # type: ignore[name-defined]
is_enable,
BccSettingsEmail(bcc_mail_set["email"])) # type: ignore[name-defined]
footer = args.get('Footer')
if footer:
footer = footer if type(footer) is dict else json.loads(footer)
is_enable = False if footer["enable"] == 'False' else True
mail_settings.footer_settings = FooterSettings( # type: ignore[name-defined]
is_enable,
FooterText(footer["text"]), # type: ignore[name-defined]
FooterHtml(footer["html"])) # type: ignore[name-defined]
spam_check = args.get('SpamCheck')
if spam_check:
spam_check = spam_check if type(spam_check) is dict else json.loads(spam_check)
is_enable = False if spam_check["enable"] == 'False' else True
mail_settings.spam_check = SpamCheck( # type: ignore[name-defined]
is_enable,
SpamThreshold(spam_check["threshold"]), # type: ignore[name-defined]
SpamUrl(spam_check["post_to_url"])) # type: ignore[name-defined]
sandbox_mode = args.get('SandboxMode')
if sandbox_mode:
sandbox_mode = False if sandbox_mode == 'False' else True
mail_settings.sandbox_mode = SandBoxMode(sandbox_mode) # type: ignore[name-defined]
bypass_list_management = args.get('BypassListManagement')
if bypass_list_management:
bypass_list_management = False if bypass_list_management == 'False' else True
mail_settings.bypass_list_management = BypassListManagement(bypass_list_management) # type: ignore[name-defined]
message.mail_settings = mail_settings
headers = args.get('Headers')
if headers:
headers = headers if type(headers) is dict else json.loads(headers)
for key in headers:
message.header = Header(key, headers[key]) # type: ignore[name-defined]
template_id = args.get('TemplateID')
if template_id:
message.template_id = TemplateId(template_id) # type: ignore[name-defined]
subject = args.get('Subject')
message.subject = Subject(subject) # type: ignore[name-defined]
email_body = args.get('HtmlBody')
if email_body:
message.content = Content(MimeType.html, email_body) # type: ignore[name-defined]
raw_body = args.get('RawBody')
if raw_body:
message.content = Content(MimeType.text, raw_body) # type: ignore[name-defined]
reply_to_email = args.get('ReplyTo')
if reply_to_email:
message.reply_to = ReplyTo(reply_to_email, None) # type: ignore[name-defined]
else:
raise DemistoException('ReplyTo email is invalid, please provide a valid email.')
message.from_email = From(sg_from_email, sg_sender_name) # type: ignore[name-defined]
to_emails = args.get('ToEmails')
to_emails = to_emails if isinstance(to_emails, list) else to_emails.split(",") # type: ignore[union-attr]
for email in to_emails:
message.to = To(email, None, p=0) # type: ignore[name-defined]
cc_emails = args.get('Cc')
if cc_emails:
cc_emails = cc_emails if isinstance(cc_emails, list) else cc_emails.split(",")
for email in cc_emails:
message.cc = Cc(email, None, p=0) # type: ignore[name-defined]
elif cc_emails == "":
return "Send-email failed: CC list is empty, please provide valid email"
bcc_emails = args.get('Bcc')
if bcc_emails:
bcc_emails = bcc_emails if isinstance(bcc_emails, list) else bcc_emails.split(",")
for email in bcc_emails:
message.bcc = Bcc(email, None, p=0) # type: ignore[name-defined]
elif bcc_emails == "":
return "Send-email failed: BCC list is empty, please provide valid email"
response = sg.send(message)
if response.status_code == 202:
return "Email Sent successfully"
else:
return "Failed to send email " + response.status_code
|
40,025 | def maximum_weight_independent_set(edges: Iterable[Tuple[Variable, Variable]],
nodes: Optional[Iterable[Tuple[Variable, float]]] = None,
*,
strength: Optional[float] = None,
strength_multiplier: float = 2,
) -> BinaryQuadraticModel:
"""Return a binary quadratic model encoding a maximum-weight independent set problem.
Given a graph `G`, an independent set is a set of nodes such that the
subgraph of `G` induced by these nodes contains no edges.
A maximum-weight independent set is the independent set with the highest
total node weight.
Args:
edges: The edges of the graph as an iterable of two-tuples.
nodes: The nodes of the graph as an iterable of two-tuples where the
first element of the tuple is the node label and the second element
is the node weight. Nodes not specified are given a weight of ``1``.
strength: The strength of the quadratic biases. Must be strictly
greater than ``1`` in order to enforce the independent set
constraint. If not given, the strength is determined by the
``strength_multiplier``.
strength_multiplier: The strength of the quadratic biases is given by
the maximum node weight multiplied by ``strength_multiplier``.
Returns:
A binary quadratic model. The binary quadratic model will have
variables and interactions corresponding to ``nodes`` and ``edges``.
Examples:
>>> from dimod.generators import maximum_weight_independent_set
Get a maximum independent set binary quadratic model from a list of
edges and nodes.
>>> maximum_weight_independent_set([(0, 1)], [(0, .25), (1, .5), (2, 1)])
BinaryQuadraticModel({0: -0.25, 1: -0.5, 2: -1.0}, {(1, 0): 2.0}, 0.0, 'BINARY')
Get a maximum independent set binary quadratic model from a
:class:`networkx.Graph`.
>>> import networkx as nx
>>> G = nx.Graph()
>>> G.add_edges_from([(0, 1), (1, 2)])
>>> G.add_nodes_from([0, 2], weight=.25)
>>> G.add_node(1, weight=.5)
>>> maximum_weight_independent_set(G.edges, G.nodes('weight'))
BinaryQuadraticModel({0: -0.25, 1: -0.5, 2: -0.25}, {(1, 0): 1.0, (2, 1): 1.0}, 0.0, 'BINARY')
"""
bqm = independent_set(edges)
objective = BinaryQuadraticModel(vartype=Vartype.BINARY)
objective.add_linear_from((v, 1) for v in bqm.variables)
if nodes is None:
max_weight = 1.
else:
for v, weight in nodes:
objective.set_linear(v, weight)
max_weight = objective.linear.max(default=1)
if strength is None:
bqm *= max_weight*strength_multiplier
bqm -= objective
else:
bqm *= strength
bqm -= objective
bqm.offset = 0 # otherwise subtracting the objective gives -0 offset
return bqm
| def maximum_weight_independent_set(edges: Iterable[Tuple[Variable, Variable]],
nodes: Optional[Iterable[Tuple[Variable, float]]] = None,
*,
strength: Optional[float] = None,
strength_multiplier: float = 2,
) -> BinaryQuadraticModel:
"""Return a binary quadratic model encoding a maximum-weight independent set problem.
Given a graph `G`, an independent set is a set of nodes such that the
subgraph of `G` induced by these nodes contains no edges.
A maximum-weight independent set is the independent set with the highest
total node weight.
Args:
edges: The edges of the graph as an iterable of two-tuples.
nodes: The nodes of the graph as an iterable of two-tuples where the
first element of the tuple is the node label and the second element
is the node weight. Nodes not specified are given a weight of ``1``.
strength: The strength of the quadratic biases. Must be strictly
greater than ``1`` in order to enforce the independent set
constraint. If not given, the strength is determined by the
``strength_multiplier``.
strength_multiplier: The strength of the quadratic biases is given by
the maximum node weight multiplied by ``strength_multiplier``.
Returns:
A binary quadratic model. The binary quadratic model will have
variables and interactions corresponding to ``nodes`` and ``edges``.
Examples:
>>> from dimod.generators import maximum_weight_independent_set
Get a maximum-weight independent set binary quadratic model from a list of
edges and nodes.
>>> maximum_weight_independent_set([(0, 1)], [(0, .25), (1, .5), (2, 1)])
BinaryQuadraticModel({0: -0.25, 1: -0.5, 2: -1.0}, {(1, 0): 2.0}, 0.0, 'BINARY')
Get a maximum independent set binary quadratic model from a
:class:`networkx.Graph`.
>>> import networkx as nx
>>> G = nx.Graph()
>>> G.add_edges_from([(0, 1), (1, 2)])
>>> G.add_nodes_from([0, 2], weight=.25)
>>> G.add_node(1, weight=.5)
>>> maximum_weight_independent_set(G.edges, G.nodes('weight'))
BinaryQuadraticModel({0: -0.25, 1: -0.5, 2: -0.25}, {(1, 0): 1.0, (2, 1): 1.0}, 0.0, 'BINARY')
"""
bqm = independent_set(edges)
objective = BinaryQuadraticModel(vartype=Vartype.BINARY)
objective.add_linear_from((v, 1) for v in bqm.variables)
if nodes is None:
max_weight = 1.
else:
for v, weight in nodes:
objective.set_linear(v, weight)
max_weight = objective.linear.max(default=1)
if strength is None:
bqm *= max_weight*strength_multiplier
bqm -= objective
else:
bqm *= strength
bqm -= objective
bqm.offset = 0 # otherwise subtracting the objective gives -0 offset
return bqm
|
40,586 | def update_p_nom_max(n):
# if extendable carriers (solar/onwind/...) have capacity >= 0,
# e.g. existing assets from the OPSD project are included to the network,
# the installed capacity might exceed the expansion limit.
# Hence, we update the assumptions.
n.generators.p_nom_max = (n.generators
.apply(lambda b: b[['p_nom_min','p_nom_max']].max(), axis=1))
| def update_p_nom_max(n):
# if extendable carriers (solar/onwind/...) have capacity >= 0,
# e.g. existing assets from the OPSD project are included to the network,
# the installed capacity might exceed the expansion limit.
# Hence, we update the assumptions.
n.generators.p_nom_max = n.generators[['p_nom_min', 'p_nom_max']].max(1)
|
29,506 | def process_raw_message_batch(
realm_id: int,
raw_messages: List[Dict[str, Any]],
subscriber_map: Dict[int, Set[int]],
user_id_mapper: IdMapper,
user_handler: UserHandler,
get_recipient_id_from_receiver_name: Callable[[str, int], int],
is_pm_data: bool,
output_dir: str,
zerver_realmemoji: List[Dict[str, Any]],
total_reactions: List[Dict[str, Any]],
uploads_list: List[ZerverFieldsT],
zerver_attachment: List[ZerverFieldsT],
mattermost_data_dir: str,
) -> None:
def fix_mentions(content: str, mention_user_ids: Set[int]) -> str:
for user_id in mention_user_ids:
user = user_handler.get_user(user_id=user_id)
mattermost_mention = "@{short_name}".format(**user)
zulip_mention = "@**{full_name}**".format(**user)
content = content.replace(mattermost_mention, zulip_mention)
content = content.replace("@channel", "@**all**")
content = content.replace("@all", "@**all**")
# We don't have an equivalent for Mattermost's @here mention which mentions all users
# online in the channel.
content = content.replace("@here", "@**all**")
return content
mention_map: Dict[int, Set[int]] = {}
zerver_message = []
import html2text
h = html2text.HTML2Text()
pm_members = {}
for raw_message in raw_messages:
message_id = NEXT_ID("message")
mention_user_ids = get_mentioned_user_ids(raw_message, user_id_mapper)
mention_map[message_id] = mention_user_ids
content = fix_mentions(
content=raw_message["content"],
mention_user_ids=mention_user_ids,
)
content = h.handle(content)
if len(content) > 10000: # nocoverage
logging.info("skipping too-long message of length %s", len(content))
continue
date_sent = raw_message["date_sent"]
sender_user_id = raw_message["sender_id"]
if "channel_name" in raw_message:
recipient_id = get_recipient_id_from_receiver_name(
raw_message["channel_name"], Recipient.STREAM
)
elif "huddle_name" in raw_message:
recipient_id = get_recipient_id_from_receiver_name(
raw_message["huddle_name"], Recipient.HUDDLE
)
elif "pm_members" in raw_message:
members = raw_message["pm_members"]
member_ids = {user_id_mapper.get(member) for member in members}
pm_members[message_id] = member_ids
if sender_user_id == user_id_mapper.get(members[0]):
recipient_id = get_recipient_id_from_receiver_name(members[1], Recipient.PERSONAL)
else:
recipient_id = get_recipient_id_from_receiver_name(members[0], Recipient.PERSONAL)
else:
raise AssertionError("raw_message without channel_name, huddle_name or pm_members key")
rendered_content = None
has_attachment = False
has_image = False
has_link = False
if "attachments" in raw_message:
has_attachment = True
has_link = True
file_info = process_message_attachments(
attachments=raw_message["attachments"],
realm_id=realm_id,
message_id=message_id,
user_id=sender_user_id,
user_handler=user_handler,
zerver_attachment=zerver_attachment,
uploads_list=uploads_list,
mattermost_data_dir=mattermost_data_dir,
output_dir=output_dir,
)
content += file_info["content"]
has_image = file_info["has_image"]
topic_name = "imported from mattermost"
message = build_message(
content=content,
message_id=message_id,
date_sent=date_sent,
recipient_id=recipient_id,
rendered_content=rendered_content,
topic_name=topic_name,
user_id=sender_user_id,
has_image=has_image,
has_link=has_link,
has_attachment=has_attachment,
)
zerver_message.append(message)
build_reactions(
realm_id,
total_reactions,
raw_message["reactions"],
message_id,
user_id_mapper,
zerver_realmemoji,
)
zerver_usermessage = make_user_messages(
zerver_message=zerver_message,
subscriber_map=subscriber_map,
is_pm_data=is_pm_data,
mention_map=mention_map,
)
message_json = dict(
zerver_message=zerver_message,
zerver_usermessage=zerver_usermessage,
)
dump_file_id = NEXT_ID("dump_file_id" + str(realm_id))
message_file = f"/messages-{dump_file_id:06}.json"
create_converted_data_files(message_json, output_dir, message_file)
| def process_raw_message_batch(
realm_id: int,
raw_messages: List[Dict[str, Any]],
subscriber_map: Dict[int, Set[int]],
user_id_mapper: IdMapper,
user_handler: UserHandler,
get_recipient_id_from_receiver_name: Callable[[str, int], int],
is_pm_data: bool,
output_dir: str,
zerver_realmemoji: List[Dict[str, Any]],
total_reactions: List[Dict[str, Any]],
uploads_list: List[ZerverFieldsT],
zerver_attachment: List[ZerverFieldsT],
mattermost_data_dir: str,
) -> None:
def fix_mentions(content: str, mention_user_ids: Set[int]) -> str:
for user_id in mention_user_ids:
user = user_handler.get_user(user_id=user_id)
mattermost_mention = "@{short_name}".format(**user)
zulip_mention = "@**{full_name}**".format(**user)
content = content.replace(mattermost_mention, zulip_mention)
content = content.replace("@channel", "@**all**")
content = content.replace("@all", "@**all**")
# We don't have an equivalent for Mattermost's @here mention which mentions all users
# online in the channel.
content = content.replace("@here", "@**all**")
return content
mention_map: Dict[int, Set[int]] = {}
zerver_message = []
import html2text
h = html2text.HTML2Text()
pm_members = {}
for raw_message in raw_messages:
message_id = NEXT_ID("message")
mention_user_ids = get_mentioned_user_ids(raw_message, user_id_mapper)
mention_map[message_id] = mention_user_ids
content = fix_mentions(
content=raw_message["content"],
mention_user_ids=mention_user_ids,
)
content = h.handle(content)
if len(content) > 10000: # nocoverage
logging.info("skipping too-long message of length %s", len(content))
continue
date_sent = raw_message["date_sent"]
sender_user_id = raw_message["sender_id"]
if "channel_name" in raw_message:
recipient_id = get_recipient_id_from_receiver_name(
raw_message["channel_name"], Recipient.STREAM
)
elif "huddle_name" in raw_message:
recipient_id = get_recipient_id_from_receiver_name(
raw_message["huddle_name"], Recipient.HUDDLE
)
elif "pm_members" in raw_message:
members = raw_message["pm_members"]
member_ids = {user_id_mapper.get(member) for member in members}
pm_members[message_id] = member_ids
if sender_user_id == user_id_mapper.get(members[0]):
recipient_id = get_recipient_id_from_receiver_name(members[1], Recipient.PERSONAL)
else:
recipient_id = get_recipient_id_from_receiver_name(members[0], Recipient.PERSONAL)
else:
raise AssertionError("raw_message without channel_name, huddle_name or pm_members key")
rendered_content = None
has_attachment = False
has_image = False
has_link = False
if "attachments" in raw_message:
has_attachment = True
has_link = True
content, has_image = process_message_attachments(
attachments=raw_message["attachments"],
realm_id=realm_id,
message_id=message_id,
user_id=sender_user_id,
user_handler=user_handler,
zerver_attachment=zerver_attachment,
uploads_list=uploads_list,
mattermost_data_dir=mattermost_data_dir,
output_dir=output_dir,
)
content += file_info["content"]
has_image = file_info["has_image"]
topic_name = "imported from mattermost"
message = build_message(
content=content,
message_id=message_id,
date_sent=date_sent,
recipient_id=recipient_id,
rendered_content=rendered_content,
topic_name=topic_name,
user_id=sender_user_id,
has_image=has_image,
has_link=has_link,
has_attachment=has_attachment,
)
zerver_message.append(message)
build_reactions(
realm_id,
total_reactions,
raw_message["reactions"],
message_id,
user_id_mapper,
zerver_realmemoji,
)
zerver_usermessage = make_user_messages(
zerver_message=zerver_message,
subscriber_map=subscriber_map,
is_pm_data=is_pm_data,
mention_map=mention_map,
)
message_json = dict(
zerver_message=zerver_message,
zerver_usermessage=zerver_usermessage,
)
dump_file_id = NEXT_ID("dump_file_id" + str(realm_id))
message_file = f"/messages-{dump_file_id:06}.json"
create_converted_data_files(message_json, output_dir, message_file)
|
8,656 | def get_running_pid(filename):
"""Retrieve the pid number from the given ``filename``.
:param str filename: path to file to read the PID from
:return: the PID number of a sopel instance if running, ``None`` otherwise
:rtype: integer
This function tries to retrieve a PID number from the given ``filename``,
as an integer, and returns ``None`` if the file is not found or if the
content is not an integer.
"""
if not os.path.isfile(filename):
return
with open(filename, 'r') as pid_file:
try:
return int(pid_file.read())
except ValueError:
pass
| def get_running_pid(filename):
"""Retrieve the pid number from the given ``filename``.
:param str filename: path to file to read the PID from
:return: the PID number of a Sopel instance if running, ``None`` otherwise
:rtype: integer
This function tries to retrieve a PID number from the given ``filename``,
as an integer, and returns ``None`` if the file is not found or if the
content is not an integer.
"""
if not os.path.isfile(filename):
return
with open(filename, 'r') as pid_file:
try:
return int(pid_file.read())
except ValueError:
pass
|
39,302 | def vtk_points(points, deep=True):
"""Convert numpy or list of points to a vtkPoints object."""
if not isinstance(points, np.ndarray):
points = np.array(points)
# verify is numeric
if not np.issubdtype(points.dtype, np.number):
raise TypeError('Points must be a numeric type')
# check dimensionality
if points.ndim == 1:
points = points.reshape((-1, 3))
elif points.ndim > 2:
raise ValueError('Dimension of ``points`` should be 1 or 2, not '
f'{points.ndim}')
# verify shape
if points.shape[1] != 3:
raise ValueError('Points array must contain three values per point. \n'
f'Shape is {points.shape} and should be (X, 3)')
# points must be contiguous
if not points.flags['C_CONTIGUOUS']:
points = np.ascontiguousarray(points)
vtkpts = _vtk.vtkPoints()
vtkpts.SetData(_vtk.numpy_to_vtk(points, deep=deep))
return vtkpts
| def vtk_points(points, deep=True):
"""Convert numpy or list of points to a vtkPoints object."""
if not isinstance(points, np.ndarray):
points = np.array(points)
# verify is numeric
if not np.issubdtype(points.dtype, np.number):
raise TypeError('Points must be a numeric type')
# check dimensionality
if points.ndim == 1:
points = points.reshape(-1, 3)
elif points.ndim > 2:
raise ValueError('Dimension of ``points`` should be 1 or 2, not '
f'{points.ndim}')
# verify shape
if points.shape[1] != 3:
raise ValueError('Points array must contain three values per point. \n'
f'Shape is {points.shape} and should be (X, 3)')
# points must be contiguous
if not points.flags['C_CONTIGUOUS']:
points = np.ascontiguousarray(points)
vtkpts = _vtk.vtkPoints()
vtkpts.SetData(_vtk.numpy_to_vtk(points, deep=deep))
return vtkpts
|
25,581 | def limit_thread_cpu_usage_by_time() -> None:
"""This will enable Gevent's monitoring thread, and if a Greenlet uses the
CPU for longer than `max_blocking_time` it will be killed.
This will result in the whole process being killed, since exceptions are
    propagated to the top-level. The goal here is to detect slow functions that
have to be optimized.
"""
gevent.config.monitor_thread = True
gevent.config.max_blocking_time = 10.0
# The monitoring thread will use the trace api just like the TraceSampler
# and the SwitchMonitoring. Sadly there is no API to uninstall the thread,
# but this should not be a problem.
monitor_thread = gevent.get_hub().start_periodic_monitoring_thread()
# This code must not use the tracer from the monitor_thread because calls
# to `did_block_hub` will reset its internal state. If two threads use the
# same underlying tracer false positives will happen, because the swith
    # counter will be artificially reset.
greenlet_tracer = GreenletTracer()
def kill_offender(hub: Hub) -> None:
tracer = monitor_thread._greenlet_tracer
if greenlet_tracer.did_block_hub(hub):
active_greenlet = tracer.active_greenlet
hub.loop.run_callback(
lambda: active_greenlet.throw(
RaidenUnrecoverableError(
f"A greenlet used the CPU for longer than "
f"{gevent.config.max_blocking_time} seconds, killing it"
)
)
)
monitor_thread.add_monitoring_function(kill_offender, gevent.config.max_blocking_time)
| def limit_thread_cpu_usage_by_time() -> None:
"""This will enable Gevent's monitoring thread, and if a Greenlet uses the
CPU for longer than `max_blocking_time` it will be killed.
This will result in the whole process being killed, since exceptions are
    propagated to the top-level. The goal here is to detect slow functions that
have to be optimized.
"""
gevent.config.monitor_thread = True
gevent.config.max_blocking_time = 10.0
# The monitoring thread will use the trace api just like the TraceSampler
# and the SwitchMonitoring. Sadly there is no API to uninstall the thread,
# but this should not be a problem.
monitor_thread = gevent.get_hub().start_periodic_monitoring_thread()
# This code must not use the tracer from the monitor_thread because calls
# to `did_block_hub` will reset its internal state. If two threads use the
# same underlying tracer false positives will happen, because the switch
    # counter will be artificially reset.
greenlet_tracer = GreenletTracer()
def kill_offender(hub: Hub) -> None:
tracer = monitor_thread._greenlet_tracer
if greenlet_tracer.did_block_hub(hub):
active_greenlet = tracer.active_greenlet
hub.loop.run_callback(
lambda: active_greenlet.throw(
RaidenUnrecoverableError(
f"A greenlet used the CPU for longer than "
f"{gevent.config.max_blocking_time} seconds, killing it"
)
)
)
monitor_thread.add_monitoring_function(kill_offender, gevent.config.max_blocking_time)
|
10,492 | def upgrade(m, mode="yes", force=False, default_release=None,
use_apt_get=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=False, no_remove=False,
allow_unauthenticated=False,
):
if autoremove:
autoremove = '--auto-remove'
else:
autoremove = ''
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
apt_cmd = None
prompt_regex = None
if mode == "dist" or (mode == "full" and use_apt_get):
# apt-get dist-upgrade
apt_cmd = APT_GET_CMD
upgrade_command = "dist-upgrade %s" % (autoremove)
elif mode == "full" and not use_apt_get:
# aptitude full-upgrade
apt_cmd = APTITUDE_CMD
upgrade_command = "full-upgrade"
else:
if use_apt_get:
apt_cmd = APT_GET_CMD
upgrade_command = "upgrade --with-new-pkgs %s" % (autoremove)
else:
# aptitude safe-upgrade # mode=yes # default
apt_cmd = APTITUDE_CMD
upgrade_command = "safe-upgrade"
prompt_regex = r"(^Do you want to ignore this warning and proceed anyway\?|^\*\*\*.*\[default=.*\])"
if force:
if apt_cmd == APT_GET_CMD:
force_yes = '--force-yes'
else:
force_yes = '--assume-yes --allow-untrusted'
else:
force_yes = ''
if no_remove:
no_remove = '--no-remove'
else:
no_remove = ''
allow_unauthenticated = '--allow-unauthenticated' if allow_unauthenticated else ''
if apt_cmd is None:
if use_apt_get:
apt_cmd = APT_GET_CMD
else:
m.fail_json(msg="Unable to find APTITUDE in path. Please make sure "
"to have APTITUDE in path or use 'force_apt_get=True'")
apt_cmd_path = m.get_bin_path(apt_cmd, required=True)
cmd = '%s -y %s %s %s %s %s %s' % (apt_cmd_path, dpkg_options, force_yes, no_remove, allow_unauthenticated, check_arg, upgrade_command)
if default_release:
cmd += " -t '%s'" % (default_release,)
with PolicyRcD(m):
rc, out, err = m.run_command(cmd, prompt_regex=prompt_regex)
if m._diff:
diff = parse_diff(out)
else:
diff = {}
if rc:
m.fail_json(msg="'%s %s' failed: %s" % (apt_cmd, upgrade_command, err), stdout=out, rc=rc)
if (apt_cmd == APT_GET_CMD and APT_GET_ZERO in out) or (apt_cmd == APTITUDE_CMD and APTITUDE_ZERO in out):
m.exit_json(changed=False, msg=out, stdout=out, stderr=err)
m.exit_json(changed=True, msg=out, stdout=out, stderr=err, diff=diff)
| def upgrade(m, mode="yes", force=False, default_release=None,
use_apt_get=False,
            dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=False, fail_on_autoremove=False,
allow_unauthenticated=False,
):
if autoremove:
autoremove = '--auto-remove'
else:
autoremove = ''
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
apt_cmd = None
prompt_regex = None
if mode == "dist" or (mode == "full" and use_apt_get):
# apt-get dist-upgrade
apt_cmd = APT_GET_CMD
upgrade_command = "dist-upgrade %s" % (autoremove)
elif mode == "full" and not use_apt_get:
# aptitude full-upgrade
apt_cmd = APTITUDE_CMD
upgrade_command = "full-upgrade"
else:
if use_apt_get:
apt_cmd = APT_GET_CMD
upgrade_command = "upgrade --with-new-pkgs %s" % (autoremove)
else:
# aptitude safe-upgrade # mode=yes # default
apt_cmd = APTITUDE_CMD
upgrade_command = "safe-upgrade"
prompt_regex = r"(^Do you want to ignore this warning and proceed anyway\?|^\*\*\*.*\[default=.*\])"
if force:
if apt_cmd == APT_GET_CMD:
force_yes = '--force-yes'
else:
force_yes = '--assume-yes --allow-untrusted'
else:
force_yes = ''
if fail_on_autoremove:
    fail_on_autoremove = '--no-remove'
else:
    fail_on_autoremove = ''
allow_unauthenticated = '--allow-unauthenticated' if allow_unauthenticated else ''
if apt_cmd is None:
if use_apt_get:
apt_cmd = APT_GET_CMD
else:
m.fail_json(msg="Unable to find APTITUDE in path. Please make sure "
"to have APTITUDE in path or use 'force_apt_get=True'")
apt_cmd_path = m.get_bin_path(apt_cmd, required=True)
cmd = '%s -y %s %s %s %s %s %s' % (apt_cmd_path, dpkg_options, force_yes, fail_on_autoremove, allow_unauthenticated, check_arg, upgrade_command)
if default_release:
cmd += " -t '%s'" % (default_release,)
with PolicyRcD(m):
rc, out, err = m.run_command(cmd, prompt_regex=prompt_regex)
if m._diff:
diff = parse_diff(out)
else:
diff = {}
if rc:
m.fail_json(msg="'%s %s' failed: %s" % (apt_cmd, upgrade_command, err), stdout=out, rc=rc)
if (apt_cmd == APT_GET_CMD and APT_GET_ZERO in out) or (apt_cmd == APTITUDE_CMD and APTITUDE_ZERO in out):
m.exit_json(changed=False, msg=out, stdout=out, stderr=err)
m.exit_json(changed=True, msg=out, stdout=out, stderr=err, diff=diff)
|
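The pair above only swaps the no_remove parameter for fail_on_autoremove; the command itself is still assembled from independently chosen flag fragments. A minimal, hypothetical sketch of that assembly for mode="dist" with autoremove and check mode enabled follows; the binary path and dpkg options string are assumed values, not the module's real defaults.
apt_cmd_path = "/usr/bin/apt-get"                     # assumed location of apt-get
dpkg_options = '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"'  # assumed
autoremove = '--auto-remove'                          # autoremove=True
check_arg = '--simulate'                              # check mode
force_yes = fail_on_autoremove = allow_unauthenticated = ''
upgrade_command = "dist-upgrade %s" % (autoremove)
cmd = '%s -y %s %s %s %s %s %s' % (apt_cmd_path, dpkg_options, force_yes, fail_on_autoremove,
                                   allow_unauthenticated, check_arg, upgrade_command)
print(cmd)  # /usr/bin/apt-get -y -o "Dpkg::Options::=..." ...  --simulate dist-upgrade --auto-remove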
31,853 | def get_ransomware_alerts_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Gets ransomware alerts detected by Cohesity Helios.
:type client: ``Client``
:param client: Cohesity Helios client to use.
:type args: ``Dict[str, Any]``
:param args: Dictionary with get ransomware alerts parameters.
Returns command result with the list of fetched ransomware alerts.
"""
start_time_millis = datestring_to_millis(args.get('created_after', ''))
end_time_millis = datestring_to_millis(args.get('created_before', ''))
severity_list = args.get('alert_severity_list', None)
ids_list = args.get('alert_id_list', None)
limit = args.get('limit', MAX_FETCH_DEFAULT)
# Fetch ransomware alerts via client.
resp = client.get_ransomware_alerts(
start_time_millis=start_time_millis,
end_time_millis=end_time_millis, alert_ids=ids_list,
alert_severity_list=severity_list,
max_fetch=limit)
demisto.debug("Got {numAlerts} alerts between {start} and {end}".
format(numAlerts=len(resp), start=start_time_millis, end=end_time_millis))
# Parse alerts for readable output.
ransomware_alerts = []
for alert in resp:
alert_details = get_ransomware_alert_details(alert)
ransomware_alerts.append(alert_details)
return CommandResults(
outputs_prefix='CohesityHelios.RansomwareAlert',
outputs_key_field='alert_id',
outputs=ransomware_alerts,
)
| def get_ransomware_alerts_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
Gets ransomware alerts detected by Cohesity Helios.
:type client: ``Client``
:param client: Cohesity Helios client to use.
:type args: ``Dict[str, Any]``
:param args: Dictionary with get ransomware alerts parameters.
Returns command result with the list of fetched ransomware alerts.
"""
start_time_millis = datestring_to_millis(args.get('created_after', ''))
end_time_millis = datestring_to_millis(args.get('created_before', ''))
severity_list = args.get('alert_severity_list', None)
ids_list = args.get('alert_id_list', None)
limit = args.get('limit', MAX_FETCH_DEFAULT)
# Fetch ransomware alerts via client.
resp = client.get_ransomware_alerts(
start_time_millis=start_time_millis,
end_time_millis=end_time_millis, alert_ids=ids_list,
alert_severity_list=severity_list,
max_fetch=limit)
demisto.debug(f"Got {len(resp)} alerts between {start_time_millis} and {end_time_millis}.")
# Parse alerts for readable output.
ransomware_alerts = []
for alert in resp:
alert_details = get_ransomware_alert_details(alert)
ransomware_alerts.append(alert_details)
return CommandResults(
outputs_prefix='CohesityHelios.RansomwareAlert',
outputs_key_field='alert_id',
outputs=ransomware_alerts,
)
|
31,453 | def is_the_only_rn_in_block(release_notes_dir: str, version: str, changelog: dict):
"""
Check if the given version is a key of an aggregated changelog block, as in its value in the changelog
doesn't contain other release notes that have been aggregated in previous uploads.
If that is the case, the adjacent previous release note in the changelog will be equal to the one in the
release notes directory, and this function asserts that.
Args:
release_notes_dir: the path to the release notes dir.
version (str): the wanted version.
changelog (dict): the changelog from the production bucket.
Returns:
True if this version's value in the changelog is not an aggregated release notes block. False otherwise.
"""
if changelog.get(version):
all_rn_versions = []
lowest_version = [LooseVersion('0.0.0')]
for filename in sorted(os.listdir(release_notes_dir)):
_current_version = filename.replace('.md', '')
current_version = _current_version.replace('_', '.')
all_rn_versions.append(LooseVersion(current_version))
lower_versions_all_versions = [item for item in all_rn_versions if item < version] + lowest_version
lower_versions_in_changelog = [LooseVersion(item) for item in changelog.keys() if
LooseVersion(item) < version] + lowest_version
return max(lower_versions_all_versions) == max(lower_versions_in_changelog)
return False
| def is_the_only_rn_in_block(release_notes_dir: str, version: str, changelog: dict):
"""
Check if the given version is a key of an aggregated changelog block, as in its value in the changelog
doesn't contain other release notes that have been aggregated in previous uploads.
If that is the case, the adjacent previous release note in the changelog will be equal to the one in the
release notes directory, and this function asserts that.
Args:
release_notes_dir: the path to the release notes dir.
version (str): the wanted version.
changelog (dict): the changelog from the production bucket.
Returns:
True if this version's value in the changelog is not an aggregated release notes block. False otherwise.
"""
if changelog.get(version):
all_rn_versions = []
lowest_version = [LooseVersion('1.0.0')]
for filename in sorted(os.listdir(release_notes_dir)):
_current_version = filename.replace('.md', '')
current_version = _current_version.replace('_', '.')
all_rn_versions.append(LooseVersion(current_version))
lower_versions_all_versions = [item for item in all_rn_versions if item < version] + lowest_version
lower_versions_in_changelog = [LooseVersion(item) for item in changelog.keys() if
LooseVersion(item) < version] + lowest_version
return max(lower_versions_all_versions) == max(lower_versions_in_changelog)
return False
|
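A small self-contained illustration of the LooseVersion bookkeeping in the pair above; the release-note file names and changelog keys are invented for the example.
from distutils.version import LooseVersion

rn_files = ["1_0_1.md", "1_0_2.md", "1_0_5.md"]        # made-up release notes dir listing
changelog_keys = ["1.0.0", "1.0.1", "1.0.2", "1.0.5"]  # made-up changelog keys
version = "1.0.5"
lowest = [LooseVersion("1.0.0")]
on_disk = [LooseVersion(f.replace(".md", "").replace("_", ".")) for f in rn_files]
lower_disk = max([v for v in on_disk if v < version] + lowest)
lower_changelog = max([LooseVersion(k) for k in changelog_keys if LooseVersion(k) < version] + lowest)
# Both sides agree on 1.0.2, so "1.0.5" is treated as the only release note in its block.
print(lower_disk == lower_changelog)  # True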
40,765 | def supervised_training_step(
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
loss_fn: Union[Callable, torch.nn.Module],
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
output_transform: Callable = lambda x, y, y_pred, loss: loss.item(),
gradient_accumulation_steps: int = 1,
) -> Callable:
"""Factory function for supervised training.
Args:
model: the model to train.
optimizer: the optimizer to use.
loss_fn: the loss function to use.
device: device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
Device can be CPU, GPU.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
gradient_accumulation_steps: Number of steps the gradients should be accumulated across.
(default: 1 (means no gradient accumulation))
Returns:
Callable: update function.
Example::
from ignite.engine import Engine, supervised_training_step
model = ...
optimizer = ...
loss_fn = ...
update_fn = supervised_training_step(model, optimizer, loss_fn, 'cuda')
trainer = Engine(update_fn)
.. versionadded:: 0.4.5
.. versionadded:: 0.5.0
Added Gradient Accumulation.
"""
def update(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
model.train()
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
y_pred = model(x)
loss = loss_fn(y_pred, y) / gradient_accumulation_steps
loss.backward()
if engine.state.iteration % gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
return output_transform(x, y, y_pred, loss)
return update
| def supervised_training_step(
model: torch.nn.Module,
optimizer: torch.optim.Optimizer,
loss_fn: Union[Callable, torch.nn.Module],
device: Optional[Union[str, torch.device]] = None,
non_blocking: bool = False,
prepare_batch: Callable = _prepare_batch,
output_transform: Callable = lambda x, y, y_pred, loss: loss.item(),
gradient_accumulation_steps: int = 1,
) -> Callable:
"""Factory function for supervised training.
Args:
model: the model to train.
optimizer: the optimizer to use.
loss_fn: the loss function to use.
device: device type specification (default: None).
Applies to batches after starting the engine. Model *will not* be moved.
Device can be CPU, GPU.
non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
with respect to the host. For other cases, this argument has no effect.
prepare_batch: function that receives `batch`, `device`, `non_blocking` and outputs
tuple of tensors `(batch_x, batch_y)`.
output_transform: function that receives 'x', 'y', 'y_pred', 'loss' and returns value
to be assigned to engine's state.output after each iteration. Default is returning `loss.item()`.
gradient_accumulation_steps: Number of steps the gradients should be accumulated across.
(default: 1 (means no gradient accumulation))
Returns:
Callable: update function.
Example::
from ignite.engine import Engine, supervised_training_step
model = ...
optimizer = ...
loss_fn = ...
update_fn = supervised_training_step(model, optimizer, loss_fn, 'cuda')
trainer = Engine(update_fn)
.. versionadded:: 0.4.5
.. versionchanged:: 0.5.0
Added Gradient Accumulation.
"""
def update(engine: Engine, batch: Sequence[torch.Tensor]) -> Union[Any, Tuple[torch.Tensor]]:
model.train()
x, y = prepare_batch(batch, device=device, non_blocking=non_blocking)
y_pred = model(x)
loss = loss_fn(y_pred, y) / gradient_accumulation_steps
loss.backward()
if engine.state.iteration % gradient_accumulation_steps == 0:
optimizer.step()
optimizer.zero_grad()
return output_transform(x, y, y_pred, loss)
return update
|
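The visible change in the pair above is the versionadded → versionchanged directive; the gradient-accumulation logic is identical. A minimal plain-PyTorch sketch of that pattern, with a toy model and random data as assumptions:
import torch

model = torch.nn.Linear(10, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
loss_fn = torch.nn.MSELoss()
accumulation_steps = 4

for iteration in range(1, 13):
    x, y = torch.randn(8, 10), torch.randn(8, 1)
    loss = loss_fn(model(x), y) / accumulation_steps  # scale so accumulated gradients average out
    loss.backward()                                   # gradients add up in param.grad across iterations
    if iteration % accumulation_steps == 0:           # the optimizer steps once every 4 iterations
        optimizer.step()
        optimizer.zero_grad()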
31,549 | def elasticsearch_builder(proxies):
"""Builds an Elasticsearch obj with the necessary credentials, proxy settings and secure connection."""
connection_args = {
"hosts": [SERVER],
"connection_class": RequestsHttpConnection,
"proxies": proxies,
"verify_certs": INSECURE,
"timeout": TIMEOUT
}
if API_KEY_ID:
connection_args["api_key"] = API_KEY
elif USERNAME:
connection_args["http_auth"] = (USERNAME, PASSWORD)
es = Elasticsearch(**connection_args)
# this should be passed as api_key via Elasticsearch init, but this code ensures it'll be set correctly
if API_KEY_ID and hasattr(es, 'transport'):
es.transport.get_connection().session.headers['authorization'] = get_api_key_header_val(API_KEY)
return es
| def elasticsearch_builder(proxies):
"""Builds an Elasticsearch obj with the necessary credentials, proxy settings and secure connection."""
connection_args = {
"hosts": [SERVER],
"connection_class": RequestsHttpConnection,
"proxies": proxies,
"verify_certs": INSECURE,
"timeout": TIMEOUT,
}
if API_KEY_ID:
connection_args["api_key"] = API_KEY
elif USERNAME:
connection_args["http_auth"] = (USERNAME, PASSWORD)
es = Elasticsearch(**connection_args)
# this should be passed as api_key via Elasticsearch init, but this code ensures it'll be set correctly
if API_KEY_ID and hasattr(es, 'transport'):
es.transport.get_connection().session.headers['authorization'] = get_api_key_header_val(API_KEY)
return es
|
2,372 | def _inclusive_low_high(interval, dtype=float):
"""Generate values low and high to be within the interval range."""
eps = 10 * np.finfo(dtype).eps
if interval.low == -np.inf:
low = -1e10
elif interval.low < 0:
low = interval.low * (1 - eps) + eps
else:
low = interval.low * (1 + eps) + eps
if interval.high == np.inf:
high = 1e10
elif interval.high < 0:
high = interval.high * (1 + eps) - eps
else:
high = interval.high * (1 - eps) - eps
return low, high
| def _inclusive_low_high(interval, dtype=np.float32):
"""Generate values low and high to be within the interval range."""
eps = 10 * np.finfo(dtype).eps
if interval.low == -np.inf:
low = -1e10
elif interval.low < 0:
low = interval.low * (1 - eps) + eps
else:
low = interval.low * (1 + eps) + eps
if interval.high == np.inf:
high = 1e10
elif interval.high < 0:
high = interval.high * (1 + eps) - eps
else:
high = interval.high * (1 - eps) - eps
return low, high
|
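A tiny numeric illustration of the eps-based nudging in the pair above, for an assumed interval (0, 1) and the float32 dtype used by the modified version:
import numpy as np

eps = 10 * np.finfo(np.float32).eps
interval_low, interval_high = 0.0, 1.0            # assumed finite, non-negative bounds
low = interval_low * (1 + eps) + eps              # nudged just above the lower bound
high = interval_high * (1 - eps) - eps            # nudged just below the upper bound
print(interval_low < low < high < interval_high)  # True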
34 | def _get_betterworldbooks_thirdparty_metadata(isbn):
if isbn:
url = 'https://www.betterworldbooks.com/product/detail/-%s' % isbn
try:
r = requests.get(url)
results = [{
'url': BWB_AFFILIATE_LINK % (isbn),
'qlt': r[0].lower(),
'price': '$%s (%s)' % (r[1], r[0].lower()),
'price_amt': r[1],
} for r in re.findall('data-condition=\"(New|Used).*data-price=\"([0-9.]+)\"', r.content)]
cheapest = sorted(results, key=lambda r: Decimal(r['price_amt']))[0]
return cheapest
except Exception:
return {}
| def _get_betterworldbooks_thirdparty_metadata(isbn):
if isbn:
url = 'https://www.betterworldbooks.com/product/detail/-%s' % isbn
try:
r = requests.get(url)
results = [{
'url': BWB_AFFILIATE_LINK % (isbn),
'qlt': r[0].lower(),
'price': '$%s (%s)' % (r[1], r[0].lower()),
'price_amt': r[1],
} for r in re.findall('data-condition="(New|Used).*data-price="([0-9.]+)"', r.content)]
cheapest = sorted(results, key=lambda r: Decimal(r['price_amt']))[0]
return cheapest
except Exception:
return {}
|
40,593 | def plot_total_cost_bar(n, ax=None, opts={}):
if ax is None: ax = plt.gca()
total_load = (n.snapshot_weightings.generators * n.loads_t.p.sum(axis=1)).sum()
tech_colors = opts['tech_colors']
def split_costs(n):
costs = aggregate_costs(n).reset_index(level=0, drop=True)
costs_ex = aggregate_costs(n, existing_only=True).reset_index(level=0, drop=True)
return (costs['capital'].add(costs['marginal'], fill_value=0.),
costs_ex['capital'], costs['capital'] - costs_ex['capital'], costs['marginal'])
costs, costs_cap_ex, costs_cap_new, costs_marg = split_costs(n)
costs_graph = pd.DataFrame(dict(a=costs.drop('load', errors='ignore')),
index=['AC-AC', 'AC line', 'onwind', 'offwind-ac',
'offwind-dc', 'solar', 'OCGT','CCGT', 'battery', 'H2']).dropna()
bottom = np.array([0., 0.])
texts = []
for i,ind in enumerate(costs_graph.index):
data = np.asarray(costs_graph.loc[ind])/total_load
ax.bar([0.5], data, bottom=bottom, color=tech_colors[ind],
width=0.7, zorder=-1)
bottom_sub = bottom
bottom = bottom+data
if ind in opts['conv_techs'] + ['AC line']:
for c in [costs_cap_ex, costs_marg]:
if ind in c:
data_sub = np.asarray([c.loc[ind]])/total_load
ax.bar([0.5], data_sub, linewidth=0,
bottom=bottom_sub, color=tech_colors[ind],
width=0.7, zorder=-1, alpha=0.8)
bottom_sub += data_sub
if abs(data[-1]) < 5:
continue
text = ax.text(1.1,(bottom-0.5*data)[-1]-3,opts['nice_names'].get(ind,ind))
texts.append(text)
ax.set_ylabel("Average system cost [Eur/MWh]")
ax.set_ylim([0, opts.get('costs_max', 80)])
ax.set_xlim([0, 1])
ax.set_xticklabels([])
ax.grid(True, axis="y", color='k', linestyle='dotted')
| def plot_total_cost_bar(n, opts, ax=None):
if ax is None: ax = plt.gca()
total_load = (n.snapshot_weightings.generators * n.loads_t.p.sum(axis=1)).sum()
tech_colors = opts['tech_colors']
def split_costs(n):
costs = aggregate_costs(n).reset_index(level=0, drop=True)
costs_ex = aggregate_costs(n, existing_only=True).reset_index(level=0, drop=True)
return (costs['capital'].add(costs['marginal'], fill_value=0.),
costs_ex['capital'], costs['capital'] - costs_ex['capital'], costs['marginal'])
costs, costs_cap_ex, costs_cap_new, costs_marg = split_costs(n)
costs_graph = pd.DataFrame(dict(a=costs.drop('load', errors='ignore')),
index=['AC-AC', 'AC line', 'onwind', 'offwind-ac',
'offwind-dc', 'solar', 'OCGT','CCGT', 'battery', 'H2']).dropna()
bottom = np.array([0., 0.])
texts = []
for i,ind in enumerate(costs_graph.index):
data = np.asarray(costs_graph.loc[ind])/total_load
ax.bar([0.5], data, bottom=bottom, color=tech_colors[ind],
width=0.7, zorder=-1)
bottom_sub = bottom
bottom = bottom+data
if ind in opts['conv_techs'] + ['AC line']:
for c in [costs_cap_ex, costs_marg]:
if ind in c:
data_sub = np.asarray([c.loc[ind]])/total_load
ax.bar([0.5], data_sub, linewidth=0,
bottom=bottom_sub, color=tech_colors[ind],
width=0.7, zorder=-1, alpha=0.8)
bottom_sub += data_sub
if abs(data[-1]) < 5:
continue
text = ax.text(1.1,(bottom-0.5*data)[-1]-3,opts['nice_names'].get(ind,ind))
texts.append(text)
ax.set_ylabel("Average system cost [Eur/MWh]")
ax.set_ylim([0, opts.get('costs_max', 80)])
ax.set_xlim([0, 1])
ax.set_xticklabels([])
ax.grid(True, axis="y", color='k', linestyle='dotted')
|
45,913 | def interp(x: torch.Tensor, xp: torch.Tensor, fp: torch.Tensor) -> torch.Tensor:
"""Interpolate ``x`` tensor according to ``xp`` and ``fp`` as in ``np.interp``.
This implementation cannot reproduce numpy results identically, but reasonable.
Code refered to `here <https://github.com/pytorch/pytorch/issues/1552#issuecomment-926972915>`_.
"""
slopes = (fp[1:] - fp[:-1]) / (xp[1:] - xp[:-1])
locs = torch.searchsorted(xp, x)
locs = locs.clip(1, len(xp) - 1) - 1
return slopes[locs] * (x - xp[locs]) + xp[locs]
| def interp(x: torch.Tensor, xp: torch.Tensor, fp: torch.Tensor) -> torch.Tensor:
"""Interpolate ``x`` tensor according to ``xp`` and ``fp`` as in ``np.interp``.
This implementation cannot reproduce numpy results identically, but reasonable.
Code referred to `here <https://github.com/pytorch/pytorch/issues/1552#issuecomment-926972915>`_.
"""
slopes = (fp[1:] - fp[:-1]) / (xp[1:] - xp[:-1])
locs = torch.searchsorted(xp, x)
locs = locs.clip(1, len(xp) - 1) - 1
return slopes[locs] * (x - xp[locs]) + xp[locs]
|
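For reference, a self-contained sketch of the np.interp-style piecewise-linear interpolation the docstring describes, using the same torch.searchsorted indexing; the sample points are made up.
import torch

def torch_interp(x: torch.Tensor, xp: torch.Tensor, fp: torch.Tensor) -> torch.Tensor:
    slopes = (fp[1:] - fp[:-1]) / (xp[1:] - xp[:-1])
    locs = torch.searchsorted(xp, x).clip(1, len(xp) - 1) - 1
    return fp[locs] + slopes[locs] * (x - xp[locs])  # value at the left knot plus the local slope

xp = torch.tensor([0.0, 1.0, 2.0])
fp = torch.tensor([0.0, 10.0, 20.0])
print(torch_interp(torch.tensor([0.5, 1.5]), xp, fp))  # tensor([ 5., 15.])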
43,890 | def two_qubit_decomposition(U, wires):
r"""Recover the decomposition of a two-qubit matrix :math:`U` in terms of
elementary operations.
The work of `Shende, Markov, and Bullock (2003)
<https://arxiv.org/abs/quant-ph/0308033>`__ presents a fixed-form
decomposition of :math:`U` in terms of single-qubit gates and
CNOTs. Multiple such decompositions are possible (by choosing two of
{``RX``, ``RY``, ``RZ``}). Here we choose the ``RY``, ``RZ`` case (fig. 2 in
the above) to match with the default decomposition of the single-qubit
``Rot`` operations as ``RZ RY RZ``. The form of the decomposition is:
.. figure:: ../../_static/two_qubit_decomposition.svg
:align: center
:width: 100%
:target: javascript:void(0);
where :math:`A, B, C, D` are :math:`SU(2)` gates.
Args:
U (tensor): A 4 x 4 unitary matrix.
wires (Union[Wires, Sequence[int] or int]): The wires on which to apply the operation.
Returns:
list[qml.Operation]: A list of operations that represent the decomposition
of the matrix U.
"""
# First, test if we have a tensor product of two single-qubit operations. If
# so, we don't actually need to do a decomposition. To test this, we can
# check if Edag U E is in SO(4) because of the isomorphism between SO(4) and
# SU(2) x SU(2).
test_so4 = qml.math.linalg.multi_dot([Edag, U, E])
if qml.math.isclose(qml.math.linalg.det(test_so4), 1.0) and qml.math.allclose(
qml.math.dot(test_so4, qml.math.T(test_so4)), qml.math.eye(4)
):
A, B = _su2su2_to_tensor_products(U)
A_ops = zyz_decomposition(A, wires[0])
B_ops = zyz_decomposition(B, wires[1])
return A_ops + B_ops
# The final form of this decomposition is U = (A \otimes B) V (C \otimes D),
# as expressed in the circuit below.
# -U- = -C--X--RZ(d)--C---------X--A-|
# -U- = -D--C--RY(b)--X--RY(a)--C--B-|
# First, we note that this method works only for SU(4) gates, meaning that
# we need to rescale the matrix by its determinant. Furthermore, we add a
# SWAP as per v1 of 0308033, which helps with some rearranging of gates in
# the decomposition (it will cancel out the fact that we need to add a SWAP
# to fix the determinant in another part later).
swap_U = qml.math.exp(1j * np.pi / 4) * qml.math.dot(SWAP, _convert_to_su4(U))
# Next, we can choose the angles of the RZ / RY rotations. See the docstring
# within the function used below. This is to ensure U and V somehow maintain
# a relationship between their spectra to ensure we can recover A, B, C, D.
alpha, beta, delta = _select_rotation_angles(swap_U)
# This is the interior portion of the decomposition circuit
interior_decomp = [
qml.CNOT(wires=[wires[1], wires[0]]),
qml.RZ(delta, wires=wires[0]),
qml.RY(beta, wires=wires[1]),
qml.CNOT(wires=[wires[0], wires[1]]),
qml.RY(alpha, wires=wires[1]),
qml.CNOT(wires=[wires[1], wires[0]]),
]
# We need the matrix representation of this interior part, V, in order to
# decompose U = (A \otimes B) V (C \otimes D)
#
# Looking at the decomposition above, V has determinant -1 (because there
# are 3 CNOTs, each with determinant -1). The relationship between U and V
# requires that both are in SU(4), so we add a SWAP after to V. We will see
# how this gets fixed later.
#
# -V- = -X--RZ(d)--C---------X--SWAP-|
# -V- = -C--RY(b)--X--RY(a)--C--SWAP-|
RZd = qml.RZ(delta, wires=0).matrix
RYb = qml.RY(beta, wires=0).matrix
RYa = qml.RY(alpha, wires=0).matrix
V = qml.math.linalg.multi_dot(
[
SWAP,
CNOT10,
qml.math.kron(qml.math.eye(2), RYa),
CNOT01,
qml.math.kron(RZd, RYb),
CNOT10,
]
)
# Now we need to find the four SU(2) operations A, B, C, D
A, B, C, D = _extract_su2su2_prefactors(swap_U, V)
# At this point, we have the following:
# -U-SWAP- = --C--X-RZ(d)-C-------X-SWAP--A|
# -U-SWAP- = --D--C-RZ(b)-X-RY(a)-C-SWAP--B|
#
# Using the relationship that SWAP(A \otimes B) SWAP = B \otimes A,
# -U-SWAP- = --C--X-RZ(d)-C-------X--B--SWAP-|
# -U-SWAP- = --D--C-RZ(b)-X-RY(a)-C--A--SWAP-|
#
# Now the SWAPs cancel, giving us the desired decomposition
# (up to a global phase).
# -U- = --C--X-RZ(d)-C-------X--B--|
# -U- = --D--C-RZ(b)-X-RY(a)-C--A--|
A_ops = zyz_decomposition(A, wires[1])
B_ops = zyz_decomposition(B, wires[0])
C_ops = zyz_decomposition(C, wires[0])
D_ops = zyz_decomposition(D, wires[1])
# Return the full decomposition
return C_ops + D_ops + interior_decomp + A_ops + B_ops
| def two_qubit_decomposition(U, wires):
r"""Recover the decomposition of a two-qubit matrix :math:`U` in terms of
elementary operations.
The work of `Shende, Markov, and Bullock (2003)
<https://arxiv.org/abs/quant-ph/0308033>`__ presents a fixed-form
decomposition of :math:`U` in terms of single-qubit gates and
CNOTs. Multiple such decompositions are possible (by choosing two of
{``RX``, ``RY``, ``RZ``}). Here we choose the ``RY``, ``RZ`` case (fig. 2 in
the above) to match with the default decomposition of the single-qubit
``Rot`` operations as ``RZ RY RZ``. The form of the decomposition is:
.. figure:: ../../_static/two_qubit_decomposition.svg
:align: center
:width: 100%
:target: javascript:void(0);
where :math:`A, B, C, D` are :math:`SU(2)` gates.
Args:
U (tensor): A 4 x 4 unitary matrix.
wires (Union[Wires, Sequence[int] or int]): The wires on which to apply the operation.
Returns:
list[qml.Operation]: A list of operations that represent the decomposition
of the matrix U.
"""
# First, test if we have a tensor product of two single-qubit operations. If
# so, we don't actually need to do a decomposition. To test this, we can
# check if Edag U E is in SO(4) because of the isomorphism between SO(4) and
# SU(2) x SU(2).
test_so4 = qml.math.linalg.multi_dot([Edag, U, E])
if qml.math.isclose(qml.math.linalg.det(test_so4), 1.0) and qml.math.allclose(
qml.math.dot(test_so4, qml.math.T(test_so4)), qml.math.eye(4)
):
A, B = _su2su2_to_tensor_products(U)
A_ops = zyz_decomposition(A, wires[0])
B_ops = zyz_decomposition(B, wires[1])
return A_ops + B_ops
# The final form of this decomposition is U = (A \otimes B) V (C \otimes D),
# as expressed in the circuit below.
# -U- = -C--X--RZ(d)--C---------X--A-|
# -U- = -D--C--RY(b)--X--RY(a)--C--B-|
# First, we note that this method works only for SU(4) gates, meaning that
# we need to rescale the matrix by its determinant. Furthermore, we add a
# SWAP as per v1 of 0308033, which helps with some rearranging of gates in
# the decomposition (it will cancel out the fact that we need to add a SWAP
# to fix the determinant in another part later).
swap_U = np.exp(1j * np.pi / 4) * qml.math.dot(SWAP, _convert_to_su4(U))
# Next, we can choose the angles of the RZ / RY rotations. See the docstring
# within the function used below. This is to ensure U and V somehow maintain
# a relationship between their spectra to ensure we can recover A, B, C, D.
alpha, beta, delta = _select_rotation_angles(swap_U)
# This is the interior portion of the decomposition circuit
interior_decomp = [
qml.CNOT(wires=[wires[1], wires[0]]),
qml.RZ(delta, wires=wires[0]),
qml.RY(beta, wires=wires[1]),
qml.CNOT(wires=[wires[0], wires[1]]),
qml.RY(alpha, wires=wires[1]),
qml.CNOT(wires=[wires[1], wires[0]]),
]
# We need the matrix representation of this interior part, V, in order to
# decompose U = (A \otimes B) V (C \otimes D)
#
# Looking at the decomposition above, V has determinant -1 (because there
# are 3 CNOTs, each with determinant -1). The relationship between U and V
# requires that both are in SU(4), so we add a SWAP after to V. We will see
# how this gets fixed later.
#
# -V- = -X--RZ(d)--C---------X--SWAP-|
# -V- = -C--RY(b)--X--RY(a)--C--SWAP-|
RZd = qml.RZ(delta, wires=0).matrix
RYb = qml.RY(beta, wires=0).matrix
RYa = qml.RY(alpha, wires=0).matrix
V = qml.math.linalg.multi_dot(
[
SWAP,
CNOT10,
qml.math.kron(qml.math.eye(2), RYa),
CNOT01,
qml.math.kron(RZd, RYb),
CNOT10,
]
)
# Now we need to find the four SU(2) operations A, B, C, D
A, B, C, D = _extract_su2su2_prefactors(swap_U, V)
# At this point, we have the following:
# -U-SWAP- = --C--X-RZ(d)-C-------X-SWAP--A|
# -U-SWAP- = --D--C-RZ(b)-X-RY(a)-C-SWAP--B|
#
# Using the relationship that SWAP(A \otimes B) SWAP = B \otimes A,
# -U-SWAP- = --C--X-RZ(d)-C-------X--B--SWAP-|
# -U-SWAP- = --D--C-RZ(b)-X-RY(a)-C--A--SWAP-|
#
# Now the SWAPs cancel, giving us the desired decomposition
# (up to a global phase).
# -U- = --C--X-RZ(d)-C-------X--B--|
# -U- = --D--C-RZ(b)-X-RY(a)-C--A--|
A_ops = zyz_decomposition(A, wires[1])
B_ops = zyz_decomposition(B, wires[0])
C_ops = zyz_decomposition(C, wires[0])
D_ops = zyz_decomposition(D, wires[1])
# Return the full decomposition
return C_ops + D_ops + interior_decomp + A_ops + B_ops
|
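The SO(4) test at the top of the pair above relies on module-level constants E and Edag. A self-contained numpy sketch of that test, assuming the standard magic-basis convention for E:
import numpy as np

E = np.array([[1, 0, 0, 1j],
              [0, 1j, 1, 0],
              [0, 1j, -1, 0],
              [1, 0, 0, -1j]]) / np.sqrt(2)  # assumed magic-basis change of basis
Edag = E.conj().T

def is_single_qubit_tensor_product(U, atol=1e-8):
    # U = A (x) B with A, B in SU(2) exactly when Edag @ U @ E lies in SO(4).
    M = Edag @ U @ E
    return np.isclose(np.linalg.det(M), 1.0, atol=atol) and np.allclose(M @ M.T, np.eye(4), atol=atol)

def RY(t):
    return np.array([[np.cos(t / 2), -np.sin(t / 2)], [np.sin(t / 2), np.cos(t / 2)]])

CNOT = np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]], dtype=complex)
print(is_single_qubit_tensor_product(np.kron(RY(0.3), RY(1.1))))  # True
print(is_single_qubit_tensor_product(CNOT))                       # False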
17,502 | def open_dataset(
filename_or_obj: str | os.PathLike,
*,
engine: T_Engine = None,
chunks: T_Chunks = None,
cache: bool | None = None,
decode_cf: bool | None = None,
mask_and_scale: bool | None = None,
decode_times: bool | None = None,
decode_timedelta: bool | None = None,
use_cftime: bool | None = None,
concat_characters: bool | None = None,
decode_coords: Literal["coordinates", "all"] | bool | None = None,
drop_variables: str | Iterable[str] | None = None,
inline_array: bool = False,
backend_kwargs: dict[str, Any] | None = None,
**kwargs,
) -> Dataset:
"""Open and decode a dataset from a file or file-like object.
Parameters
----------
filename_or_obj : str, Path, file-like or DataStore
Strings and Path objects are interpreted as a path to a netCDF file
or an OpenDAP URL and opened with python-netCDF4, unless the filename
ends with .gz, in which case the file is gunzipped and opened with
scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "pynio", "cfgrib", \
"pseudonetcdf", "zarr", None}, installed backenend \
or subclass of xarray.backends.BackendEntrypoint, optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
"netcdf4". A custom backend class (a subclass of ``BackendEntrypoint``)
can also be used.
chunks : int, dict, 'auto' or None, optional
If chunks is provided, it is used to load the new dataset into dask
arrays. ``chunks=-1`` loads the dataset with dask using a single
chunk for all arrays. ``chunks={}`` loads the dataset with dask using
engine preferred chunks if exposed by the backend, otherwise with
a single chunk for all arrays.
``chunks='auto'`` will use dask ``auto`` chunking taking into account the
engine preferred chunks. See dask chunking for more details.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False. Does not
change the behavior of coordinates corresponding to dimensions, which
always load their data from disk into a ``pandas.Index``.
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA. mask_and_scale defaults to True except for the
pseudonetcdf backend. This keyword may not be supported by all the backends.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
This keyword may not be supported by all the backends.
decode_timedelta : bool, optional
If True, decode variables and coordinates with time units in
{"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"}
into timedelta objects. If False, leave them encoded as numbers.
If None (default), assume the same value of decode_time.
This keyword may not be supported by all the backends.
use_cftime: bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. "gregorian", "proleptic_gregorian", "standard", or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error. This keyword may not be supported by all the backends.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
This keyword may not be supported by all the backends.
decode_coords : bool or {"coordinates", "all"}, optional
Controls which variables are set as coordinate variables:
- "coordinates" or True: Set variables referred to in the
``'coordinates'`` attribute of the datasets or individual variables
as coordinate variables.
- "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and
other attributes as coordinate variables.
drop_variables: str or iterable of str, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
inline_array: bool, default: False
How to include the array in the dask task graph.
By default(``inline_array=False``) the array is included in a task by
itself, and each chunk refers to that task by its key. With
``inline_array=True``, Dask will instead inline the array directly
in the values of the task graph. See :py:func:`dask.array.from_array`.
backend_kwargs: dict
Additional keyword arguments passed on to the engine open function,
equivalent to `**kwargs`.
**kwargs: dict
Additional keyword arguments passed on to the engine open function.
For example:
- 'group': path to the netCDF4 group in the given file to open given as
a str,supported by "netcdf4", "h5netcdf", "zarr".
- 'lock': resource lock to use when reading data from disk. Only
relevant when using dask or another form of parallelism. By default,
appropriate locks are chosen to safely read and write files with the
currently active dask scheduler. Supported by "netcdf4", "h5netcdf",
"scipy", "pynio", "pseudonetcdf", "cfgrib".
See engine open function for kwargs accepted by each specific engine.
Returns
-------
dataset : Dataset
The newly created dataset.
Notes
-----
``open_dataset`` opens the file with read-only access. When you modify
values of a Dataset, even one linked to files on disk, only the in-memory
copy you are manipulating in xarray is modified: the original file on disk
is never touched.
See Also
--------
open_mfdataset
"""
if cache is None:
cache = chunks is None
if backend_kwargs is not None:
kwargs.update(backend_kwargs)
if engine is None:
engine = plugins.guess_engine(filename_or_obj)
backend = plugins.get_backend(engine)
decoders = _resolve_decoders_kwargs(
decode_cf,
open_backend_dataset_parameters=backend.open_dataset_parameters,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
decode_timedelta=decode_timedelta,
concat_characters=concat_characters,
use_cftime=use_cftime,
decode_coords=decode_coords,
)
overwrite_encoded_chunks = kwargs.pop("overwrite_encoded_chunks", None)
backend_ds = backend.open_dataset(
filename_or_obj,
drop_variables=drop_variables,
**decoders,
**kwargs,
)
ds = _dataset_from_backend_dataset(
backend_ds,
filename_or_obj,
engine,
chunks,
cache,
overwrite_encoded_chunks,
inline_array,
drop_variables=drop_variables,
**decoders,
**kwargs,
)
return ds
| def open_dataset(
filename_or_obj: str | os.PathLike,
*,
engine: T_Engine = None,
chunks: T_Chunks = None,
cache: bool | None = None,
decode_cf: bool | None = None,
mask_and_scale: bool | None = None,
decode_times: bool | None = None,
decode_timedelta: bool | None = None,
use_cftime: bool | None = None,
concat_characters: bool | None = None,
decode_coords: Literal["coordinates", "all"] | bool | None = None,
drop_variables: str | Iterable[str] | None = None,
inline_array: bool = False,
backend_kwargs: dict[str, Any] | None = None,
**kwargs,
) -> Dataset:
"""Open and decode a dataset from a file or file-like object.
Parameters
----------
filename_or_obj : str, Path, file-like or DataStore
Strings and Path objects are interpreted as a path to a netCDF file
or an OpenDAP URL and opened with python-netCDF4, unless the filename
ends with .gz, in which case the file is gunzipped and opened with
scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
engine : {"netcdf4", "scipy", "pydap", "h5netcdf", "pynio", "cfgrib", \
"pseudonetcdf", "zarr", None}, installed backend \
or subclass of xarray.backends.BackendEntrypoint, optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
"netcdf4". A custom backend class (a subclass of ``BackendEntrypoint``)
can also be used.
chunks : int, dict, 'auto' or None, optional
If chunks is provided, it is used to load the new dataset into dask
arrays. ``chunks=-1`` loads the dataset with dask using a single
chunk for all arrays. ``chunks={}`` loads the dataset with dask using
engine preferred chunks if exposed by the backend, otherwise with
a single chunk for all arrays.
``chunks='auto'`` will use dask ``auto`` chunking taking into account the
engine preferred chunks. See dask chunking for more details.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False. Does not
change the behavior of coordinates corresponding to dimensions, which
always load their data from disk into a ``pandas.Index``.
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA. mask_and_scale defaults to True except for the
pseudonetcdf backend. This keyword may not be supported by all the backends.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
This keyword may not be supported by all the backends.
decode_timedelta : bool, optional
If True, decode variables and coordinates with time units in
{"days", "hours", "minutes", "seconds", "milliseconds", "microseconds"}
into timedelta objects. If False, leave them encoded as numbers.
If None (default), assume the same value of decode_time.
This keyword may not be supported by all the backends.
use_cftime: bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. "gregorian", "proleptic_gregorian", "standard", or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error. This keyword may not be supported by all the backends.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
This keyword may not be supported by all the backends.
decode_coords : bool or {"coordinates", "all"}, optional
Controls which variables are set as coordinate variables:
- "coordinates" or True: Set variables referred to in the
``'coordinates'`` attribute of the datasets or individual variables
as coordinate variables.
- "all": Set variables referred to in ``'grid_mapping'``, ``'bounds'`` and
other attributes as coordinate variables.
drop_variables: str or iterable of str, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
inline_array: bool, default: False
How to include the array in the dask task graph.
By default(``inline_array=False``) the array is included in a task by
itself, and each chunk refers to that task by its key. With
``inline_array=True``, Dask will instead inline the array directly
in the values of the task graph. See :py:func:`dask.array.from_array`.
backend_kwargs: dict
Additional keyword arguments passed on to the engine open function,
equivalent to `**kwargs`.
**kwargs: dict
Additional keyword arguments passed on to the engine open function.
For example:
- 'group': path to the netCDF4 group in the given file to open given as
a str,supported by "netcdf4", "h5netcdf", "zarr".
- 'lock': resource lock to use when reading data from disk. Only
relevant when using dask or another form of parallelism. By default,
appropriate locks are chosen to safely read and write files with the
currently active dask scheduler. Supported by "netcdf4", "h5netcdf",
"scipy", "pynio", "pseudonetcdf", "cfgrib".
See engine open function for kwargs accepted by each specific engine.
Returns
-------
dataset : Dataset
The newly created dataset.
Notes
-----
``open_dataset`` opens the file with read-only access. When you modify
values of a Dataset, even one linked to files on disk, only the in-memory
copy you are manipulating in xarray is modified: the original file on disk
is never touched.
See Also
--------
open_mfdataset
"""
if cache is None:
cache = chunks is None
if backend_kwargs is not None:
kwargs.update(backend_kwargs)
if engine is None:
engine = plugins.guess_engine(filename_or_obj)
backend = plugins.get_backend(engine)
decoders = _resolve_decoders_kwargs(
decode_cf,
open_backend_dataset_parameters=backend.open_dataset_parameters,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
decode_timedelta=decode_timedelta,
concat_characters=concat_characters,
use_cftime=use_cftime,
decode_coords=decode_coords,
)
overwrite_encoded_chunks = kwargs.pop("overwrite_encoded_chunks", None)
backend_ds = backend.open_dataset(
filename_or_obj,
drop_variables=drop_variables,
**decoders,
**kwargs,
)
ds = _dataset_from_backend_dataset(
backend_ds,
filename_or_obj,
engine,
chunks,
cache,
overwrite_encoded_chunks,
inline_array,
drop_variables=drop_variables,
**decoders,
**kwargs,
)
return ds
|
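The main change in the pair above is the "backenend" → "backend" docstring fix; the signature itself is unchanged. A short usage sketch of that signature, where the file name, variable name and backend choice are assumptions:
import numpy as np
import xarray as xr

# Write a tiny file first so the call below has something to open.
xr.Dataset({"t2m": ("time", np.arange(4.0))}).to_netcdf("example.nc")
ds = xr.open_dataset(
    "example.nc",
    engine="netcdf4",   # explicit backend instead of auto-detection (assumes netCDF4 is installed)
    decode_times=True,  # decode CF time encodings (a no-op here, there are none)
    chunks=None,        # eager numpy arrays; pass {} or "auto" for lazy dask arrays
)
print(ds["t2m"].values)  # [0. 1. 2. 3.]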
38,513 | def star_shape_cell_centers(g: "pp.Grid", as_nan: bool = False) -> np.ndarray:
"""
For a given grid compute the star shape center for each cell.
The algorithm computes the half space intersections, by using the above method
half_space_pt,
of the spaces defined by the cell faces and the face normals.
This is a wrapper method that operates on a grid.
Parameters
----------
g: pp.Grid
the grid
as_nan: bool, optional
Decide whether, in case some cells are not star-shaped return nan as
new center. Otherwise an exception is raised (default behaviour).
Returns
-------
np.ndarray
The new cell centers.
"""
# no need for 1d or 0d grids
if g.dim < 2:
return g.cell_centers
# retrieve the faces and nodes
faces, _, sgn = sps.find(g.cell_faces)
nodes, _, _ = sps.find(g.face_nodes)
# shift the nodes close to the origin, to avoid numerical problems when coordinates are
# too big
xn = g.nodes.copy()
xn_shift = np.average(xn, axis=1)
xn -= np.tile(xn_shift, (xn.shape[1], 1)).T
# compute the star shape cell centers by constructing the half spaces of each cell
# given by its faces and related normals
cell_centers = np.zeros((3, g.num_cells))
for c in np.arange(g.num_cells):
loc = slice(g.cell_faces.indptr[c], g.cell_faces.indptr[c + 1])
faces_loc = faces[loc]
loc_n = g.face_nodes.indptr[faces_loc]
# make the normals coherent
normal = np.multiply(
sgn[loc], np.divide(g.face_normals[:, faces_loc], g.face_areas[faces_loc])
)
x0, x1 = xn[:, nodes[loc_n]], xn[:, nodes[loc_n + 1]]
coords = np.concatenate((x0, x1), axis=1)
# compute a point in the half space intersection of all cell faces
try:
cell_centers[:, c] = pp.half_space.half_space_interior_point(
normal, (x1 + x0) / 2.0, coords
)
except ValueError:
# the cell is not star-shaped
if as_nan:
cell_centers[:, c] = np.array([np.nan, np.nan, np.nan])
else:
raise ValueError(
"Cell not star-shaped impossible to compute the centre"
)
# shift back the computed cell centers and return them
return cell_centers + np.tile(xn_shift, (g.num_cells, 1)).T
| def star_shape_cell_centers(g: "pp.Grid", as_nan: bool = False) -> np.ndarray:
"""
For a given grid compute the star shape center for each cell.
The algorithm computes the half space intersections, by using the above method
half_space_pt,
of the spaces defined by the cell faces and the face normals.
This is a wrapper method that operates on a grid.
Parameters
----------
g: pp.Grid
the grid
as_nan: bool, optional
Decide whether to return nan as the new center for cells which are not
star-shaped. Otherwise an exception is raised (default behaviour).
Returns
-------
np.ndarray
The new cell centers.
"""
# no need for 1d or 0d grids
if g.dim < 2:
return g.cell_centers
# retrieve the faces and nodes
faces, _, sgn = sps.find(g.cell_faces)
nodes, _, _ = sps.find(g.face_nodes)
# shift the nodes close to the origin, to avoid numerical problems when coordinates are
# too big
xn = g.nodes.copy()
xn_shift = np.average(xn, axis=1)
xn -= np.tile(xn_shift, (xn.shape[1], 1)).T
# compute the star shape cell centers by constructing the half spaces of each cell
# given by its faces and related normals
cell_centers = np.zeros((3, g.num_cells))
for c in np.arange(g.num_cells):
loc = slice(g.cell_faces.indptr[c], g.cell_faces.indptr[c + 1])
faces_loc = faces[loc]
loc_n = g.face_nodes.indptr[faces_loc]
# make the normals coherent
normal = np.multiply(
sgn[loc], np.divide(g.face_normals[:, faces_loc], g.face_areas[faces_loc])
)
x0, x1 = xn[:, nodes[loc_n]], xn[:, nodes[loc_n + 1]]
coords = np.concatenate((x0, x1), axis=1)
# compute a point in the half space intersection of all cell faces
try:
cell_centers[:, c] = pp.half_space.half_space_interior_point(
normal, (x1 + x0) / 2.0, coords
)
except ValueError:
# the cell is not star-shaped
if as_nan:
cell_centers[:, c] = np.array([np.nan, np.nan, np.nan])
else:
raise ValueError(
"Cell not star-shaped impossible to compute the centre"
)
# shift back the computed cell centers and return them
return cell_centers + np.tile(xn_shift, (g.num_cells, 1)).T
|
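Both versions above delegate the geometric work to pp.half_space.half_space_interior_point. A hedged stand-in for that step, written as a small linear program with scipy; the normals, plane points and bounding box below are assumptions describing a unit square.
import numpy as np
from scipy.optimize import linprog

def interior_point(normals, pts, box):
    # Maximize the slack d subject to normals[:, i] . x + d <= normals[:, i] . pts[:, i],
    # i.e. find a point strictly inside the intersection of the outward-normal half spaces.
    dim, n = normals.shape
    c = np.zeros(dim + 1)
    c[-1] = -1.0  # maximize d  <=>  minimize -d
    A_ub = np.hstack([normals.T, np.ones((n, 1))])
    b_ub = np.einsum("ij,ij->j", normals, pts)
    res = linprog(c, A_ub=A_ub, b_ub=b_ub, bounds=box + [(0, None)], method="highs")
    if not res.success or res.x[-1] <= 0:
        raise ValueError("no interior point: the cell is not star-shaped")
    return res.x[:dim]

# Unit square: outward normals of the four edges and one point on each edge.
normals = np.array([[1.0, -1.0, 0.0, 0.0], [0.0, 0.0, 1.0, -1.0]])
pts = np.array([[1.0, 0.0, 0.5, 0.5], [0.5, 0.5, 1.0, 0.0]])
print(interior_point(normals, pts, box=[(0, 1), (0, 1)]))  # ~[0.5 0.5]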
23,666 | def AM_AOD_PW_spectral_correction(airmass_absolute, aod500, pw,
module_type=None, coefficients=None,
min_aod500=0.05, max_aod500=0.6,
min_pw=0.25, max_pw=4):
r"""
Spectral mismatch modifier based on absolute (pressure-adjusted)
airmass (AM), aerosol optical depth (AOD) at 500 nm and
precipitable water (PW).
Estimates a spectral mismatch modifier :math:`M` representing
the effect on module short circuit current of variation in the
spectral irradiance, :math:`MM` is estimated from absolute
(pressure-adjusted) AM, :math:`ama`, AOD at 500 nm, :math:`aod500`
and PW, :math:`pw`.
The best fit polynomial for each atmospheric parameter (AM, AOD, PW)
and PV technology under study has been obtained from synthetic spectra
generated with SMARTS [1], considering the following boundary
conditions:
* :math:`1.0 <= ama <= 5.0`
* :math:`0.05 <= aod500 <= 0.6`
* :math:`0.25 \textrm{cm} <= pw <= 4 \textrm{cm}`
* Spectral range is limited to that of CMP11 (280 nm to 2800 nm)
* All other parameters fixed at G173 standard
Elevation (deg), AOD and PW data were recorded in the city of Jaén,
Spain for one year synchronously with both, broadband and
spectroradiometric measurements of 30º tilted global irradiance
south-facing logged in 5-min intervals. AM was estimated through
elevation data.
Finally, the spectral mismatch factor was calculated for each
of the PV technologies and a multivariable regression adjustment
as a function of AM, AOD and PW was performed according to [2] and [3].
As such, the polynomial adjustment coefficients included in [3]
were obtained.
Parameters
----------
airmass_absolute : array-like
absolute (pressure-adjusted) airmass. [unitless]
aod500 : array-like
atmospheric aerosol optical depth at 500 nm. [unitless]
pw : array-like
atmospheric precipitable water. [cm]
min_aod500 : float, default 0.05
minimum atmospheric aerosol optical depth at 500 nm. Any aod500 value
lower than min_aod500 is set to min_aod500 to avoid model
divergence. [unitless]
max_aod500 : float, default 0.6
maximum atmospheric aerosol optical depth at 500 nm. Any aod500 value
higher than max_aod500 is set to NaN to avoid model
divergence. [unitless]
min_pw : float, default 0.25
minimum atmospheric precipitable water. Any pw value lower than min_pw
is set to min_pw to avoid model divergence. [cm]
max_pw : float, default 4
maximum atmospheric precipitable water. Any pw value higher than max_pw
is set to NaN to avoid model divergence. [cm]
module_type : None or string, default None
a string specifying a cell type. Values of 'cdte', 'monosi', 'cigs',
'multisi', 'asi' and 'perovskite'. If provided,
module_type selects default coefficients for the following modules:
* 'cdte' - anonymous CdTe module.
* 'monosi', - anonymous sc-si module.
* 'multisi', - anonymous mc-si- module.
* 'cigs' - anonymous copper indium gallium selenide module.
* 'asi' - anonymous amorphous silicon module.
* 'perovskite' - anonymous pervoskite module.
coefficients : None or array-like, default None
the coefficients employed have been obtained with experimental
data in the city of Jaén, Spain. It is pending to verify if such
coefficients vary in places with extreme climates where AOD and
pw values are frequently high.
Returns
-------
modifier: array-like
spectral mismatch factor (unitless) which can be multiplied
with broadband irradiance reaching a module's cells to estimate
effective irradiance, i.e., the irradiance that is converted to
electrical current.
References
----------
.. [1] Gueymard, Christian. SMARTS2: a simple model of the
atmospheric radiative transfer of sunshine: algorithms
and performance assessment. Cocoa, FL:
Florida Solar Energy Center, 1995.
.. [2] Theristis, M., Fernández, E., Almonacid, F., and
Pérez-Higueras, Pedro. "Spectral Corrections Based
on Air Mass, Aerosol Optical Depth and Precipitable
Water for CPV Performance Modeling.
"IEEE Journal of Photovoltaics 2016, 6(6), 1598-1604.
https://doi.org/10.1109/jphotov.2016.2606702
.. [3] Caballero, J.A., Fernández, E., Theristis, M.,
Almonacid, F., and Nofuentes, G. "Spectral Corrections Based on
Air Mass, Aerosol Optical Depth and Precipitable Water
for PV Performance Modeling.
" IEEE Journal of Photovoltaics 2018, 8(2), 552-558.
https://doi.org/10.1109/jphotov.2017.2787019
"""
# --- Screen Input Data ---
# *** ama ***
# Replace Extremely High ama with ama 10 to prevent model divergence
# ama > 10 will only occur very close to sunset
if np.max(airmass_absolute) > 10:
airmass_absolute = np.minimum(airmass_absolute, 10)
# Warn user about ama data that is exceptionally low
if np.min(airmass_absolute) < 0.58:
warn('Exceptionally low air mass: ' +
'model not intended for extra-terrestrial use')
# pvl_absoluteairmass(1,pvl_alt2pres(4340)) = 0.58 Elevation of
# Mina Pirquita, Argentina = 4340 m. Highest elevation city with
# population over 50,000.
# *** aod500 ***
# Replace aod500 Values below 0.05 with 0.05 to prevent model from
# diverging"
aod500 = np.atleast_1d(aod500)
aod500 = aod500.astype('float64')
if np.min(aod500) < min_aod500:
aod500 = np.maximum(aod500, min_aod500)
warn(f'Exceptionally low aod values replaced with {min_aod500} to'
' prevent model divergence')
# Warn user about aod500 data that is exceptionally high
if np.max(aod500) > max_aod500:
aod500[aod500 > max_aod500] = np.nan
warn('Exceptionally high aod values replaced by np.nan: '
'check input data.')
# *** pw ***
# Replace pw Values below 0.25 cm with 0.25 cm to prevent model from
# diverging"
pw = np.atleast_1d(pw)
pw = pw.astype('float64')
if np.min(pw) < min_pw:
pw = np.maximum(pw, min_pw)
warn(f'Exceptionally low pw values replaced with {min_pw} cm to '
'prevent model divergence')
# Warn user about pw data that is exceptionally high
if np.max(pw) > max_pw:
pw[pw > max_pw] = np.nan
warn('Exceptionally high pw values replaced by np.nan: '
'check input data.')
# Experimental coefficients
_coefficients = {}
_coefficients['cdte'] = (
1.0044, 0.0095, -0.0037, 0.0002, 0.0000, -0.0046,
-0.0182, 0, 0.0095, 0.0068, 0, 1)
_coefficients['monosi'] = (
0.9706, 0.0377, -0.0123, 0.0025, -0.0002, 0.0159,
-0.0165, 0, -0.0016, -0.0027, 1, 0)
_coefficients['multisi'] = (
0.9836, 0.0254, -0.0085, 0.0016, -0.0001, 0.0094,
-0.0132, 0, -0.0002, -0.0011, 1, 0)
_coefficients['cigs'] = (
0.9801, 0.0283, -0.0092, 0.0019, -0.0001, 0.0117,
-0.0126, 0, -0.0011, -0.0019, 1, 0)
_coefficients['asi'] = (
1.1060, -0.0848, 0.0302, -0.0076, 0.0006, -0.12838,
0.0986, -0.0254, 0.0156, 0.0146, 1, 0)
_coefficients['perovskite'] = (
1.0637, -0.0491, 0.0180, -0.0047, 0.0004, -0.0773,
0.0583, -0.0159, 0.01251, 0.0109, 1, 0)
if module_type is not None and coefficients is None:
coefficients = _coefficients[module_type.lower()]
elif module_type is None and coefficients is not None:
pass
elif module_type is None and coefficients is None:
raise TypeError('No valid input provided, both module_type'
+ 'and coefficients are None')
else:
raise TypeError('Cannot resolve input, must supply only one'
+ 'of module_type and coefficients')
# Evaluate Spectral Shift
coeff = coefficients
ama = airmass_absolute
aod500_ref = 0.84
pw_ref = 1.42
modifier = (
coeff[0] + (ama) * coeff[1] + (ama * ama) * coeff[2]
+ (ama * ama * ama) * coeff[3] + (ama * ama * ama * ama) * coeff[4]
+ (aod500 - aod500_ref) * coeff[5]
+ ((aod500 - aod500_ref) * (ama) * coeff[6]) * coeff[10]
+ ((aod500 - aod500_ref) * (np.log(ama)) * coeff[6]) * coeff[11]
+ (aod500 - aod500_ref) + (ama * ama) * coeff[7]
+ (pw - pw_ref) * coeff[8] + (pw - pw_ref) * (np.log(ama)) * coeff[9])
return modifier
| def AM_AOD_PW_spectral_correction(airmass_absolute, aod500, pw,
module_type=None, coefficients=None,
min_aod500=0.05, max_aod500=0.6,
min_pw=0.25, max_pw=4):
r"""
Spectral mismatch modifier based on absolute (pressure-adjusted)
airmass (AM), aerosol optical depth (AOD) at 500 nm and
precipitable water (PW).
Estimates a spectral mismatch modifier :math:`M` representing
the effect on module short circuit current of variation in the
spectral irradiance, :math:`MM` is estimated from absolute
(pressure-adjusted) AM, :math:`ama`, AOD at 500 nm, :math:`aod500`
and PW, :math:`pw`.
The best fit polynomial for each atmospheric parameter (AM, AOD, PW)
and PV technology under study has been obtained from synthetic spectra
generated with SMARTS [1], considering the following boundary
conditions:
* :math:`1.0 <= ama <= 5.0`
* :math:`0.05 <= aod500 <= 0.6`
* :math:`0.25 \textrm{cm} <= pw <= 4 \textrm{cm}`
* Spectral range is limited to that of CMP11 (280 nm to 2800 nm)
* All other parameters fixed at G173 standard
Elevation (deg), AOD and PW data were recorded in the city of Jaén,
Spain for one year synchronously with both, broadband and
spectroradiometric measurements of 30º tilted global irradiance
south-facing logged in 5-min intervals. AM was estimated through
elevation data.
Finally, the spectral mismatch factor was calculated for each
of the PV technologies and a multivariable regression adjustment
as a function of AM, AOD and PW was performed according to [2] and [3].
As such, the polynomial adjustment coefficients included in [3]
were obtained.
Parameters
----------
airmass_absolute : array-like
absolute (pressure-adjusted) airmass. [unitless]
aod500 : array-like
atmospheric aerosol optical depth at 500 nm. [unitless]
pw : array-like
atmospheric precipitable water. [cm]
min_aod500 : float, default 0.05
minimum atmospheric aerosol optical depth at 500 nm. Any aod500 value
lower than min_aod500 is set to min_aod500 to avoid model
divergence. [unitless]
max_aod500 : float, default 0.6
maximum atmospheric aerosol optical depth at 500 nm. Any aod500 value
higher than max_aod500 is set to NaN to avoid model
divergence. [unitless]
min_pw : float, default 0.25
minimum atmospheric precipitable water. Any pw value lower than min_pw
is set to min_pw to avoid model divergence. [cm]
max_pw : float, default 4
maximum atmospheric precipitable water. Any pw value higher than max_pw
is set to NaN to avoid model divergence. [cm]
module_type : None or string, default None
a string specifying a cell type. Values of 'cdte', 'monosi', 'cigs',
'multisi', 'asi' and 'perovskite'. If provided,
module_type selects default coefficients for the following modules:
* 'cdte' - anonymous CdTe module.
* 'monosi', - anonymous sc-si module.
* 'multisi', - anonymous mc-si- module.
* 'cigs' - anonymous copper indium gallium selenide module.
* 'asi' - anonymous amorphous silicon module.
* 'perovskite' - anonymous perovskite module.
coefficients : None or array-like, default None
the coefficients employed have been obtained with experimental
data in the city of Jaén, Spain. It remains to be verified whether such
coefficients vary in places with extreme climates where AOD and
pw values are frequently high.
Returns
-------
modifier: array-like
spectral mismatch factor (unitless) which can be multiplied
with broadband irradiance reaching a module's cells to estimate
effective irradiance, i.e., the irradiance that is converted to
electrical current.
References
----------
.. [1] Gueymard, Christian. SMARTS2: a simple model of the
atmospheric radiative transfer of sunshine: algorithms
and performance assessment. Cocoa, FL:
Florida Solar Energy Center, 1995.
.. [2] Theristis, M., Fernández, E., Almonacid, F., and
Pérez-Higueras, Pedro. "Spectral Corrections Based
on Air Mass, Aerosol Optical Depth and Precipitable
Water for CPV Performance Modeling."
IEEE Journal of Photovoltaics 2016, 6(6), 1598-1604.
https://doi.org/10.1109/jphotov.2016.2606702
.. [3] Caballero, J.A., Fernández, E., Theristis, M.,
Almonacid, F., and Nofuentes, G. "Spectral Corrections Based on
Air Mass, Aerosol Optical Depth and Precipitable Water
for PV Performance Modeling."
IEEE Journal of Photovoltaics 2018, 8(2), 552-558.
https://doi.org/10.1109/jphotov.2017.2787019
"""
# --- Screen Input Data ---
# *** ama ***
# Replace Extremely High ama with ama 10 to prevent model divergence
# ama > 10 will only occur very close to sunset
if np.max(airmass_absolute) > 10:
airmass_absolute = np.minimum(airmass_absolute, 10)
# Warn user about ama data that is exceptionally low
if np.min(airmass_absolute) < 0.58:
warn('Exceptionally low air mass: ' +
'model not intended for extra-terrestrial use')
# pvl_absoluteairmass(1,pvl_alt2pres(4340)) = 0.58 Elevation of
# Mina Pirquita, Argentina = 4340 m. Highest elevation city with
# population over 50,000.
# *** aod500 ***
# Replace aod500 values below min_aod500 with min_aod500 to prevent the
# model from diverging
aod500 = np.atleast_1d(aod500)
aod500 = aod500.astype('float64')
if np.min(aod500) < min_aod500:
aod500 = np.maximum(aod500, min_aod500)
warn(f'Exceptionally low aod values replaced with {min_aod500} to '
'prevent model divergence')
# Warn user about aod500 data that is exceptionally high
if np.max(aod500) > max_aod500:
aod500[aod500 > max_aod500] = np.nan
warn('Exceptionally high aod values replaced by np.nan: '
'check input data.')
# *** pw ***
# Replace pw values below min_pw with min_pw to prevent the model from
# diverging
pw = np.atleast_1d(pw)
pw = pw.astype('float64')
if np.min(pw) < min_pw:
pw = np.maximum(pw, min_pw)
warn(f'Exceptionally low pw values replaced with {min_pw} cm to '
'prevent model divergence')
# Warn user about pw data that is exceptionally high
if np.max(pw) > max_pw:
pw[pw > max_pw] = np.nan
warn('Exceptionally high pw values replaced by np.nan: '
'check input data.')
# Experimental coefficients
_coefficients = {}
_coefficients['cdte'] = (
1.0044, 0.0095, -0.0037, 0.0002, 0.0000, -0.0046,
-0.0182, 0, 0.0095, 0.0068, 0, 1)
_coefficients['monosi'] = (
0.9706, 0.0377, -0.0123, 0.0025, -0.0002, 0.0159,
-0.0165, 0, -0.0016, -0.0027, 1, 0)
_coefficients['multisi'] = (
0.9836, 0.0254, -0.0085, 0.0016, -0.0001, 0.0094,
-0.0132, 0, -0.0002, -0.0011, 1, 0)
_coefficients['cigs'] = (
0.9801, 0.0283, -0.0092, 0.0019, -0.0001, 0.0117,
-0.0126, 0, -0.0011, -0.0019, 1, 0)
_coefficients['asi'] = (
1.1060, -0.0848, 0.0302, -0.0076, 0.0006, -0.12838,
0.0986, -0.0254, 0.0156, 0.0146, 1, 0)
_coefficients['perovskite'] = (
1.0637, -0.0491, 0.0180, -0.0047, 0.0004, -0.0773,
0.0583, -0.0159, 0.01251, 0.0109, 1, 0)
if module_type is not None and coefficients is None:
coefficients = _coefficients[module_type.lower()]
elif module_type is None and coefficients is not None:
pass
elif module_type is None and coefficients is None:
raise TypeError('No valid input provided, both module_type '
+ 'and coefficients are None')
else:
raise TypeError('Cannot resolve input, must supply only one '
+ 'of module_type and coefficients')
# Evaluate Spectral Shift
coeff = coefficients
ama = airmass_absolute
aod500_ref = 0.84
pw_ref = 1.42
modifier = (
coeff[0] + (ama) * coeff[1] + (ama * ama) * coeff[2]
+ (ama * ama * ama) * coeff[3] + (ama * ama * ama * ama) * coeff[4]
+ (aod500 - aod500_ref) * coeff[5]
+ ((aod500 - aod500_ref) * (ama) * coeff[6]) * coeff[10]
+ ((aod500 - aod500_ref) * (np.log(ama)) * coeff[6]) * coeff[11]
+ (aod500 - aod500_ref) + (ama * ama) * coeff[7]
+ (pw - pw_ref) * coeff[8] + (pw - pw_ref) * (np.log(ama)) * coeff[9])
return modifier
|
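As a usage sketch only (not part of the dataset entry), the polynomial in the function body can be evaluated directly for one atmospheric condition. The 'multisi' coefficients and reference values are copied from the code above, the sample inputs are made up, and the expression mirrors the entry verbatim, including its standalone (aod500 - aod500_ref) term.

import numpy as np

# 'multisi' coefficients copied from the entry above; the sample atmospheric
# values below are made up for illustration.
coeff = (0.9836, 0.0254, -0.0085, 0.0016, -0.0001, 0.0094,
         -0.0132, 0, -0.0002, -0.0011, 1, 0)
ama, aod500, pw = 1.5, 0.1, 1.0      # absolute airmass, AOD at 500 nm, PW [cm]
aod500_ref, pw_ref = 0.84, 1.42      # reference values used in the function body

modifier = (
    coeff[0] + ama * coeff[1] + ama**2 * coeff[2]
    + ama**3 * coeff[3] + ama**4 * coeff[4]
    + (aod500 - aod500_ref) * coeff[5]
    + (aod500 - aod500_ref) * ama * coeff[6] * coeff[10]
    + (aod500 - aod500_ref) * np.log(ama) * coeff[6] * coeff[11]
    + (aod500 - aod500_ref) + ama**2 * coeff[7]
    + (pw - pw_ref) * coeff[8] + (pw - pw_ref) * np.log(ama) * coeff[9])
print(modifier)   # spectral mismatch factor for the sample condition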
2,447 | def linear_kernel(X, Y=None, dense_output=True):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
Array of pairwise kernels between samples, or a feature array.
Y : ndarray of shape (n_samples_Y, n_features), default=None
A second feature array. If `None`, uses `Y=X`.
dense_output : bool, default=True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.20
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
Gram matrix X.T * Y.
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=dense_output)
| def linear_kernel(X, Y=None, dense_output=True):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_X, n_features)
A feature array.
Y : ndarray of shape (n_samples_Y, n_features), default=None
A second feature array. If `None`, uses `Y=X`.
dense_output : bool, default=True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.20
Returns
-------
Gram matrix : ndarray of shape (n_samples_X, n_samples_Y)
Gram matrix X.T * Y.
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=dense_output)
|
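A minimal check, assuming nothing beyond NumPy, that the linear kernel reduces to the matrix of pairwise dot products X @ Y.T (the dense equivalent of the safe_sparse_dot call above):

import numpy as np

X = np.array([[1.0, 2.0], [3.0, 4.0]])   # (n_samples_X, n_features)
Y = np.array([[0.5, -1.0]])              # (n_samples_Y, n_features)

gram = X @ Y.T                           # Gram matrix of pairwise dot products
print(gram)                              # [[-1.5], [-2.5]]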
38,819 | def patch_links_index(link, out_dir=OUTPUT_DIR):
"""hack to in-place update one row's info in the generated index html"""
title = link['title'] or latest_output(link)['title']
successful = len(tuple(filter(None, latest_output(link).values())))
# Patch JSON index
changed = False
json_file_links = parse_json_links_index(out_dir)
for saved_link in json_file_links:
if saved_link['url'] == link['url']:
saved_link['title'] = title
saved_link['history'] = link['history']
changed = True
break
if changed:
write_json_links_index(out_dir, json_file_links)
# Patch HTML index
html_path = os.path.join(out_dir, 'index.html')
with open(html_path, 'r') as html_file:
html = [line[:-1] for line in html_file]
for idx, line in enumerate(html):
if title and ('<span data-title-for="{}"'.format(link['url']) in line):
html[idx] = '<span>{}</span>'.format(title)
elif successful and ('<span data-number-for="{}"'.format(link['url']) in line):
html[idx] = '<span>{}</span>'.format(successful)
break
with open(html_path, 'w') as f:
f.write('\n'.join(html))
| def patch_links_index(link, out_dir=OUTPUT_DIR):
"""hack to in-place update one row's info in the generated index html"""
title = link['title'] or latest_output(link)['title']
successful = len(tuple(filter(None, latest_output(link).values())))
# Patch JSON index
changed = False
json_file_links = parse_json_links_index(out_dir)
for saved_link in json_file_links:
if saved_link['url'] == link['url']:
saved_link['title'] = title
saved_link['history'] = link['history']
changed = True
break
if changed:
write_json_links_index(out_dir, json_file_links)
# Patch HTML index
html_path = os.path.join(out_dir, 'index.html')
with open(html_path, 'r') as html_file:
html = html_file.read().splitlines()
for idx, line in enumerate(html):
if title and ('<span data-title-for="{}"'.format(link['url']) in line):
html[idx] = '<span>{}</span>'.format(title)
elif successful and ('<span data-number-for="{}"'.format(link['url']) in line):
html[idx] = '<span>{}</span>'.format(successful)
break
with open(html_path, 'w') as f:
f.write('\n'.join(html))
|
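A self-contained illustration (hypothetical file content) of why the modified version switches to read().splitlines(): slicing one character off each line silently truncates the last line when the file lacks a trailing newline.

import io

content = "row one\nrow two"                 # no trailing newline on the last line

f = io.StringIO(content)
sliced = [line[:-1] for line in f]           # original approach: chops the final 'o'
f = io.StringIO(content)
split = f.read().splitlines()                # modified approach: keeps lines intact

print(sliced)   # ['row one', 'row tw']
print(split)    # ['row one', 'row two']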
15,106 | def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the sensor platform."""
if discovery_info is None:
return
if os.path.isfile(SYSFILE):
under_voltage = UnderVoltage()
elif os.path.isfile(SYSFILE_LEGACY): # support older kernel
under_voltage = UnderVoltage(legacy=True)
else:
_LOGGER.critical(
"Can't find the system class needed for this component, make sure that your kernel is recent and the hardware is supported."
)
return
add_entities([RaspberryChargerBinarySensor(under_voltage)])
| def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the sensor platform."""
if discovery_info is None:
return
if os.path.isfile(SYSFILE):
under_voltage = UnderVoltage()
elif os.path.isfile(SYSFILE_LEGACY): # support older kernel
under_voltage = UnderVoltage(legacy=True)
else:
_LOGGER.error(
"Can't find the system class needed for this component, make sure that your kernel is recent and the hardware is supported."
)
return
add_entities([RaspberryChargerBinarySensor(under_voltage)])
|
53,191 | def byte_to_mebibyte(byte):
return byte / (1024 * 1024) | def byte_to_mebibyte(byte):
return byte / (1024 * 1024)
|
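A quick worked example of the conversion, since 1 MiB = 1024 * 1024 bytes:

def byte_to_mebibyte(byte):
    return byte / (1024 * 1024)

print(byte_to_mebibyte(1048576))     # 1.0 MiB
print(byte_to_mebibyte(5 * 2**30))   # 5120.0 MiB, i.e. 5 GiB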
7,192 | def corner_peaks(image, min_distance=1, threshold_abs=None, threshold_rel=None,
exclude_border=True, indices=True, num_peaks=np.inf,
footprint=None, labels=None, *, num_peaks_per_label=np.inf,
p=np.inf):
"""Find corners in corner measure response image.
This differs from `skimage.feature.peak_local_max` in that it suppresses
multiple connected peaks with the same accumulator value.
Parameters
----------
image : ndarray
Input image.
min_distance : int, optional
The minimum distance seperating peaks. Use the ``p`` argument
to set the Minkowski p-norm defining the distance.
* : *
See :py:meth:`skimage.feature.peak_local_max`.
p : float
Which Minkowski p-norm to use. Should be in the range [1, inf].
A finite large p may cause a ValueError if overflow can occur.
inf corresponds to the chebychev distance and 2 to the
euclidean distance.
Returns
-------
output : ndarray or ndarray of bools
* If `indices = True` : (row, column, ...) coordinates of peaks.
* If `indices = False` : Boolean array shaped like `image`, with peaks
represented by True values.
See also
--------
skimage.feature.peak_local_max
Notes
-----
The `num_peaks` limit is applied before suppression of
connected peaks. If you want to limit the number of peaks
after suppression, you should set `num_peaks=np.inf` and
post-process the output of this function.
Examples
--------
>>> from skimage.feature import peak_local_max
>>> response = np.zeros((5, 5))
>>> response[2:4, 2:4] = 1
>>> response
array([[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 1., 1., 0.],
[0., 0., 1., 1., 0.],
[0., 0., 0., 0., 0.]])
>>> peak_local_max(response)
array([[3, 3],
[3, 2],
[2, 3],
[2, 2]])
>>> corner_peaks(response)
array([[3, 3]])
"""
if threshold_rel is None:
threshold_rel = 0.1
warn("Until the version 0.16, threshold_rel was set to 0.1 by default."
"Starting from version 0.16, the default value is set to None."
"Until version 0.18, a None value corresponds to a threshold "
"value of 0.1. The default behavior will match "
"skimage.feature.peak_local_max.", category=FutureWarning,
stacklevel=2)
# Get the coordinates of the detected peaks
coords = peak_local_max(image, min_distance=min_distance,
threshold_abs=threshold_abs,
threshold_rel=threshold_rel,
exclude_border=exclude_border,
indices=True, num_peaks=num_peaks,
footprint=footprint, labels=labels,
num_peaks_per_label=num_peaks_per_label)
if len(coords):
# Use KDtree to find the peaks that are too close to each others
tree = spatial.cKDTree(coords)
rejected_peaks = set()
for idx, point in enumerate(coords):
if idx not in rejected_peaks:
candidates = tree.query_ball_point(point, r=min_distance, p=p)
candidates.remove(idx)
rejected_peaks.update(candidates)
# Remove the peaks that are too close to each others
coords = np.delete(coords, tuple(rejected_peaks), axis=0)[::-1]
if indices is True:
return coords
peaks = np.zeros_like(image, dtype=bool)
peaks[tuple(coords.T)] = True
return peaks
| def corner_peaks(image, min_distance=1, threshold_abs=None, threshold_rel=None,
exclude_border=True, indices=True, num_peaks=np.inf,
footprint=None, labels=None, *, num_peaks_per_label=np.inf,
p=np.inf):
"""Find corners in corner measure response image.
This differs from `skimage.feature.peak_local_max` in that it suppresses
multiple connected peaks with the same accumulator value.
Parameters
----------
image : ndarray
Input image.
min_distance : int, optional
The minimum distance separating peaks. Use the ``p`` argument
to set the Minkowski p-norm defining the distance.
* : *
See :py:meth:`skimage.feature.peak_local_max`.
p : float
Which Minkowski p-norm to use. Should be in the range [1, inf].
A finite large p may cause a ValueError if overflow can occur.
inf corresponds to the Chebyshev distance and 2 to the
euclidean distance.
Returns
-------
output : ndarray or ndarray of bools
* If `indices = True` : (row, column, ...) coordinates of peaks.
* If `indices = False` : Boolean array shaped like `image`, with peaks
represented by True values.
See also
--------
skimage.feature.peak_local_max
Notes
-----
The `num_peaks` limit is applied before suppression of
connected peaks. If you want to limit the number of peaks
after suppression, you should set `num_peaks=np.inf` and
post-process the output of this function.
Examples
--------
>>> from skimage.feature import peak_local_max
>>> response = np.zeros((5, 5))
>>> response[2:4, 2:4] = 1
>>> response
array([[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 1., 1., 0.],
[0., 0., 1., 1., 0.],
[0., 0., 0., 0., 0.]])
>>> peak_local_max(response)
array([[3, 3],
[3, 2],
[2, 3],
[2, 2]])
>>> corner_peaks(response)
array([[3, 3]])
"""
if threshold_rel is None:
threshold_rel = 0.1
warn("Until the version 0.16, threshold_rel was set to 0.1 by default."
"Starting from version 0.16, the default value is set to None."
"Until version 0.18, a None value corresponds to a threshold "
"value of 0.1. The default behavior will match "
"skimage.feature.peak_local_max.", category=FutureWarning,
stacklevel=2)
# Get the coordinates of the detected peaks
coords = peak_local_max(image, min_distance=min_distance,
threshold_abs=threshold_abs,
threshold_rel=threshold_rel,
exclude_border=exclude_border,
indices=True, num_peaks=num_peaks,
footprint=footprint, labels=labels,
num_peaks_per_label=num_peaks_per_label)
if len(coords):
# Use a KD-tree to find the peaks that are too close to each other
tree = spatial.cKDTree(coords)
rejected_peaks = set()
for idx, point in enumerate(coords):
if idx not in rejected_peaks:
candidates = tree.query_ball_point(point, r=min_distance, p=p)
candidates.remove(idx)
rejected_peaks.update(candidates)
# Remove the peaks that are too close to each other
coords = np.delete(coords, tuple(rejected_peaks), axis=0)[::-1]
if indices is True:
return coords
peaks = np.zeros_like(image, dtype=bool)
peaks[tuple(coords.T)] = True
return peaks
|
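A standalone sketch of the KD-tree suppression step used above, with made-up peak coordinates; only the first peak of each cluster within min_distance (Chebyshev norm here) survives.

import numpy as np
from scipy import spatial

coords = np.array([[2, 2], [2, 3], [3, 2], [3, 3], [10, 10]])   # candidate peaks
min_distance, p = 1, np.inf                                     # Chebyshev radius of 1

tree = spatial.cKDTree(coords)
rejected = set()
for idx, point in enumerate(coords):
    if idx not in rejected:
        candidates = tree.query_ball_point(point, r=min_distance, p=p)
        candidates.remove(idx)          # keep the current peak itself
        rejected.update(candidates)

kept = np.delete(coords, tuple(rejected), axis=0)
print(kept)   # the (2, 2) cluster collapses to one peak; (10, 10) survives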
55,680 | def worker_init_fn(worker_id: int, num_workers: int, rank: int, seed: int):
"""Function to initialize each worker. The seed of each worker equals to.
``num_worker * rank + worker_id + user_seed``.
Args:
worker_id (int): Id for each worker.
num_workers (int): Number of workers.
rank (int): Rank in distributed training.
seed (int): Random seed.
"""
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
torch.manual_seed(worker_seed)
| def worker_init_fn(worker_id: int, num_workers: int, rank: int, seed: int):
"""Function to initialize each worker.
The seed of each worker equals to ``num_worker * rank + worker_id + user_seed``.
Args:
worker_id (int): Id for each worker.
num_workers (int): Number of workers.
rank (int): Rank in distributed training.
seed (int): Random seed.
"""
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
torch.manual_seed(worker_seed)
|
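A pure-Python sketch of how the seeding formula spreads distinct seeds across ranks and workers (the actual NumPy/random/torch seeding calls are omitted):

num_workers, user_seed = 4, 42

for rank in range(2):                      # two distributed ranks
    for worker_id in range(num_workers):
        worker_seed = num_workers * rank + worker_id + user_seed
        print(rank, worker_id, worker_seed)
# rank 0 -> seeds 42..45, rank 1 -> seeds 46..49: no two workers share a seed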
12,128 | def _block2event(block, seed_map, id_default, ph2comp):
"""
Read HypoDD event block
"""
lines = block.strip().splitlines()
yr, mo, dy, hr, mn, sc, la, lo, dp, mg, eh, ez, rms, id_ = lines[0].split()
time = UTCDateTime('{}-{}-{} {}-{}-{}'.format(yr, mo, dy, hr, mn, sc))
picks = []
arrivals = []
for line in lines[1:]:
sta, reltime, weight, phase = line.split()
comp = ph2comp.get(phase, '')
wid = seed_map.get(sta, id_default)
_waveform_id = WaveformStreamID(seed_string=wid.format(sta, comp))
pick = Pick(waveform_id=_waveform_id, phase_hint=phase,
time=time + float(reltime))
arrival = Arrival(phase=phase, pick_id=pick.resource_id,
time_weight=float(weight))
picks.append(pick)
arrivals.append(arrival)
qu = None if rms == '0.0' else OriginQuality(standard_error=float(rms))
origin = Origin(arrivals=arrivals,
quality=qu,
latitude=float(la),
longitude=float(lo),
depth=1000 * float(dp),
time=time)
magnitude = Magnitude(mag=mg)
event = Event(resource_id=id_,
picks=picks,
origins=[origin],
magnitudes=[magnitude],
preferred_origin_id=origin.resource_id,
preferred_magnitude_id=magnitude.resource_id)
return event
| def _block2event(block, seed_map, id_default, ph2comp):
"""
Read HypoDD event block
"""
lines = block.strip().splitlines()
yr, mo, dy, hr, mn, sc, la, lo, dp, mg, eh, ez, rms, id_ = lines[0].split()
time = UTCDateTime('{}-{}-{} {}-{}-{}'.format(yr, mo, dy, hr, mn, sc))
picks = []
arrivals = []
for line in lines[1:]:
sta, reltime, weight, phase = line.split()
comp = ph2comp.get(phase, '')
wid = seed_map.get(sta, id_default)
_waveform_id = WaveformStreamID(seed_string=wid.format(sta, comp))
pick = Pick(waveform_id=_waveform_id, phase_hint=phase,
time=time + float(reltime))
arrival = Arrival(phase=phase, pick_id=pick.resource_id,
time_weight=float(weight))
picks.append(pick)
arrivals.append(arrival)
qu = None if rms == '0.0' else OriginQuality(standard_error=float(rms))
origin = Origin(arrivals=arrivals,
quality=qu,
latitude=float(la),
longitude=float(lo),
depth=1000 * float(dp),
time=time)
magnitude = Magnitude(mag=mg)
event = Event(resource_id="smi:local/event/" + id_,
picks=picks,
origins=[origin],
magnitudes=[magnitude],
preferred_origin_id=origin.resource_id,
preferred_magnitude_id=magnitude.resource_id)
return event
|
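For illustration only, a hypothetical HypoDD header line split into the fourteen fields consumed above; the values and the event id are made up.

header = "2004 01 17 23 01 02.3 37.112 -117.598 5.4 2.1 0.2 0.3 0.05 1001"
yr, mo, dy, hr, mn, sc, la, lo, dp, mg, eh, ez, rms, id_ = header.split()

print(float(la), float(lo), 1000 * float(dp))   # latitude, longitude, depth in metres
print("smi:local/event/" + id_)                 # resource id prefix added in the modified version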
27,665 | def cross_chan_correlation(
st1, streams, shift_len=0.0, allow_individual_trace_shifts=True,
xcorr_func='fftw', concurrency="concurrent", cores=1, **kwargs):
"""
Calculate cross-channel correlation.
Determine the cross-channel correlation between two streams of
multichannel seismic data.
:type st1: obspy.core.stream.Stream
:param st1: Stream one
:type streams: list
:param streams: Streams to compare to.
:type shift_len: float
:param shift_len:
Seconds to shift the streams by (total value for negative and positive
direction together)
:type allow_individual_trace_shifts: bool
:param allow_individual_trace_shifts:
Controls whether templates are shifted by shift_len in relation to the
picks as a whole, or whether each trace can be shifted individually.
Defaults to True.
:type xcorr_func: str, callable
:param xcorr_func:
The method for performing correlations. Accepts either a string or
callable. See :func:`eqcorrscan.utils.correlate.register_array_xcorr`
for more details
:type concurrency: str
:param concurrency: Concurrency for xcorr-func.
:type cores: int
:param cores: Number of threads to parallel over
:returns:
cross channel correlation, float - normalized by number of channels.
locations of maximums
:rtype: numpy.ndarray, numpy.ndarray
.. Note::
If no matching channels were found then the coherance and index for
that stream will be nan.
"""
# Cut all channels in stream-list to be the correct length (shorter than
# st1 if stack = False by shift_len).
allow_individual_trace_shifts =\
allow_individual_trace_shifts and shift_len > 0
n_streams = len(streams)
df = st1[0].stats.sampling_rate
end_trim = int((shift_len * df) / 2)
_streams = []
if end_trim > 0:
for stream in streams:
_stream = stream.copy() # Do not work on the users data
for tr in _stream:
tr.data = tr.data[end_trim: -end_trim]
if tr.stats.sampling_rate != df:
raise NotImplementedError("Sampling rates differ")
_streams.append(_stream)
streams = _streams
else:
# _prep_data_for_correlation works in place on data.
# We need to copy it first.
streams = [stream.copy() for stream in streams]
# Check which channels are in st1 and match those in the stream_list
st1, prep_streams, stream_indexes = _prep_data_for_correlation(
stream=st1.copy(), templates=streams,
template_names=list(range(len(streams))), force_stream_epoch=False)
# Run the correlations
multichannel_normxcorr = get_stream_xcorr(xcorr_func, concurrency)
[cccsums, no_chans, _] = multichannel_normxcorr(
templates=prep_streams, stream=st1, cores=cores, stack=False, **kwargs)
# Find maximas, sum and divide by no_chans
if allow_individual_trace_shifts:
coherances = cccsums.max(axis=-1).sum(axis=-1) / no_chans
else:
cccsums = cccsums.sum(axis=1)
coherances = cccsums.max(axis=-1) / no_chans
# Subtract half length of correlogram and convert positions to seconds
positions = (cccsums.argmax(axis=-1) - end_trim) / df
# This section re-orders the coherences to correspond to the order of the
# input streams
_coherances = np.empty(n_streams)
if allow_individual_trace_shifts:
n_max_traces = max([len(st) for st in streams])
n_shifts_per_stream = positions.shape[1]
_positions = np.empty([positions.shape[0], n_max_traces])
else:
# _positions = np.empty_like(positions)
_positions = np.empty([positions.shape[0], 1])
n_shifts_per_stream = 1
_coherances.fill(np.nan)
_positions.fill(np.nan)
for coh_ind, stream_ind in enumerate(stream_indexes):
_coherances[stream_ind] = coherances[coh_ind]
_positions[stream_ind, :n_shifts_per_stream] = positions[coh_ind]
if not allow_individual_trace_shifts: # remove empty third axis from array
_positions = _positions[:, ]
return _coherances, _positions
| def cross_chan_correlation(
st1, streams, shift_len=0.0, allow_individual_trace_shifts=True,
xcorr_func='fftw', concurrency="concurrent", cores=1, **kwargs):
"""
Calculate cross-channel correlation.
Determine the cross-channel correlation between two streams of
multichannel seismic data.
:type st1: obspy.core.stream.Stream
:param st1: Stream one
:type streams: list
:param streams: Streams to compare to.
:type shift_len: float
:param shift_len:
Seconds to shift the streams by (total value for negative and positive
direction together)
:type allow_individual_trace_shifts: bool
:param allow_individual_trace_shifts:
Controls whether templates are shifted by shift_len in relation to the
picks as a whole, or whether each trace can be shifted individually.
Defaults to True.
:type xcorr_func: str, callable
:param xcorr_func:
The method for performing correlations. Accepts either a string or
callable. See :func:`eqcorrscan.utils.correlate.register_array_xcorr`
for more details
:type concurrency: str
:param concurrency: Concurrency for xcorr-func.
:type cores: int
:param cores: Number of threads to parallel over
:returns:
cross channel correlation, float - normalized by number of channels.
locations of maximums
:rtype: numpy.ndarray, numpy.ndarray
.. Note::
If no matching channels were found then the coherence and index for
that stream will be nan.
"""
# Cut all channels in stream-list to be the correct length (shorter than
# st1 if stack = False by shift_len).
allow_individual_trace_shifts =\
allow_individual_trace_shifts and shift_len > 0
n_streams = len(streams)
df = st1[0].stats.sampling_rate
end_trim = int((shift_len * df) / 2)
_streams = []
if end_trim > 0:
for stream in streams:
_stream = stream.copy() # Do not work on the users data
for tr in _stream:
tr.data = tr.data[end_trim: -end_trim]
if tr.stats.sampling_rate != df:
raise NotImplementedError("Sampling rates differ")
_streams.append(_stream)
streams = _streams
else:
# _prep_data_for_correlation works in place on data.
# We need to copy it first.
streams = [stream.copy() for stream in streams]
# Check which channels are in st1 and match those in the stream_list
st_preped, prep_streams, stream_indexes = _prep_data_for_correlation(
stream=st1.copy(), templates=streams,
template_names=list(range(len(streams))), force_stream_epoch=False)
# Run the correlations
multichannel_normxcorr = get_stream_xcorr(xcorr_func, concurrency)
[cccsums, no_chans, _] = multichannel_normxcorr(
templates=prep_streams, stream=st1, cores=cores, stack=False, **kwargs)
# Find maxima, sum and divide by no_chans
if allow_individual_trace_shifts:
coherances = cccsums.max(axis=-1).sum(axis=-1) / no_chans
else:
cccsums = cccsums.sum(axis=1)
coherances = cccsums.max(axis=-1) / no_chans
# Subtract half length of correlogram and convert positions to seconds
positions = (cccsums.argmax(axis=-1) - end_trim) / df
# This section re-orders the coherences to correspond to the order of the
# input streams
_coherances = np.empty(n_streams)
if allow_individual_trace_shifts:
n_max_traces = max([len(st) for st in streams])
n_shifts_per_stream = positions.shape[1]
_positions = np.empty([positions.shape[0], n_max_traces])
else:
# _positions = np.empty_like(positions)
_positions = np.empty([positions.shape[0], 1])
n_shifts_per_stream = 1
_coherances.fill(np.nan)
_positions.fill(np.nan)
for coh_ind, stream_ind in enumerate(stream_indexes):
_coherances[stream_ind] = coherances[coh_ind]
_positions[stream_ind, :n_shifts_per_stream] = positions[coh_ind]
if not allow_individual_trace_shifts: # remove empty third axis from array
_positions = _positions[:, ]
return _coherances, _positions
|
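A NumPy-only sketch (made-up values) of the final re-ordering step: results come back in prepared-stream order and are scattered back into the original stream order, with NaN for streams that had no matching channels.

import numpy as np

n_streams = 4                             # number of input streams
stream_indexes = [2, 0]                   # only streams 2 and 0 had matching channels
coherances = np.array([0.81, 0.65])       # results in prepared-stream order

_coherances = np.full(n_streams, np.nan)
for coh_ind, stream_ind in enumerate(stream_indexes):
    _coherances[stream_ind] = coherances[coh_ind]

print(_coherances)   # [0.65  nan  0.81  nan]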
10,335 | def main():
protocols = [
'http',
'https',
'email',
'email_json',
'sms',
'sqs',
'application',
'lambda',
]
argument_spec = dict(
msg=dict(required=True, aliases=['default']),
subject=dict(),
topic=dict(required=True),
message_attributes=dict(type='dict'),
message_structure=dict(choices=['json', 'string'], default='json'),
)
for p in protocols:
argument_spec[p] = dict()
module = AnsibleAWSModule(argument_spec=argument_spec)
sns_kwargs = dict(
Message=module.params['msg'],
MessageStructure=module.params['message_structure']
)
if module.params['subject']:
sns_kwargs.update({"Subject": module.params['subject']})
if module.params['message_attributes']:
if module.params['message_structure'] != 'string':
module.fail_json(msg='message_attributes is only supported when the message_structure is "string".')
sns_kwargs['MessageAttributes'] = module.params['message_attributes']
dict_msg = {
'default': sns_kwargs['Message']
}
for p in protocols:
if module.params[p]:
if sns_kwargs['MessageStructure'] != 'json':
module.fail_json(msg='Protocol-specific messages are only supported when message_structure is "json".')
dict_msg[p.replace('_', '-')] = module.params[p]
client = module.client('sns')
topic = module.params['topic']
if ':' in topic:
# Short names can't contain ':' so we'll assume this is the full ARN
sns_kwargs['TopicArn'] = topic
else:
sns_kwargs['TopicArn'] = arn_topic_lookup(module, client, topic)
if not sns_kwargs['TopicArn']:
module.fail_json(msg='Could not find topic: {0}'.format(topic))
if sns_kwargs['MessageStructure'] == 'json':
sns_kwargs['Message'] = json.dumps(dict_msg)
try:
result = client.publish(**sns_kwargs)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to publish message')
module.exit_json(msg='OK', message_id=result['MessageId'])
| def main():
protocols = [
'http',
'https',
'email',
'email_json',
'sms',
'sqs',
'application',
'lambda',
]
argument_spec = dict(
msg=dict(required=True, aliases=['default']),
subject=dict(),
topic=dict(required=True),
message_attributes=dict(type='dict'),
message_structure=dict(choices=['json', 'string'], default='json'),
)
for p in protocols:
argument_spec[p] = dict()
module = AnsibleAWSModule(argument_spec=argument_spec)
sns_kwargs = dict(
Message=module.params['msg'],
MessageStructure=module.params['message_structure'],
)
if module.params['subject']:
sns_kwargs.update({"Subject": module.params['subject']})
if module.params['message_attributes']:
if module.params['message_structure'] != 'string':
module.fail_json(msg='message_attributes is only supported when the message_structure is "string".')
sns_kwargs['MessageAttributes'] = module.params['message_attributes']
dict_msg = {
'default': sns_kwargs['Message']
}
for p in protocols:
if module.params[p]:
if sns_kwargs['MessageStructure'] != 'json':
module.fail_json(msg='Protocol-specific messages are only supported when message_structure is "json".')
dict_msg[p.replace('_', '-')] = module.params[p]
client = module.client('sns')
topic = module.params['topic']
if ':' in topic:
# Short names can't contain ':' so we'll assume this is the full ARN
sns_kwargs['TopicArn'] = topic
else:
sns_kwargs['TopicArn'] = arn_topic_lookup(module, client, topic)
if not sns_kwargs['TopicArn']:
module.fail_json(msg='Could not find topic: {0}'.format(topic))
if sns_kwargs['MessageStructure'] == 'json':
sns_kwargs['Message'] = json.dumps(dict_msg)
try:
result = client.publish(**sns_kwargs)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to publish message')
module.exit_json(msg='OK', message_id=result['MessageId'])
|
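A sketch, with made-up parameter values, of the JSON message body assembled when message_structure is 'json': the default text plus any protocol-specific overrides, with underscores in protocol names mapped to hyphens.

import json

# hypothetical module parameters for illustration
params = {"msg": "service restarted", "email": None, "email_json": None, "sms": "svc up"}
protocols = ["email", "email_json", "sms"]

dict_msg = {"default": params["msg"]}
for p in protocols:
    if params[p]:
        dict_msg[p.replace("_", "-")] = params[p]

print(json.dumps(dict_msg))   # {"default": "service restarted", "sms": "svc up"}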
5,738 | def load_complex(folder, precision):
file = 'mhd1280b.cua'
dtype = _dtype_map[precision]
path = os.path.join(folder, precision, 'Examples', file)
with open(path, "r") as f:
contents = f.readlines()
file_metadata = contents[1].split()
matrix_metadata = contents[2].split()
datum_length = 15 # hard code rather than getting from contents[3]
n_header = 4
n_total, n_indptr, n_indices, n_data, _ = (int(n) for n in file_metadata)
m, n, nnz, _ = (int(n) for n in matrix_metadata[1:])
line_indptr = n_header
line_indices = line_indptr + n_indptr
line_data = line_indices + n_indices
def _concatenate_lines(lines):
return "".join([line.rstrip() for line in lines])
indptr = _concatenate_lines(contents[line_indptr:line_indices])
indptr = np.asarray([int(i) for i in indptr.split()])-1
indices = _concatenate_lines(contents[line_indices:line_data])
indices = np.asarray([int(i) for i in indices.split()])-1
data = _concatenate_lines(contents[line_data:])
data = np.asarray([float(data[i:i+datum_length])
for i in range(0, len(data), datum_length)])
real, imag= data[::2], data[1::2]
data = real + imag*1.0j
data.astype(dtype)
return csc_matrix((data, indices, indptr), (m, n))
| def load_complex(folder, precision):
file = 'mhd1280b.cua'
dtype = _dtype_map[precision]
path = os.path.join(folder, precision, 'Examples', file)
with open(path, "r") as f:
contents = f.readlines()
file_metadata = contents[1].split()
matrix_metadata = contents[2].split()
datum_length = 15 # hard code rather than getting from contents[3]
n_header = 4
n_total, n_indptr, n_indices, n_data, _ = (int(n) for n in file_metadata)
m, n, nnz, _ = (int(n) for n in matrix_metadata[1:])
line_indptr = n_header
line_indices = line_indptr + n_indptr
line_data = line_indices + n_indices
def _concatenate_lines(lines):
return "".join([line.rstrip() for line in lines])
indptr = _concatenate_lines(contents[line_indptr:line_indices])
indptr = np.asarray([int(i) for i in indptr.split()])-1
indices = _concatenate_lines(contents[line_indices:line_data])
indices = np.asarray([int(i) for i in indices.split()])-1
data = _concatenate_lines(contents[line_data:])
data = np.asarray([float(data[i:i+datum_length])
for i in range(0, len(data), datum_length)])
real, imag = data[::2], data[1::2]
data = real + imag*1.0j
data.astype(dtype)
return csc_matrix((data, indices, indptr), (m, n))
|
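A small standalone sketch (numbers made up) of the final de-interleaving step above: the parsed fixed-width values alternate real and imaginary parts, so the complex vector is built from every other element.

import numpy as np

raw = np.array([1.0, -0.5, 2.0, 0.25])   # parsed numbers: re, im, re, im, ...
real, imag = raw[::2], raw[1::2]
data = real + imag * 1.0j

print(data)   # [1.-0.5j  2.+0.25j]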
3,324 | def get_performance_facets(
query,
params,
orderby=None,
aggregate_column="duration",
aggregate_function="avg",
limit=20,
referrer=None,
):
"""
High-level API for getting 'facet map' results for performance data
Performance facets are high frequency tags and the aggregate duration of
their most frequent values
query (str) Filter query string to create conditions from.
params (Dict[str, str]) Filtering parameters with start, end, project_id, environment
limit (int) The number of records to fetch.
referrer (str|None) A referrer string to help locate the origin of this query.
Returns Sequence[FacetResult]
"""
with sentry_sdk.start_span(
op="discover.discover", description="facets.filter_transform"
) as span:
span.set_data("query", query)
snuba_filter = get_filter(query, params)
# Resolve the public aliases into the discover dataset names.
snuba_filter, translated_columns = resolve_discover_aliases(snuba_filter)
with sentry_sdk.start_span(op="discover.discover", description="facets.frequent_tags"):
# Get the most relevant tag keys
key_names = raw_query(
aggregations=[
[aggregate_function, aggregate_column, "aggregate"],
["count", None, "count"],
],
start=snuba_filter.start,
end=snuba_filter.end,
conditions=snuba_filter.conditions,
filter_keys=snuba_filter.filter_keys,
orderby=["-count"],
dataset=Dataset.Discover,
limit=limit,
referrer=referrer,
)
counts = [r["count"] for r in key_names["data"]]
if not counts:
return []
results = []
snuba_filter.conditions.append([aggregate_column, "IS NOT NULL", None])
# Aggregate for transaction
transaction_aggregate = key_names["data"][0]["aggregate"]
# Dynamically sample so at least 10000 transactions are selected
transaction_count = key_names["data"][0]["count"]
sampling_enabled = transaction_count > 10000
target_sample = 10000 * (math.log(transaction_count, 10) - 3) # Log growth starting at 10,000
dynamic_sample_rate = 0 if transaction_count <= 0 else (target_sample / transaction_count)
options_sample_rate = (
options.get("discover2.tags_performance_facet_sample_rate") or dynamic_sample_rate
)
sample_rate = options_sample_rate if sampling_enabled else None
sample_multiplier = sample_rate if sample_rate else 1
excluded_tags = [
"tags_key",
"NOT IN",
["trace", "trace.ctx", "trace.span", "project", "browser"],
]
with sentry_sdk.start_span(op="discover.discover", description="facets.aggregate_tags"):
conditions = snuba_filter.conditions
aggregate_comparison = transaction_aggregate * 1.01 if transaction_aggregate else 0
having = [excluded_tags]
if orderby and orderby in ("-sumdelta", "aggregate", "-aggregate"):
having.append(["aggregate", ">", aggregate_comparison])
if orderby is None:
orderby = []
else:
orderby = [orderby]
tag_values = raw_query(
selected_columns=[
[
"sum",
[
"minus",
[
aggregate_column,
str(transaction_aggregate),
],
],
"sumdelta",
],
],
aggregations=[
[aggregate_function, aggregate_column, "aggregate"],
["count", None, "cnt"],
],
conditions=conditions,
start=snuba_filter.start,
end=snuba_filter.end,
filter_keys=snuba_filter.filter_keys,
orderby=orderby + ["tags_key"],
groupby=["tags_key", "tags_value"],
having=having,
dataset=Dataset.Discover,
referrer=referrer,
sample=sample_rate,
turbo=sample_rate is not None,
limitby=[5, "tags_key"],
)
results.extend(
[
PerformanceFacetResult(
r["tags_key"],
r["tags_value"],
float(r["aggregate"]),
float(r["cnt"] / transaction_count),
float(r["aggregate"] / transaction_aggregate),
float(r["sumdelta"]),
)
for r in tag_values["data"]
]
)
return results
| def get_performance_facets(
query,
params,
orderby=None,
aggregate_column="duration",
aggregate_function="avg",
limit=20,
referrer=None,
):
"""
High-level API for getting 'facet map' results for performance data
Performance facets are high frequency tags and the aggregate duration of
their most frequent values
query (str) Filter query string to create conditions from.
params (Dict[str, str]) Filtering parameters with start, end, project_id, environment
limit (int) The number of records to fetch.
referrer (str|None) A referrer string to help locate the origin of this query.
Returns Sequence[FacetResult]
"""
with sentry_sdk.start_span(
op="discover.discover", description="facets.filter_transform"
) as span:
span.set_data("query", query)
snuba_filter = get_filter(query, params)
# Resolve the public aliases into the discover dataset names.
snuba_filter, translated_columns = resolve_discover_aliases(snuba_filter)
with sentry_sdk.start_span(op="discover.discover", description="facets.frequent_tags"):
# Get the most relevant tag keys
key_names = raw_query(
aggregations=[
[aggregate_function, aggregate_column, "aggregate"],
["count", None, "count"],
],
start=snuba_filter.start,
end=snuba_filter.end,
conditions=snuba_filter.conditions,
filter_keys=snuba_filter.filter_keys,
orderby=["-count"],
dataset=Dataset.Discover,
limit=limit,
referrer=referrer,
)
counts = [r["count"] for r in key_names["data"]]
if len(counts) != 1:
return []
results = []
snuba_filter.conditions.append([aggregate_column, "IS NOT NULL", None])
# Aggregate for transaction
transaction_aggregate = key_names["data"][0]["aggregate"]
# Dynamically sample so at least 10000 transactions are selected
transaction_count = key_names["data"][0]["count"]
sampling_enabled = transaction_count > 10000
target_sample = 10000 * (math.log(transaction_count, 10) - 3) # Log growth starting at 10,000
dynamic_sample_rate = 0 if transaction_count <= 0 else (target_sample / transaction_count)
options_sample_rate = (
options.get("discover2.tags_performance_facet_sample_rate") or dynamic_sample_rate
)
sample_rate = options_sample_rate if sampling_enabled else None
sample_multiplier = sample_rate if sample_rate else 1
excluded_tags = [
"tags_key",
"NOT IN",
["trace", "trace.ctx", "trace.span", "project", "browser"],
]
with sentry_sdk.start_span(op="discover.discover", description="facets.aggregate_tags"):
conditions = snuba_filter.conditions
aggregate_comparison = transaction_aggregate * 1.01 if transaction_aggregate else 0
having = [excluded_tags]
if orderby and orderby in ("-sumdelta", "aggregate", "-aggregate"):
having.append(["aggregate", ">", aggregate_comparison])
if orderby is None:
orderby = []
else:
orderby = [orderby]
tag_values = raw_query(
selected_columns=[
[
"sum",
[
"minus",
[
aggregate_column,
str(transaction_aggregate),
],
],
"sumdelta",
],
],
aggregations=[
[aggregate_function, aggregate_column, "aggregate"],
["count", None, "cnt"],
],
conditions=conditions,
start=snuba_filter.start,
end=snuba_filter.end,
filter_keys=snuba_filter.filter_keys,
orderby=orderby + ["tags_key"],
groupby=["tags_key", "tags_value"],
having=having,
dataset=Dataset.Discover,
referrer=referrer,
sample=sample_rate,
turbo=sample_rate is not None,
limitby=[5, "tags_key"],
)
results.extend(
[
PerformanceFacetResult(
r["tags_key"],
r["tags_value"],
float(r["aggregate"]),
float(r["cnt"] / transaction_count),
float(r["aggregate"] / transaction_aggregate),
float(r["sumdelta"]),
)
for r in tag_values["data"]
]
)
return results
|
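A worked example (hypothetical transaction count) of the dynamic sampling formula above: the target sample grows with log10 of the count starting at 10,000, and the rate is the target divided by the count.

import math

transaction_count = 1_000_000                                    # hypothetical volume
target_sample = 10000 * (math.log(transaction_count, 10) - 3)    # log growth starting at 10,000
sample_rate = target_sample / transaction_count

print(target_sample, sample_rate)   # roughly 30000.0 and 0.03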
10,897 | def template_constant_dict(config, ignore=None, skip_lower=None, toolchain=None):
"""Create a dict for templating the values in the easyconfigs.
- config is a dict with the structure of EasyConfig._config
"""
if skip_lower is not None:
_log.deprecated("Use of 'skip_lower' named argument for template_constant_dict has no effect anymore", '4.0')
# TODO find better name
# ignore
if ignore is None:
ignore = []
# make dict
template_values = {}
_log.debug("config: %s", config)
# set 'arch' for system architecture based on 'machine' (4th) element of platform.uname() return value
template_values['arch'] = platform.uname()[4]
# step 1: add TEMPLATE_NAMES_EASYCONFIG
for name in TEMPLATE_NAMES_EASYCONFIG:
if name in ignore:
continue
# check if this template name is already handled
if template_values.get(name[0]) is not None:
continue
if name[0].startswith('toolchain_'):
tc = config.get('toolchain')
if tc is not None:
template_values['toolchain_name'] = tc.get('name', None)
template_values['toolchain_version'] = tc.get('version', None)
# only go through this once
ignore.extend(['toolchain_name', 'toolchain_version'])
elif name[0].startswith('version_'):
# parse major and minor version numbers
version = config['version']
if version is not None:
_log.debug("version found in easyconfig is %s", version)
version = version.split('.')
try:
major = version[0]
template_values['version_major'] = major
minor = version[1]
template_values['version_minor'] = minor
template_values['version_major_minor'] = '.'.join([major, minor])
except IndexError:
# if there is no minor version, skip it
pass
# only go through this once
ignore.extend(['version_major', 'version_minor', 'version_major_minor'])
elif name[0].endswith('letter'):
# parse first letters
if name[0].startswith('name'):
softname = config['name']
if softname is not None:
template_values['nameletter'] = softname[0]
else:
raise EasyBuildError("Undefined name %s from TEMPLATE_NAMES_EASYCONFIG", name)
# step 2: define *ver and *shortver templates
for name, pref in TEMPLATE_SOFTWARE_VERSIONS:
# copy to avoid changing original list below
deps = copy.copy(config.get('dependencies', []))
if hasattr(config, 'iterating'):
if 'builddependencies' in config.iterate_options:
if config.iterating:
deps += config.get('builddependencies', [])
else:
deps += config.get('builddependencies', [])
for dep in deps:
if isinstance(dep, dict):
dep_name, dep_version = dep['name'], dep['version']
# take into account dependencies marked as external modules,
# where name/version may have to be harvested from metadata available for that external module
if dep.get('external_module', False):
metadata = dep.get('external_module_metadata', {})
if dep_name is None:
# name is a list in metadata, just take first value (if any)
dep_name = metadata.get('name', [None])[0]
if dep_version is None:
# version is a list in metadata, just take first value (if any)
dep_version = metadata.get('version', [None])[0]
elif isinstance(dep, (list, tuple)):
dep_name, dep_version = dep[0], dep[1]
else:
raise EasyBuildError("Unexpected type for dependency: %s", dep)
if isinstance(dep_name, string_type) and dep_name.lower() == name.lower() and dep_version:
dep_version = pick_dep_version(dep_version)
template_values['%sver' % pref] = dep_version
dep_version_parts = dep_version.split('.')
template_values['%smajver' % pref] = dep_version_parts[0]
if len(dep_version_parts) > 1:
template_values['%sminver' % pref] = dep_version_parts[1]
template_values['%sshortver' % pref] = '.'.join(dep_version_parts[:2])
break
# step 3: add remaining from config
for name in TEMPLATE_NAMES_CONFIG:
if name in ignore:
continue
if name in config:
template_values[name] = config[name]
_log.debug('name: %s, config: %s', name, config[name])
# step 4. make lower variants
for name in TEMPLATE_NAMES_LOWER:
if name in ignore:
continue
value = config.get(name) or template_values.get(name)
if value is None:
continue
try:
template_values[TEMPLATE_NAMES_LOWER_TEMPLATE % {'name': name}] = value.lower()
except Exception:
_log.warning("Failed to get .lower() for name %s value %s (type %s)", name, value, type(value))
# step 5. add additional conditional templates
if toolchain is not None and hasattr(toolchain, 'mpi_cmd_prefix'):
try:
# get prefix for commands to be run with mpi runtime using default number of ranks
mpi_cmd_prefix = toolchain.mpi_cmd_prefix()
if mpi_cmd_prefix is not None:
template_values['mpi_cmd_prefix'] = mpi_cmd_prefix
except EasyBuildError as err:
# don't fail just because we couldn't resolve this template
_log.warning("Failed to create mpi_cmd_prefix template, error was:\n%s", err)
return template_values
| def template_constant_dict(config, ignore=None, skip_lower=None, toolchain=None):
"""Create a dict for templating the values in the easyconfigs.
- config is a dict with the structure of EasyConfig._config
"""
if skip_lower is not None:
_log.deprecated("Use of 'skip_lower' named argument for template_constant_dict has no effect anymore", '4.0')
# TODO find better name
# ignore
if ignore is None:
ignore = []
# make dict
template_values = {}
_log.debug("config: %s", config)
# set 'arch' for system architecture based on 'machine' (4th) element of platform.uname() return value
template_values['arch'] = platform.uname()[4]
# step 1: add TEMPLATE_NAMES_EASYCONFIG
for name in TEMPLATE_NAMES_EASYCONFIG:
if name in ignore:
continue
# check if this template name is already handled
if template_values.get(name[0]) is not None:
continue
if name[0].startswith('toolchain_'):
tc = config.get('toolchain')
if tc is not None:
template_values['toolchain_name'] = tc.get('name', None)
template_values['toolchain_version'] = tc.get('version', None)
# only go through this once
ignore.extend(['toolchain_name', 'toolchain_version'])
elif name[0].startswith('version_'):
# parse major and minor version numbers
version = config['version']
if version is not None:
_log.debug("version found in easyconfig is %s", version)
version = version.split('.')
try:
major = version[0]
template_values['version_major'] = major
minor = version[1]
template_values['version_minor'] = minor
template_values['version_major_minor'] = '.'.join([major, minor])
except IndexError:
# if there is no minor version, skip it
pass
# only go through this once
ignore.extend(['version_major', 'version_minor', 'version_major_minor'])
elif name[0].endswith('letter'):
# parse first letters
if name[0].startswith('name'):
softname = config['name']
if softname is not None:
template_values['nameletter'] = softname[0]
else:
raise EasyBuildError("Undefined name %s from TEMPLATE_NAMES_EASYCONFIG", name)
# step 2: define *ver and *shortver templates
for name, pref in TEMPLATE_SOFTWARE_VERSIONS:
# copy to avoid changing original list below
deps = copy.copy(config.get('dependencies', []))
deps += config.get('builddependencies', [])
for dep in deps:
if isinstance(dep, dict):
dep_name, dep_version = dep['name'], dep['version']
# take into account dependencies marked as external modules,
# where name/version may have to be harvested from metadata available for that external module
if dep.get('external_module', False):
metadata = dep.get('external_module_metadata', {})
if dep_name is None:
# name is a list in metadata, just take first value (if any)
dep_name = metadata.get('name', [None])[0]
if dep_version is None:
# version is a list in metadata, just take first value (if any)
dep_version = metadata.get('version', [None])[0]
elif isinstance(dep, (list, tuple)):
dep_name, dep_version = dep[0], dep[1]
else:
raise EasyBuildError("Unexpected type for dependency: %s", dep)
if isinstance(dep_name, string_type) and dep_name.lower() == name.lower() and dep_version:
dep_version = pick_dep_version(dep_version)
template_values['%sver' % pref] = dep_version
dep_version_parts = dep_version.split('.')
template_values['%smajver' % pref] = dep_version_parts[0]
if len(dep_version_parts) > 1:
template_values['%sminver' % pref] = dep_version_parts[1]
template_values['%sshortver' % pref] = '.'.join(dep_version_parts[:2])
break
# step 3: add remaining from config
for name in TEMPLATE_NAMES_CONFIG:
if name in ignore:
continue
if name in config:
template_values[name] = config[name]
_log.debug('name: %s, config: %s', name, config[name])
# step 4. make lower variants
for name in TEMPLATE_NAMES_LOWER:
if name in ignore:
continue
value = config.get(name) or template_values.get(name)
if value is None:
continue
try:
template_values[TEMPLATE_NAMES_LOWER_TEMPLATE % {'name': name}] = value.lower()
except Exception:
_log.warning("Failed to get .lower() for name %s value %s (type %s)", name, value, type(value))
# step 5. add additional conditional templates
if toolchain is not None and hasattr(toolchain, 'mpi_cmd_prefix'):
try:
# get prefix for commands to be run with mpi runtime using default number of ranks
mpi_cmd_prefix = toolchain.mpi_cmd_prefix()
if mpi_cmd_prefix is not None:
template_values['mpi_cmd_prefix'] = mpi_cmd_prefix
except EasyBuildError as err:
# don't fail just because we couldn't resolve this template
_log.warning("Failed to create mpi_cmd_prefix template, error was:\n%s", err)
return template_values
|
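A pure-Python sketch of the version templating in step 1, using a hypothetical version string and mirroring the try/except above:

version = "2.7.18"
template_values = {}

parts = version.split('.')
try:
    major = parts[0]
    template_values['version_major'] = major
    minor = parts[1]
    template_values['version_minor'] = minor
    template_values['version_major_minor'] = '.'.join([major, minor])
except IndexError:
    pass   # a version like "2021a" has no minor part; skip it

print(template_values)   # {'version_major': '2', 'version_minor': '7', 'version_major_minor': '2.7'}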
48,091 | def put_labels(frame, predictions):
"""
Coverts predictions to text labels and puts them to the top left corner of a frame.
"""
frame = frame.copy()
assert len(predictions) == 1
# TODO (ilya-krylov): handle multi-label classification
assert len(predictions[0].get_labels()) == 1
label = predictions[0].get_labels()[0]
color = tuple(getattr(label.color, x) for x in ("blue", "green", "red"))
put_text_on_rect_bg(frame, label.name, (0, 0), color=color)
return frame
| def put_labels(frame, predictions):
"""
Converts predictions to text labels and puts them to the top left corner of a frame.
"""
frame = frame.copy()
assert len(predictions) == 1
# TODO (ilya-krylov): handle multi-label classification
assert len(predictions[0].get_labels()) == 1
label = predictions[0].get_labels()[0]
color = tuple(getattr(label.color, x) for x in ("blue", "green", "red"))
put_text_on_rect_bg(frame, label.name, (0, 0), color=color)
return frame
|
37,383 | def initialize(self, params, qubits=None):
"""Qubit initializalition is done by appending instructions to the quantum circuit (by
calling Initialize(params)) and the qubits we wish to iniatilize. Note that the
qubits are first set to `|0>` and then the desired state is achieved by applying
a state preparing unitary.
Args:
params (str or list):
* list: vector of complex amplitudes to initialize to.
* string: labels of basis states of the Pauli eigenstates Z, X, Y. See
:meth:`~qiskit.quantum_info.states.statevector.Statevector.from_label`.
Notice the order of the labels is reversed with respect to the qubit index to
be applied to. Example label '01' initializes the qubit zero to `|1>` and the
qubit one to `|0>`.
qubits (QuantumRegister or int):
* QuantumRegister: A list of qubits to be initialized [Default: None].
* int: Index of qubit to initialzied [Default: None].
Returns:
qiskit.circuit.Instruction: a handle to the instruction that was just initialized
Examples:
Prepare a qubit in the anti-symmetric state 1/sqrt(2)(`|0>` - `|1>`).
.. code-block::
import numpy as np
from qiskit import QuantumCircuit
circuit = QuantumCircuit(1)
circuit.initialize([1/np.sqrt(2), -1/np.sqrt(2)], 0)
circuit.draw()
output:
┌──────────────────────────────┐
q_0: ┤ initialize(0.70711,-0.70711) ├
└──────────────────────────────┘
Initialize from a string two qubits in the state `|10>`.
The order of the labels is reversed with respect to qubit index.
More information about labels for basis states are in
:meth:`~qiskit.quantum_info.states.statevector.Statevector.from_label`.
.. code-block::
import numpy as np
from qiskit import QuantumCircuit
circuit = QuantumCircuit(2)
circuit.initialize('01', circuit.qubits)
circuit.draw()
output:
┌──────────────────┐
q_0: ┤0 ├
│ initialize(0,1) │
q_1: ┤1 ├
└──────────────────┘
Initialize two qubits from an array of complex amplitudes
.. code-block::
import numpy as np
from qiskit import QuantumCircuit
circuit = QuantumCircuit(2)
circuit.initialize([0, 1/np.sqrt(2), -1.j/np.sqrt(2), 0], circuit.qubits)
circuit.draw()
output:
┌────────────────────────────────────┐
q_0: ┤0 ├
│ initialize(0,0.70711,-0.70711j,0) │
q_1: ┤1 ├
└────────────────────────────────────┘
"""
if qubits is None:
qubits = self.qubits
else:
if isinstance(qubits, int):
qubits = [qubits]
qubits = self._bit_argument_conversion(qubits, self.qubits)
num_qubits = None if not isinstance(params, int) else len(qubits)
return self.append(Initialize(params, num_qubits), qubits)
| def initialize(self, params, qubits=None):
"""Qubit initializalition is done by appending instructions to the quantum circuit (by
calling Initialize(params)) and the qubits we wish to initialize. Note that the
qubits are first set to `|0>` and then the desired state is achieved by applying
a state preparing unitary.
Args:
params (str or list):
* list: vector of complex amplitudes to initialize to.
* string: labels of basis states of the Pauli eigenstates Z, X, Y. See
:meth:`~qiskit.quantum_info.states.statevector.Statevector.from_label`.
Notice the order of the labels is reversed with respect to the qubit index to
be applied to. Example label '01' initializes the qubit zero to `|1>` and the
qubit one to `|0>`.
qubits (QuantumRegister or int):
* QuantumRegister: A list of qubits to be initialized [Default: None].
* int: Index of qubit to be initialized [Default: None].
Returns:
qiskit.circuit.Instruction: a handle to the instruction that was just initialized
Examples:
Prepare a qubit in the anti-symmetric state 1/sqrt(2)(`|0>` - `|1>`).
.. code-block::
import numpy as np
from qiskit import QuantumCircuit
circuit = QuantumCircuit(1)
circuit.initialize([1/np.sqrt(2), -1/np.sqrt(2)], 0)
circuit.draw()
output:
┌──────────────────────────────┐
q_0: ┤ initialize(0.70711,-0.70711) ├
└──────────────────────────────┘
Initialize from a string two qubits in the state `|10>`.
The order of the labels is reversed with respect to qubit index.
More information about labels for basis states are in
:meth:`~qiskit.quantum_info.states.statevector.Statevector.from_label`.
.. code-block::
import numpy as np
from qiskit import QuantumCircuit
circuit = QuantumCircuit(2)
circuit.initialize('01', circuit.qubits)
circuit.draw()
output:
┌──────────────────┐
q_0: ┤0 ├
│ initialize(0,1) │
q_1: ┤1 ├
└──────────────────┘
Initialize two qubits from an array of complex amplitudes
.. code-block::
import numpy as np
from qiskit import QuantumCircuit
circuit = QuantumCircuit(2)
circuit.initialize([0, 1/np.sqrt(2), -1.j/np.sqrt(2), 0], circuit.qubits)
circuit.draw()
output:
┌────────────────────────────────────┐
q_0: ┤0 ├
│ initialize(0,0.70711,-0.70711j,0) │
q_1: ┤1 ├
└────────────────────────────────────┘
"""
if qubits is None:
qubits = self.qubits
else:
if isinstance(qubits, int):
qubits = [qubits]
qubits = self._bit_argument_conversion(qubits, self.qubits)
num_qubits = None if not isinstance(params, int) else len(qubits)
return self.append(Initialize(params, num_qubits), qubits)
|
32,080 | def main():
if not TOKEN:
raise Exception('api token must be provided.')
handle_proxy()
command = demisto.command()
LOG(f'Command being called is {command}')
# should raise error in case of issue
if command == 'fetch-incidents':
demisto.incidents(fetch_incidents())
else:
try:
if command == 'test-module':
# This is the call made when pressing the integration test button.
test_module()
demisto.results('ok')
elif command == 'cbp-fileCatalog-search':
search_file_catalog_command()
elif command == 'cbp-computer-search':
search_computer_command()
elif command == 'cbp-computer-update':
update_computer_command()
elif command == 'cbp-fileInstance-search':
search_file_instance_command()
elif command == 'cbp-event-search':
search_event_command()
elif command == 'cbp-approvalRequest-search':
search_approval_request_command()
elif command == 'cbp-fileRule-search':
search_file_rule_command()
elif command == 'cbp-fileRule-get':
get_file_rule_command()
elif command == 'cbp-fileRule-delete':
delete_file_rule_command()
elif command in ('cbp-fileRule-update', 'cbp-fileRule-createOrUpdate'):
update_file_rule_command()
elif command == 'cbp-policy-search':
search_policy_command()
elif command == 'cbp-serverConfig-search':
search_server_config_command()
elif command == 'cbp-publisher-search':
search_publisher_command()
elif command == 'cbp-fileAnalysis-search':
search_file_analysis_command()
elif command == 'cbp-fileAnalysis-get':
get_file_analysis_command()
elif command == 'cbp-fileAnalysis-createOrUpdate':
update_file_analysis_command()
elif command == 'cbp-fileUpload-createOrUpdate':
update_file_upload_command()
elif command == 'cbp-fileUpload-download':
download_file_upload_command()
elif command == 'cbp-fileUpload-search':
search_file_upload_command()
elif command == 'cbp-fileUpload-get':
get_file_upload_command()
elif command == 'cbp-computer-get':
get_computer_command()
elif command == 'cbp-connector-get':
get_connector_command()
elif command == 'cbp-connector-search':
search_connector_command()
elif command == 'cbp-approvalRequest-resolve':
resolve_approval_request_command()
else:
return_error(f"Command {command} is not supported.")
# Log exceptions
except Exception as e:
return_error(str(e))
| def main():
if not TOKEN:
raise Exception('API Token must be provided.')
handle_proxy()
command = demisto.command()
LOG(f'Command being called is {command}')
# should raise error in case of issue
if command == 'fetch-incidents':
demisto.incidents(fetch_incidents())
else:
try:
if command == 'test-module':
# This is the call made when pressing the integration test button.
test_module()
demisto.results('ok')
elif command == 'cbp-fileCatalog-search':
search_file_catalog_command()
elif command == 'cbp-computer-search':
search_computer_command()
elif command == 'cbp-computer-update':
update_computer_command()
elif command == 'cbp-fileInstance-search':
search_file_instance_command()
elif command == 'cbp-event-search':
search_event_command()
elif command == 'cbp-approvalRequest-search':
search_approval_request_command()
elif command == 'cbp-fileRule-search':
search_file_rule_command()
elif command == 'cbp-fileRule-get':
get_file_rule_command()
elif command == 'cbp-fileRule-delete':
delete_file_rule_command()
elif command in ('cbp-fileRule-update', 'cbp-fileRule-createOrUpdate'):
update_file_rule_command()
elif command == 'cbp-policy-search':
search_policy_command()
elif command == 'cbp-serverConfig-search':
search_server_config_command()
elif command == 'cbp-publisher-search':
search_publisher_command()
elif command == 'cbp-fileAnalysis-search':
search_file_analysis_command()
elif command == 'cbp-fileAnalysis-get':
get_file_analysis_command()
elif command == 'cbp-fileAnalysis-createOrUpdate':
update_file_analysis_command()
elif command == 'cbp-fileUpload-createOrUpdate':
update_file_upload_command()
elif command == 'cbp-fileUpload-download':
download_file_upload_command()
elif command == 'cbp-fileUpload-search':
search_file_upload_command()
elif command == 'cbp-fileUpload-get':
get_file_upload_command()
elif command == 'cbp-computer-get':
get_computer_command()
elif command == 'cbp-connector-get':
get_connector_command()
elif command == 'cbp-connector-search':
search_connector_command()
elif command == 'cbp-approvalRequest-resolve':
resolve_approval_request_command()
else:
return_error(f"Command {command} is not supported.")
# Log exceptions
except Exception as e:
return_error(str(e))
|
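The main() above routes every cbp-* command through a long if/elif chain. A dictionary dispatch is an equivalent pattern; the sketch below is self-contained, so the two stub handlers and the ValueError stand in for the real command functions and the return_error helper used in the code above.

def search_file_catalog_command():
    print('searching file catalog')

def search_computer_command():
    print('searching computers')

COMMAND_HANDLERS = {
    'cbp-fileCatalog-search': search_file_catalog_command,
    'cbp-computer-search': search_computer_command,
    # ...remaining cbp-* commands would be registered the same way
}

def dispatch(command):
    # Look up the handler once instead of walking an if/elif ladder
    handler = COMMAND_HANDLERS.get(command)
    if handler is None:
        raise ValueError(f'Command {command} is not supported.')
    handler()

dispatch('cbp-computer-search')  # prints: searching computers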
3,926 | def lexicographical_topological_sort(G, key=None):
"""Generates a unique ordering of nodes by first sorting topologically (for which there are often
multiple valid orderings) and then additionally by sorting lexicographically.
A topological sort arranges the nodes of a directed graph so that the
upstream node of each directed edge precedes the downstream node.
It is always possible to find a solution for directed graphs that have no cycles.
There may be more than one valid solution.
Lexicographical sorting is just sorting alphabetically. It is used here to break ties in the
topological sort and to determine a single, unique ordering. This can be useful in comparing
sort results.
The lexicographical order can be customized by providing a function to the `key=` parameter.
The definition of the key function is the same as used in python's built-in `sort()`.
The function takes a single argument and returns a key to use for sorting purposes.
Lexicographical sorting can fail if the node names are un-sortable. See the example below.
The solution is to provide a function to the `key=` argument that returns sortable keys.
Parameters
----------
G : NetworkX digraph
A directed acyclic graph (DAG)
key : function, optional
A function of one argument that converts a node name to a comparison key.
Use to resolve ambiguities in the sort order. Defaults to the identity function.
Yields
------
nodes
Yields the nodes of G in lexicographical topological sort order.
Raises
------
NetworkXError
Topological sort is defined for directed graphs only. If the graph `G`
is undirected, a :exc:`NetworkXError` is raised.
NetworkXUnfeasible
If `G` is not a directed acyclic graph (DAG) no topological sort exists
and a :exc:`NetworkXUnfeasible` exception is raised. This can also be
raised if `G` is changed while the returned iterator is being processed
RuntimeError
If `G` is changed while the returned iterator is being processed.
TypeError
Results from un-sortable node names.
Consider using `key=` parameter to resolve ambiguities in the sort order.
Examples
--------
>>> DG = nx.DiGraph([(2, 1), (2, 5), (1, 3), (1, 4), (5, 4)])
>>> list(nx.lexicographical_topological_sort(DG))
[2, 1, 3, 5, 4]
>>> list(nx.lexicographical_topological_sort(DG, key=lambda x: -x))
[2, 5, 1, 4, 3]
The sort will fail for this graph because the comparison of integers to strings
is not defined in python. Is 3 greater or less than 'red'?
>>> DG = nx.DiGraph([(1, 'red'), (3, 'red'), (1, 'green'), (2, 'blue')])
>>> list(nx.lexicographical_topological_sort(DG))
Traceback (most recent call last):
...
TypeError: '<' not supported between instances of 'str' and 'int'
...
The solution is to provide a function that returns keys that do compare.
There are many ways to write a `key` function. This one returns a tuple where the first
element is True for `str`, False otherwise. The second element is the node name.
This groups the strings and integers separately so they can be compared only among themselves.
>>> key = lambda node: (isinstance(node, str), node)
>>> list(nx.lexicographical_topological_sort(DG, key=key))
[1, 2, 3, 'blue', 'green', 'red']
Notes
-----
This algorithm is based on a description and proof in
"Introduction to Algorithms: A Creative Approach" [1]_ .
See also
--------
topological_sort
References
----------
.. [1] Manber, U. (1989).
*Introduction to Algorithms - A Creative Approach.* Addison-Wesley.
"""
if not G.is_directed():
msg = "Topological sort not defined on undirected graphs."
raise nx.NetworkXError(msg)
if key is None:
def key(node):
return node
nodeid_map = {n: i for i, n in enumerate(G)}
def create_tuple(node):
return key(node), nodeid_map[node], node
indegree_map = {v: d for v, d in G.in_degree() if d > 0}
# These nodes have zero indegree and are ready to be returned.
zero_indegree = [create_tuple(v) for v, d in G.in_degree() if d == 0]
heapq.heapify(zero_indegree)
while zero_indegree:
_, _, node = heapq.heappop(zero_indegree)
if node not in G:
raise RuntimeError("Graph changed during iteration")
for _, child in G.edges(node):
try:
indegree_map[child] -= 1
except KeyError as err:
raise RuntimeError("Graph changed during iteration") from err
if indegree_map[child] == 0:
try:
heapq.heappush(zero_indegree, create_tuple(child))
except TypeError as err:
raise TypeError(
f"{err}\nConsider using `key=` parameter to resolve ambiguities in the sort order."
)
del indegree_map[child]
yield node
if indegree_map:
msg = "Graph contains a cycle or graph changed during iteration"
raise nx.NetworkXUnfeasible(msg)
| def lexicographical_topological_sort(G, key=None):
"""Generates a unique ordering of nodes by first sorting topologically (for which there are often
multiple valid orderings) and then additionally by sorting lexicographically.
A topological sort arranges the nodes of a directed graph so that the
upstream node of each directed edge precedes the downstream node.
It is always possible to find a solution for directed graphs that have no cycles.
There may be more than one valid solution.
Lexicographical sorting is just sorting alphabetically. It is used here to break ties in the
topological sort and to determine a single, unique ordering. This can be useful in comparing
sort results.
The lexicographical order can be customized by providing a function to the `key=` parameter.
The definition of the key function is the same as used in python's built-in `sort()`.
The function takes a single argument and returns a key to use for sorting purposes.
Lexicographical sorting can fail if the node names are un-sortable. See the example below.
The solution is to provide a function to the `key=` argument that returns sortable keys.
Parameters
----------
G : NetworkX digraph
A directed acyclic graph (DAG)
key : function, optional
A function of one argument that converts a node name to a comparison key.
It defines and resolves ambiguities in the sort order. Defaults to the identity function.
Yields
------
nodes
Yields the nodes of G in lexicographical topological sort order.
Raises
------
NetworkXError
Topological sort is defined for directed graphs only. If the graph `G`
is undirected, a :exc:`NetworkXError` is raised.
NetworkXUnfeasible
If `G` is not a directed acyclic graph (DAG) no topological sort exists
and a :exc:`NetworkXUnfeasible` exception is raised. This can also be
raised if `G` is changed while the returned iterator is being processed
RuntimeError
If `G` is changed while the returned iterator is being processed.
TypeError
Results from un-sortable node names.
Consider using `key=` parameter to resolve ambiguities in the sort order.
Examples
--------
>>> DG = nx.DiGraph([(2, 1), (2, 5), (1, 3), (1, 4), (5, 4)])
>>> list(nx.lexicographical_topological_sort(DG))
[2, 1, 3, 5, 4]
>>> list(nx.lexicographical_topological_sort(DG, key=lambda x: -x))
[2, 5, 1, 4, 3]
The sort will fail for this graph because the comparison of integers to strings
is not defined in python. Is 3 greater or less than 'red'?
>>> DG = nx.DiGraph([(1, 'red'), (3, 'red'), (1, 'green'), (2, 'blue')])
>>> list(nx.lexicographical_topological_sort(DG))
Traceback (most recent call last):
...
TypeError: '<' not supported between instances of 'str' and 'int'
...
The solution is to provide a function that returns keys that do compare.
There are many ways to write a `key` function. This one returns a tuple where the first
element is True for `str`, False otherwise. The second element is the node name.
This groups the strings and integers separately so they can be compared only among themselves.
>>> key = lambda node: (isinstance(node, str), node)
>>> list(nx.lexicographical_topological_sort(DG, key=key))
[1, 2, 3, 'blue', 'green', 'red']
Notes
-----
This algorithm is based on a description and proof in
"Introduction to Algorithms: A Creative Approach" [1]_ .
See also
--------
topological_sort
References
----------
.. [1] Manber, U. (1989).
*Introduction to Algorithms - A Creative Approach.* Addison-Wesley.
"""
if not G.is_directed():
msg = "Topological sort not defined on undirected graphs."
raise nx.NetworkXError(msg)
if key is None:
def key(node):
return node
nodeid_map = {n: i for i, n in enumerate(G)}
def create_tuple(node):
return key(node), nodeid_map[node], node
indegree_map = {v: d for v, d in G.in_degree() if d > 0}
# These nodes have zero indegree and are ready to be returned.
zero_indegree = [create_tuple(v) for v, d in G.in_degree() if d == 0]
heapq.heapify(zero_indegree)
while zero_indegree:
_, _, node = heapq.heappop(zero_indegree)
if node not in G:
raise RuntimeError("Graph changed during iteration")
for _, child in G.edges(node):
try:
indegree_map[child] -= 1
except KeyError as err:
raise RuntimeError("Graph changed during iteration") from err
if indegree_map[child] == 0:
try:
heapq.heappush(zero_indegree, create_tuple(child))
except TypeError as err:
raise TypeError(
f"{err}\nConsider using `key=` parameter to resolve ambiguities in the sort order."
)
del indegree_map[child]
yield node
if indegree_map:
msg = "Graph contains a cycle or graph changed during iteration"
raise nx.NetworkXUnfeasible(msg)
|
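One implementation detail worth noting above: create_tuple pushes (key(node), insertion index, node) onto the heap, so ties on the key are settled by the unique enumeration index and the node object itself never takes part in a comparison. A small self-contained sketch of the same trick, using hypothetical dict payloads that would not support '<' on their own:

import heapq

nodes = [{'name': 'red'}, {'name': 'blue'}, {'name': 'cyan'}]
index = {id(node): i for i, node in enumerate(nodes)}

def entry(node):
    # (sort key, unique tie-breaking index, payload); 'blue' and 'cyan' tie on the key,
    # so the index decides and the un-comparable dicts are never compared
    return (len(node['name']), index[id(node)], node)

heap = [entry(node) for node in nodes]
heapq.heapify(heap)
print([heapq.heappop(heap)[2]['name'] for _ in range(len(heap))])
# ['red', 'blue', 'cyan']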
56,224 | def main():
log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
mask_rcnn_model_xml = args.mask_rcnn_model
mask_rcnn_model_bin = os.path.splitext(mask_rcnn_model_xml)[0] + '.bin'
text_enc_model_xml = args.text_enc_model
text_enc_model_bin = os.path.splitext(text_enc_model_xml)[0] + '.bin'
text_dec_model_xml = args.text_dec_model
text_dec_model_bin = os.path.splitext(text_dec_model_xml)[0] + '.bin'
# Plugin initialization for specified device and load extensions library if specified.
log.info('Creating Inference Engine...')
ie = IECore()
if args.cpu_extension and 'CPU' in args.device:
ie.add_extension(args.cpu_extension, 'CPU')
# Read IR
log.info('Loading network files:\n\t{}\n\t{}'.format(mask_rcnn_model_xml, mask_rcnn_model_bin))
mask_rcnn_net = IENetwork(model=mask_rcnn_model_xml, weights=mask_rcnn_model_bin)
log.info('Loading network files:\n\t{}\n\t{}'.format(text_enc_model_xml, text_enc_model_bin))
text_enc_net = IENetwork(model=text_enc_model_xml, weights=text_enc_model_bin)
log.info('Loading network files:\n\t{}\n\t{}'.format(text_dec_model_xml, text_dec_model_bin))
text_dec_net = IENetwork(model=text_dec_model_xml, weights=text_dec_model_bin)
if 'CPU' in args.device:
supported_layers = ie.query_network(mask_rcnn_net, 'CPU')
not_supported_layers = [l for l in mask_rcnn_net.layers.keys() if l not in supported_layers]
if len(not_supported_layers) != 0:
log.error('Following layers are not supported by the plugin for specified device {}:\n {}'.
format(args.device, ', '.join(not_supported_layers)))
log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
"or --cpu_extension command line argument")
sys.exit(1)
required_input_keys = {'im_data', 'im_info'}
assert required_input_keys == set(mask_rcnn_net.inputs.keys()), \
'Demo supports only topologies with the following input keys: {}'.format(', '.join(required_input_keys))
required_output_keys = {'boxes', 'scores', 'classes', 'raw_masks', 'text_features'}
assert required_output_keys.issubset(mask_rcnn_net.outputs.keys()), \
'Demo supports only topologies with the following output keys: {}'.format(', '.join(required_output_keys))
n, c, h, w = mask_rcnn_net.inputs['im_data'].shape
assert n == 1, 'Only batch 1 is supported by the demo application'
log.info('Loading IR to the plugin...')
mask_rcnn_exec_net = ie.load_network(network=mask_rcnn_net, device_name=args.device, num_requests=2)
text_enc_exec_net = ie.load_network(network=text_enc_net, device_name=args.device)
text_dec_exec_net = ie.load_network(network=text_dec_net, device_name=args.device)
for name, input in text_dec_exec_net.inputs.items():
if len(input.shape) == 3:
if input.shape[1] == 1:
trd_input_prev_hidden = name
else:
trd_input_encoder_outputs = name
elif len(input.shape) == 1:
trd_input_prev_symbol = name
for name, output in text_dec_exec_net.outputs.items():
if len(output.shape) == 3:
trd_output_cur_hidden = name
elif len(output.shape) == 2:
trd_output_symbols_distr = name
hidden_shape = text_dec_net.inputs[trd_input_prev_hidden].shape
del mask_rcnn_net
del text_enc_net
del text_dec_net
try:
input_source = int(args.input_source)
except ValueError:
input_source = args.input_source
if os.path.isdir(input_source):
cap = FolderCapture(input_source)
else:
cap = cv2.VideoCapture(input_source)
if not cap.isOpened():
log.error('Failed to open "{}"'.format(args.input_source))
if isinstance(cap, cv2.VideoCapture):
cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
if args.no_track:
tracker = None
else:
tracker = StaticIOUTracker()
visualizer = Visualizer(['__background__', 'text'], show_boxes=args.show_boxes, show_scores=args.show_scores)
render_time = 0
log.info('Starting inference...')
print("To close the application, press 'CTRL+C' here or switch to the output window and press ESC key")
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
if not args.keep_aspect_ratio:
# Resize the image to a target size.
scale_x = w / frame.shape[1]
scale_y = h / frame.shape[0]
input_image = cv2.resize(frame, (w, h))
else:
# Resize the image to keep the same aspect ratio and to fit it to a window of a target size.
scale_x = scale_y = min(h / frame.shape[0], w / frame.shape[1])
input_image = cv2.resize(frame, None, fx=scale_x, fy=scale_y)
input_image_size = input_image.shape[:2]
input_image = np.pad(input_image, ((0, h - input_image_size[0]),
(0, w - input_image_size[1]),
(0, 0)),
mode='constant', constant_values=0)
# Change data layout from HWC to CHW.
input_image = input_image.transpose((2, 0, 1))
input_image = input_image.reshape((n, c, h, w)).astype(np.float32)
input_image_info = np.asarray([[input_image_size[0], input_image_size[1], 1]], dtype=np.float32)
# Run the net.
inf_start = time.time()
outputs = mask_rcnn_exec_net.infer({'im_data': input_image, 'im_info': input_image_info})
# Parse detection results of the current request
boxes = outputs['boxes']
scores = outputs['scores']
classes = outputs['classes'].astype(np.uint32)
raw_masks = outputs['raw_masks']
text_features = outputs['text_features']
# Filter out detections with low confidence.
detections_filter = scores > args.prob_threshold
scores = scores[detections_filter]
classes = classes[detections_filter]
boxes = boxes[detections_filter]
raw_masks = raw_masks[detections_filter]
text_features = text_features[detections_filter]
boxes[:, 0::2] /= scale_x
boxes[:, 1::2] /= scale_y
masks = []
for box, cls, raw_mask in zip(boxes, classes, raw_masks):
raw_cls_mask = raw_mask[cls, ...]
mask = segm_postprocess(box, raw_cls_mask, frame.shape[0], frame.shape[1])
masks.append(mask)
texts = []
for feature in text_features:
feature = text_enc_exec_net.infer({'input': feature})
feature = list(feature.values())[0]
feature = np.reshape(feature, (feature.shape[0], feature.shape[1], -1))
feature = np.transpose(feature, (0, 2, 1))
hidden = np.zeros(hidden_shape)
prev_symbol_index = np.ones((1,)) * SOS_INDEX
text = ''
for i in range(MAX_SEQ_LEN):
decoder_output = text_dec_exec_net.infer({
trd_input_prev_symbol: prev_symbol_index,
trd_input_prev_hidden: hidden,
trd_input_encoder_outputs: feature})
symbols_distr = decoder_output[trd_output_symbols_distr]
prev_symbol_index = int(np.argmax(symbols_distr, axis=1))
if prev_symbol_index == EOS_INDEX:
break
text += args.alphabet[prev_symbol_index]
hidden = decoder_output[trd_output_cur_hidden]
texts.append(text)
inf_end = time.time()
inf_time = inf_end - inf_start
render_start = time.time()
if len(boxes) and args.raw_output_message:
log.info('Detected boxes:')
log.info(' Class ID | Confidence | XMIN | YMIN | XMAX | YMAX ')
for box, cls, score, mask in zip(boxes, classes, scores, masks):
log.info('{:>10} | {:>10f} | {:>8.2f} | {:>8.2f} | {:>8.2f} | {:>8.2f} '.format(cls, score, *box))
# Get instance track IDs.
masks_tracks_ids = None
if tracker is not None:
masks_tracks_ids = tracker(masks, classes)
# Visualize masks.
frame = visualizer(frame, boxes, classes, scores, masks, texts, masks_tracks_ids)
# Draw performance stats.
inf_time_message = 'Inference and post-processing time: {:.3f} ms'.format(inf_time * 1000)
render_time_message = 'OpenCV rendering time: {:.3f} ms'.format(render_time * 1000)
cv2.putText(frame, inf_time_message, (15, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
cv2.putText(frame, render_time_message, (15, 30), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
# Print performance counters.
if args.perf_counts:
perf_counts = mask_rcnn_exec_net.requests[0].get_perf_counts()
log.info('Performance counters:')
print('{:<70} {:<15} {:<15} {:<15} {:<10}'.format('name', 'layer_type', 'exec_type', 'status',
'real_time, us'))
for layer, stats in perf_counts.items():
print('{:<70} {:<15} {:<15} {:<15} {:<10}'.format(layer, stats['layer_type'], stats['exec_type'],
stats['status'], stats['real_time']))
if not args.no_show:
# Show resulting image.
cv2.imshow('Results', frame)
render_end = time.time()
render_time = render_end - render_start
if not args.no_show:
key = cv2.waitKey(args.delay)
esc_code = 27
if key == esc_code:
break
cv2.destroyAllWindows()
cap.release()
| def main():
log.basicConfig(format='[ %(levelname)s ] %(message)s', level=log.INFO, stream=sys.stdout)
args = build_argparser().parse_args()
mask_rcnn_model_xml = args.mask_rcnn_model
mask_rcnn_model_bin = os.path.splitext(mask_rcnn_model_xml)[0] + '.bin'
text_enc_model_xml = args.text_enc_model
text_enc_model_bin = os.path.splitext(text_enc_model_xml)[0] + '.bin'
text_dec_model_xml = args.text_dec_model
text_dec_model_bin = os.path.splitext(text_dec_model_xml)[0] + '.bin'
# Plugin initialization for specified device and load extensions library if specified.
log.info('Creating Inference Engine...')
ie = IECore()
if args.cpu_extension and 'CPU' in args.device:
ie.add_extension(args.cpu_extension, 'CPU')
# Read IR
log.info('Loading network files:\n\t{}\n\t{}'.format(mask_rcnn_model_xml, mask_rcnn_model_bin))
mask_rcnn_net = IENetwork(model=mask_rcnn_model_xml, weights=mask_rcnn_model_bin)
log.info('Loading network files:\n\t{}\n\t{}'.format(text_enc_model_xml, text_enc_model_bin))
text_enc_net = IENetwork(model=text_enc_model_xml, weights=text_enc_model_bin)
log.info('Loading network files:\n\t{}\n\t{}'.format(text_dec_model_xml, text_dec_model_bin))
text_dec_net = IENetwork(model=text_dec_model_xml, weights=text_dec_model_bin)
if 'CPU' in args.device:
supported_layers = ie.query_network(mask_rcnn_net, 'CPU')
not_supported_layers = [l for l in mask_rcnn_net.layers.keys() if l not in supported_layers]
if len(not_supported_layers) != 0:
log.error('Following layers are not supported by the plugin for specified device {}:\n {}'.
format(args.device, ', '.join(not_supported_layers)))
log.error("Please try to specify cpu extensions library path in sample's command line parameters using -l "
"or --cpu_extension command line argument")
sys.exit(1)
required_input_keys = {'im_data', 'im_info'}
assert required_input_keys == set(mask_rcnn_net.inputs.keys()), \
'Demo supports only topologies with the following input keys: {}'.format(', '.join(required_input_keys))
required_output_keys = {'boxes', 'scores', 'classes', 'raw_masks', 'text_features'}
assert required_output_keys.issubset(mask_rcnn_net.outputs.keys()), \
'Demo supports only topologies with the following output keys: {}'.format(', '.join(required_output_keys))
n, c, h, w = mask_rcnn_net.inputs['im_data'].shape
assert n == 1, 'Only batch 1 is supported by the demo application'
log.info('Loading IR to the plugin...')
mask_rcnn_exec_net = ie.load_network(network=mask_rcnn_net, device_name=args.device, num_requests=2)
text_enc_exec_net = ie.load_network(network=text_enc_net, device_name=args.device)
text_dec_exec_net = ie.load_network(network=text_dec_net, device_name=args.device)
for name, input in text_dec_exec_net.inputs.items():
if len(input.shape) == 3:
if input.shape[1] == 1:
trd_input_prev_hidden = name
else:
trd_input_encoder_outputs = name
elif len(input.shape) == 1:
trd_input_prev_symbol = name
for name, output in text_dec_exec_net.outputs.items():
if len(output.shape) == 3:
trd_output_cur_hidden = name
elif len(output.shape) == 2:
trd_output_symbols_distr = name
hidden_shape = text_dec_net.inputs[trd_input_prev_hidden].shape
del mask_rcnn_net
del text_enc_net
del text_dec_net
try:
input_source = int(args.input_source)
except ValueError:
input_source = args.input_source
if os.path.isdir(input_source):
cap = FolderCapture(input_source)
else:
cap = cv2.VideoCapture(input_source)
if not cap.isOpened():
log.error('Failed to open "{}"'.format(args.input_source))
if isinstance(cap, cv2.VideoCapture):
cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
if args.no_track:
tracker = None
else:
tracker = StaticIOUTracker()
visualizer = Visualizer(['__background__', 'text'], show_boxes=args.show_boxes, show_scores=args.show_scores)
render_time = 0
log.info('Starting inference...')
print("To close the application, press 'CTRL+C' here or switch to the output window and press ESC key")
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
if not args.keep_aspect_ratio:
# Resize the image to a target size.
scale_x = w / frame.shape[1]
scale_y = h / frame.shape[0]
input_image = cv2.resize(frame, (w, h))
else:
# Resize the image to keep the same aspect ratio and to fit it to a window of a target size.
scale_x = scale_y = min(h / frame.shape[0], w / frame.shape[1])
input_image = cv2.resize(frame, None, fx=scale_x, fy=scale_y)
input_image_size = input_image.shape[:2]
input_image = np.pad(input_image, ((0, h - input_image_size[0]),
(0, w - input_image_size[1]),
(0, 0)),
mode='constant', constant_values=0)
# Change data layout from HWC to CHW.
input_image = input_image.transpose((2, 0, 1))
input_image = input_image.reshape((n, c, h, w)).astype(np.float32)
input_image_info = np.asarray([[input_image_size[0], input_image_size[1], 1]], dtype=np.float32)
# Run the net.
inf_start = time.time()
outputs = mask_rcnn_exec_net.infer({'im_data': input_image, 'im_info': input_image_info})
# Parse detection results of the current request
boxes = outputs['boxes']
scores = outputs['scores']
classes = outputs['classes'].astype(np.uint32)
raw_masks = outputs['raw_masks']
text_features = outputs['text_features']
# Filter out detections with low confidence.
detections_filter = scores > args.prob_threshold
scores = scores[detections_filter]
classes = classes[detections_filter]
boxes = boxes[detections_filter]
raw_masks = raw_masks[detections_filter]
text_features = text_features[detections_filter]
boxes[:, 0::2] /= scale_x
boxes[:, 1::2] /= scale_y
masks = []
for box, cls, raw_mask in zip(boxes, classes, raw_masks):
raw_cls_mask = raw_mask[cls, ...]
mask = segm_postprocess(box, raw_cls_mask, frame.shape[0], frame.shape[1])
masks.append(mask)
texts = []
for feature in text_features:
feature = text_enc_exec_net.infer({'input': feature})
feature = list(feature.values())[0]
feature = np.reshape(feature, (feature.shape[0], feature.shape[1], -1))
feature = np.transpose(feature, (0, 2, 1))
hidden = np.zeros(hidden_shape)
prev_symbol_index = np.ones((1,)) * SOS_INDEX
text = ''
for i in range(MAX_SEQ_LEN):
decoder_output = text_dec_exec_net.infer({
trd_input_prev_symbol: prev_symbol_index,
trd_input_prev_hidden: hidden,
trd_input_encoder_outputs: feature})
symbols_distr = decoder_output[trd_output_symbols_distr]
prev_symbol_index = int(np.argmax(symbols_distr, axis=1))
if prev_symbol_index == EOS_INDEX:
break
text += args.alphabet[prev_symbol_index]
hidden = decoder_output[trd_output_cur_hidden]
texts.append(text)
inf_end = time.time()
inf_time = inf_end - inf_start
render_start = time.time()
if len(boxes) and args.raw_output_message:
log.info('Detected boxes:')
log.info(' Class ID | Confidence | XMIN | YMIN | XMAX | YMAX ')
for box, cls, score, mask in zip(boxes, classes, scores, masks):
log.info('{:>10} | {:>10f} | {:>8.2f} | {:>8.2f} | {:>8.2f} | {:>8.2f} '.format(cls, score, *box))
# Get instance track IDs.
masks_tracks_ids = None
if tracker is not None:
masks_tracks_ids = tracker(masks, classes)
# Visualize masks.
frame = visualizer(frame, boxes, classes, scores, masks, texts, masks_tracks_ids)
# Draw performance stats.
inf_time_message = 'Inference and post-processing time: {:.3f} ms'.format(inf_time * 1000)
render_time_message = 'OpenCV rendering time: {:.3f} ms'.format(render_time * 1000)
cv2.putText(frame, inf_time_message, (15, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
cv2.putText(frame, render_time_message, (15, 30), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
# Print performance counters.
if args.perf_counts:
perf_counts = mask_rcnn_exec_net.requests[0].get_perf_counts()
log.info('Performance counters:')
print('{:<70} {:<15} {:<15} {:<15} {:<10}'.format('name', 'layer_type', 'exec_type', 'status',
'real_time, us'))
for layer, stats in perf_counts.items():
print('{:<70} {:<15} {:<15} {:<15} {:<10}'.format(layer, stats['layer_type'], stats['exec_type'],
stats['status'], stats['real_time']))
if not args.no_show:
# Show resulting image.
cv2.imshow('Results', frame)
render_end = time.time()
render_time = render_end - render_start
if not args.no_show:
key = cv2.waitKey(args.delay)
esc_code = 27
if key == esc_code:
break
cv2.destroyAllWindows()
cap.release()
|
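In the demo above, the keep_aspect_ratio path resizes the frame by a single scale factor and zero-pads it up to the network input size before the HWC-to-CHW transpose. A minimal sketch of just that preprocessing step, assuming OpenCV and NumPy; the 768x1280 target and the all-zero dummy frame are illustrative values only:

import cv2
import numpy as np

def resize_and_pad(frame, h, w):
    # One scale factor preserves the aspect ratio; zero padding fills the remainder
    scale = min(h / frame.shape[0], w / frame.shape[1])
    resized = cv2.resize(frame, None, fx=scale, fy=scale)
    padded = np.pad(resized,
                    ((0, h - resized.shape[0]), (0, w - resized.shape[1]), (0, 0)),
                    mode='constant', constant_values=0)
    return padded, scale

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # dummy input frame
padded, scale = resize_and_pad(frame, 768, 1280)
print(padded.shape, scale)  # (768, 1280, 3) 1.6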
55,062 | def sparse_hamiltonian(H):
r"""Computes the sparse matrix representation a Hamiltonian in the computational basis.
Args:
H (~.Hamiltonian): Hamiltonian operator for which the matrix representation should be
computed
Returns:
coo_matrix: a sparse matrix in scipy COOrdinate format with the dimension of
:math:`(2^n, 2^n)` where :math:`n` is the number of wires
**Example:**
This function can be used by passing a `qml.Hamiltonian` object as:
>>> coeffs = [0.5, 0.5]
>>> obs = [qml.PauliZ(wires=[0]) @ qml.PauliZ(wires=[1]),
... qml.Identity(wires=[0]) @ qml.PauliZ(wires=[1])]
>>> H = qml.Hamiltonian(coeffs, obs)
>>> H_sparse = sparse_hamiltonian(H)
The resulting sparse matrix can be either used directly or transformed into a numpy array:
>>> H_sparse.toarray()
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]])
"""
if not isinstance(H, qml.Hamiltonian):
raise TypeError("Passed Hamiltonian must be of type `qml.Hamiltonian`")
n = len(H.wires)
matrix = scipy.sparse.coo_matrix((2 ** n, 2 ** n), dtype="complex128")
for coeffs, ops in zip(H.coeffs, H.ops):
obs = [scipy.sparse.coo_matrix(o.matrix) for o in ops.obs]
mat = [scipy.sparse.eye(2, format="coo")] * n
for i, j in enumerate(ops.wires):
mat[j] = obs[i]
matrix += functools.reduce(lambda i, j: scipy.sparse.kron(i, j, format="coo"), mat) * coeffs
return matrix.tocoo()
| def sparse_hamiltonian(H):
r"""Computes the sparse matrix representation a Hamiltonian in the computational basis.
Args:
H (~.Hamiltonian): Hamiltonian operator for which the matrix representation should be
computed
Returns:
coo_matrix: a sparse matrix in scipy COOrdinate format with the dimension of
:math:`(2^n, 2^n)` where :math:`n` is the number of wires
**Example:**
This function can be used by passing a `qml.Hamiltonian` object as:
>>> coeffs = [0.5, 0.5]
>>> obs = [qml.PauliZ(0) @ qml.PauliZ(1), qml.PauliZ(1)]
>>> H = qml.Hamiltonian(coeffs, obs)
>>> H_sparse = sparse_hamiltonian(H)
The resulting sparse matrix can be either used directly or transformed into a numpy array:
>>> H_sparse.toarray()
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, -1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j]])
"""
if not isinstance(H, qml.Hamiltonian):
raise TypeError("Passed Hamiltonian must be of type `qml.Hamiltonian`")
n = len(H.wires)
matrix = scipy.sparse.coo_matrix((2 ** n, 2 ** n), dtype="complex128")
for coeffs, ops in zip(H.coeffs, H.ops):
obs = [scipy.sparse.coo_matrix(o.matrix) for o in ops.obs]
mat = [scipy.sparse.eye(2, format="coo")] * n
for i, j in enumerate(ops.wires):
mat[j] = obs[i]
matrix += functools.reduce(lambda i, j: scipy.sparse.kron(i, j, format="coo"), mat) * coeffs
return matrix.tocoo()
|
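The loop above assembles each Hamiltonian term as a Kronecker product of 2x2 sparse factors, placing the observable's matrix on its wire and the identity on every other wire. A standalone sketch of that construction for a single 0.5 * Z⊗Z term on two wires, using SciPy only (the Pauli-Z matrix is written out by hand rather than taken from an operator object):

import functools
import numpy as np
import scipy.sparse

Z = scipy.sparse.coo_matrix(np.array([[1, 0], [0, -1]], dtype=complex))
coeff = 0.5
factors = [Z, Z]  # one 2x2 factor per wire; untouched wires would get scipy.sparse.eye(2, format="coo")
term = coeff * functools.reduce(lambda a, b: scipy.sparse.kron(a, b, format="coo"), factors)
print(term.toarray().real.diagonal())  # [ 0.5 -0.5 -0.5  0.5]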
50,229 | def test_match_orders_with_new_user(customer_user):
def get_order_user_by_customer_email(email):
return Order.objects.filter(user_email=email).first().user
customer_email = customer_user.email
address = customer_user.default_billing_address.get_copy()
Order.objects.create(
billing_address=address, user=None, user_email=customer_email,
)
assert get_order_user_by_customer_email(customer_email) is None
match_orders_with_new_user(customer_user)
assert get_order_user_by_customer_email(customer_email) == customer_user
| def test_match_orders_with_new_user(customer_user):
def get_order_user_by_customer_email(email):
return Order.objects.filter(user_email=email).first().user
customer_email = customer_user.email
address = customer_user.default_billing_address.get_copy()
order = Order.objects.create(
billing_address=address, user=None, user_email=customer_email,
)
match_orders_with_new_user(customer_user)
order.refresh_from_db()
assert order.user == customer_user
|
5,795 | def ascent():
"""
Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy use in demos
The image is derived from accent-to-the-top.jpg at
http://www.public-domain-image.com/people-public-domain-images-pictures/
Parameters
----------
None
Returns
-------
ascent : ndarray
convenient image to use for testing and demonstration
Examples
--------
>>> import scipy.misc
>>> ascent = scipy.misc.ascent()
>>> ascent.shape
(512, 512)
>>> ascent.max()
255
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(ascent)
>>> plt.show()
"""
import pickle
import os
import numpy as np
fname = os.path.join(os.path.dirname(__file__),'ascent.dat')
with open(fname, 'rb') as f:
ascent = array(pickle.load(f))
return ascent.astype(np.uint8)
| def ascent():
"""
Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy use in demos
The image is derived from accent-to-the-top.jpg at
http://www.public-domain-image.com/people-public-domain-images-pictures/
Parameters
----------
None
Returns
-------
ascent : ndarray
convenient image to use for testing and demonstration
Examples
--------
>>> import scipy.misc
>>> ascent = scipy.misc.ascent()
>>> ascent.shape
(512, 512)
>>> ascent.max()
255
>>> import matplotlib.pyplot as plt
>>> plt.gray()
>>> plt.imshow(ascent)
>>> plt.show()
"""
import pickle
import os
import numpy as np
fname = os.path.join(os.path.dirname(__file__),'ascent.dat')
with open(fname, 'rb') as f:
ascent = np.array(pickle.load(f), dtype=np.uint8)
return ascent.astype(np.uint8)
|
35,901 | def test_defaults_for_casper_votes(sample_slashable_vote_data_params):
votes = SlashableVoteData(**sample_slashable_vote_data_params)
assert (votes.aggregate_signature_poc_0_indices ==
sample_slashable_vote_data_params['aggregate_signature_poc_0_indices'])
assert (votes.aggregate_signature_poc_1_indices ==
sample_slashable_vote_data_params['aggregate_signature_poc_1_indices'])
assert votes.data == sample_slashable_vote_data_params['data']
assert votes.aggregate_signature == sample_slashable_vote_data_params['aggregate_signature']
| def test_defaults_for_slashable_vote_data(sample_slashable_vote_data_params):
votes = SlashableVoteData(**sample_slashable_vote_data_params)
assert (votes.aggregate_signature_poc_0_indices ==
sample_slashable_vote_data_params['aggregate_signature_poc_0_indices'])
assert (votes.aggregate_signature_poc_1_indices ==
sample_slashable_vote_data_params['aggregate_signature_poc_1_indices'])
assert votes.data == sample_slashable_vote_data_params['data']
assert votes.aggregate_signature == sample_slashable_vote_data_params['aggregate_signature']
|
41,035 | def selcomps(seldict, comptable, mmix, manacc, n_echos):
"""
Classify components in seldict as "accepted," "rejected," or "ignored."
The selection process uses previously calculated parameters listed in `seldict`
for each ICA component such as Kappa (a T2* weighting metric), Rho (an S0 weighting metric),
and variance explained. See `Notes` for additional calculated metrics used to
classify each component into one of the three listed groups.
Parameters
----------
seldict : :obj:`dict`
A dictionary with component-specific features used for classification.
As output from `fitmodels_direct`
comptable : (C x 5) :obj:`pandas.DataFrame`
Component metric table
mmix : (T x C) array_like
Mixing matrix for converting input data to component space, where `C`
is components and `T` is the number of volumes in the original data
manacc : :obj:`list`
Comma-separated list of indices of manually accepted components
n_echos : :obj:`int`
Number of echos in original data
Returns
-------
comptable : :obj:`pandas.DataFrame`
Updated component table with additional metrics and with
classification (accepted, rejected, or ignored)
Notes
-----
The selection algorithm used in this function originated in ME-ICA
by Prantik Kundu, and his original implementation is available at:
https://github.com/ME-ICA/me-ica/blob/b2781dd087ab9de99a2ec3925f04f02ce84f0adc/meica.libs/select_model.py
This component selection process uses multiple, previously calculated metrics that include:
kappa, rho, variance explained, component spatial weighting maps, noise and spatial
frequency metrics, and measures of spatial overlap across metrics.
Prantik began to update these selection criteria to use SVMs to
distinguish components, a hypercommented version of this attempt is available at:
https://gist.github.com/emdupre/ca92d52d345d08ee85e104093b81482e
"""
cols_at_end = ['classification', 'rationale']
comptable['classification'] = 'accepted'
comptable['rationale'] = ''
Z_maps = seldict['Z_maps']
Z_clmaps = seldict['Z_clmaps']
F_R2_maps = seldict['F_R2_maps']
F_S0_clmaps = seldict['F_S0_clmaps']
F_R2_clmaps = seldict['F_R2_clmaps']
Br_S0_clmaps = seldict['Br_S0_clmaps']
Br_R2_clmaps = seldict['Br_R2_clmaps']
n_vols, n_comps = mmix.shape
# Set knobs
LOW_PERC = 25
HIGH_PERC = 90
if n_vols < 100:
EXTEND_FACTOR = 3
else:
EXTEND_FACTOR = 2
RESTRICT_FACTOR = 2
# Lists of components
all_comps = np.arange(comptable.shape[0])
# unclf is a full list that is whittled down over criteria
# since the default classification is "accepted", at the end of the tree
# the remaining elements in unclf match up to the accepted components
unclf = np.arange(comptable.shape[0])
# If the user has manually specified accepted components
if manacc:
acc = sorted([int(vv) for vv in manacc.split(',')])
rej = sorted(np.setdiff1d(all_comps, acc))
comptable.loc[acc, 'classification'] = 'accepted'
comptable.loc[rej, 'classification'] = 'rejected'
comptable.loc[rej, 'rationale'] += 'I001;'
# Move decision columns to end
comptable = comptable[[c for c in comptable if c not in cols_at_end] +
[c for c in cols_at_end if c in comptable]]
comptable['rationale'] = comptable['rationale'].str.rstrip(';')
return comptable
"""
Tally number of significant voxels for cluster-extent thresholded R2 and S0
model F-statistic maps.
"""
comptable['countsigFR2'] = F_R2_clmaps.sum(axis=0)
comptable['countsigFS0'] = F_S0_clmaps.sum(axis=0)
"""
Generate Dice values for R2 and S0 models
- dice_FR2: Dice value of cluster-extent thresholded maps of R2-model betas
and F-statistics.
- dice_FS0: Dice value of cluster-extent thresholded maps of S0-model betas
and F-statistics.
"""
comptable['dice_FR2'] = np.zeros(all_comps.shape[0])
comptable['dice_FS0'] = np.zeros(all_comps.shape[0])
for i_comp in all_comps:
comptable.loc[i_comp, 'dice_FR2'] = utils.dice(Br_R2_clmaps[:, i_comp],
F_R2_clmaps[:, i_comp])
comptable.loc[i_comp, 'dice_FS0'] = utils.dice(Br_S0_clmaps[:, i_comp],
F_S0_clmaps[:, i_comp])
comptable.loc[np.isnan(comptable['dice_FR2']), 'dice_FR2'] = 0
comptable.loc[np.isnan(comptable['dice_FS0']), 'dice_FS0'] = 0
"""
Generate three metrics of component noise:
- countnoise: Number of "noise" voxels (voxels highly weighted for
component, but not from clusters)
- signal-noise_t: T-statistic for two-sample t-test of F-statistics from
"signal" voxels (voxels in clusters) against "noise" voxels (voxels not
in clusters) for R2 model.
- signal-noise_p: P-value from t-test.
"""
comptable['countnoise'] = 0
comptable['signal-noise_t'] = 0
comptable['signal-noise_p'] = 0
for i_comp in all_comps:
# index voxels significantly loading on component but not from clusters
comp_noise_sel = ((np.abs(Z_maps[:, i_comp]) > 1.95) &
(Z_clmaps[:, i_comp] == 0))
comptable.loc[i_comp, 'countnoise'] = np.array(
comp_noise_sel, dtype=np.int).sum()
# NOTE: Why only compare distributions of *unique* F-statistics?
noise_FR2_Z = np.log10(np.unique(F_R2_maps[comp_noise_sel, i_comp]))
signal_FR2_Z = np.log10(np.unique(
F_R2_maps[Z_clmaps[:, i_comp] == 1, i_comp]))
(comptable.loc[i_comp, 'signal-noise_t'],
comptable.loc[i_comp, 'signal-noise_p']) = stats.ttest_ind(
signal_FR2_Z, noise_FR2_Z, equal_var=False)
comptable.loc[np.isnan(comptable['signal-noise_t']), 'signal-noise_t'] = 0
comptable.loc[np.isnan(comptable['signal-noise_p']), 'signal-noise_p'] = 0
"""
Assemble decision table with five metrics:
- Kappa values ranked from largest to smallest
- R2-model F-score map/beta map Dice scores ranked from largest to smallest
- Signal F > Noise F t-statistics ranked from largest to smallest
- Number of "noise" voxels (voxels highly weighted for component, but not
from clusters) ranked from smallest to largest
- Number of voxels with significant R2-model F-scores within clusters
ranked from largest to smallest
Smaller values (i.e., higher ranks) across metrics indicate more BOLD
dependence and less noise.
"""
d_table_rank = np.vstack([
n_comps - stats.rankdata(comptable['kappa']),
n_comps - stats.rankdata(comptable['dice_FR2']),
n_comps - stats.rankdata(comptable['signal-noise_t']),
stats.rankdata(comptable['countnoise']),
n_comps - stats.rankdata(comptable['countsigFR2'])]).T
comptable['d_table_score'] = d_table_rank.mean(axis=1)
"""
Step 1: Reject anything that's obviously an artifact
a. Estimate a null variance
"""
# Rho is higher than Kappa
temp_rej0a = all_comps[(comptable['rho'] > comptable['kappa'])]
comptable.loc[temp_rej0a, 'classification'] = 'rejected'
comptable.loc[temp_rej0a, 'rationale'] += 'I002;'
# Number of significant voxels for S0 model is higher than number for R2
# model *and* number for R2 model is greater than zero.
temp_rej0b = all_comps[((comptable['countsigFS0'] > comptable['countsigFR2']) &
(comptable['countsigFR2'] > 0))]
comptable.loc[temp_rej0b, 'classification'] = 'rejected'
comptable.loc[temp_rej0b, 'rationale'] += 'I003;'
rej = np.union1d(temp_rej0a, temp_rej0b)
# Dice score for S0 maps is higher than Dice score for R2 maps and variance
# explained is higher than the median across components.
temp_rej1 = all_comps[(comptable['dice_FS0'] > comptable['dice_FR2']) &
(comptable['variance explained'] >
np.median(comptable['variance explained']))]
comptable.loc[temp_rej1, 'classification'] = 'rejected'
comptable.loc[temp_rej1, 'rationale'] += 'I004;'
rej = np.union1d(temp_rej1, rej)
# T-value is less than zero (noise has higher F-statistics than signal in
# map) and variance explained is higher than the median across components.
temp_rej2 = unclf[(comptable.loc[unclf, 'signal-noise_t'] < 0) &
(comptable.loc[unclf, 'variance explained'] >
np.median(comptable['variance explained']))]
comptable.loc[temp_rej2, 'classification'] = 'rejected'
comptable.loc[temp_rej2, 'rationale'] += 'I005;'
rej = np.union1d(temp_rej2, rej)
unclf = np.setdiff1d(unclf, rej)
"""
Step 2: Make a guess for what the good components are, in order to
estimate good component properties
a. Not outlier variance
b. Kappa>kappa_elbow
c. Rho<Rho_elbow
d. High R2* dice compared to S0 dice
e. Gain of F_R2 in clusters vs noise
f. Estimate a low and high variance
"""
# Step 2a
# Upper limit for variance explained is median across components with high
# Kappa values. High Kappa is defined as Kappa above Kappa elbow.
varex_upper_p = np.median(
comptable.loc[comptable['kappa'] > getelbow(comptable['kappa'], return_val=True),
'variance explained'])
ncls = unclf.copy()
# NOTE: We're not sure why this is done, nor why it's specifically done
# three times. Need to look into this deeper, esp. to make sure the 3
# isn't a hard-coded reference to the number of echoes.
# Reduce components to investigate as "good" to ones in which change in
# variance explained is less than the limit defined above.... What?
for i_loop in range(3):
ncls = comptable.loc[ncls].loc[
comptable.loc[
ncls, 'variance explained'].diff() < varex_upper_p].index.values
# Compute elbows from other elbows
f05, _, f01 = utils.getfbounds(n_echos)
kappas_nonsig = comptable.loc[comptable['kappa'] < f01, 'kappa']
# NOTE: Would an elbow from all Kappa values *ever* be lower than one from
# a subset of lower values?
kappa_elbow = np.min((getelbow(kappas_nonsig, return_val=True),
getelbow(comptable['kappa'], return_val=True)))
rho_elbow = np.mean((getelbow(comptable.loc[ncls, 'rho'], return_val=True),
getelbow(comptable['rho'], return_val=True),
f05))
# Provisionally accept components based on Kappa and Rho elbows
acc_prov = ncls[(comptable.loc[ncls, 'kappa'] >= kappa_elbow) &
(comptable.loc[ncls, 'rho'] < rho_elbow)]
if len(acc_prov) == 0:
LGR.warning('No BOLD-like components detected')
ign = sorted(np.setdiff1d(all_comps, rej))
comptable.loc[ign, 'classification'] = 'ignored'
comptable.loc[ign, 'rationale'] += 'I006;'
# Move decision columns to end
comptable = comptable[[c for c in comptable if c not in cols_at_end] +
[c for c in cols_at_end if c in comptable]]
comptable['rationale'] = comptable['rationale'].str.rstrip(';')
return comptable
# Calculate "rate" for kappa: kappa range divided by variance explained
# range, for potentially accepted components
# NOTE: What is the logic behind this?
kappa_rate = ((np.max(comptable.loc[acc_prov, 'kappa']) -
np.min(comptable.loc[acc_prov, 'kappa'])) /
(np.max(comptable.loc[acc_prov, 'variance explained']) -
np.min(comptable.loc[acc_prov, 'variance explained'])))
comptable['kappa ratio'] = kappa_rate * comptable['variance explained'] / comptable['kappa']
varex_lower = stats.scoreatpercentile(
comptable.loc[acc_prov, 'variance explained'], LOW_PERC)
varex_upper = stats.scoreatpercentile(
comptable.loc[acc_prov, 'variance explained'], HIGH_PERC)
"""
Step 3: Get rid of midk components; i.e., those with higher than
max decision score and high variance
"""
max_good_d_score = EXTEND_FACTOR * len(acc_prov)
midk = unclf[(comptable.loc[unclf, 'd_table_score'] > max_good_d_score) &
(comptable.loc[unclf, 'variance explained'] > EXTEND_FACTOR * varex_upper)]
comptable.loc[midk, 'classification'] = 'rejected'
comptable.loc[midk, 'rationale'] += 'I007;'
unclf = np.setdiff1d(unclf, midk)
acc_prov = np.setdiff1d(acc_prov, midk)
"""
Step 4: Find components to ignore
"""
# collect high variance unclassified components
# and mix of high/low provisionally accepted
high_varex = np.union1d(
acc_prov,
unclf[comptable.loc[unclf, 'variance explained'] > varex_lower])
# ignore low variance components
ign = np.setdiff1d(unclf, high_varex)
# but only if they have bad decision scores
ign = np.setdiff1d(
ign, ign[comptable.loc[ign, 'd_table_score'] < max_good_d_score])
# and low kappa
ign = np.setdiff1d(ign, ign[comptable.loc[ign, 'kappa'] > kappa_elbow])
comptable.loc[ign, 'classification'] = 'ignored'
comptable.loc[ign, 'rationale'] += 'I008;'
unclf = np.setdiff1d(unclf, ign)
"""
Step 5: Scrub the set if there are components that haven't been rejected or
ignored, but are still not listed in the provisionally accepted group.
"""
if len(unclf) > len(acc_prov):
comptable['d_table_score_scrub'] = np.nan
# Recompute the midk steps on the limited set to clean up the tail
d_table_rank = np.vstack([
len(unclf) - stats.rankdata(comptable.loc[unclf, 'kappa']),
len(unclf) - stats.rankdata(comptable.loc[unclf, 'dice_FR2']),
len(unclf) - stats.rankdata(comptable.loc[unclf, 'signal-noise_t']),
stats.rankdata(comptable.loc[unclf, 'countnoise']),
len(unclf) - stats.rankdata(comptable.loc[unclf, 'countsigFR2'])]).T
comptable.loc[unclf, 'd_table_score_scrub'] = d_table_rank.mean(1)
num_acc_guess = int(np.mean([
np.sum((comptable.loc[unclf, 'kappa'] > kappa_elbow) &
(comptable.loc[unclf, 'rho'] < rho_elbow)),
np.sum(comptable.loc[unclf, 'kappa'] > kappa_elbow)]))
# Rejection candidate based on artifact type A: candartA
conservative_guess = num_acc_guess / RESTRICT_FACTOR
candartA = np.intersect1d(
unclf[comptable.loc[unclf, 'd_table_score_scrub'] > conservative_guess],
unclf[comptable.loc[unclf, 'kappa ratio'] > EXTEND_FACTOR * 2])
candartA = (candartA[comptable.loc[candartA, 'variance explained'] >
varex_upper * EXTEND_FACTOR])
comptable.loc[candartA, 'classification'] = 'rejected'
comptable.loc[candartA, 'rationale'] += 'I009;'
midk = np.union1d(midk, candartA)
unclf = np.setdiff1d(unclf, midk)
# Rejection candidate based on artifact type B: candartB
conservative_guess2 = num_acc_guess * HIGH_PERC / 100.
candartB = unclf[comptable.loc[unclf, 'd_table_score_scrub'] > conservative_guess2]
candartB = (candartB[comptable.loc[candartB, 'variance explained'] >
varex_lower * EXTEND_FACTOR])
comptable.loc[candartB, 'classification'] = 'rejected'
comptable.loc[candartB, 'rationale'] += 'I010;'
midk = np.union1d(midk, candartB)
unclf = np.setdiff1d(unclf, midk)
# Find components to ignore
# Ignore high variance explained, poor decision tree scored components
new_varex_lower = stats.scoreatpercentile(
comptable.loc[unclf[:num_acc_guess], 'variance explained'],
LOW_PERC)
candart = unclf[comptable.loc[unclf, 'd_table_score_scrub'] > num_acc_guess]
ign_add0 = candart[comptable.loc[candart, 'variance explained'] > new_varex_lower]
ign_add0 = np.setdiff1d(ign_add0, midk)
comptable.loc[ign_add0, 'classification'] = 'ignored'
comptable.loc[ign_add0, 'rationale'] += 'I011;'
ign = np.union1d(ign, ign_add0)
unclf = np.setdiff1d(unclf, ign)
# Ignore low Kappa, high variance explained components
ign_add1 = np.intersect1d(
unclf[comptable.loc[unclf, 'kappa'] <= kappa_elbow],
unclf[comptable.loc[unclf, 'variance explained'] > new_varex_lower])
ign_add1 = np.setdiff1d(ign_add1, midk)
comptable.loc[ign_add1, 'classification'] = 'ignored'
comptable.loc[ign_add1, 'rationale'] += 'I012;'
# at this point, unclf is equivalent to accepted
# Move decision columns to end
comptable = comptable[[c for c in comptable if c not in cols_at_end] +
[c for c in cols_at_end if c in comptable]]
comptable['rationale'] = comptable['rationale'].str.rstrip(';')
return comptable
| def selcomps(seldict, comptable, mmix, manacc, n_echos):
"""
Classify components in seldict as "accepted," "rejected," or "ignored."
The selection process uses previously calculated parameters listed in `seldict`
for each ICA component such as Kappa (a T2* weighting metric), Rho (an S0 weighting metric),
and variance explained. See `Notes` for additional calculated metrics used to
classify each component into one of the three listed groups.
Parameters
----------
seldict : :obj:`dict`
A dictionary with component-specific features used for classification.
As output from `fitmodels_direct`
comptable : (C x 5) :obj:`pandas.DataFrame`
Component metric table
mmix : (T x C) array_like
Mixing matrix for converting input data to component space, where `C`
is components and `T` is the number of volumes in the original data
manacc : :obj:`list`
Comma-separated list of indices of manually accepted components
n_echos : :obj:`int`
Number of echos in original data
Returns
-------
comptable : :obj:`pandas.DataFrame`
Updated component table with additional metrics and with
classification (accepted, rejected, or ignored)
Notes
-----
The selection algorithm used in this function originated in ME-ICA
by Prantik Kundu, and his original implementation is available at:
https://github.com/ME-ICA/me-ica/blob/b2781dd087ab9de99a2ec3925f04f02ce84f0adc/meica.libs/select_model.py
This component selection process uses multiple, previously calculated metrics that include:
kappa, rho, variance explained, component spatial weighting maps, noise and spatial
frequency metrics, and measures of spatial overlap across metrics.
Prantik began to update these selection criteria to use SVMs to
distinguish components, a hypercommented version of this attempt is available at:
https://gist.github.com/emdupre/ca92d52d345d08ee85e104093b81482e
"""
cols_at_end = ['classification', 'rationale']
comptable['classification'] = 'accepted'
comptable['rationale'] = ''
Z_maps = seldict['Z_maps']
Z_clmaps = seldict['Z_clmaps']
F_R2_maps = seldict['F_R2_maps']
F_S0_clmaps = seldict['F_S0_clmaps']
F_R2_clmaps = seldict['F_R2_clmaps']
Br_S0_clmaps = seldict['Br_S0_clmaps']
Br_R2_clmaps = seldict['Br_R2_clmaps']
n_vols, n_comps = mmix.shape
# Set knobs
LOW_PERC = 25
HIGH_PERC = 90
if n_vols < 100:
EXTEND_FACTOR = 3
else:
EXTEND_FACTOR = 2
RESTRICT_FACTOR = 2
# Lists of components
all_comps = np.arange(comptable.shape[0])
# unclf is a full list that is whittled down over criteria
# since the default classification is "accepted", at the end of the tree
# the remaining elements in unclf are classified as accepted
unclf = np.arange(comptable.shape[0])
# If the user has manually specified accepted components
if manacc:
acc = sorted([int(vv) for vv in manacc.split(',')])
rej = sorted(np.setdiff1d(all_comps, acc))
comptable.loc[acc, 'classification'] = 'accepted'
comptable.loc[rej, 'classification'] = 'rejected'
comptable.loc[rej, 'rationale'] += 'I001;'
# Move decision columns to end
comptable = comptable[[c for c in comptable if c not in cols_at_end] +
[c for c in cols_at_end if c in comptable]]
comptable['rationale'] = comptable['rationale'].str.rstrip(';')
return comptable
"""
Tally number of significant voxels for cluster-extent thresholded R2 and S0
model F-statistic maps.
"""
comptable['countsigFR2'] = F_R2_clmaps.sum(axis=0)
comptable['countsigFS0'] = F_S0_clmaps.sum(axis=0)
"""
Generate Dice values for R2 and S0 models
- dice_FR2: Dice value of cluster-extent thresholded maps of R2-model betas
and F-statistics.
- dice_FS0: Dice value of cluster-extent thresholded maps of S0-model betas
and F-statistics.
"""
comptable['dice_FR2'] = np.zeros(all_comps.shape[0])
comptable['dice_FS0'] = np.zeros(all_comps.shape[0])
for i_comp in all_comps:
comptable.loc[i_comp, 'dice_FR2'] = utils.dice(Br_R2_clmaps[:, i_comp],
F_R2_clmaps[:, i_comp])
comptable.loc[i_comp, 'dice_FS0'] = utils.dice(Br_S0_clmaps[:, i_comp],
F_S0_clmaps[:, i_comp])
comptable.loc[np.isnan(comptable['dice_FR2']), 'dice_FR2'] = 0
comptable.loc[np.isnan(comptable['dice_FS0']), 'dice_FS0'] = 0
"""
Generate three metrics of component noise:
- countnoise: Number of "noise" voxels (voxels highly weighted for
component, but not from clusters)
- signal-noise_t: T-statistic for two-sample t-test of F-statistics from
"signal" voxels (voxels in clusters) against "noise" voxels (voxels not
in clusters) for R2 model.
- signal-noise_p: P-value from t-test.
"""
comptable['countnoise'] = 0
comptable['signal-noise_t'] = 0
comptable['signal-noise_p'] = 0
for i_comp in all_comps:
# index voxels significantly loading on component but not from clusters
comp_noise_sel = ((np.abs(Z_maps[:, i_comp]) > 1.95) &
(Z_clmaps[:, i_comp] == 0))
comptable.loc[i_comp, 'countnoise'] = np.array(
comp_noise_sel, dtype=np.int).sum()
# NOTE: Why only compare distributions of *unique* F-statistics?
noise_FR2_Z = np.log10(np.unique(F_R2_maps[comp_noise_sel, i_comp]))
signal_FR2_Z = np.log10(np.unique(
F_R2_maps[Z_clmaps[:, i_comp] == 1, i_comp]))
(comptable.loc[i_comp, 'signal-noise_t'],
comptable.loc[i_comp, 'signal-noise_p']) = stats.ttest_ind(
signal_FR2_Z, noise_FR2_Z, equal_var=False)
comptable.loc[np.isnan(comptable['signal-noise_t']), 'signal-noise_t'] = 0
comptable.loc[np.isnan(comptable['signal-noise_p']), 'signal-noise_p'] = 0
"""
Assemble decision table with five metrics:
- Kappa values ranked from largest to smallest
- R2-model F-score map/beta map Dice scores ranked from largest to smallest
- Signal F > Noise F t-statistics ranked from largest to smallest
- Number of "noise" voxels (voxels highly weighted for component, but not
from clusters) ranked from smallest to largest
- Number of voxels with significant R2-model F-scores within clusters
ranked from largest to smallest
Smaller values (i.e., higher ranks) across metrics indicate more BOLD
dependence and less noise.
"""
d_table_rank = np.vstack([
n_comps - stats.rankdata(comptable['kappa']),
n_comps - stats.rankdata(comptable['dice_FR2']),
n_comps - stats.rankdata(comptable['signal-noise_t']),
stats.rankdata(comptable['countnoise']),
n_comps - stats.rankdata(comptable['countsigFR2'])]).T
comptable['d_table_score'] = d_table_rank.mean(axis=1)
"""
Step 1: Reject anything that's obviously an artifact
a. Estimate a null variance
"""
# Rho is higher than Kappa
temp_rej0a = all_comps[(comptable['rho'] > comptable['kappa'])]
comptable.loc[temp_rej0a, 'classification'] = 'rejected'
comptable.loc[temp_rej0a, 'rationale'] += 'I002;'
# Number of significant voxels for S0 model is higher than number for R2
# model *and* number for R2 model is greater than zero.
temp_rej0b = all_comps[((comptable['countsigFS0'] > comptable['countsigFR2']) &
(comptable['countsigFR2'] > 0))]
comptable.loc[temp_rej0b, 'classification'] = 'rejected'
comptable.loc[temp_rej0b, 'rationale'] += 'I003;'
rej = np.union1d(temp_rej0a, temp_rej0b)
# Dice score for S0 maps is higher than Dice score for R2 maps and variance
# explained is higher than the median across components.
temp_rej1 = all_comps[(comptable['dice_FS0'] > comptable['dice_FR2']) &
(comptable['variance explained'] >
np.median(comptable['variance explained']))]
comptable.loc[temp_rej1, 'classification'] = 'rejected'
comptable.loc[temp_rej1, 'rationale'] += 'I004;'
rej = np.union1d(temp_rej1, rej)
# T-value is less than zero (noise has higher F-statistics than signal in
# map) and variance explained is higher than the median across components.
temp_rej2 = unclf[(comptable.loc[unclf, 'signal-noise_t'] < 0) &
(comptable.loc[unclf, 'variance explained'] >
np.median(comptable['variance explained']))]
comptable.loc[temp_rej2, 'classification'] = 'rejected'
comptable.loc[temp_rej2, 'rationale'] += 'I005;'
rej = np.union1d(temp_rej2, rej)
unclf = np.setdiff1d(unclf, rej)
"""
Step 2: Make a guess for what the good components are, in order to
estimate good component properties
a. Not outlier variance
b. Kappa>kappa_elbow
c. Rho<Rho_elbow
d. High R2* dice compared to S0 dice
e. Gain of F_R2 in clusters vs noise
f. Estimate a low and high variance
"""
# Step 2a
# Upper limit for variance explained is median across components with high
# Kappa values. High Kappa is defined as Kappa above Kappa elbow.
varex_upper_p = np.median(
comptable.loc[comptable['kappa'] > getelbow(comptable['kappa'], return_val=True),
'variance explained'])
ncls = unclf.copy()
# NOTE: We're not sure why this is done, nor why it's specifically done
# three times. Need to look into this deeper, esp. to make sure the 3
# isn't a hard-coded reference to the number of echoes.
# Reduce components to investigate as "good" to ones in which change in
# variance explained is less than the limit defined above.... What?
for i_loop in range(3):
ncls = comptable.loc[ncls].loc[
comptable.loc[
ncls, 'variance explained'].diff() < varex_upper_p].index.values
# Compute elbows from other elbows
f05, _, f01 = utils.getfbounds(n_echos)
kappas_nonsig = comptable.loc[comptable['kappa'] < f01, 'kappa']
# NOTE: Would an elbow from all Kappa values *ever* be lower than one from
# a subset of lower values?
kappa_elbow = np.min((getelbow(kappas_nonsig, return_val=True),
getelbow(comptable['kappa'], return_val=True)))
rho_elbow = np.mean((getelbow(comptable.loc[ncls, 'rho'], return_val=True),
getelbow(comptable['rho'], return_val=True),
f05))
# Provisionally accept components based on Kappa and Rho elbows
acc_prov = ncls[(comptable.loc[ncls, 'kappa'] >= kappa_elbow) &
(comptable.loc[ncls, 'rho'] < rho_elbow)]
if len(acc_prov) == 0:
LGR.warning('No BOLD-like components detected')
ign = sorted(np.setdiff1d(all_comps, rej))
comptable.loc[ign, 'classification'] = 'ignored'
comptable.loc[ign, 'rationale'] += 'I006;'
# Move decision columns to end
comptable = comptable[[c for c in comptable if c not in cols_at_end] +
[c for c in cols_at_end if c in comptable]]
comptable['rationale'] = comptable['rationale'].str.rstrip(';')
return comptable
# Calculate "rate" for kappa: kappa range divided by variance explained
# range, for potentially accepted components
# NOTE: What is the logic behind this?
kappa_rate = ((np.max(comptable.loc[acc_prov, 'kappa']) -
np.min(comptable.loc[acc_prov, 'kappa'])) /
(np.max(comptable.loc[acc_prov, 'variance explained']) -
np.min(comptable.loc[acc_prov, 'variance explained'])))
comptable['kappa ratio'] = kappa_rate * comptable['variance explained'] / comptable['kappa']
varex_lower = stats.scoreatpercentile(
comptable.loc[acc_prov, 'variance explained'], LOW_PERC)
varex_upper = stats.scoreatpercentile(
comptable.loc[acc_prov, 'variance explained'], HIGH_PERC)
"""
Step 3: Get rid of midk components; i.e., those with higher than
max decision score and high variance
"""
max_good_d_score = EXTEND_FACTOR * len(acc_prov)
midk = unclf[(comptable.loc[unclf, 'd_table_score'] > max_good_d_score) &
(comptable.loc[unclf, 'variance explained'] > EXTEND_FACTOR * varex_upper)]
comptable.loc[midk, 'classification'] = 'rejected'
comptable.loc[midk, 'rationale'] += 'I007;'
unclf = np.setdiff1d(unclf, midk)
acc_prov = np.setdiff1d(acc_prov, midk)
"""
Step 4: Find components to ignore
"""
# collect high variance unclassified components
# and mix of high/low provisionally accepted
high_varex = np.union1d(
acc_prov,
unclf[comptable.loc[unclf, 'variance explained'] > varex_lower])
# ignore low variance components
ign = np.setdiff1d(unclf, high_varex)
# but only if they have bad decision scores
ign = np.setdiff1d(
ign, ign[comptable.loc[ign, 'd_table_score'] < max_good_d_score])
# and low kappa
ign = np.setdiff1d(ign, ign[comptable.loc[ign, 'kappa'] > kappa_elbow])
comptable.loc[ign, 'classification'] = 'ignored'
comptable.loc[ign, 'rationale'] += 'I008;'
unclf = np.setdiff1d(unclf, ign)
"""
Step 5: Scrub the set if there are components that haven't been rejected or
ignored, but are still not listed in the provisionally accepted group.
"""
if len(unclf) > len(acc_prov):
comptable['d_table_score_scrub'] = np.nan
# Recompute the midk steps on the limited set to clean up the tail
d_table_rank = np.vstack([
len(unclf) - stats.rankdata(comptable.loc[unclf, 'kappa']),
len(unclf) - stats.rankdata(comptable.loc[unclf, 'dice_FR2']),
len(unclf) - stats.rankdata(comptable.loc[unclf, 'signal-noise_t']),
stats.rankdata(comptable.loc[unclf, 'countnoise']),
len(unclf) - stats.rankdata(comptable.loc[unclf, 'countsigFR2'])]).T
comptable.loc[unclf, 'd_table_score_scrub'] = d_table_rank.mean(1)
num_acc_guess = int(np.mean([
np.sum((comptable.loc[unclf, 'kappa'] > kappa_elbow) &
(comptable.loc[unclf, 'rho'] < rho_elbow)),
np.sum(comptable.loc[unclf, 'kappa'] > kappa_elbow)]))
# Rejection candidate based on artifact type A: candartA
conservative_guess = num_acc_guess / RESTRICT_FACTOR
candartA = np.intersect1d(
unclf[comptable.loc[unclf, 'd_table_score_scrub'] > conservative_guess],
unclf[comptable.loc[unclf, 'kappa ratio'] > EXTEND_FACTOR * 2])
candartA = (candartA[comptable.loc[candartA, 'variance explained'] >
varex_upper * EXTEND_FACTOR])
comptable.loc[candartA, 'classification'] = 'rejected'
comptable.loc[candartA, 'rationale'] += 'I009;'
midk = np.union1d(midk, candartA)
unclf = np.setdiff1d(unclf, midk)
# Rejection candidate based on artifact type B: candartB
conservative_guess2 = num_acc_guess * HIGH_PERC / 100.
candartB = unclf[comptable.loc[unclf, 'd_table_score_scrub'] > conservative_guess2]
candartB = (candartB[comptable.loc[candartB, 'variance explained'] >
varex_lower * EXTEND_FACTOR])
comptable.loc[candartB, 'classification'] = 'rejected'
comptable.loc[candartB, 'rationale'] += 'I010;'
midk = np.union1d(midk, candartB)
unclf = np.setdiff1d(unclf, midk)
# Find components to ignore
# Ignore high variance explained, poor decision tree scored components
new_varex_lower = stats.scoreatpercentile(
comptable.loc[unclf[:num_acc_guess], 'variance explained'],
LOW_PERC)
candart = unclf[comptable.loc[unclf, 'd_table_score_scrub'] > num_acc_guess]
ign_add0 = candart[comptable.loc[candart, 'variance explained'] > new_varex_lower]
ign_add0 = np.setdiff1d(ign_add0, midk)
comptable.loc[ign_add0, 'classification'] = 'ignored'
comptable.loc[ign_add0, 'rationale'] += 'I011;'
ign = np.union1d(ign, ign_add0)
unclf = np.setdiff1d(unclf, ign)
# Ignore low Kappa, high variance explained components
ign_add1 = np.intersect1d(
unclf[comptable.loc[unclf, 'kappa'] <= kappa_elbow],
unclf[comptable.loc[unclf, 'variance explained'] > new_varex_lower])
ign_add1 = np.setdiff1d(ign_add1, midk)
comptable.loc[ign_add1, 'classification'] = 'ignored'
comptable.loc[ign_add1, 'rationale'] += 'I012;'
# at this point, unclf is equivalent to accepted
# Move decision columns to end
comptable = comptable[[c for c in comptable if c not in cols_at_end] +
[c for c in cols_at_end if c in comptable]]
comptable['rationale'] = comptable['rationale'].str.rstrip(';')
return comptable
|
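The rank-based decision score computed near the top of the component-selection code above can be illustrated in isolation. The following is a minimal, self-contained sketch using a synthetic component table; the column names follow the snippet, but the values are made up for illustration and this block is not part of the original module.

import numpy as np
import pandas as pd
from scipy import stats

# toy component table with the five metrics used by the decision score
comptable = pd.DataFrame({
    'kappa': [45.0, 20.0, 80.0],            # higher is more BOLD-like
    'dice_FR2': [0.6, 0.1, 0.8],            # higher is more BOLD-like
    'signal-noise_t': [3.0, -1.0, 5.0],     # higher is more BOLD-like
    'countnoise': [10, 40, 5],              # lower is more BOLD-like
    'countsigFR2': [120, 15, 300],          # higher is more BOLD-like
})
n_comps = len(comptable)

# rank every metric so that smaller numbers mean "more BOLD-like";
# metrics where larger is better are flipped with n_comps - rank
d_table_rank = np.vstack([
    n_comps - stats.rankdata(comptable['kappa']),
    n_comps - stats.rankdata(comptable['dice_FR2']),
    n_comps - stats.rankdata(comptable['signal-noise_t']),
    stats.rankdata(comptable['countnoise']),
    n_comps - stats.rankdata(comptable['countsigFR2']),
]).T
comptable['d_table_score'] = d_table_rank.mean(axis=1)
print(comptable['d_table_score'])   # the third component (index 2) gets the lowest (best) score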
7,337 | def ransac(data, model_class, min_samples, residual_threshold,
is_data_valid=None, is_model_valid=None,
max_trials=100, stop_sample_num=np.inf, stop_residuals_sum=0,
stop_probability=1, random_state=None, initial_inliers=None):
"""Fit a model to data with the RANSAC (random sample consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. Each iteration
performs the following tasks:
1. Select `min_samples` random samples from the original data and check
whether the set of data is valid (see `is_data_valid`).
2. Estimate a model to the random subset
(`model_cls.estimate(*data[random_subset]`) and check whether the
estimated model is valid (see `is_model_valid`).
3. Classify all data as inliers or outliers by calculating the residuals
to the estimated model (`model_cls.residuals(*data)`) - all data samples
with residuals smaller than the `residual_threshold` are considered as
inliers.
4. Save estimated model as best model if number of inlier samples is
maximal. If the current estimated model has the same number of inliers,
it is considered the best model only if it has a lower sum of residuals.
These steps are performed either a maximum number of times or until one of
the special stop criteria are met. The final model is estimated using all
inlier samples of the previously determined best model.
Parameters
----------
data : [list, tuple of] (N, ...) array
Data set to which the model is fitted, where N is the number of data
points and the remaining dimensions depend on model requirements.
If the model class requires multiple input data arrays (e.g. source and
destination coordinates of ``skimage.transform.AffineTransform``),
they can optionally be passed as a tuple or list. Note that in this case
the functions ``estimate(*data)``, ``residuals(*data)``,
``is_model_valid(model, *random_data)`` and
``is_data_valid(*random_data)`` must all take each data array as
separate arguments.
model_class : object
Object with the following object methods:
* ``success = estimate(*data)``
* ``residuals(*data)``
where `success` indicates whether the model estimation succeeded
(`True` or `None` for success, `False` for failure).
min_samples : int in range (0, N)
The minimum number of data points to fit a model to.
residual_threshold : float larger than 0
Maximum distance for a data point to be classified as an inlier.
is_data_valid : function, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(*random_data)`.
is_model_valid : function, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, *random_data)`.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_sample_num : int, optional
Stop iteration if at least this number of inliers are found.
stop_residuals_sum : float, optional
Stop iteration if sum of residuals is less than or equal to this
threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the
training data is sampled with ``probability >= stop_probability``,
depending on the current best model's inlier ratio and the number
of trials. This requires generating at least N samples (trials):
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to a high value
such as 0.99, e is the current fraction of inliers w.r.t. the
total number of samples, and m is the min_samples value.
random_state : {None, int, `numpy.random.Generator`}, optional
If `random_state` is None the `numpy.random.Generator` singleton is
used.
If `random_state` is an int, a new ``Generator`` instance is used,
seeded with `random_state`.
If `random_state` is already a ``Generator`` instance then that
instance is used.
initial_inliers : array-like of bool, shape (N,), optional
Initial selection of samples for model estimation.
Returns
-------
model : object
Best model with largest consensus set.
inliers : (N, ) array
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] "RANSAC", Wikipedia, https://en.wikipedia.org/wiki/RANSAC
Examples
--------
Generate ellipse data without tilt and add noise:
>>> t = np.linspace(0, 2 * np.pi, 50)
>>> xc, yc = 20, 30
>>> a, b = 5, 10
>>> x = xc + a * np.cos(t)
>>> y = yc + b * np.sin(t)
>>> data = np.column_stack([x, y])
>>> rng = np.random.default_rng(203560) # do not copy this value
>>> data += rng.normal(size=data.shape)
Add some faulty data:
>>> data[0] = (100, 100)
>>> data[1] = (110, 120)
>>> data[2] = (120, 130)
>>> data[3] = (140, 130)
Estimate ellipse model using all available data:
>>> model = EllipseModel()
>>> model.estimate(data)
True
>>> np.round(model.params) # doctest: +SKIP
array([ 72., 75., 77., 14., 1.])
Estimate ellipse model using RANSAC:
>>> ransac_model, inliers = ransac(data, EllipseModel, 20, 3, max_trials=50)
>>> abs(np.round(ransac_model.params))
array([20., 30., 10., 6., 2.])
>>> inliers # doctest: +SKIP
array([False, False, False, False, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True], dtype=bool)
>>> sum(inliers) > 40
True
RANSAC can be used to robustly estimate a geometric
transformation. In this section, we also show how to use a
proportion of the total samples, rather than an absolute number.
>>> from skimage.transform import SimilarityTransform
>>> rng = np.random.default_rng()
>>> src = 100 * rng.random((50, 2))
>>> model0 = SimilarityTransform(scale=0.5, rotation=1,
... translation=(10, 20))
>>> dst = model0(src)
>>> dst[0] = (10000, 10000)
>>> dst[1] = (-100, 100)
>>> dst[2] = (50, 50)
>>> ratio = 0.5 # use half of the samples
>>> min_samples = int(ratio * len(src))
>>> model, inliers = ransac((src, dst), SimilarityTransform, min_samples,
... 10,
... initial_inliers=np.ones(len(src), dtype=bool))
>>> inliers # doctest: +SKIP
array([False, False, False, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True])
"""
best_inlier_num = 0
best_inlier_residuals_sum = np.inf
best_inliers = []
validate_model = is_model_valid is not None
validate_data = is_data_valid is not None
random_state = np.random.default_rng(random_state)
# if `data` is not already a tuple or list of arrays, wrap it in one
if not isinstance(data, (tuple, list)):
data = (data, )
num_samples = len(data[0])
if not (0 < min_samples < num_samples):
raise ValueError(f"`min_samples` must be in range (0, {num_samples})")
if residual_threshold < 0:
raise ValueError("`residual_threshold` must be greater than zero")
if max_trials < 0:
raise ValueError("`max_trials` must be greater than zero")
if not (0 <= stop_probability <= 1):
raise ValueError("`stop_probability` must be in range [0, 1]")
if initial_inliers is not None and len(initial_inliers) != num_samples:
raise ValueError(
f"RANSAC received a vector of initial inliers (length "
f"{len(initial_inliers)}) that didn't match the number of "
f"samples ({num_samples}). The vector of initial inliers should "
f"have the same length as the number of samples and contain only "
f"True (this sample is an initial inlier) and False (this one "
f"isn't) values.")
# for the first run use initial guess of inliers
spl_idxs = (initial_inliers if initial_inliers is not None
else random_state.choice(num_samples, min_samples,
replace=False))
# estimate model for current random sample set
model = model_class()
for num_trials in range(max_trials):
# select the current sample set from each data array
samples = [d[spl_idxs] for d in data]
# for next iteration choose random sample set and be sure that
# no samples repeat
spl_idxs = random_state.choice(num_samples, min_samples, replace=False)
# optional check if random sample set is valid
if validate_data and not is_data_valid(*samples):
continue
success = model.estimate(*samples)
# backwards compatibility
if success is not None and not success:
continue
# optional check if estimated model is valid
if validate_model and not is_model_valid(model, *samples):
continue
residuals = np.abs(model.residuals(*data))
# consensus set / inliers
inliers = residuals < residual_threshold
residuals_sum = residuals.dot(residuals)
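# residuals.dot(residuals) is the sum of squared residuals; it is used
# below only to break ties between models with the same number of inliers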
# choose as new best model if number of inliers is maximal
inliers_count = np.count_nonzero(inliers)
if (
# more inliers
inliers_count > best_inlier_num
# same number of inliers but less "error" in terms of residuals
or (inliers_count == best_inlier_num
and residuals_sum < best_inlier_residuals_sum)):
best_inlier_num = inliers_count
best_inlier_residuals_sum = residuals_sum
best_inliers = inliers
dynamic_max_trials = _dynamic_max_trials(best_inlier_num,
num_samples,
min_samples,
stop_probability)
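# _dynamic_max_trials (a helper not shown in this snippet) turns the
# stop_probability rule from the docstring, N >= log(1 - p) / log(1 - e**m),
# into a concrete trial budget based on the current best inlier ratio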
if (best_inlier_num >= stop_sample_num
or best_inlier_residuals_sum <= stop_residuals_sum
or num_trials >= dynamic_max_trials):
break
# estimate final model using all inliers
if any(best_inliers):
# select inliers for each data array
data_inliers = [d[best_inliers] for d in data]
model.estimate(*data_inliers)
if validate_model and not is_model_valid(model, *data_inliers):
warn("Estimated model is not valid. Try increase max_trials.")
else:
model = None
best_inliers = None
warn("No inliers found. Model not fitted")
return model, best_inliers
| def ransac(data, model_class, min_samples, residual_threshold,
is_data_valid=None, is_model_valid=None,
max_trials=100, stop_sample_num=np.inf, stop_residuals_sum=0,
stop_probability=1, random_state=None, initial_inliers=None):
"""Fit a model to data with the RANSAC (random sample consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. Each iteration
performs the following tasks:
1. Select `min_samples` random samples from the original data and check
whether the set of data is valid (see `is_data_valid`).
2. Estimate a model to the random subset
(`model_cls.estimate(*data[random_subset]`) and check whether the
estimated model is valid (see `is_model_valid`).
3. Classify all data as inliers or outliers by calculating the residuals
to the estimated model (`model_cls.residuals(*data)`) - all data samples
with residuals smaller than the `residual_threshold` are considered as
inliers.
4. Save estimated model as best model if number of inlier samples is
maximal. If the current estimated model has the same number of inliers,
it is considered the best model only if it has a lower sum of residuals.
These steps are performed either a maximum number of times or until one of
the special stop criteria are met. The final model is estimated using all
inlier samples of the previously determined best model.
Parameters
----------
data : [list, tuple of] (N, ...) array
Data set to which the model is fitted, where N is the number of data
points and the remaining dimensions depend on model requirements.
If the model class requires multiple input data arrays (e.g. source and
destination coordinates of ``skimage.transform.AffineTransform``),
they can optionally be passed as a tuple or list. Note that in this case
the functions ``estimate(*data)``, ``residuals(*data)``,
``is_model_valid(model, *random_data)`` and
``is_data_valid(*random_data)`` must all take each data array as
separate arguments.
model_class : object
Object with the following object methods:
* ``success = estimate(*data)``
* ``residuals(*data)``
where `success` indicates whether the model estimation succeeded
(`True` or `None` for success, `False` for failure).
min_samples : int in range (0, N)
The minimum number of data points to fit a model to.
residual_threshold : float larger than 0
Maximum distance for a data point to be classified as an inlier.
is_data_valid : function, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(*random_data)`.
is_model_valid : function, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, *random_data)`.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_sample_num : int, optional
Stop iteration if at least this number of inliers are found.
stop_residuals_sum : float, optional
Stop iteration if sum of residuals is less than or equal to this
threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the
training data is sampled with ``probability >= stop_probability``,
depending on the current best model's inlier ratio and the number
of trials. This requires generating at least N samples (trials):
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to a high value
such as 0.99, e is the current fraction of inliers w.r.t. the
total number of samples, and m is the min_samples value.
random_state : {None, int, `numpy.random.Generator`}, optional
If `random_state` is None the `numpy.random.Generator` singleton is
used.
If `random_state` is an int, a new ``Generator`` instance is used,
seeded with `random_state`.
If `random_state` is already a ``Generator`` instance then that
instance is used.
initial_inliers : array-like of bool, shape (N,), optional
Initial selection of samples for model estimation.
Returns
-------
model : object
Best model with largest consensus set.
inliers : (N, ) array
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] "RANSAC", Wikipedia, https://en.wikipedia.org/wiki/RANSAC
Examples
--------
Generate ellipse data without tilt and add noise:
>>> t = np.linspace(0, 2 * np.pi, 50)
>>> xc, yc = 20, 30
>>> a, b = 5, 10
>>> x = xc + a * np.cos(t)
>>> y = yc + b * np.sin(t)
>>> data = np.column_stack([x, y])
>>> rng = np.random.default_rng(203560) # do not copy this value
>>> data += rng.normal(size=data.shape)
Add some faulty data:
>>> data[0] = (100, 100)
>>> data[1] = (110, 120)
>>> data[2] = (120, 130)
>>> data[3] = (140, 130)
Estimate ellipse model using all available data:
>>> model = EllipseModel()
>>> model.estimate(data)
True
>>> np.round(model.params) # doctest: +SKIP
array([ 72., 75., 77., 14., 1.])
Estimate ellipse model using RANSAC:
>>> ransac_model, inliers = ransac(data, EllipseModel, 20, 3, max_trials=50)
>>> abs(np.round(ransac_model.params))
array([20., 30., 10., 6., 2.])
>>> inliers # doctest: +SKIP
array([False, False, False, False, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True], dtype=bool)
>>> sum(inliers) > 40
True
RANSAC can be used to robustly estimate a geometric
transformation. In this section, we also show how to use a
proportion of the total samples, rather than an absolute number.
>>> from skimage.transform import SimilarityTransform
>>> rng = np.random.default_rng()
>>> src = 100 * rng.random((50, 2))
>>> model0 = SimilarityTransform(scale=0.5, rotation=1,
... translation=(10, 20))
>>> dst = model0(src)
>>> dst[0] = (10000, 10000)
>>> dst[1] = (-100, 100)
>>> dst[2] = (50, 50)
>>> ratio = 0.5 # use half of the samples
>>> min_samples = int(ratio * len(src))
>>> model, inliers = ransac((src, dst), SimilarityTransform, min_samples,
... 10,
... initial_inliers=np.ones(len(src), dtype=bool))
>>> inliers # doctest: +SKIP
array([False, False, False, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True, True, True, True, True,
True, True, True, True, True])
"""
best_inlier_num = 0
best_inlier_residuals_sum = np.inf
best_inliers = []
validate_model = is_model_valid is not None
validate_data = is_data_valid is not None
random_state = np.random.default_rng(random_state)
# if `data` is not already a tuple or list of arrays, wrap it in one
if not isinstance(data, (tuple, list)):
data = (data, )
num_samples = len(data[0])
if not (0 < min_samples < num_samples):
raise ValueError(f"`min_samples` must be in range (0, {num_samples})")
if residual_threshold < 0:
raise ValueError("`residual_threshold` must be greater than zero")
if max_trials < 0:
raise ValueError("`max_trials` must be greater than zero")
if not (0 <= stop_probability <= 1):
raise ValueError("`stop_probability` must be in range [0, 1]")
if initial_inliers is not None and len(initial_inliers) != num_samples:
raise ValueError(
f"RANSAC received a vector of initial inliers (length "
f"{len(initial_inliers)}) that didn't match the number of "
f"samples ({num_samples}). The vector of initial inliers should "
f"have the same length as the number of samples and contain only "
f"True (this sample is an initial inlier) and False (this one "
f"isn't) values.")
# for the first run use initial guess of inliers
spl_idxs = (initial_inliers if initial_inliers is not None
else random_state.choice(num_samples, min_samples,
replace=False))
# estimate model for current random sample set
model = model_class()
for num_trials in range(max_trials):
# select the current sample set from each data array
samples = [d[spl_idxs] for d in data]
# for next iteration choose random sample set and be sure that
# no samples repeat
spl_idxs = random_state.choice(num_samples, min_samples, replace=False)
# optional check if random sample set is valid
if validate_data and not is_data_valid(*samples):
continue
success = model.estimate(*samples)
# backwards compatibility
if success is not None and not success:
continue
# optional check if estimated model is valid
if validate_model and not is_model_valid(model, *samples):
continue
residuals = np.abs(model.residuals(*data))
# consensus set / inliers
inliers = residuals < residual_threshold
residuals_sum = residuals.dot(residuals)
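# residuals.dot(residuals) is the sum of squared residuals; it is used
# below only to break ties between models with the same number of inliers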
# choose as new best model if number of inliers is maximal
inliers_count = np.count_nonzero(inliers)
if (
# more inliers
inliers_count > best_inlier_num
# same number of inliers but less "error" in terms of residuals
or (inliers_count == best_inlier_num
and residuals_sum < best_inlier_residuals_sum)):
best_inlier_num = inliers_count
best_inlier_residuals_sum = residuals_sum
best_inliers = inliers
dynamic_max_trials = _dynamic_max_trials(best_inlier_num,
num_samples,
min_samples,
stop_probability)
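# _dynamic_max_trials (a helper not shown in this snippet) turns the
# stop_probability rule from the docstring, N >= log(1 - p) / log(1 - e**m),
# into a concrete trial budget based on the current best inlier ratio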
if (best_inlier_num >= stop_sample_num
or best_inlier_residuals_sum <= stop_residuals_sum
or num_trials >= dynamic_max_trials):
break
# estimate final model using all inliers
if any(best_inliers):
# select inliers for each data array
data_inliers = [d[best_inliers] for d in data]
model.estimate(*data_inliers)
if validate_model and not is_model_valid(model, *data_inliers):
warn("Estimated model is not valid. Try increasing max_trials.")
else:
model = None
best_inliers = None
warn("No inliers found. Model not fitted")
return model, best_inliers
|
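The stop_probability criterion from the RANSAC docstring above can be sanity-checked with a few lines. This is an illustrative sketch of the formula N >= log(1 - p) / log(1 - e**m); the name approx_dynamic_max_trials is made up here and is not the module's actual _dynamic_max_trials helper.

import numpy as np

def approx_dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
    # number of trials needed so that an all-inlier sample is drawn
    # at least once with the requested probability
    if n_inliers == 0 or probability == 1:
        return np.inf                      # no information yet, or certainty requested
    inlier_ratio = n_inliers / n_samples
    if inlier_ratio == 1:
        return 1                           # every point is an inlier: one trial suffices
    nom = np.log(1 - probability)
    denom = np.log(1 - inlier_ratio ** min_samples)
    return int(np.ceil(nom / denom))

# with 70% inliers, 2-point samples and 99% confidence, about 7 trials suffice
print(approx_dynamic_max_trials(70, 100, 2, 0.99))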
38,714 | def main():
# Setup command line options
argparser = argparse.ArgumentParser()
output_options = argparser.add_argument_group(
'Options controlling ReFrame output'
)
locate_options = argparser.add_argument_group(
'Options for discovering checks'
)
select_options = argparser.add_argument_group(
'Options for selecting checks'
)
action_options = argparser.add_argument_group(
'Options controlling actions'
)
run_options = argparser.add_argument_group(
'Options controlling the execution of checks'
)
env_options = argparser.add_argument_group(
'Options controlling the ReFrame environment'
)
misc_options = argparser.add_argument_group('Miscellaneous options')
# Output directory options
output_options.add_argument(
'--prefix', action='store', metavar='DIR',
help='Set general directory prefix to DIR',
envvar='RFM_PREFIX', configvar='systems/prefix'
)
output_options.add_argument(
'-o', '--output', action='store', metavar='DIR',
help='Set output directory prefix to DIR',
envvar='RFM_OUTPUT_DIR', configvar='systems/outputdir'
)
output_options.add_argument(
'-s', '--stage', action='store', metavar='DIR',
help='Set stage directory prefix to DIR',
envvar='RFM_STAGE_DIR', configvar='systems/stagedir'
)
output_options.add_argument(
'--timestamp', action='store', nargs='?', const='', metavar='TIMEFMT',
help=('Append a timestamp to the output and stage directory prefixes '
'(default: "%%FT%%T")'),
envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs'
)
output_options.add_argument(
'--perflogdir', action='store', metavar='DIR',
help=('Set performance log data directory prefix '
'(relevant only to the filelog log handler)'),
envvar='RFM_PERFLOG_DIR',
configvar='logging/handlers_perflog/filelog_basedir'
)
output_options.add_argument(
'--keep-stage-files', action='store_true',
help='Keep stage directories even for successful checks',
envvar='RFM_KEEP_STAGE_FILES', configvar='general/keep_stage_files'
)
output_options.add_argument(
'--dont-restage', action='store_false', dest='clean_stagedir',
help='Reuse the test stage directory',
envvar='RFM_CLEAN_STAGEDIR', configvar='general/clean_stagedir'
)
output_options.add_argument(
'--save-log-files', action='store_true', default=False,
help='Save ReFrame log files to the output directory',
envvar='RFM_SAVE_LOG_FILES', configvar='general/save_log_files'
)
output_options.add_argument(
'--report-file', action='store', metavar='FILE',
help="Store JSON run report in FILE",
envvar='RFM_REPORT_FILE',
configvar='general/report_file'
)
# Check discovery options
locate_options.add_argument(
'-c', '--checkpath', action='append', metavar='PATH',
help="Add PATH to the check search path list",
envvar='RFM_CHECK_SEARCH_PATH :', configvar='general/check_search_path'
)
locate_options.add_argument(
'-R', '--recursive', action='store_true',
help='Search for checks in the search path recursively',
envvar='RFM_CHECK_SEARCH_RECURSIVE',
configvar='general/check_search_recursive'
)
locate_options.add_argument(
'--ignore-check-conflicts', action='store_true',
help='Skip checks with conflicting names',
envvar='RFM_IGNORE_CHECK_CONFLICTS',
configvar='general/ignore_check_conflicts'
)
# Select options
select_options.add_argument(
'-t', '--tag', action='append', dest='tags', metavar='PATTERN',
default=[],
help='Select checks with at least one tag matching PATTERN'
)
select_options.add_argument(
'-n', '--name', action='append', dest='names', default=[],
metavar='PATTERN', help='Select checks whose name matches PATTERN'
)
select_options.add_argument(
'-x', '--exclude', action='append', dest='exclude_names',
metavar='PATTERN', default=[],
help='Exclude checks whose name matches PATTERN'
)
select_options.add_argument(
'-p', '--prgenv', action='append', default=[r'.*'], metavar='PATTERN',
help=('Select checks with at least one '
'programming environment matching PATTERN')
)
select_options.add_argument(
'--failed', action='store_true',
help="Select failed test cases (only when '--restore-session' is used)"
)
select_options.add_argument(
'--gpu-only', action='store_true',
help='Select only GPU checks'
)
select_options.add_argument(
'--cpu-only', action='store_true',
help='Select only CPU checks'
)
# Action options
action_options.add_argument(
'-l', '--list', action='store_true',
help='List the selected checks'
)
action_options.add_argument(
'-L', '--list-detailed', action='store_true',
help='List the selected checks providing details for each test'
)
action_options.add_argument(
'-r', '--run', action='store_true',
help='Run the selected checks'
)
# Run options
run_options.add_argument(
'-J', '--job-option', action='append', metavar='OPT',
dest='job_options', default=[],
help='Pass option OPT to job scheduler'
)
run_options.add_argument(
'--force-local', action='store_true',
help='Force local execution of checks'
)
run_options.add_argument(
'--skip-sanity-check', action='store_true',
help='Skip sanity checking'
)
run_options.add_argument(
'--skip-performance-check', action='store_true',
help='Skip performance checking'
)
run_options.add_argument(
'--strict', action='store_true',
help='Enforce strict performance checking'
)
run_options.add_argument(
'--skip-system-check', action='store_true',
help='Skip system check'
)
run_options.add_argument(
'--skip-prgenv-check', action='store_true',
help='Skip programming environment check'
)
run_options.add_argument(
'--exec-policy', metavar='POLICY', action='store',
choices=['async', 'serial'], default='async',
help='Set the execution policy of ReFrame (default: "async")'
)
run_options.add_argument(
'--mode', action='store', help='Execution mode to use'
)
run_options.add_argument(
'--max-retries', metavar='NUM', action='store', default=0,
help='Set the maximum number of times a failed regression test '
'may be retried (default: 0)'
)
run_options.add_argument(
'--restore-session', action='store', nargs='?', const='',
metavar='REPORT',
help='Restore a testing session from REPORT file'
)
run_options.add_argument(
'--flex-alloc-nodes', action='store',
dest='flex_alloc_nodes', metavar='{all|STATE|NUM}', default=None,
help='Set strategy for the flexible node allocation (default: "idle").'
)
run_options.add_argument(
'--disable-hook', action='append', metavar='NAME', dest='hooks',
default=[], help='Disable a pipeline hook for this run'
)
env_options.add_argument(
'-M', '--map-module', action='append', metavar='MAPPING',
dest='module_mappings', default=[],
help='Add a module mapping',
envvar='RFM_MODULE_MAPPINGS ,', configvar='general/module_mappings'
)
env_options.add_argument(
'-m', '--module', action='append', default=[],
metavar='MOD', dest='user_modules',
help='Load module MOD before running any regression check',
envvar='RFM_USER_MODULES ,', configvar='general/user_modules'
)
env_options.add_argument(
'--module-mappings', action='store', metavar='FILE',
dest='module_map_file',
help='Load module mappings from FILE',
envvar='RFM_MODULE_MAP_FILE', configvar='general/module_map_file'
)
env_options.add_argument(
'-u', '--unload-module', action='append', metavar='MOD',
dest='unload_modules', default=[],
help='Unload module MOD before running any regression check',
envvar='RFM_UNLOAD_MODULES ,', configvar='general/unload_modules'
)
env_options.add_argument(
'--module-path', action='append', metavar='PATH',
dest='module_paths', default=[],
help='(Un)use module path PATH before running any regression check',
)
env_options.add_argument(
'--purge-env', action='store_true', dest='purge_env', default=False,
help='Unload all modules before running any regression check',
envvar='RFM_PURGE_ENVIRONMENT', configvar='general/purge_environment'
)
env_options.add_argument(
'--non-default-craype', action='store_true',
help='Test a non-default Cray Programming Environment',
envvar='RFM_NON_DEFAULT_CRAYPE', configvar='general/non_default_craype'
)
# Miscellaneous options
misc_options.add_argument(
'-C', '--config-file', action='store',
dest='config_file', metavar='FILE',
help='Set configuration file',
envvar='RFM_CONFIG_FILE'
)
misc_options.add_argument(
'--nocolor', action='store_false', dest='colorize',
help='Disable coloring of output',
envvar='RFM_COLORIZE', configvar='general/colorize'
)
misc_options.add_argument(
'--failure-stats', action='store_true', help='Print failure statistics'
)
misc_options.add_argument(
'--performance-report', action='store_true',
help='Print a report for performance tests'
)
misc_options.add_argument(
'--show-config', action='store', nargs='?', const='all',
metavar='PARAM',
help='Print the value of configuration parameter PARAM and exit'
)
misc_options.add_argument(
'--system', action='store', help='Load configuration for SYSTEM',
envvar='RFM_SYSTEM'
)
misc_options.add_argument(
'--upgrade-config-file', action='store', metavar='OLD[:NEW]',
help='Upgrade ReFrame 2.x configuration file to ReFrame 3.x syntax'
)
misc_options.add_argument(
'-V', '--version', action='version', version=osext.reframe_version()
)
misc_options.add_argument(
'-v', '--verbose', action='count',
help='Increase verbosity level of output',
envvar='RFM_VERBOSE', configvar='general/verbose'
)
# Options not associated with command-line arguments
argparser.add_argument(
dest='graylog_server',
envvar='RFM_GRAYLOG_ADDRESS',
configvar='logging/handlers_perflog/graylog_address',
help='Graylog server address'
)
argparser.add_argument(
dest='syslog_address',
envvar='RFM_SYSLOG_ADDRESS',
configvar='logging/handlers_perflog/syslog_address',
help='Syslog server address'
)
argparser.add_argument(
dest='ignore_reqnodenotavail',
envvar='RFM_IGNORE_REQNODENOTAVAIL',
configvar='schedulers/ignore_reqnodenotavail',
action='store_true',
help='Ignore the ReqNodeNotAvail Slurm state'
)
argparser.add_argument(
dest='use_login_shell',
envvar='RFM_USE_LOGIN_SHELL',
configvar='general/use_login_shell',
action='store_true',
help='Use a login shell for job scripts'
)
if len(sys.argv) == 1:
argparser.print_help()
sys.exit(1)
# Parse command line
options = argparser.parse_args()
# First configure logging with our generic configuration so as to be able
# to print pretty messages; logging will be reconfigured by user's
# configuration later
site_config = config.load_config(
os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py')
)
site_config.select_subconfig('generic')
options.update_config(site_config)
logging.configure_logging(site_config)
logging.getlogger().colorize = site_config.get('general/0/colorize')
printer = PrettyPrinter()
printer.colorize = site_config.get('general/0/colorize')
printer.inc_verbosity(site_config.get('general/0/verbose'))
if os.getenv('RFM_GRAYLOG_SERVER'):
printer.warning(
'RFM_GRAYLOG_SERVER environment variable is deprecated; '
'please use RFM_GRAYLOG_ADDRESS instead'
)
os.environ['RFM_GRAYLOG_ADDRESS'] = os.getenv('RFM_GRAYLOG_SERVER')
if options.upgrade_config_file is not None:
old_config, *new_config = options.upgrade_config_file.split(
':', maxsplit=1
)
new_config = new_config[0] if new_config else None
try:
new_config = config.convert_old_config(old_config, new_config)
except Exception as e:
printer.error(f'could not convert file: {e}')
sys.exit(1)
printer.info(
f'Conversion successful! '
f'The converted file can be found at {new_config!r}.'
)
sys.exit(0)
# Now configure ReFrame according to the user configuration file
try:
try:
printer.debug('Loading user configuration')
site_config = config.load_config(options.config_file)
except warnings.ReframeDeprecationWarning as e:
printer.warning(e)
converted = config.convert_old_config(options.config_file)
printer.warning(
f"configuration file has been converted "
f"to the new syntax here: '{converted}'"
)
site_config = config.load_config(converted)
site_config.validate()
# We ignore errors about unresolved sections or configuration
# parameters here, because they might be defined at the individual
# partition level and will be caught when we will instantiating
# internally the system and partitions later on.
site_config.select_subconfig(options.system,
ignore_resolve_errors=True)
for err in options.update_config(site_config):
printer.warning(str(err))
# Update options from the selected execution mode
if options.mode:
mode_args = site_config.get(f'modes/@{options.mode}/options')
# We lexically split the mode options, because otherwise spaces
# will be treated as part of the option argument; see GH bug #1554
mode_args = list(itertools.chain.from_iterable(shlex.split(m)
for m in mode_args))
# Parse the mode's options and reparse the command-line
options = argparser.parse_args(mode_args)
options = argparser.parse_args(namespace=options.cmd_options)
options.update_config(site_config)
logging.configure_logging(site_config)
except (OSError, errors.ConfigError) as e:
printer.error(f'failed to load configuration: {e}')
printer.error(logfiles_message())
sys.exit(1)
logging.getlogger().colorize = site_config.get('general/0/colorize')
printer.colorize = site_config.get('general/0/colorize')
printer.inc_verbosity(site_config.get('general/0/verbose'))
try:
printer.debug('Initializing runtime')
runtime.init_runtime(site_config)
except errors.ConfigError as e:
printer.error(f'failed to initialize runtime: {e}')
printer.error(logfiles_message())
sys.exit(1)
rt = runtime.runtime()
try:
if site_config.get('general/0/module_map_file'):
rt.modules_system.load_mapping_from_file(
site_config.get('general/0/module_map_file')
)
if site_config.get('general/0/module_mappings'):
for m in site_config.get('general/0/module_mappings'):
rt.modules_system.load_mapping(m)
except (errors.ConfigError, OSError) as e:
printer.error('could not load module mappings: %s' % e)
sys.exit(1)
if (osext.samefile(rt.stage_prefix, rt.output_prefix) and
not site_config.get('general/0/keep_stage_files')):
printer.error("stage and output refer to the same directory; "
"if this is on purpose, please use the "
"'--keep-stage-files' option.")
printer.error(logfiles_message())
sys.exit(1)
# Show configuration after everything is set up
if options.show_config:
config_param = options.show_config
if config_param == 'all':
printer.info(str(rt.site_config))
else:
value = rt.get_option(config_param)
if value is None:
printer.error(
f'no such configuration parameter found: {config_param}'
)
else:
printer.info(json.dumps(value, indent=2))
sys.exit(0)
printer.debug(format_env(options.env_vars))
# Setup the check loader
if options.restore_session is not None:
# We need to load the failed checks only from a report
if options.restore_session:
filename = options.restore_session
else:
filename = runreport.next_report_filename(
osext.expandvars(site_config.get('general/0/report_file')),
new=False
)
report = runreport.load_report(filename)
check_search_path = list(report.slice('filename', unique=True))
check_search_recursive = False
# If `-c` or `-R` are passed explicitly outside the configuration
# file, override the values set from the report file
if site_config.is_sticky_option('general/check_search_path'):
printer.warning(
'Ignoring check search path set in the report file: '
'search path set explicitly in the command-line or '
'the environment'
)
check_search_path = site_config.get(
'general/0/check_search_path'
)
if site_config.is_sticky_option('general/check_search_recursive'):
printer.warning(
'Ignoring check search recursive option from the report file: '
'option set explicitly in the command-line or the environment'
)
check_search_recursive = site_config.get(
'general/0/check_search_recursive'
)
else:
check_search_recursive = site_config.get(
'general/0/check_search_recursive'
)
check_search_path = site_config.get('general/0/check_search_path')
loader = RegressionCheckLoader(
load_path=check_search_path,
recurse=check_search_recursive,
ignore_conflicts=site_config.get(
'general/0/ignore_check_conflicts'
)
)
def print_infoline(param, value):
param = param + ':'
printer.info(f" {param.ljust(18)} {value}")
session_info = {
'cmdline': ' '.join(sys.argv),
'config_file': rt.site_config.filename,
'data_version': runreport.DATA_VERSION,
'hostname': socket.gethostname(),
'prefix_output': rt.output_prefix,
'prefix_stage': rt.stage_prefix,
'user': osext.osuser(),
'version': osext.reframe_version(),
'workdir': os.getcwd(),
}
# Print command line
printer.info(f"[ReFrame Setup]")
print_infoline('version', session_info['version'])
print_infoline('command', repr(session_info['cmdline']))
print_infoline(
f"launched by",
f"{session_info['user'] or '<unknown>'}@{session_info['hostname']}"
)
print_infoline('working directory', repr(session_info['workdir']))
print_infoline('settings file', f"{session_info['config_file']!r}")
print_infoline('check search path',
f"{'(R) ' if loader.recurse else ''}"
f"{':'.join(loader.load_path)!r}")
print_infoline('stage directory', repr(session_info['prefix_stage']))
print_infoline('output directory', repr(session_info['prefix_output']))
printer.info('')
try:
# Locate and load checks
try:
checks_found = loader.load_all()
printer.verbose(f'Loaded {len(checks_found)} test(s)')
except OSError as e:
raise errors.ReframeError from e
# Generate all possible test cases first; we will need them for
# resolving dependencies after filtering
# Determine the allowed programming environments
allowed_environs = {e.name
for env_patt in options.prgenv
for p in rt.system.partitions
for e in p.environs if re.match(env_patt, e.name)}
testcases_all = generate_testcases(checks_found,
options.skip_system_check,
options.skip_prgenv_check,
allowed_environs)
testcases = testcases_all
printer.verbose(f'Generated {len(testcases)} test case(s)')
# Filter test cases by name
if options.exclude_names:
for name in options.exclude_names:
testcases = filter(filters.have_not_name(name), testcases)
if options.names:
testcases = filter(
filters.have_name('|'.join(options.names)), testcases
)
testcases = list(testcases)
printer.verbose(
f'Filtering test cases(s) by name: {len(testcases)} remaining'
)
# Filter test cases by tags
for tag in options.tags:
testcases = filter(filters.have_tag(tag), testcases)
testcases = list(testcases)
printer.verbose(
f'Filtering test cases(s) by tags: {len(testcases)} remaining'
)
# Filter test cases further
if options.gpu_only and options.cpu_only:
printer.error("options `--gpu-only' and `--cpu-only' "
"are mutually exclusive")
sys.exit(1)
if options.gpu_only:
testcases = filter(filters.have_gpu_only(), testcases)
elif options.cpu_only:
testcases = filter(filters.have_cpu_only(), testcases)
testcases = list(testcases)
printer.verbose(
f'Filtering test cases(s) by other attributes: '
f'{len(testcases)} remaining'
)
# Filter in failed cases
if options.failed:
if options.restore_session is None:
printer.error(
"the option '--failed' can only be used "
"in combination with the '--restore-session' option"
)
sys.exit(1)
def _case_failed(t):
rec = report.case(*t)
if rec and rec['result'] == 'failure':
return True
else:
return False
testcases = list(filter(_case_failed, testcases))
printer.verbose(
f'Filtering successful test case(s): '
f'{len(testcases)} remaining'
)
# Prepare for running
printer.debug('Building and validating the full test DAG')
testgraph, skipped_cases = dependencies.build_deps(testcases_all)
if skipped_cases:
# Some cases were skipped, so adjust testcases
testcases = list(set(testcases) - set(skipped_cases))
printer.verbose(
f'Filtering test case(s) due to unresolved dependencies: '
f'{len(testcases)} remaining'
)
dependencies.validate_deps(testgraph)
printer.debug('Full test DAG:')
printer.debug(dependencies.format_deps(testgraph))
restored_cases = []
if len(testcases) != len(testcases_all):
testgraph = dependencies.prune_deps(
testgraph, testcases,
max_depth=1 if options.restore_session is not None else None
)
printer.debug('Pruned test DAG')
printer.debug(dependencies.format_deps(testgraph))
if options.restore_session is not None:
testgraph, restored_cases = report.restore_dangling(testgraph)
testcases = dependencies.toposort(
testgraph,
is_subgraph=options.restore_session is not None
)
printer.verbose(f'Final number of test cases: {len(testcases)}')
# Disable hooks
for tc in testcases:
for h in options.hooks:
type(tc.check).disable_hook(h)
# Act on checks
if options.list or options.list_detailed:
list_checks(testcases, printer, options.list_detailed)
sys.exit(0)
if not options.run:
printer.error(f"No action specified. Please specify `-l'/`-L' for "
f"listing or `-r' for running. "
f"Try `{argparser.prog} -h' for more options.")
sys.exit(1)
# Manipulate ReFrame's environment
if site_config.get('general/0/purge_environment'):
rt.modules_system.unload_all()
else:
for m in site_config.get('general/0/unload_modules'):
rt.modules_system.unload_module(**m)
# Load the environment for the current system
try:
printer.debug(f'Loading environment for current system')
runtime.loadenv(rt.system.preload_environ)
except errors.EnvironError as e:
printer.error("failed to load current system's environment; "
"please check your configuration")
printer.debug(str(e))
raise
def module_use(*paths):
try:
rt.modules_system.searchpath_add(*paths)
except errors.EnvironError as e:
printer.warning(f'could not add module paths correctly')
printer.debug(str(e))
def module_unuse(*paths):
try:
rt.modules_system.searchpath_remove(*paths)
except errors.EnvironError as e:
printer.warning(f'could not remove module paths correctly')
printer.debug(str(e))
printer.debug('(Un)using module paths from command line')
module_paths = {}
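# group the requested paths by operation: '+' adds to the module search
# path, '-' removes from it, and paths with no prefix replace the current
# search path entirely (with 'A:B' expanded into separate paths)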
for d in options.module_paths:
if d.startswith('-'):
module_paths.setdefault('-', [])
module_paths['-'].append(d[1:])
elif d.startswith('+'):
module_paths.setdefault('+', [])
module_paths['+'].append(d[1:])
else:
module_paths.setdefault('x', [])
module_paths['x'].append(d)
for op, paths in module_paths.items():
if op == '+':
module_use(*paths)
elif op == '-':
module_unuse(*paths)
else:
# First empty the current module path in a portable way
searchpath = [p for p in rt.modules_system.searchpath if p]
if searchpath:
rt.modules_system.searchpath_remove(*searchpath)
# Treat `A:B` syntax as well in this case
paths = itertools.chain(*(p.split(':') for p in paths))
module_use(*paths)
printer.debug('Loading user modules from command line')
for m in site_config.get('general/0/user_modules'):
try:
rt.modules_system.load_module(**m, force=True)
except errors.EnvironError as e:
printer.warning(
f'could not load module {m["name"]!r} correctly; '
f'skipping...'
)
printer.debug(str(e))
options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle'
# Run the tests
# Setup the execution policy
if options.exec_policy == 'serial':
exec_policy = SerialExecutionPolicy()
elif options.exec_policy == 'async':
exec_policy = AsynchronousExecutionPolicy()
else:
# This should not happen, since choices are handled by
# argparser
printer.error("unknown execution policy `%s': Exiting...")
sys.exit(1)
exec_policy.skip_system_check = options.skip_system_check
exec_policy.force_local = options.force_local
exec_policy.strict_check = options.strict
exec_policy.skip_sanity_check = options.skip_sanity_check
exec_policy.skip_performance_check = options.skip_performance_check
exec_policy.keep_stage_files = site_config.get(
'general/0/keep_stage_files'
)
try:
errmsg = "invalid option for --flex-alloc-nodes: '{0}'"
sched_flex_alloc_nodes = int(options.flex_alloc_nodes)
if sched_flex_alloc_nodes <= 0:
raise errors.ConfigError(
errmsg.format(options.flex_alloc_nodes)
)
except ValueError:
sched_flex_alloc_nodes = options.flex_alloc_nodes
exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes
parsed_job_options = []
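# normalize options passed via -J/--job-option: options already starting
# with '-' or '#' are forwarded verbatim, single-letter names get a '-'
# prefix and longer names get '--'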
for opt in options.job_options:
optstr, *_ = re.split(r'=|\s+', opt)
if opt.startswith('-') or opt.startswith('#'):
parsed_job_options.append(opt)
elif len(optstr) == 1:
parsed_job_options.append(f'-{opt}')
else:
parsed_job_options.append(f'--{opt}')
exec_policy.sched_options = parsed_job_options
try:
max_retries = int(options.max_retries)
except ValueError:
raise errors.ConfigError(
f'--max-retries is not a valid integer: {options.max_retries}'
) from None
runner = Runner(exec_policy, printer, max_retries)
try:
time_start = time.time()
session_info['time_start'] = time.strftime(
'%FT%T%z', time.localtime(time_start),
)
runner.runall(testcases, restored_cases)
finally:
time_end = time.time()
session_info['time_end'] = time.strftime(
'%FT%T%z', time.localtime(time_end)
)
session_info['time_elapsed'] = time_end - time_start
# Print a retry report if we did any retries
if runner.stats.failures(run=0):
printer.info(runner.stats.retry_report())
# Print a failure report if we had failures in the last run
success = True
if runner.stats.failures():
success = False
runner.stats.print_failure_report(printer)
if options.failure_stats:
runner.stats.print_failure_stats(printer)
if options.performance_report:
printer.info(runner.stats.performance_report())
# Generate the report for this session
report_file = os.path.normpath(
osext.expandvars(rt.get_option('general/0/report_file'))
)
basedir = os.path.dirname(report_file)
if basedir:
os.makedirs(basedir, exist_ok=True)
# Build final JSON report
run_stats = runner.stats.json()
session_info.update({
'num_cases': run_stats[0]['num_cases'],
'num_failures': run_stats[-1]['num_failures']
})
json_report = {
'session_info': session_info,
'runs': run_stats,
'restored_cases': []
}
if options.restore_session is not None:
for c in restored_cases:
json_report['restored_cases'].append(report.case(*c))
report_file = runreport.next_report_filename(report_file)
try:
with open(report_file, 'w') as fp:
jsonext.dump(json_report, fp, indent=2)
fp.write('\n')
except OSError as e:
printer.warning(
f'failed to generate report in {report_file!r}: {e}'
)
if not success:
sys.exit(1)
sys.exit(0)
except KeyboardInterrupt:
sys.exit(1)
except errors.ReframeError as e:
printer.error(str(e))
sys.exit(1)
except (Exception, errors.ReframeFatalError):
exc_info = sys.exc_info()
tb = ''.join(traceback.format_exception(*exc_info))
printer.error(errors.what(*exc_info))
if errors.is_severe(*exc_info):
printer.error(tb)
else:
printer.verbose(tb)
sys.exit(1)
finally:
try:
log_files = logging.log_files()
if site_config.get('general/0/save_log_files'):
log_files = logging.save_log_files(rt.output_prefix)
except OSError as e:
printer.error(f'could not save log file: {e}')
sys.exit(1)
finally:
printer.info(logfiles_message())
| def main():
# Setup command line options
argparser = argparse.ArgumentParser()
output_options = argparser.add_argument_group(
'Options controlling ReFrame output'
)
locate_options = argparser.add_argument_group(
'Options for discovering checks'
)
select_options = argparser.add_argument_group(
'Options for selecting checks'
)
action_options = argparser.add_argument_group(
'Options controlling actions'
)
run_options = argparser.add_argument_group(
'Options controlling the execution of checks'
)
env_options = argparser.add_argument_group(
'Options controlling the ReFrame environment'
)
misc_options = argparser.add_argument_group('Miscellaneous options')
# Output directory options
output_options.add_argument(
'--prefix', action='store', metavar='DIR',
help='Set general directory prefix to DIR',
envvar='RFM_PREFIX', configvar='systems/prefix'
)
output_options.add_argument(
'-o', '--output', action='store', metavar='DIR',
help='Set output directory prefix to DIR',
envvar='RFM_OUTPUT_DIR', configvar='systems/outputdir'
)
output_options.add_argument(
'-s', '--stage', action='store', metavar='DIR',
help='Set stage directory prefix to DIR',
envvar='RFM_STAGE_DIR', configvar='systems/stagedir'
)
output_options.add_argument(
'--timestamp', action='store', nargs='?', const='', metavar='TIMEFMT',
help=('Append a timestamp to the output and stage directory prefixes '
'(default: "%%FT%%T")'),
envvar='RFM_TIMESTAMP_DIRS', configvar='general/timestamp_dirs'
)
output_options.add_argument(
'--perflogdir', action='store', metavar='DIR',
help=('Set performance log data directory prefix '
'(relevant only to the filelog log handler)'),
envvar='RFM_PERFLOG_DIR',
configvar='logging/handlers_perflog/filelog_basedir'
)
output_options.add_argument(
'--keep-stage-files', action='store_true',
help='Keep stage directories even for successful checks',
envvar='RFM_KEEP_STAGE_FILES', configvar='general/keep_stage_files'
)
output_options.add_argument(
'--dont-restage', action='store_false', dest='clean_stagedir',
help='Reuse the test stage directory',
envvar='RFM_CLEAN_STAGEDIR', configvar='general/clean_stagedir'
)
output_options.add_argument(
'--save-log-files', action='store_true', default=False,
help='Save ReFrame log files to the output directory',
envvar='RFM_SAVE_LOG_FILES', configvar='general/save_log_files'
)
output_options.add_argument(
'--report-file', action='store', metavar='FILE',
help="Store JSON run report in FILE",
envvar='RFM_REPORT_FILE',
configvar='general/report_file'
)
# Check discovery options
locate_options.add_argument(
'-c', '--checkpath', action='append', metavar='PATH',
help="Add PATH to the check search path list",
envvar='RFM_CHECK_SEARCH_PATH :', configvar='general/check_search_path'
)
locate_options.add_argument(
'-R', '--recursive', action='store_true',
help='Search for checks in the search path recursively',
envvar='RFM_CHECK_SEARCH_RECURSIVE',
configvar='general/check_search_recursive'
)
locate_options.add_argument(
'--ignore-check-conflicts', action='store_true',
help='Skip checks with conflicting names',
envvar='RFM_IGNORE_CHECK_CONFLICTS',
configvar='general/ignore_check_conflicts'
)
# Select options
select_options.add_argument(
'-t', '--tag', action='append', dest='tags', metavar='PATTERN',
default=[],
help='Select checks with at least one tag matching PATTERN'
)
select_options.add_argument(
'-n', '--name', action='append', dest='names', default=[],
metavar='PATTERN', help='Select checks whose name matches PATTERN'
)
select_options.add_argument(
'-x', '--exclude', action='append', dest='exclude_names',
metavar='PATTERN', default=[],
help='Exclude checks whose name matches PATTERN'
)
select_options.add_argument(
'-p', '--prgenv', action='append', default=[r'.*'], metavar='PATTERN',
help=('Select checks with at least one '
'programming environment matching PATTERN')
)
select_options.add_argument(
'--failed', action='store_true',
help="Select failed test cases (only when '--restore-session' is used)"
)
select_options.add_argument(
'--gpu-only', action='store_true',
help='Select only GPU checks'
)
select_options.add_argument(
'--cpu-only', action='store_true',
help='Select only CPU checks'
)
# Action options
action_options.add_argument(
'-l', '--list', action='store_true',
help='List the selected checks'
)
action_options.add_argument(
'-L', '--list-detailed', action='store_true',
help='List the selected checks providing details for each test'
)
action_options.add_argument(
'-r', '--run', action='store_true',
help='Run the selected checks'
)
# Run options
run_options.add_argument(
'-J', '--job-option', action='append', metavar='OPT',
dest='job_options', default=[],
help='Pass option OPT to job scheduler'
)
run_options.add_argument(
'--force-local', action='store_true',
help='Force local execution of checks'
)
run_options.add_argument(
'--skip-sanity-check', action='store_true',
help='Skip sanity checking'
)
run_options.add_argument(
'--skip-performance-check', action='store_true',
help='Skip performance checking'
)
run_options.add_argument(
'--strict', action='store_true',
help='Enforce strict performance checking'
)
run_options.add_argument(
'--skip-system-check', action='store_true',
help='Skip system check'
)
run_options.add_argument(
'--skip-prgenv-check', action='store_true',
help='Skip programming environment check'
)
run_options.add_argument(
'--exec-policy', metavar='POLICY', action='store',
choices=['async', 'serial'], default='async',
help='Set the execution policy of ReFrame (default: "async")'
)
run_options.add_argument(
'--mode', action='store', help='Execution mode to use'
)
run_options.add_argument(
'--max-retries', metavar='NUM', action='store', default=0,
help='Set the maximum number of times a failed regression test '
'may be retried (default: 0)'
)
run_options.add_argument(
'--restore-session', action='store', nargs='?', const='',
metavar='REPORT',
help='Restore a testing session from REPORT file'
)
run_options.add_argument(
'--flex-alloc-nodes', action='store',
dest='flex_alloc_nodes', metavar='{all|STATE|NUM}', default=None,
help='Set strategy for the flexible node allocation (default: "idle").'
)
run_options.add_argument(
'--disable-hook', action='append', metavar='NAME', dest='hooks',
default=[], help='Disable a pipeline hook for this run'
)
env_options.add_argument(
'-M', '--map-module', action='append', metavar='MAPPING',
dest='module_mappings', default=[],
help='Add a module mapping',
envvar='RFM_MODULE_MAPPINGS ,', configvar='general/module_mappings'
)
env_options.add_argument(
'-m', '--module', action='append', default=[],
metavar='MOD', dest='user_modules',
help='Load module MOD before running any regression check',
envvar='RFM_USER_MODULES ,', configvar='general/user_modules'
)
env_options.add_argument(
'--module-mappings', action='store', metavar='FILE',
dest='module_map_file',
help='Load module mappings from FILE',
envvar='RFM_MODULE_MAP_FILE', configvar='general/module_map_file'
)
env_options.add_argument(
'-u', '--unload-module', action='append', metavar='MOD',
dest='unload_modules', default=[],
help='Unload module MOD before running any regression check',
envvar='RFM_UNLOAD_MODULES ,', configvar='general/unload_modules'
)
env_options.add_argument(
'--module-path', action='append', metavar='PATH',
dest='module_paths', default=[],
help='(Un)use module path PATH before running any regression check',
)
env_options.add_argument(
'--purge-env', action='store_true', dest='purge_env', default=False,
help='Unload all modules before running any regression check',
envvar='RFM_PURGE_ENVIRONMENT', configvar='general/purge_environment'
)
env_options.add_argument(
'--non-default-craype', action='store_true',
help='Test a non-default Cray Programming Environment',
envvar='RFM_NON_DEFAULT_CRAYPE', configvar='general/non_default_craype'
)
# Miscellaneous options
misc_options.add_argument(
'-C', '--config-file', action='store',
dest='config_file', metavar='FILE',
help='Set configuration file',
envvar='RFM_CONFIG_FILE'
)
misc_options.add_argument(
'--nocolor', action='store_false', dest='colorize',
help='Disable coloring of output',
envvar='RFM_COLORIZE', configvar='general/colorize'
)
misc_options.add_argument(
'--failure-stats', action='store_true', help='Print failure statistics'
)
misc_options.add_argument(
'--performance-report', action='store_true',
help='Print a report for performance tests'
)
misc_options.add_argument(
'--show-config', action='store', nargs='?', const='all',
metavar='PARAM',
help='Print the value of configuration parameter PARAM and exit'
)
misc_options.add_argument(
'--system', action='store', help='Load configuration for SYSTEM',
envvar='RFM_SYSTEM'
)
misc_options.add_argument(
'--upgrade-config-file', action='store', metavar='OLD[:NEW]',
help='Upgrade ReFrame 2.x configuration file to ReFrame 3.x syntax'
)
misc_options.add_argument(
'-V', '--version', action='version', version=osext.reframe_version()
)
misc_options.add_argument(
'-v', '--verbose', action='count',
help='Increase verbosity level of output',
envvar='RFM_VERBOSE', configvar='general/verbose'
)
# Options not associated with command-line arguments
argparser.add_argument(
dest='graylog_server',
envvar='RFM_GRAYLOG_ADDRESS',
configvar='logging/handlers_perflog/graylog_address',
help='Graylog server address'
)
argparser.add_argument(
dest='syslog_address',
envvar='RFM_SYSLOG_ADDRESS',
configvar='logging/handlers_perflog/syslog_address',
help='Syslog server address'
)
argparser.add_argument(
dest='ignore_reqnodenotavail',
envvar='RFM_IGNORE_REQNODENOTAVAIL',
configvar='schedulers/ignore_reqnodenotavail',
action='store_true',
        help='Ignore the ReqNodeNotAvail scheduler error'
)
argparser.add_argument(
dest='use_login_shell',
envvar='RFM_USE_LOGIN_SHELL',
configvar='general/use_login_shell',
action='store_true',
help='Use a login shell for job scripts'
)
if len(sys.argv) == 1:
argparser.print_help()
sys.exit(1)
# Parse command line
options = argparser.parse_args()
# First configure logging with our generic configuration so as to be able
# to print pretty messages; logging will be reconfigured by user's
# configuration later
site_config = config.load_config(
os.path.join(reframe.INSTALL_PREFIX, 'reframe/core/settings.py')
)
site_config.select_subconfig('generic')
options.update_config(site_config)
logging.configure_logging(site_config)
logging.getlogger().colorize = site_config.get('general/0/colorize')
printer = PrettyPrinter()
printer.colorize = site_config.get('general/0/colorize')
printer.inc_verbosity(site_config.get('general/0/verbose'))
if os.getenv('RFM_GRAYLOG_SERVER'):
printer.warning(
'RFM_GRAYLOG_SERVER environment variable is deprecated; '
'please use RFM_GRAYLOG_ADDRESS instead'
)
os.environ['RFM_GRAYLOG_ADDRESS'] = os.getenv('RFM_GRAYLOG_SERVER')
if options.upgrade_config_file is not None:
old_config, *new_config = options.upgrade_config_file.split(
':', maxsplit=1
)
new_config = new_config[0] if new_config else None
try:
new_config = config.convert_old_config(old_config, new_config)
except Exception as e:
printer.error(f'could not convert file: {e}')
sys.exit(1)
printer.info(
f'Conversion successful! '
f'The converted file can be found at {new_config!r}.'
)
sys.exit(0)
# Now configure ReFrame according to the user configuration file
try:
try:
printer.debug('Loading user configuration')
site_config = config.load_config(options.config_file)
except warnings.ReframeDeprecationWarning as e:
printer.warning(e)
converted = config.convert_old_config(options.config_file)
printer.warning(
f"configuration file has been converted "
f"to the new syntax here: '{converted}'"
)
site_config = config.load_config(converted)
site_config.validate()
# We ignore errors about unresolved sections or configuration
# parameters here, because they might be defined at the individual
        # partition level and will be caught when we instantiate the system
        # and partitions internally later on.
site_config.select_subconfig(options.system,
ignore_resolve_errors=True)
for err in options.update_config(site_config):
printer.warning(str(err))
# Update options from the selected execution mode
if options.mode:
mode_args = site_config.get(f'modes/@{options.mode}/options')
# We lexically split the mode options, because otherwise spaces
# will be treated as part of the option argument; see GH bug #1554
mode_args = list(itertools.chain.from_iterable(shlex.split(m)
for m in mode_args))
# Parse the mode's options and reparse the command-line
options = argparser.parse_args(mode_args)
options = argparser.parse_args(namespace=options.cmd_options)
options.update_config(site_config)
logging.configure_logging(site_config)
except (OSError, errors.ConfigError) as e:
printer.error(f'failed to load configuration: {e}')
printer.error(logfiles_message())
sys.exit(1)
logging.getlogger().colorize = site_config.get('general/0/colorize')
printer.colorize = site_config.get('general/0/colorize')
printer.inc_verbosity(site_config.get('general/0/verbose'))
try:
printer.debug('Initializing runtime')
runtime.init_runtime(site_config)
except errors.ConfigError as e:
printer.error(f'failed to initialize runtime: {e}')
printer.error(logfiles_message())
sys.exit(1)
rt = runtime.runtime()
try:
if site_config.get('general/0/module_map_file'):
rt.modules_system.load_mapping_from_file(
site_config.get('general/0/module_map_file')
)
if site_config.get('general/0/module_mappings'):
for m in site_config.get('general/0/module_mappings'):
rt.modules_system.load_mapping(m)
except (errors.ConfigError, OSError) as e:
printer.error('could not load module mappings: %s' % e)
sys.exit(1)
if (osext.samefile(rt.stage_prefix, rt.output_prefix) and
not site_config.get('general/0/keep_stage_files')):
printer.error("stage and output refer to the same directory; "
"if this is on purpose, please use the "
"'--keep-stage-files' option.")
printer.error(logfiles_message())
sys.exit(1)
# Show configuration after everything is set up
if options.show_config:
config_param = options.show_config
if config_param == 'all':
printer.info(str(rt.site_config))
else:
value = rt.get_option(config_param)
if value is None:
printer.error(
f'no such configuration parameter found: {config_param}'
)
else:
printer.info(json.dumps(value, indent=2))
sys.exit(0)
printer.debug(format_env(options.env_vars))
# Setup the check loader
if options.restore_session is not None:
# We need to load the failed checks only from a report
if options.restore_session:
filename = options.restore_session
else:
filename = runreport.next_report_filename(
osext.expandvars(site_config.get('general/0/report_file')),
new=False
)
report = runreport.load_report(filename)
check_search_path = list(report.slice('filename', unique=True))
check_search_recursive = False
# If `-c` or `-R` are passed explicitly outside the configuration
# file, override the values set from the report file
if site_config.is_sticky_option('general/check_search_path'):
printer.warning(
'Ignoring check search path set in the report file: '
'search path set explicitly in the command-line or '
'the environment'
)
check_search_path = site_config.get(
'general/0/check_search_path'
)
if site_config.is_sticky_option('general/check_search_recursive'):
printer.warning(
'Ignoring check search recursive option from the report file: '
'option set explicitly in the command-line or the environment'
)
check_search_recursive = site_config.get(
'general/0/check_search_recursive'
)
else:
check_search_recursive = site_config.get(
'general/0/check_search_recursive'
)
check_search_path = site_config.get('general/0/check_search_path')
loader = RegressionCheckLoader(
load_path=check_search_path,
recurse=check_search_recursive,
ignore_conflicts=site_config.get(
'general/0/ignore_check_conflicts'
)
)
def print_infoline(param, value):
param = param + ':'
printer.info(f" {param.ljust(18)} {value}")
session_info = {
'cmdline': ' '.join(sys.argv),
'config_file': rt.site_config.filename,
'data_version': runreport.DATA_VERSION,
'hostname': socket.gethostname(),
'prefix_output': rt.output_prefix,
'prefix_stage': rt.stage_prefix,
'user': osext.osuser(),
'version': osext.reframe_version(),
'workdir': os.getcwd(),
}
# Print command line
printer.info(f"[ReFrame Setup]")
print_infoline('version', session_info['version'])
print_infoline('command', repr(session_info['cmdline']))
print_infoline(
f"launched by",
f"{session_info['user'] or '<unknown>'}@{session_info['hostname']}"
)
print_infoline('working directory', repr(session_info['workdir']))
print_infoline('settings file', f"{session_info['config_file']!r}")
print_infoline('check search path',
f"{'(R) ' if loader.recurse else ''}"
f"{':'.join(loader.load_path)!r}")
print_infoline('stage directory', repr(session_info['prefix_stage']))
print_infoline('output directory', repr(session_info['prefix_output']))
printer.info('')
try:
# Locate and load checks
try:
checks_found = loader.load_all()
printer.verbose(f'Loaded {len(checks_found)} test(s)')
except OSError as e:
raise errors.ReframeError from e
# Generate all possible test cases first; we will need them for
# resolving dependencies after filtering
# Determine the allowed programming environments
allowed_environs = {e.name
for env_patt in options.prgenv
for p in rt.system.partitions
for e in p.environs if re.match(env_patt, e.name)}
testcases_all = generate_testcases(checks_found,
options.skip_system_check,
options.skip_prgenv_check,
allowed_environs)
testcases = testcases_all
printer.verbose(f'Generated {len(testcases)} test case(s)')
# Filter test cases by name
if options.exclude_names:
for name in options.exclude_names:
testcases = filter(filters.have_not_name(name), testcases)
if options.names:
testcases = filter(
filters.have_name('|'.join(options.names)), testcases
)
testcases = list(testcases)
printer.verbose(
            f'Filtering test case(s) by name: {len(testcases)} remaining'
)
# Filter test cases by tags
for tag in options.tags:
testcases = filter(filters.have_tag(tag), testcases)
testcases = list(testcases)
printer.verbose(
            f'Filtering test case(s) by tags: {len(testcases)} remaining'
)
# Filter test cases further
if options.gpu_only and options.cpu_only:
printer.error("options `--gpu-only' and `--cpu-only' "
"are mutually exclusive")
sys.exit(1)
if options.gpu_only:
testcases = filter(filters.have_gpu_only(), testcases)
elif options.cpu_only:
testcases = filter(filters.have_cpu_only(), testcases)
testcases = list(testcases)
printer.verbose(
            f'Filtering test case(s) by other attributes: '
f'{len(testcases)} remaining'
)
# Filter in failed cases
if options.failed:
if options.restore_session is None:
printer.error(
"the option '--failed' can only be used "
"in combination with the '--restore-session' option"
)
sys.exit(1)
def _case_failed(t):
rec = report.case(*t)
if rec and rec['result'] == 'failure':
return True
else:
return False
testcases = list(filter(_case_failed, testcases))
printer.verbose(
f'Filtering successful test case(s): '
f'{len(testcases)} remaining'
)
# Prepare for running
printer.debug('Building and validating the full test DAG')
testgraph, skipped_cases = dependencies.build_deps(testcases_all)
if skipped_cases:
# Some cases were skipped, so adjust testcases
testcases = list(set(testcases) - set(skipped_cases))
printer.verbose(
f'Filtering test case(s) due to unresolved dependencies: '
f'{len(testcases)} remaining'
)
dependencies.validate_deps(testgraph)
printer.debug('Full test DAG:')
printer.debug(dependencies.format_deps(testgraph))
restored_cases = []
if len(testcases) != len(testcases_all):
testgraph = dependencies.prune_deps(
testgraph, testcases,
max_depth=1 if options.restore_session is not None else None
)
printer.debug('Pruned test DAG')
printer.debug(dependencies.format_deps(testgraph))
if options.restore_session is not None:
testgraph, restored_cases = report.restore_dangling(testgraph)
testcases = dependencies.toposort(
testgraph,
is_subgraph=options.restore_session is not None
)
printer.verbose(f'Final number of test cases: {len(testcases)}')
# Disable hooks
for tc in testcases:
for h in options.hooks:
type(tc.check).disable_hook(h)
# Act on checks
if options.list or options.list_detailed:
list_checks(testcases, printer, options.list_detailed)
sys.exit(0)
if not options.run:
printer.error(f"No action specified. Please specify `-l'/`-L' for "
f"listing or `-r' for running. "
f"Try `{argparser.prog} -h' for more options.")
sys.exit(1)
# Manipulate ReFrame's environment
if site_config.get('general/0/purge_environment'):
rt.modules_system.unload_all()
else:
for m in site_config.get('general/0/unload_modules'):
rt.modules_system.unload_module(**m)
# Load the environment for the current system
try:
printer.debug(f'Loading environment for current system')
runtime.loadenv(rt.system.preload_environ)
except errors.EnvironError as e:
printer.error("failed to load current system's environment; "
"please check your configuration")
printer.debug(str(e))
raise
def module_use(*paths):
try:
rt.modules_system.searchpath_add(*paths)
except errors.EnvironError as e:
printer.warning(f'could not add module paths correctly')
printer.debug(str(e))
def module_unuse(*paths):
try:
rt.modules_system.searchpath_remove(*paths)
except errors.EnvironError as e:
printer.warning(f'could not remove module paths correctly')
printer.debug(str(e))
printer.debug('(Un)using module paths from command line')
module_paths = {}
for d in options.module_paths:
if d.startswith('-'):
module_paths.setdefault('-', [])
module_paths['-'].append(d[1:])
elif d.startswith('+'):
module_paths.setdefault('+', [])
module_paths['+'].append(d[1:])
else:
module_paths.setdefault('x', [])
module_paths['x'].append(d)
for op, paths in module_paths.items():
if op == '+':
module_use(*paths)
elif op == '-':
module_unuse(*paths)
else:
# First empty the current module path in a portable way
searchpath = [p for p in rt.modules_system.searchpath if p]
if searchpath:
rt.modules_system.searchpath_remove(*searchpath)
# Treat `A:B` syntax as well in this case
paths = itertools.chain(*(p.split(':') for p in paths))
module_use(*paths)
printer.debug('Loading user modules from command line')
for m in site_config.get('general/0/user_modules'):
try:
rt.modules_system.load_module(**m, force=True)
except errors.EnvironError as e:
printer.warning(
f'could not load module {m["name"]!r} correctly; '
f'skipping...'
)
printer.debug(str(e))
options.flex_alloc_nodes = options.flex_alloc_nodes or 'idle'
# Run the tests
# Setup the execution policy
if options.exec_policy == 'serial':
exec_policy = SerialExecutionPolicy()
elif options.exec_policy == 'async':
exec_policy = AsynchronousExecutionPolicy()
else:
# This should not happen, since choices are handled by
# argparser
printer.error("unknown execution policy `%s': Exiting...")
sys.exit(1)
exec_policy.skip_system_check = options.skip_system_check
exec_policy.force_local = options.force_local
exec_policy.strict_check = options.strict
exec_policy.skip_sanity_check = options.skip_sanity_check
exec_policy.skip_performance_check = options.skip_performance_check
exec_policy.keep_stage_files = site_config.get(
'general/0/keep_stage_files'
)
try:
errmsg = "invalid option for --flex-alloc-nodes: '{0}'"
sched_flex_alloc_nodes = int(options.flex_alloc_nodes)
if sched_flex_alloc_nodes <= 0:
raise errors.ConfigError(
errmsg.format(options.flex_alloc_nodes)
)
except ValueError:
sched_flex_alloc_nodes = options.flex_alloc_nodes
exec_policy.sched_flex_alloc_nodes = sched_flex_alloc_nodes
parsed_job_options = []
for opt in options.job_options:
optstr = re.split(r'=|\s+', opt)[0]
if opt.startswith('-') or opt.startswith('#'):
parsed_job_options.append(opt)
elif len(optstr) == 1:
parsed_job_options.append(f'-{opt}')
else:
parsed_job_options.append(f'--{opt}')
exec_policy.sched_options = parsed_job_options
try:
max_retries = int(options.max_retries)
except ValueError:
raise errors.ConfigError(
                f'--max-retries is not a valid integer: {options.max_retries}'
) from None
runner = Runner(exec_policy, printer, max_retries)
try:
time_start = time.time()
session_info['time_start'] = time.strftime(
'%FT%T%z', time.localtime(time_start),
)
runner.runall(testcases, restored_cases)
finally:
time_end = time.time()
session_info['time_end'] = time.strftime(
'%FT%T%z', time.localtime(time_end)
)
session_info['time_elapsed'] = time_end - time_start
# Print a retry report if we did any retries
if runner.stats.failures(run=0):
printer.info(runner.stats.retry_report())
# Print a failure report if we had failures in the last run
success = True
if runner.stats.failures():
success = False
runner.stats.print_failure_report(printer)
if options.failure_stats:
runner.stats.print_failure_stats(printer)
if options.performance_report:
printer.info(runner.stats.performance_report())
# Generate the report for this session
report_file = os.path.normpath(
osext.expandvars(rt.get_option('general/0/report_file'))
)
basedir = os.path.dirname(report_file)
if basedir:
os.makedirs(basedir, exist_ok=True)
# Build final JSON report
run_stats = runner.stats.json()
session_info.update({
'num_cases': run_stats[0]['num_cases'],
'num_failures': run_stats[-1]['num_failures']
})
json_report = {
'session_info': session_info,
'runs': run_stats,
'restored_cases': []
}
if options.restore_session is not None:
for c in restored_cases:
json_report['restored_cases'].append(report.case(*c))
report_file = runreport.next_report_filename(report_file)
try:
with open(report_file, 'w') as fp:
jsonext.dump(json_report, fp, indent=2)
fp.write('\n')
except OSError as e:
printer.warning(
f'failed to generate report in {report_file!r}: {e}'
)
if not success:
sys.exit(1)
sys.exit(0)
except KeyboardInterrupt:
sys.exit(1)
except errors.ReframeError as e:
printer.error(str(e))
sys.exit(1)
except (Exception, errors.ReframeFatalError):
exc_info = sys.exc_info()
tb = ''.join(traceback.format_exception(*exc_info))
printer.error(errors.what(*exc_info))
if errors.is_severe(*exc_info):
printer.error(tb)
else:
printer.verbose(tb)
sys.exit(1)
finally:
try:
log_files = logging.log_files()
if site_config.get('general/0/save_log_files'):
log_files = logging.save_log_files(rt.output_prefix)
except OSError as e:
printer.error(f'could not save log file: {e}')
sys.exit(1)
finally:
printer.info(logfiles_message())
|
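The row above ends with ReFrame's command-line driver. One of its less obvious steps is how `-J`/`--job-option` values are normalized before being handed to the scheduler. A minimal, self-contained sketch of that rule follows; `normalize_job_options` is a hypothetical helper name, not part of ReFrame's API.

import re

def normalize_job_options(job_options):
    """Prefix scheduler options the way the CLI driver above does."""
    parsed = []
    for opt in job_options:
        # Take the option name up to the first '=' or whitespace
        optstr = re.split(r'=|\s+', opt)[0]
        if opt.startswith('-') or opt.startswith('#'):
            # Already a flag or a scheduler directive: pass through untouched
            parsed.append(opt)
        elif len(optstr) == 1:
            # Single-letter options become short flags
            parsed.append(f'-{opt}')
        else:
            # Everything else becomes a long flag
            parsed.append(f'--{opt}')
    return parsed

# Example: ['mem=4G', 'p debug', '-w nid00001'] -> ['--mem=4G', '-p debug', '-w nid00001']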
35,360 | def convert_script(
filename_in,
filename_out,
loglevel="WARNING",
auto_exit=True,
line_ending=None,
exec_file=None,
macros_as_functions=True,
use_function_names=True,
show_log=False,
add_imports=True,
comment_solve=False,
cleanup_output=True,
header=True,
print_com=True,
):
"""Converts an ANSYS input file to a python PyMAPDL script.
Parameters
----------
filename_in : str
Filename of the ansys input file to read in.
filename_out : str
Filename of the python script to write a translation to.
loglevel : str, optional
Logging level of the ansys object within the script.
auto_exit : bool, optional
Adds a line to the end of the script to exit MAPDL. Default
``True``.
line_ending : str, optional
When None, automatically determined by OS being used.
macros_as_functions : bool, optional
Attempt to convert MAPDL macros to python functions.
use_function_names : bool, optional
Convert MAPDL functions to ansys.mapdl.core.Mapdl class
methods. When ``True``, the MAPDL command "K" will be
converted to ``mapdl.k``. When ``False``, it will be
converted to ``mapdl.run('k')``.
show_log : bool, optional
Print the converted commands using a logger (from ``logging``
Python module).
add_imports : bool, optional
If ``True``, add the lines ``from ansys.mapdl.core import launch_mapdl``
and ``mapdl = launch_mapdl(loglevel="WARNING")`` to the
beginning of the output file. This option is useful if you
are planning to use the output script from another mapdl
session. See examples section.
This option overrides ``auto_exit``.
comment_solve : bool, optional
If ``True``, it will pythonically comment the lines that
contain ``"SOLVE"`` or ``"/EOF"``.
cleanup_output : bool, optional
If ``True`` the output is formatted using ``autopep8`` before writing
the file or returning the string. This requires ``autopep8`` to be
installed.
header : bool, optional
If ``True``, the default header is written in the first line
of the output. If a string is provided, this string will be
used as header.
print_com : bool, optional
Print command ``/COM`` arguments to python console.
Defaults to ``True``.
Returns
-------
list
List of lines translated.
Examples
--------
>>> from ansys.mapdl import core as pymapdl
>>> from ansys.mapdl.core import examples
>>> clines = pymapdl.convert_script(examples.vmfiles['vm1'], 'vm1.py')
Converting a script and using it already in the same session.
For this case, it is recommended to use
:func:`convert_apdl_block() <ansys.mapdl.core.convert_apdl_block>`
since this way it is not needed to write the file.
>>> from ansys.mapdl.core import launch_mapdl
>>> from ansys.mapdl.core import examples
>>> from ansys.mapdl.core import convert_script
>>> in_file = examples.vmfiles['vm10']
>>> filename = in_file.split('\\')[-1]
>>> out_file = 'out_' + filename.replace('.dat', '.py')
    >>> output = convert_script(in_file, out_file, line_ending='\\n')
>>> mapdl = launch_mapdl()
>>> with open(out_file, 'r') as fid:
... cmds = fid.read()
>>> mapdl.input_strings(cmds.splitlines()[2:10])
"""
with open(filename_in, "r") as fid:
apdl_strings = fid.readlines()
translator = _convert(
apdl_strings=apdl_strings,
loglevel=loglevel,
auto_exit=auto_exit,
line_ending=line_ending,
exec_file=exec_file,
macros_as_functions=macros_as_functions,
use_function_names=use_function_names,
show_log=show_log,
add_imports=add_imports,
comment_solve=comment_solve,
cleanup_output=cleanup_output,
header=header,
print_com=print_com,
)
translator.save(filename_out)
return translator.lines
| def convert_script(
filename_in,
filename_out,
loglevel="WARNING",
auto_exit=True,
line_ending=None,
exec_file=None,
macros_as_functions=True,
use_function_names=True,
show_log=False,
add_imports=True,
comment_solve=False,
cleanup_output=True,
header=True,
print_com=True,
):
"""Converts an ANSYS input file to a python PyMAPDL script.
Parameters
----------
filename_in : str
Filename of the ansys input file to read in.
filename_out : str
Filename of the python script to write a translation to.
loglevel : str, optional
Logging level of the ansys object within the script.
auto_exit : bool, optional
Adds a line to the end of the script to exit MAPDL. Default
``True``.
line_ending : str, optional
When None, automatically determined by OS being used.
macros_as_functions : bool, optional
Attempt to convert MAPDL macros to python functions.
use_function_names : bool, optional
Convert MAPDL functions to ansys.mapdl.core.Mapdl class
methods. When ``True``, the MAPDL command "K" will be
converted to ``mapdl.k``. When ``False``, it will be
converted to ``mapdl.run('k')``.
show_log : bool, optional
Print the converted commands using a logger (from ``logging``
Python module).
add_imports : bool, optional
If ``True``, add the lines ``from ansys.mapdl.core import launch_mapdl``
and ``mapdl = launch_mapdl(loglevel="WARNING")`` to the
beginning of the output file. This option is useful if you
are planning to use the output script from another mapdl
session. See examples section.
This option overrides ``auto_exit``.
comment_solve : bool, optional
If ``True``, it will pythonically comment the lines that
contain ``"SOLVE"`` or ``"/EOF"``.
cleanup_output : bool, optional
If ``True`` the output is formatted using ``autopep8`` before writing
the file or returning the string. This requires ``autopep8`` to be
installed.
header : bool, optional
If ``True``, the default header is written in the first line
of the output. If a string is provided, this string will be
used as header.
print_com : bool, optional
Print command ``/COM`` arguments to python console.
Defaults to ``True``.
Returns
-------
list
List of lines translated.
Examples
--------
>>> from ansys.mapdl import core as pymapdl
>>> from ansys.mapdl.core import examples
>>> clines = pymapdl.convert_script(examples.vmfiles['vm1'], 'vm1.py')
Converting a script and using it already in the same session.
For this case, it is recommended to use
:func:`convert_apdl_block() <ansys.mapdl.core.convert_apdl_block>`
since you do not need to write the file.
>>> from ansys.mapdl.core import launch_mapdl
>>> from ansys.mapdl.core import examples
>>> from ansys.mapdl.core import convert_script
>>> in_file = examples.vmfiles['vm10']
>>> filename = in_file.split('\\')[-1]
>>> out_file = 'out_' + filename.replace('.dat', '.py')
    >>> output = convert_script(in_file, out_file, line_ending='\\n')
>>> mapdl = launch_mapdl()
>>> with open(out_file, 'r') as fid:
... cmds = fid.read()
>>> mapdl.input_strings(cmds.splitlines()[2:10])
"""
with open(filename_in, "r") as fid:
apdl_strings = fid.readlines()
translator = _convert(
apdl_strings=apdl_strings,
loglevel=loglevel,
auto_exit=auto_exit,
line_ending=line_ending,
exec_file=exec_file,
macros_as_functions=macros_as_functions,
use_function_names=use_function_names,
show_log=show_log,
add_imports=add_imports,
comment_solve=comment_solve,
cleanup_output=cleanup_output,
header=header,
print_com=print_com,
)
translator.save(filename_out)
return translator.lines
|
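As a usage note for the `convert_script` pair above, a minimal round trip might look like the sketch below. It assumes `ansys-mapdl-core` is installed and that `vm1.dat` is an existing MAPDL input deck; the file names are illustrative.

from ansys.mapdl import core as pymapdl

# Translate an MAPDL input deck into a PyMAPDL script and inspect the result.
clines = pymapdl.convert_script("vm1.dat", "vm1.py", add_imports=True, auto_exit=True)
print(f"Wrote {len(clines)} translated lines to vm1.py")

# The converted file can be executed like any other Python script,
# or its commands can be fed back into a live MAPDL session.
with open("vm1.py") as fid:
    print(fid.read()[:500])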
34,063 | def list_actors(
address: Optional[str] = None,
filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None,
limit: int = DEFAULT_LIMIT,
timeout: int = DEFAULT_RPC_TIMEOUT,
detail: bool = False,
raise_on_missing_output: bool = True,
_explain: bool = False,
) -> List[Dict]:
"""List actors in the cluster.
Args:
address: The IP address and port of the head node. Defaults to
`http://localhost:8265` if not set.
filters: List of tuples of filter key, predicate (=, or !=), and
the filter value. E.g., `("id", "=", "abcd")`
limit: Max number of entries returned by the state backend.
timeout: Max timeout value for the http/gRPC requests made.
        detail: When True, more detailed info (specified in `ActorState`)
will be queried and returned. See
:ref:`ActorState <state-api-schema-actor>`.
raise_on_missing_output: When True, exceptions will be raised if
there is missing data due to truncation/data source unavailable.
_explain: Print the API information such as API latency or
failed query information.
Returns:
List of dictionary of
:ref:`ActorState <state-api-schema-actor>`.
Raises:
Exceptions: :ref:`RayStateApiException <state-api-exceptions>` if the CLI
failed to query the data.
"""
return StateApiClient(address=address).list(
StateResource.ACTORS,
options=ListApiOptions(
limit=limit,
timeout=timeout,
filters=filters,
detail=detail,
),
raise_on_missing_output=raise_on_missing_output,
_explain=_explain,
)
| def list_actors(
address: Optional[str] = None,
filters: Optional[List[Tuple[str, PredicateType, SupportedFilterType]]] = None,
limit: int = DEFAULT_LIMIT,
timeout: int = DEFAULT_RPC_TIMEOUT,
detail: bool = False,
raise_on_missing_output: bool = True,
_explain: bool = False,
) -> List[Dict]:
"""List actors in the cluster.
Args:
address: The IP address and port of the head node. Defaults to
`http://localhost:8265` if not set.
filters: List of tuples of filter key, predicate (=, or !=), and
the filter value. E.g., `("id", "=", "abcd")`
limit: Max number of entries returned by the state backend.
timeout: Max timeout value for the http/gRPC requests made.
        detail: When True, more detailed info (specified in `ActorState`)
will be queried and returned. See
:ref:`ActorState <state-api-schema-actor>`.
raise_on_missing_output: When True, exceptions will be raised if
there is missing data due to truncation/data source unavailable.
_explain: Print the API information such as API latency or
failed query information.
Returns:
List of dictionarified
:ref:`ActorState <state-api-schema-actor>`.
Raises:
Exceptions: :ref:`RayStateApiException <state-api-exceptions>` if the CLI
failed to query the data.
"""
return StateApiClient(address=address).list(
StateResource.ACTORS,
options=ListApiOptions(
limit=limit,
timeout=timeout,
filters=filters,
detail=detail,
),
raise_on_missing_output=raise_on_missing_output,
_explain=_explain,
)
|
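To illustrate the `list_actors` row above, here is a hedged usage sketch. The import path is an assumption that depends on the Ray release (`ray.util.state` in recent versions, `ray.experimental.state.api` earlier), and newer releases may return `ActorState` objects rather than the plain dicts shown in the signature above.

import ray
# Import path is version-dependent: recent Ray exposes this under ray.util.state,
# older releases under ray.experimental.state.api.
from ray.util.state import list_actors

ray.init()

@ray.remote
class Worker:
    def ping(self):
        return "pong"

workers = [Worker.remote() for _ in range(3)]
ray.get([w.ping.remote() for w in workers])

# Keep only actors that are still alive and ask for the detailed ActorState fields.
for actor in list_actors(filters=[("state", "=", "ALIVE")], detail=True, limit=10):
    print(actor)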
36,298 | def test_bit_flip_channel_fidelity(forest, use_seed):
"""
We use Eqn (5) of https://arxiv.org/abs/quant-ph/0701138 to compare the fidelity
"""
qc = get_qc("1q-qvm")
if use_seed:
np.random.seed(0)
qc.qam.random_seed = 0
num_expts = 1
else:
num_expts = 100
# prepare experiment settings
expt1 = ExperimentSetting(TensorProductState(plusX(0)), sX(0))
expt2 = ExperimentSetting(TensorProductState(plusY(0)), sY(0))
expt3 = ExperimentSetting(TensorProductState(plusZ(0)), sZ(0))
expt_list = [expt1, expt2, expt3]
# prepare noisy bit-flip channel as program for some random value of probability
prob = np.random.uniform(0.1, 0.5)
# the bit flip channel is composed of two Kraus operations --
# applying the X gate with probability `prob`, and applying the identity gate
# with probability `1 - prob`
kraus_ops = [
np.sqrt(1 - prob) * np.array([[1, 0], [0, 1]]),
np.sqrt(prob) * np.array([[0, 1], [1, 0]]),
]
p = Program(Pragma("PRESERVE_BLOCK"), I(0), Pragma("END_PRESERVE_BLOCK"))
p.define_noisy_gate("I", [0], kraus_ops)
# prepare TomographyExperiment
process_exp = Experiment(settings=expt_list, program=p)
# list to store experiment results
expts = []
for _ in range(num_expts):
expt_results = []
for res in measure_observables(qc, process_exp, n_shots=2000):
expt_results.append(res.expectation)
expts.append(expt_results)
expts = np.array(expts)
results = np.mean(expts, axis=0)
estimated_fidelity = _point_channel_fidelity_estimate(results)
# how close is this channel to the identity operator
expected_fidelity = 1 - (2 / 3) * prob
np.testing.assert_allclose(expected_fidelity, estimated_fidelity, atol=2e-2)
| def test_bit_flip_channel_fidelity(forest, use_seed):
"""
We use Eqn (5) of https://arxiv.org/abs/quant-ph/0701138 to compare the fidelity
"""
qc = get_qc("1q-qvm")
if use_seed:
np.random.seed(0)
qc.qam.random_seed = 0
num_expts = 1
else:
num_expts = 100
# prepare experiment settings
expt1 = ExperimentSetting(TensorProductState(plusX(0)), sX(0))
expt2 = ExperimentSetting(TensorProductState(plusY(0)), sY(0))
expt3 = ExperimentSetting(TensorProductState(plusZ(0)), sZ(0))
expt_list = [expt1, expt2, expt3]
# prepare noisy bit-flip channel as program for some random value of probability
prob = np.random.uniform(0.1, 0.5)
# the bit flip channel is composed of two Kraus operations --
# applying the X gate with probability `prob`, and applying the identity gate
# with probability `1 - prob`
kraus_ops = [
np.sqrt(1 - prob) * np.array([[1, 0], [0, 1]]),
np.sqrt(prob) * np.array([[0, 1], [1, 0]]),
]
p = Program(Pragma("PRESERVE_BLOCK"), I(0), Pragma("END_PRESERVE_BLOCK"))
p.define_noisy_gate("I", [0], kraus_ops)
# prepare Experiment
process_exp = Experiment(settings=expt_list, program=p)
# list to store experiment results
expts = []
for _ in range(num_expts):
expt_results = []
for res in measure_observables(qc, process_exp, n_shots=2000):
expt_results.append(res.expectation)
expts.append(expt_results)
expts = np.array(expts)
results = np.mean(expts, axis=0)
estimated_fidelity = _point_channel_fidelity_estimate(results)
# how close is this channel to the identity operator
expected_fidelity = 1 - (2 / 3) * prob
np.testing.assert_allclose(expected_fidelity, estimated_fidelity, atol=2e-2)
|
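For the bit-flip fidelity test above, Eqn (5) of arXiv:quant-ph/0701138 reduces, for one qubit with an identity target and a unital channel, to F = 1/2 + (<X> + <Y> + <Z>)/6 over the three settings. Assuming that is what `_point_channel_fidelity_estimate` computes (its implementation is not shown in the row), a quick numeric check reproduces the asserted 1 - 2p/3.

import numpy as np

def point_channel_fidelity(expectations):
    """Single-qubit average fidelity vs. the identity for a unital channel:
    F = 1/2 + (<X> + <Y> + <Z>) / 6  (Eqn (5) of arXiv:quant-ph/0701138)."""
    return 0.5 + np.sum(expectations) / 6

# Analytic expectations for a bit-flip channel with flip probability p
p = 0.3
expectations = np.array([1.0, 1.0 - 2 * p, 1.0 - 2 * p])  # <X>, <Y>, <Z>
print(point_channel_fidelity(expectations))   # 0.8
print(1 - (2 / 3) * p)                        # 0.8, the value asserted in the test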
56,684 | def update_translations(locales=[]):
if locales:
print(f"Updating the following locales: {locales}")
locales_to_update = locales
else:
print("Updating all locales")
locales_to_update = get_locales()
pot_path = os.path.join(root, 'messages.pot')
template = read_po(open(pot_path, 'rb'))
for locale in locales_to_update:
po_path = os.path.join(root, locale, 'messages.po')
mo_path = os.path.join(root, locale, 'messages.mo')
if os.path.exists(po_path):
catalog = read_po(open(po_path, 'rb'))
catalog.update(template)
f = open(po_path, 'wb')
write_po(f, catalog)
f.close()
print('updated', po_path)
else:
print(f"ERROR: {po_path} does not exist...")
compile_translations()
| def update_translations(locales=[]):
locales_to_update = locales or get_locales()
print(f"Updating {locales_to_update}")
pot_path = os.path.join(root, 'messages.pot')
template = read_po(open(pot_path, 'rb'))
for locale in locales_to_update:
po_path = os.path.join(root, locale, 'messages.po')
mo_path = os.path.join(root, locale, 'messages.mo')
if os.path.exists(po_path):
catalog = read_po(open(po_path, 'rb'))
catalog.update(template)
f = open(po_path, 'wb')
write_po(f, catalog)
f.close()
print('updated', po_path)
else:
print(f"ERROR: {po_path} does not exist...")
compile_translations()
|
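`update_translations` above ends by calling a `compile_translations()` helper that is not shown in the row. A sketch of what that step could look like with Babel follows; it reuses the row's `root` and `get_locales()` names, which are assumptions about the surrounding module.

import os
from babel.messages.pofile import read_po
from babel.messages.mofile import write_mo

def compile_translations():
    """Compile every locale's messages.po into the messages.mo used at runtime."""
    for locale in get_locales():
        po_path = os.path.join(root, locale, 'messages.po')
        mo_path = os.path.join(root, locale, 'messages.mo')
        if not os.path.exists(po_path):
            print(f"ERROR: {po_path} does not exist...")
            continue
        with open(po_path, 'rb') as po_file:
            catalog = read_po(po_file)
        with open(mo_path, 'wb') as mo_file:
            write_mo(mo_file, catalog)
        print('compiled', mo_path)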
25,140 | def _precache_zipimporters(path=None):
"""
For each path that has not been already cached
in the sys.path_importer_cache, create a new zipimporter
instance and store it into the cache.
Return a dict associating all paths, stored into the cache, to corresponding
zipimporter instances
    :param path: paths that have to be added into the cache
:return: association between paths stored into the cache and zipimporter instances
"""
pic = sys.path_importer_cache
# When measured, despite having the same complexity (O(n)),
# converting to tuples and then caching the conversion to sets
# and the set difference is faster than converting to sets
# and then only caching the set difference.
req_paths = tuple(path or sys.path)
cached_paths = tuple(pic)
new_paths = _cached_set_diff(req_paths, cached_paths)
for entry_path in new_paths:
try:
pic[entry_path] = zipimport.zipimporter(entry_path)
except zipimport.ZipImportError:
continue
return {key: value for key, value in pic.items() if isinstance(value, zipimport.zipimporter)}
| def _precache_zipimporters(path=None):
"""
For each path that has not been already cached
in the sys.path_importer_cache, create a new zipimporter
instance and store it into the cache.
Return a dict associating all paths, stored in the cache, to corresponding
zipimporter instances
    :param path: paths that have to be added into the cache
:return: association between paths stored into the cache and zipimporter instances
"""
pic = sys.path_importer_cache
# When measured, despite having the same complexity (O(n)),
# converting to tuples and then caching the conversion to sets
# and the set difference is faster than converting to sets
# and then only caching the set difference.
req_paths = tuple(path or sys.path)
cached_paths = tuple(pic)
new_paths = _cached_set_diff(req_paths, cached_paths)
for entry_path in new_paths:
try:
pic[entry_path] = zipimport.zipimporter(entry_path)
except zipimport.ZipImportError:
continue
return {key: value for key, value in pic.items() if isinstance(value, zipimport.zipimporter)}
|
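`_precache_zipimporters` above depends on a `_cached_set_diff` helper that is not shown. Given the comment about caching the set difference of tuples, a plausible sketch (an assumption, not necessarily the project's actual helper) is:

from functools import lru_cache

@lru_cache(maxsize=1024)
def _cached_set_diff(left, right):
    """Return the elements of `left` that are not in `right`.
    Both arguments must be hashable (tuples) so the result can be memoized."""
    result = set(left)
    result.difference_update(right)
    return result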