id
int64 11
59.9k
| original
stringlengths 33
150k
| modified
stringlengths 37
150k
|
---|---|---|
6,959 | def rename_doc(
doctype: str,
old: str,
new: str,
force: bool = False,
merge: bool = False,
ignore_permissions: bool = False,
ignore_if_exists: bool = False,
show_alert: bool = True,
rebuild_search: bool = True,
) -> str:
"""Rename a doc(dt, old) to doc(dt, new) and update all linked fields of type "Link"."""
if not frappe.db.exists(doctype, old):
frappe.errprint(_("Failed: {0} to {1} because {0} doesn't exist.").format(old, new))
return
if ignore_if_exists and frappe.db.exists(doctype, new):
frappe.errprint(_("Failed: {0} to {1} because {1} already exists.").format(old, new))
return
if old==new:
frappe.errprint(_("Failed: {0} to {1} no changes made.").format(old, new))
return
force = cint(force)
merge = cint(merge)
meta = frappe.get_meta(doctype)
# call before_rename
old_doc = frappe.get_doc(doctype, old)
out = old_doc.run_method("before_rename", old, new, merge) or {}
new = (out.get("new") or new) if isinstance(out, dict) else (out or new)
new = validate_rename(doctype, new, meta, merge, force, ignore_permissions)
if not merge:
rename_parent_and_child(doctype, old, new, meta)
else:
update_assignments(old, new, doctype)
# update link fields' values
link_fields = get_link_fields(doctype)
update_link_field_values(link_fields, old, new, doctype)
rename_dynamic_links(doctype, old, new)
# save the user settings in the db
update_user_settings(old, new, link_fields)
if doctype=='DocType':
rename_doctype(doctype, old, new)
update_customizations(old, new)
update_attachments(doctype, old, new)
rename_versions(doctype, old, new)
rename_eps_records(doctype, old, new)
# call after_rename
new_doc = frappe.get_doc(doctype, new)
# copy any flags if required
new_doc._local = getattr(old_doc, "_local", None)
new_doc.run_method("after_rename", old, new, merge)
if not merge:
rename_password(doctype, old, new)
# update user_permissions
frappe.db.sql("""UPDATE `tabDefaultValue` SET `defvalue`=%s WHERE `parenttype`='User Permission'
AND `defkey`=%s AND `defvalue`=%s""", (new, doctype, old))
if merge:
new_doc.add_comment('Edit', _("merged {0} into {1}").format(frappe.bold(old), frappe.bold(new)))
else:
new_doc.add_comment('Edit', _("renamed from {0} to {1}").format(frappe.bold(old), frappe.bold(new)))
if merge:
frappe.delete_doc(doctype, old)
new_doc.clear_cache()
frappe.clear_cache()
if rebuild_search:
frappe.enqueue('frappe.utils.global_search.rebuild_for_doctype', doctype=doctype)
if show_alert:
frappe.msgprint(_('Document renamed from {0} to {1}').format(bold(old), bold(new)), alert=True, indicator='green')
return new
| def rename_doc(
doctype: str,
old: str,
new: str,
force: bool = False,
merge: bool = False,
ignore_permissions: bool = False,
ignore_if_exists: bool = False,
show_alert: bool = True,
rebuild_search: bool = True,
) -> str:
"""Rename a doc(dt, old) to doc(dt, new) and update all linked fields of type "Link"."""
if not frappe.db.exists(doctype, old):
frappe.errprint(_("Failed: {0} to {1} because {0} doesn't exist.").format(old, new))
return
if ignore_if_exists and frappe.db.exists(doctype, new):
frappe.errprint(_("Failed: {0} to {1} because {1} already exists.").format(old, new))
return
if old==new:
frappe.errprint(_("Ignored: {0} to {1} no changes made because old and new name are the same.").format(old, new))
return
force = cint(force)
merge = cint(merge)
meta = frappe.get_meta(doctype)
# call before_rename
old_doc = frappe.get_doc(doctype, old)
out = old_doc.run_method("before_rename", old, new, merge) or {}
new = (out.get("new") or new) if isinstance(out, dict) else (out or new)
new = validate_rename(doctype, new, meta, merge, force, ignore_permissions)
if not merge:
rename_parent_and_child(doctype, old, new, meta)
else:
update_assignments(old, new, doctype)
# update link fields' values
link_fields = get_link_fields(doctype)
update_link_field_values(link_fields, old, new, doctype)
rename_dynamic_links(doctype, old, new)
# save the user settings in the db
update_user_settings(old, new, link_fields)
if doctype=='DocType':
rename_doctype(doctype, old, new)
update_customizations(old, new)
update_attachments(doctype, old, new)
rename_versions(doctype, old, new)
rename_eps_records(doctype, old, new)
# call after_rename
new_doc = frappe.get_doc(doctype, new)
# copy any flags if required
new_doc._local = getattr(old_doc, "_local", None)
new_doc.run_method("after_rename", old, new, merge)
if not merge:
rename_password(doctype, old, new)
# update user_permissions
frappe.db.sql("""UPDATE `tabDefaultValue` SET `defvalue`=%s WHERE `parenttype`='User Permission'
AND `defkey`=%s AND `defvalue`=%s""", (new, doctype, old))
if merge:
new_doc.add_comment('Edit', _("merged {0} into {1}").format(frappe.bold(old), frappe.bold(new)))
else:
new_doc.add_comment('Edit', _("renamed from {0} to {1}").format(frappe.bold(old), frappe.bold(new)))
if merge:
frappe.delete_doc(doctype, old)
new_doc.clear_cache()
frappe.clear_cache()
if rebuild_search:
frappe.enqueue('frappe.utils.global_search.rebuild_for_doctype', doctype=doctype)
if show_alert:
frappe.msgprint(_('Document renamed from {0} to {1}').format(bold(old), bold(new)), alert=True, indicator='green')
return new
|
32,626 | def report_malware(client: Client, args: dict) -> CommandResults:
"""
Returns malware status for given domain
"""
domain = args["domain"]
if not domain:
raise ValueError('domain is missing')
malware = client._api_request(domain=domain, request_type="GET", operation="malware")
return CommandResults(
outputs_prefix="QutteraWebsiteMalwareScanning.malware",
outputs_key_field="error",
outputs=malware
)
| def report_malware(client: Client, args: dict) -> CommandResults:
"""
Returns malware status for given domain
"""
domain = args.get('domain')
if not domain:
raise ValueError('domain is missing')
malware = client._api_request(domain=domain, request_type="GET", operation="malware")
return CommandResults(
outputs_prefix="QutteraWebsiteMalwareScanning.malware",
outputs_key_field="error",
outputs=malware
)
|
40,365 | def get_mesh_laplacian(pos, face):
""" Computes the mesh Laplacian of the mesh given by
:obj:`pos` and :obj:`face`. It is computed as
:math:` \mathbf{L}_{ij} = \begin{cases}
\frac{\cot \angle_{ikj} + \cot \angle_{ilj}}{2 a_{ij}} &
\mbox{if } i, j \mbox{ is an edge,} \\
\sum_{j \in N(i)}{L_{ij}} &
\mbox{if } i \mbox{ is in the diagonal,} \\
0 \mbox{ otherwise.}
\end{cases}`
where :math:`a_{ij}` is the local area element,
i.e. one-third of the neighbouring triangle's area.
Args:
pos (Tensor): The node positions.
face (LongTensor): The face indices.
"""
assert pos.shape[1] == 3
assert face.shape[0] == 3
device = pos.device
dtype = pos.dtype
num_nodes = pos.shape[0]
cot_weight = torch.Tensor().to(dtype).to(device)
area_weight = torch.Tensor().to(dtype).to(device)
edge_index = torch.Tensor().long().to(device)
def add_edge(left, centre, right):
left_pos, central_pos, right_pos = pos[left], pos[centre], pos[right]
left_vec = left_pos - central_pos
right_vec = right_pos - central_pos
dot = torch.einsum('ij, ij -> i', left_vec, right_vec)
cross = torch.norm(torch.cross(left_vec, right_vec, dim=1), dim=1)
cot = dot / cross # cos / sin
nonlocal cot_weight, area_weight, edge_index
cot_weight = torch.cat([cot_weight, cot / 2.0, cot / 2.0])
# one-third of a triangle's area is cross / 6.0
# since each edge is accounted twice, we compute cross / 12.0 instead
area_weight = torch.cat([area_weight, cross / 12.0, cross / 12.0])
edge_index = torch.cat([
edge_index,
torch.stack([left, right], dim=1),
torch.stack([right, left], dim=1)
])
# add all 3 edges of the triangles
add_edge(face[2], face[0], face[1])
add_edge(face[0], face[1], face[2])
add_edge(face[1], face[2], face[0])
# eliminate duplicate matrix entries by adding them together
index_linearizer = torch.Tensor([num_nodes, 1]).to(device)
lin_index = torch.matmul(edge_index.float(), index_linearizer).long()
y, idx = lin_index.unique(return_inverse=True)
edge_index = torch.stack([y // num_nodes, y % num_nodes])
cot_weight = scatter_add(cot_weight, idx, dim=0)
area_weight = scatter_add(area_weight, idx, dim=0)
# compute the diagonal part
row, col = edge_index
cot_deg = scatter_add(cot_weight, row, dim=0, dim_size=num_nodes)
area_deg = scatter_add(area_weight, row, dim=0, dim_size=num_nodes)
deg = cot_deg / area_deg
edge_index, _ = add_self_loops(edge_index, num_nodes=num_nodes)
edge_weight = torch.cat([-cot_weight, deg], dim=0)
return edge_index, edge_weight
| def get_mesh_laplacian(pos, face):
""" Computes the mesh Laplacian of the mesh given by
:obj:`pos` and :obj:`face`. It is computed as
:math:` \mathbf{L}_{ij} = \begin{cases}
\frac{\cot \angle_{ikj} + \cot \angle_{ilj}}{2 a_{ij}} &
\mbox{if } i, j \mbox{ is an edge,} \\
\sum_{j \in N(i)}{L_{ij}} &
\mbox{if } i \mbox{ is in the diagonal,} \\
0 \mbox{ otherwise.}
\end{cases}`
where :math:`a_{ij}` is the local area element,
i.e. one-third of the neighbouring triangle's area.
Args:
pos (Tensor): The node positions.
face (LongTensor): The face indices.
"""
assert pos.shape[1] == 3
assert face.shape[0] == 3
device = pos.device
dtype = pos.dtype
index = edge_index[0] * num_nodes + edge_index[1]
cot_weight = torch.Tensor().to(dtype).to(device)
area_weight = torch.Tensor().to(dtype).to(device)
edge_index = torch.Tensor().long().to(device)
def add_edge(left, centre, right):
left_pos, central_pos, right_pos = pos[left], pos[centre], pos[right]
left_vec = left_pos - central_pos
right_vec = right_pos - central_pos
dot = torch.einsum('ij, ij -> i', left_vec, right_vec)
cross = torch.norm(torch.cross(left_vec, right_vec, dim=1), dim=1)
cot = dot / cross # cos / sin
nonlocal cot_weight, area_weight, edge_index
cot_weight = torch.cat([cot_weight, cot / 2.0, cot / 2.0])
# one-third of a triangle's area is cross / 6.0
# since each edge is accounted twice, we compute cross / 12.0 instead
area_weight = torch.cat([area_weight, cross / 12.0, cross / 12.0])
edge_index = torch.cat([
edge_index,
torch.stack([left, right], dim=1),
torch.stack([right, left], dim=1)
])
# add all 3 edges of the triangles
add_edge(face[2], face[0], face[1])
add_edge(face[0], face[1], face[2])
add_edge(face[1], face[2], face[0])
# eliminate duplicate matrix entries by adding them together
index_linearizer = torch.Tensor([num_nodes, 1]).to(device)
lin_index = torch.matmul(edge_index.float(), index_linearizer).long()
y, idx = lin_index.unique(return_inverse=True)
edge_index = torch.stack([y // num_nodes, y % num_nodes])
cot_weight = scatter_add(cot_weight, idx, dim=0)
area_weight = scatter_add(area_weight, idx, dim=0)
# compute the diagonal part
row, col = edge_index
cot_deg = scatter_add(cot_weight, row, dim=0, dim_size=num_nodes)
area_deg = scatter_add(area_weight, row, dim=0, dim_size=num_nodes)
deg = cot_deg / area_deg
edge_index, _ = add_self_loops(edge_index, num_nodes=num_nodes)
edge_weight = torch.cat([-cot_weight, deg], dim=0)
return edge_index, edge_weight
|
5,395 | def modify(name, **kwargs):
"""
Modify an existing job in the schedule
CLI Example:
.. code-block:: bash
salt '*' schedule.modify job1 function='test.ping' seconds=3600
"""
ret = {"comment": "", "changes": {}, "result": True}
time_conflict = False
for item in ["seconds", "minutes", "hours", "days"]:
if item in kwargs and "when" in kwargs:
time_conflict = True
if item in kwargs and "cron" in kwargs:
time_conflict = True
if time_conflict:
ret["result"] = False
ret[
"comment"
] = 'Error: Unable to use "seconds", "minutes", "hours", or "days" with "when" option.'
return ret
if "when" in kwargs and "cron" in kwargs:
ret["result"] = False
ret["comment"] = 'Unable to use "when" and "cron" options together. Ignoring.'
return ret
current_schedule = list_(show_all=True, return_yaml=False)
if name not in current_schedule:
ret["comment"] = "Job {} does not exist in schedule.".format(name)
ret["result"] = False
return ret
_current = current_schedule[name]
if "function" not in kwargs:
kwargs["function"] = _current.get("function")
# Remove the auto generated _seconds value
if "_seconds" in _current:
_current["seconds"] = _current["_seconds"]
del _current["_seconds"]
# Copy _current _new, then update values from kwargs
_new = pycopy.deepcopy(_current)
_new.update(kwargs)
# Remove test from kwargs, it's not a valid schedule option
if "test" in _new:
del _new["test"]
if "result" in _new and not _new["result"]:
return _new
if _new == _current:
ret["comment"] = "Job {} in correct state".format(name)
return ret
ret["changes"][name] = {
"old": salt.utils.odict.OrderedDict(_current),
"new": salt.utils.odict.OrderedDict(_new),
}
if "test" in kwargs and kwargs["test"]:
ret["comment"] = "Job: {} would be modified in schedule.".format(name)
else:
persist = kwargs.get("persist", True)
if name in list_(show_all=True, where="opts", return_yaml=False):
event_data = {
"name": name,
"schedule": _new,
"func": "modify",
"persist": persist,
}
elif name in list_(show_all=True, where="pillar", return_yaml=False):
event_data = {
"name": name,
"schedule": _new,
"where": "pillar",
"func": "modify",
"persist": False,
}
out = __salt__["event.fire"](event_data, "manage_schedule")
if out:
ret["comment"] = "Modified job: {} in schedule.".format(name)
else:
ret["comment"] = "Failed to modify job {} in schedule.".format(name)
ret["result"] = False
return ret
| def modify(name, **kwargs):
"""
Modify an existing job in the schedule
CLI Example:
.. code-block:: bash
salt '*' schedule.modify job1 function='test.ping' seconds=3600
"""
ret = {"comment": "", "changes": {}, "result": True}
time_conflict = False
for item in ["seconds", "minutes", "hours", "days"]:
if item in kwargs and "when" in kwargs:
time_conflict = True
if item in kwargs and "cron" in kwargs:
time_conflict = True
if time_conflict:
ret["result"] = False
ret[
"comment"
] = 'Error: Unable to use "seconds", "minutes", "hours", or "days" with "when" option.'
return ret
if "when" in kwargs and "cron" in kwargs:
ret["result"] = False
ret["comment"] = 'Unable to use "when" and "cron" options together. Ignoring.'
return ret
current_schedule = list_(show_all=True, return_yaml=False)
if name not in current_schedule:
ret["comment"] = "Job {} does not exist in schedule.".format(name)
ret["result"] = False
return ret
_current = current_schedule[name]
if "function" not in kwargs:
kwargs["function"] = _current.get("function")
# Remove the auto generated _seconds value
if "_seconds" in _current:
_current["seconds"] = _current["_seconds"]
del _current["_seconds"]
# Copy _current _new, then update values from kwargs
_new = pycopy.deepcopy(_current)
_new.update(kwargs)
# Remove test from kwargs, it's not a valid schedule option
_new.pop("test", None)
if "result" in _new and not _new["result"]:
return _new
if _new == _current:
ret["comment"] = "Job {} in correct state".format(name)
return ret
ret["changes"][name] = {
"old": salt.utils.odict.OrderedDict(_current),
"new": salt.utils.odict.OrderedDict(_new),
}
if "test" in kwargs and kwargs["test"]:
ret["comment"] = "Job: {} would be modified in schedule.".format(name)
else:
persist = kwargs.get("persist", True)
if name in list_(show_all=True, where="opts", return_yaml=False):
event_data = {
"name": name,
"schedule": _new,
"func": "modify",
"persist": persist,
}
elif name in list_(show_all=True, where="pillar", return_yaml=False):
event_data = {
"name": name,
"schedule": _new,
"where": "pillar",
"func": "modify",
"persist": False,
}
out = __salt__["event.fire"](event_data, "manage_schedule")
if out:
ret["comment"] = "Modified job: {} in schedule.".format(name)
else:
ret["comment"] = "Failed to modify job {} in schedule.".format(name)
ret["result"] = False
return ret
|
54,083 | def load_arguments(self, _):
from azure.cli.core.commands.parameters import tags_type, get_enum_type, get_three_state_flag
# Managed Cassandra Cluster
for scope in [
'managed-cassandra cluster create',
'managed-cassandra cluster update',
'managed-cassandra cluster show',
'managed-cassandra cluster delete',
'managed-cassandra cluster backup list',
'managed-cassandra cluster backup show']:
with self.argument_context(scope) as c:
c.argument('cluster_name', options_list=['--cluster-name', '-c'], help="Cluster Name", required=True)
# Managed Cassandra Cluster
for scope in [
'managed-cassandra cluster create',
'managed-cassandra cluster update']:
with self.argument_context(scope) as c:
c.argument('tags', arg_type=tags_type)
c.argument('external_gossip_certificates', nargs='+', validator=validate_gossip_certificates, options_list=['--external-gossip-certificates', '-e'], help="A list of certificates that the managed cassandra data center's should accept.")
c.argument('cassandra_version', help="The version of Cassandra chosen.")
c.argument('authentication_method', arg_type=get_enum_type(['None', 'Cassandra', 'Ldap']), help="Authentication mode can be None, Cassandra or Ldap. If None, no authentication will be required to connect to the Cassandra API. If Cassandra, then passwords will be used. Ldap is in preview")
c.argument('hours_between_backups', help="The number of hours between backup attempts.")
c.argument('repair_enabled', help="Enables automatic repair.")
c.argument('client_certificates', nargs='+', validator=validate_client_certificates, help="If specified, enables client certificate authentication to the Cassandra API.")
c.argument('gossip_certificates', help="A list of certificates that should be accepted by on-premise data centers.")
c.argument('external_seed_nodes', nargs='+', validator=validate_seednodes, help="A list of ip addresses of the seed nodes of on-premise data centers.")
c.argument('identity_type', options_list=['--identity-type'], arg_type=get_enum_type(['None', 'SystemAssigned']), help="Type of identity used for Customer Managed Disk Key.")
# Managed Cassandra Cluster
with self.argument_context('managed-cassandra cluster create') as c:
c.argument('location', options_list=['--location', '-l'], help="Azure Location of the Cluster", required=True)
c.argument('delegated_management_subnet_id', options_list=['--delegated-management-subnet-id', '-s'], help="The resource id of a subnet where the ip address of the cassandra management server will be allocated. This subnet must have connectivity to the delegated_subnet_id subnet of each data center.", required=True)
c.argument('initial_cassandra_admin_password', options_list=['--initial-cassandra-admin-password', '-i'], help="The intial password to be configured when a cluster is created for authentication_method Cassandra.")
c.argument('restore_from_backup_id', help="The resource id of a backup. If provided on create, the backup will be used to prepopulate the cluster. The cluster data center count and node counts must match the backup.")
c.argument('cluster_name_override', help="If a cluster must have a name that is not a valid azure resource name, this field can be specified to choose the Cassandra cluster name. Otherwise, the resource name will be used as the cluster name.")
# Managed Cassandra Cluster
for scope in ['managed-cassandra cluster backup show']:
with self.argument_context(scope) as c:
c.argument('backup_id', options_list=['--backup-id'], help="The resource id of the backup", required=True)
# Managed Cassandra Datacenter
for scope in [
'managed-cassandra datacenter create',
'managed-cassandra datacenter update',
'managed-cassandra datacenter show',
'managed-cassandra datacenter delete']:
with self.argument_context(scope) as c:
c.argument('cluster_name', options_list=['--cluster-name', '-c'], help="Cluster Name", required=True)
c.argument('data_center_name', options_list=['--data-center-name', '-d'], help="Datacenter Name", required=True)
# Managed Cassandra Datacenter
for scope in [
'managed-cassandra datacenter create',
'managed-cassandra datacenter update']:
with self.argument_context(scope) as c:
c.argument('node_count', options_list=['--node-count', '-n'], validator=validate_node_count, help="The number of Cassandra virtual machines in this data center. The minimum value is 3.")
c.argument('base64_encoded_cassandra_yaml_fragment', options_list=['--base64-encoded-cassandra-yaml-fragment', '-b'], help="This is a Base64 encoded yaml file that is a subset of cassandra.yaml. Supported fields will be honored and others will be ignored.")
c.argument('data_center_location', options_list=['--data-center-location', '-l'], help="The region where the virtual machine for this data center will be located.")
c.argument('delegated_subnet_id', options_list=['--delegated-subnet-id', '-s'], help="The resource id of a subnet where ip addresses of the Cassandra virtual machines will be allocated. This must be in the same region as data_center_location.")
c.argument('managed_disk_customer_key_uri', options_list=['--managed-disk-customer-key-uri', '-k'], help="Key uri to use for encryption of managed disks. Ensure the system assigned identity of the cluster has been assigned appropriate permissions(key get/wrap/unwrap permissions) on the key.")
c.argument('backup_storage_customer_key_uri', options_list=['--backup-storage-customer-key-uri', '-p'], help="Indicates the Key Uri of the customer key to use for encryption of the backup storage account.")
c.argument('server_hostname', options_list=['--ldap-server-hostname'], help="Hostname of the LDAP server.")
c.argument('server_port', options_list=['--ldap-server-port'], help="Port of the LDAP server. Defaults to 636")
c.argument('service_user_distinguished_name', options_list=['--ldap-service-user-dn'], help="Distinguished name of the look up user account, who can look up user details on authentication.")
c.argument('service_user_password', options_list=['--ldap-svc-user-pwd'], help="Password of the look up user.")
c.argument('search_base_distinguished_name', options_list=['--ldap-search-base-dn'], help="Distinguished name of the object to start the recursive search of users from.")
c.argument('search_filter_template', options_list=['--ldap-search-filter'], help="Template to use for searching. Defaults to (cn=%s) where %s will be replaced by the username used to login. While using this parameter from Windows Powershell (not Windows CommandPrompt or Linux) there is a known issue with escaping special characters, so pass as \"\"\"(cn=%s)\"\"\" instead.")
c.argument('server_certificates', nargs='+', validator=validate_server_certificates, options_list=['--ldap-server-certs'], help="LDAP server certificate. It should have subject alternative name(SAN) DNS Name entry matching the hostname of the LDAP server.")
# Managed Cassandra Datacenter
with self.argument_context('managed-cassandra datacenter create') as c:
c.argument('data_center_location', options_list=['--data-center-location', '-l'], help="Azure Location of the Datacenter", required=True)
c.argument('delegated_subnet_id', options_list=['--delegated-subnet-id', '-s'], help="The resource id of a subnet where ip addresses of the Cassandra virtual machines will be allocated. This must be in the same region as data_center_location.", required=True)
c.argument('node_count', options_list=['--node-count', '-n'], validator=validate_node_count, help="The number of Cassandra virtual machines in this data center. The minimum value is 3.", required=True)
c.argument('sku', options_list=['--sku'], help="Virtual Machine SKU used for data centers. Default value is Standard_DS14_v2")
c.argument('disk_sku', options_list=['--disk-sku'], help="Disk SKU used for data centers. Default value is P30.")
c.argument('disk_capacity', options_list=['--disk-capacity'], help="Number of disk used for data centers. Default value is 4.")
c.argument('availability_zone', options_list=['--availability-zone', '-z'], arg_type=get_three_state_flag(), help="If the data center haves Availability Zone feature, apply it to the Virtual Machine ScaleSet that host the data center virtual machines.")
# Managed Cassandra Datacenter
with self.argument_context('managed-cassandra datacenter list') as c:
c.argument('cluster_name', options_list=['--cluster-name', '-c'], help="Cluster Name", required=True)
# Services
for scope in [
'cosmosdb service create',
'cosmosdb service update']:
with self.argument_context('cosmosdb service') as c:
c.argument('account_name', completer=None, options_list=['--account-name', '-a'], help='Name of the Cosmos DB database account.', id_part=None)
c.argument('resource_group_name', completer=None, options_list=['--resource-group-name', '-g'], help='Name of the resource group of the database account.', id_part=None)
c.argument('service_kind', options_list=['--kind', '-k'], help="Service kind")
c.argument('service_name', options_list=['--name', '-n'], help="Service Name.")
c.argument('instance_count', options_list=['--count', '-c'], help="Instance Count.")
c.argument('instance_size', options_list=['--size'], help="Instance Size. Possible values are: Cosmos.D4s, Cosmos.D8s, Cosmos.D16s etc")
with self.argument_context('cosmosdb service create') as c:
c.argument('instance_size', options_list=['--size'], help="Instance Size. Possible values are: Cosmos.D4s, Cosmos.D8s, Cosmos.D16s etc")
| def load_arguments(self, _):
from azure.cli.core.commands.parameters import tags_type, get_enum_type, get_three_state_flag
# Managed Cassandra Cluster
for scope in [
'managed-cassandra cluster create',
'managed-cassandra cluster update',
'managed-cassandra cluster show',
'managed-cassandra cluster delete',
'managed-cassandra cluster backup list',
'managed-cassandra cluster backup show']:
with self.argument_context(scope) as c:
c.argument('cluster_name', options_list=['--cluster-name', '-c'], help="Cluster Name", required=True)
# Managed Cassandra Cluster
for scope in [
'managed-cassandra cluster create',
'managed-cassandra cluster update']:
with self.argument_context(scope) as c:
c.argument('tags', arg_type=tags_type)
c.argument('external_gossip_certificates', nargs='+', validator=validate_gossip_certificates, options_list=['--external-gossip-certificates', '-e'], help="A list of certificates that the managed cassandra data center's should accept.")
c.argument('cassandra_version', help="The version of Cassandra chosen.")
c.argument('authentication_method', arg_type=get_enum_type(['None', 'Cassandra', 'Ldap']), help="Authentication mode can be None, Cassandra or Ldap. If None, no authentication will be required to connect to the Cassandra API. If Cassandra, then passwords will be used. Ldap is in preview")
c.argument('hours_between_backups', help="The number of hours between backup attempts.")
c.argument('repair_enabled', help="Enables automatic repair.")
c.argument('client_certificates', nargs='+', validator=validate_client_certificates, help="If specified, enables client certificate authentication to the Cassandra API.")
c.argument('gossip_certificates', help="A list of certificates that should be accepted by on-premise data centers.")
c.argument('external_seed_nodes', nargs='+', validator=validate_seednodes, help="A list of ip addresses of the seed nodes of on-premise data centers.")
c.argument('identity_type', options_list=['--identity-type'], arg_type=get_enum_type(['None', 'SystemAssigned']), help="Type of identity used for Customer Managed Disk Key.")
# Managed Cassandra Cluster
with self.argument_context('managed-cassandra cluster create') as c:
c.argument('location', options_list=['--location', '-l'], help="Azure Location of the Cluster", required=True)
c.argument('delegated_management_subnet_id', options_list=['--delegated-management-subnet-id', '-s'], help="The resource id of a subnet where the ip address of the cassandra management server will be allocated. This subnet must have connectivity to the delegated_subnet_id subnet of each data center.", required=True)
c.argument('initial_cassandra_admin_password', options_list=['--initial-cassandra-admin-password', '-i'], help="The intial password to be configured when a cluster is created for authentication_method Cassandra.")
c.argument('restore_from_backup_id', help="The resource id of a backup. If provided on create, the backup will be used to prepopulate the cluster. The cluster data center count and node counts must match the backup.")
c.argument('cluster_name_override', help="If a cluster must have a name that is not a valid azure resource name, this field can be specified to choose the Cassandra cluster name. Otherwise, the resource name will be used as the cluster name.")
# Managed Cassandra Cluster
for scope in ['managed-cassandra cluster backup show']:
with self.argument_context(scope) as c:
c.argument('backup_id', options_list=['--backup-id'], help="The resource id of the backup", required=True)
# Managed Cassandra Datacenter
for scope in [
'managed-cassandra datacenter create',
'managed-cassandra datacenter update',
'managed-cassandra datacenter show',
'managed-cassandra datacenter delete']:
with self.argument_context(scope) as c:
c.argument('cluster_name', options_list=['--cluster-name', '-c'], help="Cluster Name", required=True)
c.argument('data_center_name', options_list=['--data-center-name', '-d'], help="Datacenter Name", required=True)
# Managed Cassandra Datacenter
for scope in [
'managed-cassandra datacenter create',
'managed-cassandra datacenter update']:
with self.argument_context(scope) as c:
c.argument('node_count', options_list=['--node-count', '-n'], validator=validate_node_count, help="The number of Cassandra virtual machines in this data center. The minimum value is 3.")
c.argument('base64_encoded_cassandra_yaml_fragment', options_list=['--base64-encoded-cassandra-yaml-fragment', '-b'], help="This is a Base64 encoded yaml file that is a subset of cassandra.yaml. Supported fields will be honored and others will be ignored.")
c.argument('data_center_location', options_list=['--data-center-location', '-l'], help="The region where the virtual machine for this data center will be located.")
c.argument('delegated_subnet_id', options_list=['--delegated-subnet-id', '-s'], help="The resource id of a subnet where ip addresses of the Cassandra virtual machines will be allocated. This must be in the same region as data_center_location.")
c.argument('managed_disk_customer_key_uri', options_list=['--managed-disk-customer-key-uri', '-k'], help="Key uri to use for encryption of managed disks. Ensure the system assigned identity of the cluster has been assigned appropriate permissions(key get/wrap/unwrap permissions) on the key.")
c.argument('backup_storage_customer_key_uri', options_list=['--backup-storage-customer-key-uri', '-p'], help="Indicates the Key Uri of the customer key to use for encryption of the backup storage account.")
c.argument('server_hostname', options_list=['--ldap-server-hostname'], help="Hostname of the LDAP server.")
c.argument('server_port', options_list=['--ldap-server-port'], help="Port of the LDAP server. Defaults to 636")
c.argument('service_user_distinguished_name', options_list=['--ldap-service-user-dn'], help="Distinguished name of the look up user account, who can look up user details on authentication.")
c.argument('service_user_password', options_list=['--ldap-svc-user-pwd'], help="Password of the look up user.")
c.argument('search_base_distinguished_name', options_list=['--ldap-search-base-dn'], help="Distinguished name of the object to start the recursive search of users from.")
c.argument('search_filter_template', options_list=['--ldap-search-filter'], help="Template to use for searching. Defaults to (cn=%s) where %s will be replaced by the username used to login. While using this parameter from Windows Powershell (not Windows CommandPrompt or Linux) there is a known issue with escaping special characters, so pass as \"\"\"(cn=%s)\"\"\" instead.")
c.argument('server_certificates', nargs='+', validator=validate_server_certificates, options_list=['--ldap-server-certs'], help="LDAP server certificate. It should have subject alternative name(SAN) DNS Name entry matching the hostname of the LDAP server.")
# Managed Cassandra Datacenter
with self.argument_context('managed-cassandra datacenter create') as c:
c.argument('data_center_location', options_list=['--data-center-location', '-l'], help="Azure Location of the Datacenter", required=True)
c.argument('delegated_subnet_id', options_list=['--delegated-subnet-id', '-s'], help="The resource id of a subnet where ip addresses of the Cassandra virtual machines will be allocated. This must be in the same region as data_center_location.", required=True)
c.argument('node_count', options_list=['--node-count', '-n'], validator=validate_node_count, help="The number of Cassandra virtual machines in this data center. The minimum value is 3.", required=True)
c.argument('sku', options_list=['--sku'], help="Virtual Machine SKU used for data centers. Default value is Standard_DS14_v2")
c.argument('disk_sku', options_list=['--disk-sku'], help="Disk SKU used for data centers. Default value is P30.")
c.argument('disk_capacity', options_list=['--disk-capacity'], help="Number of disk used for data centers. Default value is 4.")
c.argument('availability_zone', options_list=['--availability-zone', '-z'], arg_type=get_three_state_flag(), help="If the data center haves Availability Zone feature, apply it to the Virtual Machine ScaleSet that host the data center virtual machines.")
# Managed Cassandra Datacenter
with self.argument_context('managed-cassandra datacenter list') as c:
c.argument('cluster_name', options_list=['--cluster-name', '-c'], help="Cluster Name", required=True)
# Services
with self.argument_context('cosmosdb service') as c:
c.argument('account_name', completer=None, options_list=['--account-name', '-a'], help='Name of the Cosmos DB database account.', id_part=None)
c.argument('resource_group_name', completer=None, options_list=['--resource-group-name', '-g'], help='Name of the resource group of the database account.', id_part=None)
c.argument('service_kind', options_list=['--kind', '-k'], help="Service kind")
c.argument('service_name', options_list=['--name', '-n'], help="Service Name.")
c.argument('instance_count', options_list=['--count', '-c'], help="Instance Count.")
c.argument('instance_size', options_list=['--size'], help="Instance Size. Possible values are: Cosmos.D4s, Cosmos.D8s, Cosmos.D16s etc")
with self.argument_context('cosmosdb service create') as c:
c.argument('instance_size', options_list=['--size'], help="Instance Size. Possible values are: Cosmos.D4s, Cosmos.D8s, Cosmos.D16s etc")
|
57,818 | def main():
try:
args = demisto.args()
last_seen_gte = args.get('from')
last_seen_lte = args.get('to')
limit = args.get('limit', '100')
get_endpoints_args = {'limit': limit}
if last_seen_gte:
get_endpoints_args['last_seen_gte'] = last_seen_gte
if last_seen_lte and last_seen_lte != '0001-01-01T00:00:00Z':
get_endpoints_args['last_seen_lte'] = last_seen_lte
res = demisto.executeCommand('xdr-get-endpoints', get_endpoints_args)
if isError(res[0]):
return_error(f'Error occurred while trying to get XDR endpoints: {res[0].get("Contents")}')
endpoints = res[0]['Contents']
connected_endpoints = 0
for endpoint in endpoints:
if endpoint.get('endpoint_status') == 'CONNECTED':
connected_endpoints = connected_endpoints + 1
return_outputs(str(connected_endpoints))
except Exception as ex:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute XDRConnectedEndpoints. Error: {str(ex)}')
| def main():
try:
args = demisto.args()
last_seen_gte = args.get('from')
last_seen_lte = args.get('to')
limit = args.get('limit', '100')
get_endpoints_args = {'limit': limit}
if last_seen_gte:
get_endpoints_args['last_seen_gte'] = last_seen_gte
if last_seen_lte and last_seen_lte != '0001-01-01T00:00:00Z':
get_endpoints_args['last_seen_lte'] = last_seen_lte
res = demisto.executeCommand('xdr-get-endpoints', get_endpoints_args)
if isError(res[0]):
return_error(f'Error occurred while trying to get XDR endpoints: {res[0].get("Contents")}')
endpoints = res[0]['Contents']
connected_endpoints = [endpoint for endpoint in endpoints if endpoint.get('endpoint_status') == 'CONNECTED']
connected_endpoints_num = len(connected_endpoints)
return_outputs(str(connected_endpoints))
except Exception as ex:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute XDRConnectedEndpoints. Error: {str(ex)}')
|
6,999 | def getdate(
string_date: Optional["DateTimeLikeObject"] = None, day_first: bool = False
) -> datetime.date | None:
"""
Converts string date (yyyy-mm-dd) to datetime.date object.
If no input is provided, current date is returned.
"""
from dateutil import parser
from dateutil.parser._parser import ParserError
if not string_date:
return get_datetime().date()
if isinstance(string_date, datetime.datetime):
return string_date.date()
elif isinstance(string_date, datetime.date):
return string_date
if is_invalid_date_string(string_date):
return None
try:
return parser.parse(string_date, dayfirst=day_first).date()
except ParserError:
frappe.throw(
frappe._("{} is not a valid date string.").format(frappe.bold(string_date)),
title=frappe._("Invalid Date"),
)
| def getdate(
string_date: Optional["DateTimeLikeObject"] = None, parse_day_first: bool = False
) -> datetime.date | None:
"""
Converts string date (yyyy-mm-dd) to datetime.date object.
If no input is provided, current date is returned.
"""
from dateutil import parser
from dateutil.parser._parser import ParserError
if not string_date:
return get_datetime().date()
if isinstance(string_date, datetime.datetime):
return string_date.date()
elif isinstance(string_date, datetime.date):
return string_date
if is_invalid_date_string(string_date):
return None
try:
return parser.parse(string_date, dayfirst=day_first).date()
except ParserError:
frappe.throw(
frappe._("{} is not a valid date string.").format(frappe.bold(string_date)),
title=frappe._("Invalid Date"),
)
|
8,773 | def is_triggerable(obj):
"""Check if ``obj`` can handle the bot's triggers.
:param obj: any :term:`function` to check.
:return: ``True`` if ``obj`` can handle the bot's triggers.
A triggerable is a callable that will be used by the bot to handle a
particular trigger (i.e. an IRC message): it can be a regex rule, an event,
an intent, a command or nickname command. However, it must not be a job
or an URL callback.
.. seealso::
The :mod:`sopel.module` defines decorators to make a function a
triggerable object.
"""
forbidden = (
'interval',
'url_regex',
)
must_not = not any(hasattr(obj, attr) for attr in forbidden)
allowed = (
'rule',
'event',
'intents',
'commands',
'nickname_commands',
)
return must_not and any(hasattr(obj, attr) for attr in allowed)
| def is_triggerable(obj):
"""Check if ``obj`` can handle the bot's triggers.
:param obj: any :term:`function` to check.
:return: ``True`` if ``obj`` can handle the bot's triggers.
A triggerable is a callable that will be used by the bot to handle a
particular trigger (i.e. an IRC message): it can be a regex rule, an event,
an intent, a command or nickname command. However, it must not be a job
or a URL callback.
.. seealso::
The :mod:`sopel.module` defines decorators to make a function a
triggerable object.
"""
forbidden = (
'interval',
'url_regex',
)
must_not = not any(hasattr(obj, attr) for attr in forbidden)
allowed = (
'rule',
'event',
'intents',
'commands',
'nickname_commands',
)
return must_not and any(hasattr(obj, attr) for attr in allowed)
|
9,093 | def fill_from_encoding(enc:str)->list:
lst=()
for x in range(256):
try:
lst+=(bytes((x,)).decode(enc),)
except:
lst+=(chr(x),)
return lst
_win_encoding = fill_from_encoding("cp1252")
| def fill_from_encoding(enc:str)->list:
lst=()
for x in range(256):
try:
lst+=(bytes((x,)).decode(enc),)
except Exception:
lst+=(chr(x),)
return lst
_win_encoding = fill_from_encoding("cp1252")
|
20,280 | def run(options: 'argparse.Namespace') -> int:
bdir = options.builddir # type: Path
if not bdir.exists():
raise MesonException('Path to builddir {} does not exist!'.format(str(bdir.resolve())))
if not bdir.is_dir():
raise MesonException('builddir path should be a directory.')
cmd = [] # type: T.List[str]
runner = None # type T.Optional[str]
slns = list(bdir.glob('*.sln'))
if (bdir / 'build.ninja').exists():
runner = os.environ.get('NINJA')
if not runner:
if shutil.which('ninja'):
runner = 'ninja'
elif shutil.which('samu'):
runner = 'samu'
if runner is None:
raise MesonException('Cannot find either ninja or samu.')
cmd = [runner, '-C', bdir.as_posix()]
if options.targets and options.clean:
raise MesonException('`target` and `clean` can\'t be used simultaneously')
if options.targets:
with (bdir/'meson-info'/'intro-targets.json').open() as f:
schema = json.load(f)
for target in options.targets:
cmd.extend([generate_ninja_target_name(target, bdir, schema)])
if options.clean:
cmd.append('clean')
# If the value is set to < 1 then don't set anything, which let's
# ninja/samu decide what to do.
if options.jobs > 0:
cmd.extend(['-j', str(options.jobs)])
if options.load_average > 0:
cmd.extend(['-l', str(options.load_average)])
# TODO: with python 3.8 this could be `elif slns := bdir.glob('*.sln'):`
elif slns:
assert len(slns) == 1, 'More than one solution in a project?'
sln = slns[0]
cmd = ['msbuild', str(sln.resolve())]
# In msbuild `-m` with no number means "detect cpus", the default is `-m1`
if options.jobs > 0:
cmd.append('-m{}'.format(options.jobs))
else:
cmd.append('-m')
if options.targets and options.clean:
raise MesonException('`target` and `clean` can\'t be used simultaneously')
if options.targets:
with Path(bdir/'meson-info'/'intro-targets.json').open() as f:
schema = json.load(f)
for target in options.targets:
cmd.extend(['/t:{}'.format(generate_msvs_target_name(target, bdir, schema))])
if options.clean:
cmd.extend(['/t:Clean'])
if options.load_average:
mlog.warning('Msbuild does not have a load-average switch, ignoring.')
# TODO: xcode?
else:
raise MesonException(
'Could not find any runner or backend for directory {}'.format(bdir.resolve().as_posix()))
mlog.log('Found runner:', runner)
p, *_ = mesonlib.Popen_safe(cmd, stdout=sys.stdout.buffer, stderr=sys.stderr.buffer)
return p.returncode
| def run(options: 'argparse.Namespace') -> int:
bdir = options.builddir # type: Path
if not bdir.exists():
raise MesonException('Path to builddir {} does not exist!'.format(str(bdir.resolve())))
if not bdir.is_dir():
raise MesonException('builddir path should be a directory.')
cmd = [] # type: T.List[str]
runner = None # type T.Optional[str]
slns = list(bdir.glob('*.sln'))
if (bdir / 'build.ninja').exists():
runner = os.environ.get('NINJA')
if not runner:
if shutil.which('ninja'):
runner = 'ninja'
elif shutil.which('samu'):
runner = 'samu'
if runner is None:
raise MesonException('Cannot find either ninja or samu.')
cmd = [runner, '-C', bdir.as_posix()]
if options.targets and options.clean:
raise MesonException('`target` and `clean` can\'t be used simultaneously')
if options.targets:
with (bdir/'meson-info'/'intro-targets.json').open() as f:
schema = json.load(f)
cmd.extend([generate_ninja_target_name(t, bdir, schema) for t in options.targets])
if options.clean:
cmd.append('clean')
# If the value is set to < 1 then don't set anything, which let's
# ninja/samu decide what to do.
if options.jobs > 0:
cmd.extend(['-j', str(options.jobs)])
if options.load_average > 0:
cmd.extend(['-l', str(options.load_average)])
# TODO: with python 3.8 this could be `elif slns := bdir.glob('*.sln'):`
elif slns:
assert len(slns) == 1, 'More than one solution in a project?'
sln = slns[0]
cmd = ['msbuild', str(sln.resolve())]
# In msbuild `-m` with no number means "detect cpus", the default is `-m1`
if options.jobs > 0:
cmd.append('-m{}'.format(options.jobs))
else:
cmd.append('-m')
if options.targets and options.clean:
raise MesonException('`target` and `clean` can\'t be used simultaneously')
if options.targets:
with Path(bdir/'meson-info'/'intro-targets.json').open() as f:
schema = json.load(f)
for target in options.targets:
cmd.extend(['/t:{}'.format(generate_msvs_target_name(target, bdir, schema))])
if options.clean:
cmd.extend(['/t:Clean'])
if options.load_average:
mlog.warning('Msbuild does not have a load-average switch, ignoring.')
# TODO: xcode?
else:
raise MesonException(
'Could not find any runner or backend for directory {}'.format(bdir.resolve().as_posix()))
mlog.log('Found runner:', runner)
p, *_ = mesonlib.Popen_safe(cmd, stdout=sys.stdout.buffer, stderr=sys.stderr.buffer)
return p.returncode
|
47,987 | def main():
all_passed = True
index_file_paths = (
OMZ_ROOT / 'models/intel/index.md',
OMZ_ROOT / 'models/public/index.md',
OMZ_ROOT / 'demos/README.md',
)
all_md_files = tuple(find_md_files())
def complain(message):
nonlocal all_passed
all_passed = False
print(message, file=sys.stderr)
index_child_md_links = {}
for index_file_path in index_file_paths:
if not index_file_path.exists():
complain(f'{index_file_path}: file not found')
continue
required_md_links = []
for md_file in all_md_files:
if md_file.name == "README.md" and md_file.parent != index_file_path.parent:
try:
md_rel_path = md_file.relative_to(index_file_path.parent)
except ValueError:
continue
md_intermediate_parents = list(md_rel_path.parents)[1:-1] # removed root and first parent dirs
if not any((index_file_path.parent / parent_dir / 'README.md').exists()
for parent_dir in md_intermediate_parents):
required_md_links.append(md_file)
index_child_md_links[index_file_path] = sorted(required_md_links)
for md_path in sorted(all_md_files):
referenced_md_files = set()
md_path_rel = md_path.relative_to(OMZ_ROOT)
doc_page = omzdocs.DocumentationPage(md_path.read_text(encoding='UTF-8'))
# check local link validity
for url in sorted([ref.url for ref in doc_page.external_references()]):
try:
components = urllib.parse.urlparse(url)
except ValueError:
complain(f'{md_path_rel}: invalid URL reference {url!r}')
continue
if components.scheme: # non-local URLs
continue
if components.netloc or components.path.startswith('/'):
complain(f'{md_path_rel}: non-relative local URL reference "{url}"')
continue
if not components.path: # self-link
continue
target_path = (md_path.parent / urllib.request.url2pathname(components.path)).resolve()
if OMZ_ROOT not in target_path.parents:
complain(f'{md_path_rel}: URL reference "{url}" points outside the OMZ directory')
continue
if not target_path.is_file():
complain(f'{md_path_rel}: URL reference "{url}" target'
' does not exist or is not a file')
continue
if md_path in index_child_md_links:
referenced_md_files.add(target_path)
# check <omz_dir> link validity
for link in sorted([link for link in doc_page.omz_references() if link.startswith('<omz_dir>')]):
file_path = Path(link.replace('<omz_dir>', str(OMZ_ROOT)))
try:
file_relative_path = file_path.relative_to(OMZ_ROOT)
except ValueError:
complain(f'{md_path_rel}: invalid OMZ reference {file_path!r}')
continue
if str(file_relative_path) == md_path_rel: # self-link
continue
if not (file_path.is_file() or file_path.is_dir()):
complain(f'{md_path_rel}: OMZ reference "{file_relative_path}" target'
' does not exist')
# check for existence of links to README.md files of models and demos
if md_path in index_child_md_links:
for md_file in index_child_md_links[md_path]:
if md_file not in referenced_md_files:
complain(f"{md_path_rel}: {md_file.relative_to(OMZ_ROOT)} is not referenced")
# check for HTML fragments that are unsupported by Doxygen
for html_fragment in doc_page.html_fragments():
match = HTML_FRAGMENT_RE.match(html_fragment)
if not match:
complain(f'{md_path_rel}: cannot parse HTML fragment {html_fragment!r}')
continue
if match.group(1).lower() not in ALLOWED_HTML_ELEMENTS:
complain(f'{md_path_rel}: unknown/disallowed HTML element in {html_fragment!r}')
continue
sys.exit(0 if all_passed else 1)
| def main():
all_passed = True
index_file_paths = (
OMZ_ROOT / 'models/intel/index.md',
OMZ_ROOT / 'models/public/index.md',
OMZ_ROOT / 'demos/README.md',
)
all_md_files = tuple(find_md_files())
def complain(message):
nonlocal all_passed
all_passed = False
print(message, file=sys.stderr)
index_child_md_links = {}
for index_file_path in index_file_paths:
if not index_file_path.exists():
complain(f'{index_file_path}: file not found')
continue
required_md_links = []
for md_file in all_md_files:
if md_file.name == "README.md" and md_file.parent != index_file_path.parent:
try:
md_rel_path = md_file.relative_to(index_file_path.parent)
except ValueError:
continue
md_intermediate_parents = list(md_rel_path.parents)[1:-1] # removed root and first parent dirs
if not any((index_file_path.parent / parent_dir / 'README.md').exists()
for parent_dir in md_intermediate_parents):
required_md_links.append(md_file)
index_child_md_links[index_file_path] = sorted(required_md_links)
for md_path in sorted(all_md_files):
referenced_md_files = set()
md_path_rel = md_path.relative_to(OMZ_ROOT)
doc_page = omzdocs.DocumentationPage(md_path.read_text(encoding='UTF-8'))
# check local link validity
for url in sorted([ref.url for ref in doc_page.external_references()]):
try:
components = urllib.parse.urlparse(url)
except ValueError:
complain(f'{md_path_rel}: invalid URL reference {url!r}')
continue
if components.scheme: # non-local URLs
continue
if components.netloc or components.path.startswith('/'):
complain(f'{md_path_rel}: non-relative local URL reference "{url}"')
continue
if not components.path: # self-link
continue
target_path = (md_path.parent / urllib.request.url2pathname(components.path)).resolve()
if OMZ_ROOT not in target_path.parents:
complain(f'{md_path_rel}: URL reference "{url}" points outside the OMZ directory')
continue
if not target_path.is_file():
complain(f'{md_path_rel}: URL reference "{url}" target'
' does not exist or is not a file')
continue
if md_path in index_child_md_links:
referenced_md_files.add(target_path)
# check <omz_dir> reference validity
for link in sorted([link for link in doc_page.omz_references() if link.startswith('<omz_dir>')]):
file_path = Path(link.replace('<omz_dir>', str(OMZ_ROOT)))
try:
file_relative_path = file_path.relative_to(OMZ_ROOT)
except ValueError:
complain(f'{md_path_rel}: invalid OMZ reference {file_path!r}')
continue
if str(file_relative_path) == md_path_rel: # self-link
continue
if not (file_path.is_file() or file_path.is_dir()):
complain(f'{md_path_rel}: OMZ reference "{file_relative_path}" target'
' does not exist')
# check for existence of links to README.md files of models and demos
if md_path in index_child_md_links:
for md_file in index_child_md_links[md_path]:
if md_file not in referenced_md_files:
complain(f"{md_path_rel}: {md_file.relative_to(OMZ_ROOT)} is not referenced")
# check for HTML fragments that are unsupported by Doxygen
for html_fragment in doc_page.html_fragments():
match = HTML_FRAGMENT_RE.match(html_fragment)
if not match:
complain(f'{md_path_rel}: cannot parse HTML fragment {html_fragment!r}')
continue
if match.group(1).lower() not in ALLOWED_HTML_ELEMENTS:
complain(f'{md_path_rel}: unknown/disallowed HTML element in {html_fragment!r}')
continue
sys.exit(0 if all_passed else 1)
|
37,577 | def prepare_state(self, state, qubits=None):
r"""Prpare qubits in a specific state.
Args:
params (str or list or int):
* str: labels of basis states of the Pauli eigenstates Z, X, Y. See
:meth:`~qiskit.quantum_info.states.statevector.Statevector.from_label`.
Notice the order of the labels is reversed with respect to the qubit index to
be applied to. Example label '01' initializes the qubit zero to `|1>` and the
qubit one to `|0>`.
* list: vector of complex amplitudes to initialize to.
* int: an integer that is used as a bitmap indicating which qubits to initialize
to `|1>`. Example: setting params to 5 would initialize qubit 0 and qubit 2
to `|1>` and qubit 1 to `|0>`.
qubits (QuantumRegister or int):
* QuantumRegister: A list of qubits to be initialized [Default: None].
* int: Index of qubit to be initialized [Default: None].
Returns:
qiskit.circuit.Instruction: a handle to the instruction that was just initialized
Examples:
Prepare a qubit in the state :math:`(|0\rangle - |1\rangle) / \sqrt{2}`.
.. jupyter-execute::
import numpy as np
from qiskit import QuantumCircuit
circuit = QuantumCircuit(1)
circuit.prepare_state([1/np.sqrt(2), -1/np.sqrt(2)], 0)
circuit.draw()
output:
.. parsed-literal::
┌─────────────────────────────────────┐
q_0: ┤ State_preparation(0.70711,-0.70711) ├
└─────────────────────────────────────┘
Prepare from a string two qubits in the state `|10>`.
The order of the labels is reversed with respect to qubit index.
More information about labels for basis states are in
:meth:`~qiskit.quantum_info.states.statevector.Statevector.from_label`.
.. jupyter-execute::
import numpy as np
from qiskit import QuantumCircuit
circuit = QuantumCircuit(2)
circuit.prepare_state('01', circuit.qubits)
circuit.draw()
output:
.. parsed-literal::
┌─────────────────────────┐
q_0: ┤0 ├
│ State_preparation(0,1) │
q_1: ┤1 ├
└─────────────────────────┘
Initialize two qubits from an array of complex amplitudes
.. jupyter-execute::
import numpy as np
from qiskit import QuantumCircuit
circuit = QuantumCircuit(2)
circuit.prepare_state([0, 1/np.sqrt(2), -1.j/np.sqrt(2), 0], circuit.qubits)
circuit.draw()
output:
.. parsed-literal::
┌───────────────────────────────────────────┐
q_0: ┤0 ├
│ State_preparation(0,0.70711,-0.70711j,0) │
q_1: ┤1 ├
└───────────────────────────────────────────┘
"""
if qubits is None:
qubits = self.qubits
else:
if isinstance(qubits, int):
qubits = [qubits]
qubits = self._bit_argument_conversion(qubits, self.qubits)
num_qubits = None if not isinstance(state, int) else len(qubits)
return self.append(StatePreparation(state, num_qubits), qubits)
| def prepare_state(self, state, qubits=None):
r"""Prepare qubits in a specific state.
Args:
params (str or list or int):
* str: labels of basis states of the Pauli eigenstates Z, X, Y. See
:meth:`~qiskit.quantum_info.states.statevector.Statevector.from_label`.
Notice the order of the labels is reversed with respect to the qubit index to
be applied to. Example label '01' initializes the qubit zero to `|1>` and the
qubit one to `|0>`.
* list: vector of complex amplitudes to initialize to.
* int: an integer that is used as a bitmap indicating which qubits to initialize
to `|1>`. Example: setting params to 5 would initialize qubit 0 and qubit 2
to `|1>` and qubit 1 to `|0>`.
qubits (QuantumRegister or int):
* QuantumRegister: A list of qubits to be initialized [Default: None].
* int: Index of qubit to be initialized [Default: None].
Returns:
qiskit.circuit.Instruction: a handle to the instruction that was just initialized
Examples:
Prepare a qubit in the state :math:`(|0\rangle - |1\rangle) / \sqrt{2}`.
.. jupyter-execute::
import numpy as np
from qiskit import QuantumCircuit
circuit = QuantumCircuit(1)
circuit.prepare_state([1/np.sqrt(2), -1/np.sqrt(2)], 0)
circuit.draw()
output:
.. parsed-literal::
┌─────────────────────────────────────┐
q_0: ┤ State_preparation(0.70711,-0.70711) ├
└─────────────────────────────────────┘
Prepare from a string two qubits in the state `|10>`.
The order of the labels is reversed with respect to qubit index.
More information about labels for basis states are in
:meth:`~qiskit.quantum_info.states.statevector.Statevector.from_label`.
.. jupyter-execute::
import numpy as np
from qiskit import QuantumCircuit
circuit = QuantumCircuit(2)
circuit.prepare_state('01', circuit.qubits)
circuit.draw()
output:
.. parsed-literal::
┌─────────────────────────┐
q_0: ┤0 ├
│ State_preparation(0,1) │
q_1: ┤1 ├
└─────────────────────────┘
Initialize two qubits from an array of complex amplitudes
.. jupyter-execute::
import numpy as np
from qiskit import QuantumCircuit
circuit = QuantumCircuit(2)
circuit.prepare_state([0, 1/np.sqrt(2), -1.j/np.sqrt(2), 0], circuit.qubits)
circuit.draw()
output:
.. parsed-literal::
┌───────────────────────────────────────────┐
q_0: ┤0 ├
│ State_preparation(0,0.70711,-0.70711j,0) │
q_1: ┤1 ├
└───────────────────────────────────────────┘
"""
if qubits is None:
qubits = self.qubits
else:
if isinstance(qubits, int):
qubits = [qubits]
qubits = self._bit_argument_conversion(qubits, self.qubits)
num_qubits = None if not isinstance(state, int) else len(qubits)
return self.append(StatePreparation(state, num_qubits), qubits)
|
32,309 | def main(command: str, demisto_params: dict):
demisto.debug(f'Command being called is {demisto.command()}')
try:
options = DefenderIntegrationOptions.parse_obj(demisto_params)
request = DefenderHTTPRequest.parse_obj(demisto_params)
authenticator = DefenderAuthenticator.parse_obj(demisto_params)
clinet = DefenderClient(request=request, options=options, authenticator=authenticator)
get_events = DefenderGetEvents(client=clinet, options=options)
if command == 'test-module':
return_results(test_module(get_events=get_events))
elif command in ('fetch-events', 'microsoft-365-defender-get-events'):
events = get_events.run()
if command == 'microsoft-365-defender-get-events':
demisto.debug(f'{command=}, publishing events to the context')
human_readable = tableToMarkdown(name="Alerts:", t=events)
return_results(CommandResults('Microsoft365Defender.alerts', 'id', events, readable_output=human_readable))
if events:
# publishing events to XSIAM
demisto.debug(f'{command=}, publishing events to XSIAM')
demisto.setLastRun(get_events.get_last_run(events))
demisto.debug(f'Last run set to {demisto.getLastRun()}')
send_events_to_xsiam(events, vendor='Microsoft', product='Defender 365')
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
| def main(command: str, demisto_params: dict):
demisto.debug(f'Command being called is {command}')
try:
options = DefenderIntegrationOptions.parse_obj(demisto_params)
request = DefenderHTTPRequest.parse_obj(demisto_params)
authenticator = DefenderAuthenticator.parse_obj(demisto_params)
clinet = DefenderClient(request=request, options=options, authenticator=authenticator)
get_events = DefenderGetEvents(client=clinet, options=options)
if command == 'test-module':
return_results(test_module(get_events=get_events))
elif command in ('fetch-events', 'microsoft-365-defender-get-events'):
events = get_events.run()
if command == 'microsoft-365-defender-get-events':
demisto.debug(f'{command=}, publishing events to the context')
human_readable = tableToMarkdown(name="Alerts:", t=events)
return_results(CommandResults('Microsoft365Defender.alerts', 'id', events, readable_output=human_readable))
if events:
# publishing events to XSIAM
demisto.debug(f'{command=}, publishing events to XSIAM')
demisto.setLastRun(get_events.get_last_run(events))
demisto.debug(f'Last run set to {demisto.getLastRun()}')
send_events_to_xsiam(events, vendor='Microsoft', product='Defender 365')
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
30,162 | def fetch_exchange(
zone_key1, zone_key2, session=None, target_datetime=None, logger=None
) -> dict:
"""Requests the last known power exchange (in MW) between two countries."""
sorted_keys = "->".join(sorted([zone_key1, zone_key2]))
r = session or requests.session()
url = SOURCE
response = r.get(url)
response.encoding = "utf-8"
html_doc = response.text
start_string = "<script type='text/javascript'>"
start_index = html_doc.find(start_string) + len(start_string)
stop_index = html_doc.find("left:")
soup = BeautifulSoup(html_doc[start_index:stop_index], "html.parser")
data_string = soup.find(text=re.compile("var"))
data_split = data_string.split("\r\n")
GE_1 = re.findall(REGEX, data_split[1])
GE_2 = re.findall(REGEX, data_split[2])
GE_3 = re.findall(REGEX, data_split[3])
NKR_1 = re.findall(REGEX, data_split[4])
NKR_2 = re.findall(REGEX, data_split[5])
IR_1 = re.findall(REGEX, data_split[6])
AM_NKR = float(NKR_1[0]) + float(NKR_2[0])
AM_GE = float(GE_1[0]) + float(GE_2[0]) + float(GE_3[0])
AM_IR = float(IR_1[0])
time_data = [s for s in data_split if "time2" in s][0]
yerevan = tz.gettz(TZ)
date_time = dparser.parse(
time_data.split()[3], default=datetime.now(yerevan), fuzzy=True
)
if sorted_keys == "AM->NKR":
netflow = -1 * AM_NKR
elif sorted_keys == "AM->GE":
netflow = -1 * AM_GE
elif sorted_keys == "AM->IR":
netflow = -1 * AM_IR
else:
raise NotImplementedError("This exchange pair is not implemented")
return {
"sortedZoneKeys": sorted_keys,
"datetime": date_time,
"netFlow": netflow,
"source": SOURCE,
}
| def fetch_exchange(
zone_key1, zone_key2, session=None, target_datetime=None, logger=None
) -> dict:
"""Requests the last known power exchange (in MW) between two countries."""
sorted_keys = "->".join(sorted([zone_key1, zone_key2]))
r = session or requests.session()
response = r.get(SOURCE)
response.encoding = "utf-8"
html_doc = response.text
start_string = "<script type='text/javascript'>"
start_index = html_doc.find(start_string) + len(start_string)
stop_index = html_doc.find("left:")
soup = BeautifulSoup(html_doc[start_index:stop_index], "html.parser")
data_string = soup.find(text=re.compile("var"))
data_split = data_string.split("\r\n")
GE_1 = re.findall(REGEX, data_split[1])
GE_2 = re.findall(REGEX, data_split[2])
GE_3 = re.findall(REGEX, data_split[3])
NKR_1 = re.findall(REGEX, data_split[4])
NKR_2 = re.findall(REGEX, data_split[5])
IR_1 = re.findall(REGEX, data_split[6])
AM_NKR = float(NKR_1[0]) + float(NKR_2[0])
AM_GE = float(GE_1[0]) + float(GE_2[0]) + float(GE_3[0])
AM_IR = float(IR_1[0])
time_data = [s for s in data_split if "time2" in s][0]
yerevan = tz.gettz(TZ)
date_time = dparser.parse(
time_data.split()[3], default=datetime.now(yerevan), fuzzy=True
)
if sorted_keys == "AM->NKR":
netflow = -1 * AM_NKR
elif sorted_keys == "AM->GE":
netflow = -1 * AM_GE
elif sorted_keys == "AM->IR":
netflow = -1 * AM_IR
else:
raise NotImplementedError("This exchange pair is not implemented")
return {
"sortedZoneKeys": sorted_keys,
"datetime": date_time,
"netFlow": netflow,
"source": SOURCE,
}
|
25,971 | def list_skus(cmd, location=None, size=None, zone=None, show_all=None, resource_type=None):
from ._vm_utils import list_sku_info
result = list_sku_info(cmd.cli_ctx, location)
# pylint: disable=too-many-nested-blocks
if not show_all:
available_skus = []
for sku_info in result:
is_available = True
if sku_info.restrictions:
for restriction in sku_info.restrictions:
if restriction.reason_code == 'NotAvailableForSubscription':
# The attribute location_info is not supported in versions 2017-03-30 and earlier
if cmd.supported_api_version(max_api='2017-03-30'):
is_available = False
break
# This SKU is not available only if all zones are restricted
elif not (set(sku_info.location_info[0].zones or []) -
set(restriction.restriction_info.zones or [])):
is_available = False
break
if is_available:
available_skus.append(sku_info)
result = available_skus
if resource_type:
result = [x for x in result if x.resource_type.lower() == resource_type.lower()]
if size:
result = [x for x in result if x.resource_type == 'virtualMachines' and size.lower() in x.name.lower()]
if zone:
result = [x for x in result if x.location_info and x.location_info[0].zones]
return result
| def list_skus(cmd, location=None, size=None, zone=None, show_all=None, resource_type=None):
from ._vm_utils import list_sku_info
result = list_sku_info(cmd.cli_ctx, location)
# pylint: disable=too-many-nested-blocks
if not show_all:
available_skus = []
for sku_info in result:
is_available = True
if sku_info.restrictions:
for restriction in sku_info.restrictions:
if restriction.reason_code == 'NotAvailableForSubscription':
# The attribute location_info is not supported in versions 2017-03-30 and earlier
if cmd.supported_api_version(max_api='2017-03-30'):
is_available = False
break
# This SKU is not available only if all zones are restricted
if not (set(sku_info.location_info[0].zones or []) -
set(restriction.restriction_info.zones or [])):
is_available = False
break
if is_available:
available_skus.append(sku_info)
result = available_skus
if resource_type:
result = [x for x in result if x.resource_type.lower() == resource_type.lower()]
if size:
result = [x for x in result if x.resource_type == 'virtualMachines' and size.lower() in x.name.lower()]
if zone:
result = [x for x in result if x.location_info and x.location_info[0].zones]
return result
|
41,533 | def fetch_hitran(
molecule,
local_databases=None,
databank_name="HITRAN-{molecule}",
isotope=None,
load_wavenum_min=None,
load_wavenum_max=None,
columns=None,
cache=True,
verbose=True,
clean_cache_files=True,
return_local_path=False,
engine="default",
parallel=True,
parse_quanta=True,
):
"""Download all HITRAN lines from HITRAN website. Unzip and build a HDF5 file directly.
Returns a Pandas DataFrame containing all lines.
Parameters
----------
molecule: str
one specific molecule name, listed in HITRAN molecule metadata.
See https://hitran.org/docs/molec-meta/
Example: "H2O", "CO2", etc.
local_databases: str
where to create the RADIS HDF5 files. Default ``"~/.radisdb/hitran"``.
Can be changed in ``radis.config["DEFAULT_DOWNLOAD_PATH"]`` or in ~/radis.json config file
databank_name: str
name of the databank in RADIS :ref:`Configuration file <label_lbl_config_file>`
Default ``"HITRAN-{molecule}"``
isotope: str
load only certain isotopes : ``'2'``, ``'1,2'``, etc. If ``None``, loads
everything. Default ``None``.
load_wavenum_min, load_wavenum_max: float (cm-1)
load only specific wavenumbers.
columns: list of str
list of columns to load. If ``None``, returns all columns in the file.
Other Parameters
----------------
cache: ``True``, ``False``, ``'regen'`` or ``'force'``
if ``True``, use existing HDF5 file. If ``False`` or ``'regen'``, rebuild it.
If ``'force'``, raise an error if cache file cannot be used (useful for debugging).
Default ``True``.
verbose: bool
clean_cache_files: bool
if ``True`` clean downloaded cache files after HDF5 are created.
return_local_path: bool
if ``True``, also returns the path of the local database file.
engine: 'pytables', 'vaex', 'default'
which HDF5 library to use. If 'default' use the value from ~/radis.json
parallel: bool
if ``True``, uses joblib.parallel to load database with multiple processes
parse_quanta: bool
if ``True``, parse local & global quanta (required to identify lines
for non-LTE calculations ; but sometimes lines are not labelled.)
Returns
-------
df: pd.DataFrame
Line list
A HDF5 file is also created in ``local_databases`` and referenced
in the :ref:`RADIS config file <label_lbl_config_file>` with name
``databank_name``
local_path: str
path of local database file if ``return_local_path``
Examples
--------
::
from radis.io.hitran import fetch_hitran
df = fetch_hitran("CO")
print(df.columns)
>>> Index(['id', 'iso', 'wav', 'int', 'A', 'airbrd', 'selbrd', 'El', 'Tdpair',
'Pshft', 'gp', 'gpp', 'branch', 'jl', 'vu', 'vl'],
dtype='object')
.. minigallery:: radis.fetch_hitemp
Notes
-----
if using ``load_only_wavenum_above/below`` or ``isotope``, the whole
database is anyway downloaded and uncompressed to ``local_databases``
fast access .HDF5 files (which will take a long time on first call). Only
the expected wavenumber range & isotopes are returned. The .HFD5 parsing uses
:py:func:`~radis.io.hdf5.hdf2df`
See Also
--------
:py:func:`~radis.io.hitemp.fetch_hitemp`, :py:func:`~radis.io.exomol.fetch_exomol`
:py:func:`~radis.io.hdf5.hdf2df`, :py:meth:`~radis.lbl.loader.DatabankLoader.fetch_databank`
"""
if r"{molecule}" in databank_name:
databank_name = databank_name.format(**{"molecule": molecule})
if local_databases is None:
import radis
local_databases = join(radis.config["DEFAULT_DOWNLOAD_PATH"], "hitran")
local_databases = abspath(local_databases.replace("~", expanduser("~")))
ldb = HITRANDatabaseManager(
databank_name,
molecule=molecule,
local_databases=local_databases,
engine=engine,
verbose=verbose,
parallel=parallel,
)
# Get expected local files for this database:
local_file = ldb.get_filenames()
# Delete files if needed:
if cache == "regen":
ldb.remove_local_files(local_file)
ldb.check_deprecated_files(
ldb.get_existing_files(local_file),
auto_remove=True if cache != "force" else False,
)
# Download files
download_files = ldb.get_missing_files(local_file)
if download_files:
ldb.download_and_parse(download_files, cache=cache, parse_quanta=parse_quanta)
# Register
if not ldb.is_registered():
ldb.register()
if len(download_files) > 0 and clean_cache_files:
ldb.clean_download_files()
# Load and return
df = ldb.load(
local_file,
columns=columns,
isotope=isotope,
load_wavenum_min=load_wavenum_min, # for relevant files, get only the right range
load_wavenum_max=load_wavenum_max,
)
return (df, local_file) if return_local_path else df
| def fetch_hitran(
molecule,
local_databases=None,
databank_name="HITRAN-{molecule}",
isotope=None,
load_wavenum_min=None,
load_wavenum_max=None,
columns=None,
cache=True,
verbose=True,
clean_cache_files=True,
return_local_path=False,
engine="default",
parallel=True,
parse_quanta=True,
):
"""Download all HITRAN lines from HITRAN website. Unzip and build a HDF5 file directly.
Returns a Pandas DataFrame containing all lines.
Parameters
----------
molecule: str
one specific molecule name, listed in HITRAN molecule metadata.
See https://hitran.org/docs/molec-meta/
Example: "H2O", "CO2", etc.
local_databases: str
where to create the RADIS HDF5 files. Default ``"~/.radisdb/hitran"``.
Can be changed in ``radis.config["DEFAULT_DOWNLOAD_PATH"]`` or in ~/radis.json config file
databank_name: str
name of the databank in RADIS :ref:`Configuration file <label_lbl_config_file>`
Default ``"HITRAN-{molecule}"``
isotope: str
load only certain isotopes : ``'2'``, ``'1,2'``, etc. If ``None``, loads
everything. Default ``None``.
load_wavenum_min, load_wavenum_max: float (cm-1)
load only specific wavenumbers.
columns: list of str
list of columns to load. If ``None``, returns all columns in the file.
Other Parameters
----------------
cache: ``True``, ``False``, ``'regen'`` or ``'force'``
if ``True``, use existing HDF5 file. If ``False`` or ``'regen'``, rebuild it.
If ``'force'``, raise an error if cache file cannot be used (useful for debugging).
Default ``True``.
verbose: bool
clean_cache_files: bool
if ``True`` clean downloaded cache files after HDF5 are created.
return_local_path: bool
if ``True``, also returns the path of the local database file.
engine: 'pytables', 'vaex', 'default'
which HDF5 library to use. If 'default' use the value from ~/radis.json
parallel: bool
if ``True``, uses joblib.parallel to load database with multiple processes
parse_quanta: bool
if ``True``, parse local & global quanta (required to identify lines
for non-LTE calculations ; but sometimes lines are not labelled.)
Returns
-------
df: pd.DataFrame
Line list
A HDF5 file is also created in ``local_databases`` and referenced
in the :ref:`RADIS config file <label_lbl_config_file>` with name
``databank_name``
local_path: str
path of local database file if ``return_local_path``
Examples
--------
::
from radis.io.hitran import fetch_hitran
df = fetch_hitran("CO")
print(df.columns)
>>> Index(['id', 'iso', 'wav', 'int', 'A', 'airbrd', 'selbrd', 'El', 'Tdpair',
'Pshft', 'gp', 'gpp', 'branch', 'jl', 'vu', 'vl'],
dtype='object')
.. minigallery:: radis.fetch_hitran
Notes
-----
if using ``load_only_wavenum_above/below`` or ``isotope``, the whole
database is anyway downloaded and uncompressed to ``local_databases``
fast access .HDF5 files (which will take a long time on first call). Only
the expected wavenumber range & isotopes are returned. The .HFD5 parsing uses
:py:func:`~radis.io.hdf5.hdf2df`
See Also
--------
:py:func:`~radis.io.hitemp.fetch_hitemp`, :py:func:`~radis.io.exomol.fetch_exomol`
:py:func:`~radis.io.hdf5.hdf2df`, :py:meth:`~radis.lbl.loader.DatabankLoader.fetch_databank`
"""
if r"{molecule}" in databank_name:
databank_name = databank_name.format(**{"molecule": molecule})
if local_databases is None:
import radis
local_databases = join(radis.config["DEFAULT_DOWNLOAD_PATH"], "hitran")
local_databases = abspath(local_databases.replace("~", expanduser("~")))
ldb = HITRANDatabaseManager(
databank_name,
molecule=molecule,
local_databases=local_databases,
engine=engine,
verbose=verbose,
parallel=parallel,
)
# Get expected local files for this database:
local_file = ldb.get_filenames()
# Delete files if needed:
if cache == "regen":
ldb.remove_local_files(local_file)
ldb.check_deprecated_files(
ldb.get_existing_files(local_file),
auto_remove=True if cache != "force" else False,
)
# Download files
download_files = ldb.get_missing_files(local_file)
if download_files:
ldb.download_and_parse(download_files, cache=cache, parse_quanta=parse_quanta)
# Register
if not ldb.is_registered():
ldb.register()
if len(download_files) > 0 and clean_cache_files:
ldb.clean_download_files()
# Load and return
df = ldb.load(
local_file,
columns=columns,
isotope=isotope,
load_wavenum_min=load_wavenum_min, # for relevant files, get only the right range
load_wavenum_max=load_wavenum_max,
)
return (df, local_file) if return_local_path else df
|
32,437 | def main(installed_images: Union[list, dict], available_images: Union[list, dict],
**kwargs) -> ScriptResult:
"""
Given a table containing installed ("current") PAN-OS Software images, compare with available to determine
which can be upgraded to by looking for newer releases than are currently installed.
:param installed_images: SoftwareVersion table of current images
:param available_images: Complete list of available images for the given platform
:param kwargs: Keyword args !no-auto-argument
"""
versions = []
if type(installed_images) is dict:
installed_images = [installed_images]
if type(available_images) is dict:
available_images = [available_images]
for image_dict in available_images:
image: SoftwareVersion = SoftwareVersion(**image_dict)
for installed_image_dict in installed_images:
installed_image: SoftwareVersion = SoftwareVersion(**installed_image_dict)
if check_versions(image.version, installed_image.version):
if image.version not in [x.version for x in versions]:
versions.append(image)
print([x.version for x in versions])
return ScriptResult(
versions=versions
)
| def main(installed_images: Union[list, dict], available_images: Union[list, dict],
**kwargs) -> ScriptResult:
"""
Given a table containing installed ("current") PAN-OS Software images, compare with available to determine
which can be upgraded to by looking for newer releases than are currently installed.
:param installed_images: SoftwareVersion table of current images
:param available_images: Complete list of available images for the given platform
:param kwargs: Keyword args !no-auto-argument
"""
versions = []
if isinstance(installed_images, dict):
installed_images = [installed_images]
if type(available_images) is dict:
available_images = [available_images]
for image_dict in available_images:
image: SoftwareVersion = SoftwareVersion(**image_dict)
for installed_image_dict in installed_images:
installed_image: SoftwareVersion = SoftwareVersion(**installed_image_dict)
if check_versions(image.version, installed_image.version):
if image.version not in [x.version for x in versions]:
versions.append(image)
print([x.version for x in versions])
return ScriptResult(
versions=versions
)
|
50,649 | def run_ansible_lint(
*argv: str,
cwd: os.PathLike = None,
bin: str = None,
env: Dict[str, str] = None) -> Tuple[str, str]:
"""Run ansible-lint on a given path and returns its output."""
if bin:
command = [bin]
else:
command = [sys.executable, "-m", "ansiblelint"]
if argv:
command.extend(argv)
return subprocess.Popen(
command,
cwd=cwd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False, # needed when command is a list
env=env,
universal_newlines=True
).communicate()
| def run_ansible_lint(
*argv: str,
cwd: os.PathLike = None,
bin: str = None,
env: Dict[str, str] = None) -> Tuple[str, str]:
"""Run ansible-lint on a given path and returns its output."""
if bin:
command = [bin]
else:
command = [sys.executable, "-m", "ansiblelint"]
if argv:
command.extend(argv)
proc_result = subprocess.run(
command,
cwd=cwd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False, # needed when command is a list
env=env,
universal_newlines=True
)
return proc_result.stdout, proc_result.stderr
|
7,201 | def block_reduce(image, block_size, func=np.sum, cval=0, func_kwargs=None):
"""Downsample image by applying function `func` to local blocks.
This function is useful for max and mean pooling, for example.
Parameters
----------
image : ndarray
N-dimensional input image.
block_size : array_like
Array containing down-sampling integer factor along each axis.
func : callable
Function object which is used to calculate the return value for each
local block. This function must implement an ``axis`` parameter.
Primary functions are ``numpy.sum``, ``numpy.min``, ``numpy.max``,
``numpy.mean`` and ``numpy.median``. See also `func_kwargs`.
cval : float
Constant padding value if image is not perfectly divisible by the
block size.
func_kwargs : dict
Keyword arguments passed to `func`. Notably useful for passing dtype
argument to ``np.mean``. Takes dictionary of inputs, i.e.:
``func_kwargs={'dtype': np.float16})``.
Returns
-------
image : ndarray
Down-sampled image with same number of dimensions as input image.
Examples
--------
>>> from skimage.measure import block_reduce
>>> image = np.arange(3*3*4).reshape(3, 3, 4)
>>> image # doctest: +NORMALIZE_WHITESPACE
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]],
[[24, 25, 26, 27],
[28, 29, 30, 31],
[32, 33, 34, 35]]])
>>> block_reduce(image, block_size=(3, 3, 1), func=np.mean)
array([[[16., 17., 18., 19.]]])
>>> image_max1 = block_reduce(image, block_size=(1, 3, 4), func=np.max)
>>> image_max1 # doctest: +NORMALIZE_WHITESPACE
array([[[11]],
[[23]],
[[35]]])
>>> image_max2 = block_reduce(image, block_size=(3, 1, 4), func=np.max)
>>> image_max2 # doctest: +NORMALIZE_WHITESPACE
array([[[27],
[31],
[35]]])
"""
if len(block_size) != image.ndim:
raise ValueError("`block_size` must have the same length "
"as `image.shape`.")
if func_kwargs is None:
func_kwargs = {}
pad_width = []
for i in range(len(block_size)):
if block_size[i] < 1:
raise ValueError("Down-sampling factors must be >= 1. Use "
"`skimage.transform.resize` to up-sample an "
"image.")
if image.shape[i] % block_size[i] != 0:
after_width = block_size[i] - (image.shape[i] % block_size[i])
else:
after_width = 0
pad_width.append((0, after_width))
image = np.pad(image, pad_width=pad_width, mode='constant',
constant_values=cval)
blocked = view_as_blocks(image, block_size)
return func(blocked, axis=tuple(range(image.ndim, blocked.ndim)),
**func_kwargs)
| def block_reduce(image, block_size, func=np.sum, cval=0, func_kwargs=None):
"""Downsample image by applying function `func` to local blocks.
This function is useful for max and mean pooling, for example.
Parameters
----------
image : ndarray
N-dimensional input image.
block_size : array_like
Array containing down-sampling integer factor along each axis.
func : callable
Function object which is used to calculate the return value for each
local block. This function must implement an ``axis`` parameter.
Primary functions are ``numpy.sum``, ``numpy.min``, ``numpy.max``,
``numpy.mean`` and ``numpy.median``. See also `func_kwargs`.
cval : float
Constant padding value if image is not perfectly divisible by the
block size.
func_kwargs : dict
Keyword arguments passed to `func`. Notably useful for passing dtype
argument to ``np.mean``. Takes dictionary of inputs, e.g.:
``func_kwargs={'dtype': np.float16})``.
Returns
-------
image : ndarray
Down-sampled image with same number of dimensions as input image.
Examples
--------
>>> from skimage.measure import block_reduce
>>> image = np.arange(3*3*4).reshape(3, 3, 4)
>>> image # doctest: +NORMALIZE_WHITESPACE
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]],
[[24, 25, 26, 27],
[28, 29, 30, 31],
[32, 33, 34, 35]]])
>>> block_reduce(image, block_size=(3, 3, 1), func=np.mean)
array([[[16., 17., 18., 19.]]])
>>> image_max1 = block_reduce(image, block_size=(1, 3, 4), func=np.max)
>>> image_max1 # doctest: +NORMALIZE_WHITESPACE
array([[[11]],
[[23]],
[[35]]])
>>> image_max2 = block_reduce(image, block_size=(3, 1, 4), func=np.max)
>>> image_max2 # doctest: +NORMALIZE_WHITESPACE
array([[[27],
[31],
[35]]])
"""
if len(block_size) != image.ndim:
raise ValueError("`block_size` must have the same length "
"as `image.shape`.")
if func_kwargs is None:
func_kwargs = {}
pad_width = []
for i in range(len(block_size)):
if block_size[i] < 1:
raise ValueError("Down-sampling factors must be >= 1. Use "
"`skimage.transform.resize` to up-sample an "
"image.")
if image.shape[i] % block_size[i] != 0:
after_width = block_size[i] - (image.shape[i] % block_size[i])
else:
after_width = 0
pad_width.append((0, after_width))
image = np.pad(image, pad_width=pad_width, mode='constant',
constant_values=cval)
blocked = view_as_blocks(image, block_size)
return func(blocked, axis=tuple(range(image.ndim, blocked.ndim)),
**func_kwargs)
|
32,074 | def test_get_threats():
"""
Given
- a threat and a dbot score
When
- calls the function create_dbot_score_entry
Then
- checks if dbot_score_entry is from type DBotScore
"""
threat = THREAT_OUTPUT
dbot_score = translate_score(threat['cylance_score'], FILE_THRESHOLD)
dbot_score_entry = create_dbot_score_entry(THREAT_OUTPUT, dbot_score)
assert type(dbot_score_entry) == Common.DBotScore
| def test_get_threats():
"""
Given
- a threat and a dbot score
When
- calls the function create_dbot_score_entry
Then
- checks if dbot_score_entry is from type DBotScore
"""
threat = THREAT_OUTPUT
dbot_score = translate_score(threat['cylance_score'], FILE_THRESHOLD)
dbot_score_entry = create_dbot_score_entry(THREAT_OUTPUT, dbot_score)
assert isinstance(dbot_score_entry, Common.DBotScore)
|
31,617 | def analyse_sample_file_request(file_entry, should_wait, internet_access, comments='', systems=''):
data = {
'accept-tac': 1,
'internet-access': 1 if internet_access else 0,
}
if comments != '':
data['comments'] = comments
if systems != '':
data['systems[]'] = [s.strip() for s in systems.split(',')] # type: ignore
# removing backslashes from filename as the API does not like it
# if given filename such as dir\file.xlsx - the sample will end with the name file.xlsx
filename = demisto.getFilePath(file_entry)['name']
if isinstance(filename, unicode): # py2 way of checking if a var is of type unicode
filename = filename.encode('ascii', 'ignore')
proper_filename = filename.replace('\\', '/')
with open(demisto.getFilePath(file_entry)['path'], 'rb') as f:
res = http_post('v2/analysis/submit', data=data, files={'sample': (proper_filename, f)})
if res == 'nothing_to_analyze':
return nothing_to_analyze_output
if 'errors' in res:
LOG('Error! in command sample file: file_entry=%s' % (file_entry,))
LOG('got the following errors:\n' + '\n'.join(e['message'] for e in res['errors']))
raise Exception('command failed to run.')
shutil.rmtree(demisto.getFilePath(file_entry)['name'], ignore_errors=True)
if should_wait:
return poll_webid(res['data']['webids'][0])
web_id = res['data']['webids'][0]
result = info_request(web_id)
return analysis_to_entry('Analysis #%s' % (web_id,), result['data'])
| def analyse_sample_file_request(file_entry, should_wait, internet_access, comments='', systems=''):
data = {
'accept-tac': 1,
'internet-access': 1 if internet_access else 0,
}
if comments != '':
data['comments'] = comments
if systems != '':
data['systems[]'] = [s.strip() for s in systems.split(',')] # type: ignore
# removing backslashes from filename as the API does not like it
# if given filename such as dir\file.xlsx - the sample will end with the name file.xlsx
filename = demisto.getFilePath(file_entry)['name']
if isinstance(filename, unicode): # py2 way of checking if a var is of type unicode
filename = filename.encode('ascii', 'ignore')
fixed_filename = filename.replace('\\', '/')
with open(demisto.getFilePath(file_entry)['path'], 'rb') as f:
res = http_post('v2/analysis/submit', data=data, files={'sample': (proper_filename, f)})
if res == 'nothing_to_analyze':
return nothing_to_analyze_output
if 'errors' in res:
LOG('Error! in command sample file: file_entry=%s' % (file_entry,))
LOG('got the following errors:\n' + '\n'.join(e['message'] for e in res['errors']))
raise Exception('command failed to run.')
shutil.rmtree(demisto.getFilePath(file_entry)['name'], ignore_errors=True)
if should_wait:
return poll_webid(res['data']['webids'][0])
web_id = res['data']['webids'][0]
result = info_request(web_id)
return analysis_to_entry('Analysis #%s' % (web_id,), result['data'])
|
46,209 | def draw(layer, event):
"""Draw with the currently selected label to a coordinate.
This method have different behavior when draw is called
with different labeling layer mode.
In PAINT mode the cursor functions like a paint brush changing any
pixels it brushes over to the current label. If the background label
`0` is selected than any pixels will be changed to background and this
tool functions like an eraser. The size and shape of the cursor can be
adjusted in the properties widget.
In FILL mode the cursor functions like a fill bucket replacing pixels
of the label clicked on with the current label. It can either replace
all pixels of that label or just those that are contiguous with the
clicked on pixel. If the background label `0` is selected than any
pixels will be changed to background and this tool functions like an
eraser
"""
# on press
layer._save_history()
layer._block_saving = True
if layer._mode == Mode.PAINT:
layer.paint(layer.coordinates, layer.selected_label)
elif layer._mode == Mode.FILL:
layer.fill(layer.coordinates, layer._value, layer.selected_label)
last_cursor_coord = layer.coordinates
yield
# on move
while event.type == 'mouse_move':
interp_coord = interpolate_coordinates(
last_cursor_coord, layer.coordinates, layer.brush_size
)
for c in interp_coord:
if layer._mode == Mode.PAINT:
layer.paint(c, layer.selected_label, refresh=False)
elif (
layer._mode == Mode.FILL
and layer._value != layer.selected_label
):
layer.fill(
layer.coordinates, layer._value, layer.selected_label
)
layer.refresh()
last_cursor_coord = layer.coordinates
yield
# on release
layer._block_saving = False
| def draw(layer, event):
"""Draw with the currently selected label to a coordinate.
This method have different behavior when draw is called
with different labeling layer mode.
In PAINT mode the cursor functions like a paint brush changing any
pixels it brushes over to the current label. If the background label
`0` is selected than any pixels will be changed to background and this
tool functions like an eraser. The size and shape of the cursor can be
adjusted in the properties widget.
In FILL mode the cursor functions like a fill bucket replacing pixels
of the label clicked on with the current label. It can either replace
all pixels of that label or just those that are contiguous with the
clicked on pixel. If the background label `0` is selected than any
pixels will be changed to background and this tool functions like an
eraser
"""
# on press
layer._save_history()
layer._block_saving = True
if layer._mode == Mode.PAINT:
layer.paint(layer.coordinates, layer.selected_label)
elif layer._mode == Mode.FILL:
layer.fill(layer.coordinates, layer._value, layer.selected_label)
last_cursor_coord = layer.coordinates
yield
# on move
while event.type == 'mouse_move':
interp_coord = interpolate_coordinates(
last_cursor_coord, layer.coordinates, layer.brush_size
)
for c in interp_coord:
if layer._mode == Mode.PAINT:
layer.paint(c, layer.selected_label, refresh=False)
elif (
layer._mode == Mode.FILL
and layer._value != layer.selected_label
):
layer.fill(
c, layer._value, layer.selected_label
)
layer.refresh()
last_cursor_coord = layer.coordinates
yield
# on release
layer._block_saving = False
|
12,249 | def configure_logger(debug_file: Optional[str] = None) -> StringIO:
logger = logging.getLogger("pgactivity")
logger.setLevel(logging.DEBUG)
# The steamhandler is used to print hints to the user when he exists.
# The INFO log level is reserved for this.
memory_string = StringIO()
c_handler = logging.StreamHandler(memory_string)
c_handler.setLevel(logging.INFO)
c_handler.name = "stream_handler"
c_format = logging.Formatter("Hint - %(message)s")
c_handler.setFormatter(c_format)
logger.addHandler(c_handler)
if debug_file is not None:
f_handler = logging.FileHandler(debug_file)
f_handler.setLevel(logging.DEBUG)
f_format = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
f_handler.setFormatter(f_format)
logger.addHandler(f_handler)
return memory_string
| def configure_logger(debug_file: Optional[str] = None) -> StringIO:
logger = logging.getLogger("pgactivity")
logger.setLevel(logging.DEBUG)
# The steamhandler is used to print hints to the user at exit.
# The INFO log level is reserved for this.
memory_string = StringIO()
c_handler = logging.StreamHandler(memory_string)
c_handler.setLevel(logging.INFO)
c_handler.name = "stream_handler"
c_format = logging.Formatter("Hint - %(message)s")
c_handler.setFormatter(c_format)
logger.addHandler(c_handler)
if debug_file is not None:
f_handler = logging.FileHandler(debug_file)
f_handler.setLevel(logging.DEBUG)
f_format = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
f_handler.setFormatter(f_format)
logger.addHandler(f_handler)
return memory_string
|
49,994 | def barbs(u_cube, v_cube, *args, **kwargs):
"""
Draws a barb plot from two vector component cubes.
Args:
* u_cube, v_cube : (:class:`~iris.cube.Cube`)
u and v vector components. Must have same shape and units of knot.
If the cubes have geographic coordinates, the values are treated as
true distance differentials, e.g. windspeeds, and *not* map coordinate
vectors. The components are aligned with the North and East of the
cube coordinate system.
.. Note:
At present, if u_cube and v_cube have geographic coordinates, then they
must be in a lat-lon coordinate system, though it may be a rotated one.
To transform wind values between coordinate systems, use
:func:`iris.analysis.cartography.rotate_vectors`.
To transform coordinate grid points, you will need to create
2-dimensional arrays of x and y values. These can be transformed with
:meth:`cartopy.crs.CRS.transform_points`.
Kwargs:
* coords: (list of :class:`~iris.coords.Coord` or string)
Coordinates or coordinate names. Use the given coordinates as the axes
for the plot. The order of the given coordinates indicates which axis
to use for each, where the first element is the horizontal
axis of the plot and the second element is the vertical axis
of the plot.
* axes: the :class:`matplotlib.axes.Axes` to use for drawing.
Defaults to the current axes if none provided.
See :func:`matplotlib.pyplot.barbs` for details of other valid
keyword arguments.
"""
#
# TODO: check u + v cubes for compatibility.
#
kwargs["_v_data"] = v_cube.data
return _draw_2d_from_points(
"barbs", _vector_component_args, u_cube, *args, **kwargs
)
| def barbs(u_cube, v_cube, *args, **kwargs):
"""
Draws a barb plot from two vector component cubes.
Args:
* u_cube, v_cube : (:class:`~iris.cube.Cube`)
u and v vector components. Must have same shape and units of knot.
If the cubes have geographic coordinates, the values are treated as
true distance differentials, e.g. windspeeds, and *not* map coordinate
vectors. The components are aligned with the North and East of the
cube coordinate system.
.. Note::
At present, if u_cube and v_cube have geographic coordinates, then they
must be in a lat-lon coordinate system, though it may be a rotated one.
To transform wind values between coordinate systems, use
:func:`iris.analysis.cartography.rotate_grid_vectors`.
"""
#
# TODO: check u + v cubes for compatibility.
#
kwargs["_v_data"] = v_cube.data
return _draw_2d_from_points(
"barbs", _vector_component_args, u_cube, *args, **kwargs
)
|
6,408 | def set_tasks_as_overdue():
tasks = frappe.get_all("Task", filters={'status':['not in',['Cancelled', 'Closed']]})
for task in tasks:
if frappe.db.get_value("Task", task.name, "status") in 'Pending Review':
if getdate(frappe.db.get_value("Task", task.name, "review_date")) > getdate(today()):
continue
frappe.get_doc("Task", task.name).update_status()
| def set_tasks_as_overdue():
tasks = frappe.get_all("Task", filters={'status':['not in',['Cancelled', 'Closed']]}, fields=["name", "review_date"])
for task in tasks:
if frappe.db.get_value("Task", task.name, "status") in 'Pending Review':
if getdate(frappe.db.get_value("Task", task.name, "review_date")) > getdate(today()):
continue
frappe.get_doc("Task", task.name).update_status()
|
9,792 | def main():
module = AnsibleModule(
argument_spec=dict(
action=dict(type='str', required=True),
server=dict(type='str'),
basedir=dict(type='str'),
workdir=dict(type='str'),
conffile=dict(type='str'),
force=dict(type='bool', default=False),
key=dict(type='str'),
),
supports_check_mode=True,
)
actions = ['fertch', 'fetch_install', 'install', 'rollback', 'IDS', 'cron']
action = module.params['action']
msg = "Unexpected failure!"
if action not in actions:
msg = "Unexpected action"
server = module.params.get('server')
basedir = module.params.get('basedir')
workdir = module.params.get('workdir')
conffile = module.params.get('conffile')
force = module.params.get('force')
key = module.params.get('key')
freebsd_update_bin = module.get_bin_path('freebsd-update', True)
cmd = []
cmd.append(freebsd_update_bin)
if server is not None:
cmd.extend(('-s', server))
if basedir is not None:
cmd.extend(('-b', basedir))
if workdir is not None:
cmd.extend(('-d', workdir))
if conffile is not None:
cmd.extend(('-f', conffile))
if force:
cmd.append('-F')
if key is not None:
cmd.extend(('-k', key))
if action == 'fetch_install':
cmd.extend(('fetch', 'install'))
else:
cmd.append(action)
try:
rc, out, err = module.run_command(cmd)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
database = 'freebsd-update'
dbtree = '_%s' % database
results = {dbtree: {}}
if rc == 0:
results[dbtree]['output'] = []
for line in out.splitlines():
results[dbtree]['output'].append(line)
results[dbtree]['command'] = cmd
changed = True
if 'No updates' in out:
changed = False
module.exit_json(ansible_facts=results, changed=changed)
else:
msg = ' '.join(cmd) + ' failed'
module.fail_json(msg=msg)
| def main():
module = AnsibleModule(
argument_spec=dict(
action=dict(type='str', required=True),
server=dict(type='str'),
basedir=dict(type='str'),
workdir=dict(type='str'),
conffile=dict(type='str'),
force=dict(type='bool', default=False),
key=dict(type='str'),
),
supports_check_mode=True,
)
actions = ['fertch', 'fetch_install', 'install', 'rollback', 'IDS', 'cron']
action = module.params['action']
msg = "Unexpected failure!"
if action not in actions:
msg = "Unexpected action"
server = module.params.get('server')
basedir = module.params.get('basedir')
workdir = module.params.get('workdir')
conffile = module.params.get('conffile')
force = module.params.get('force')
key = module.params.get('key')
freebsd_update_bin = module.get_bin_path('freebsd-update', True)
cmd = [freebsd_update_bin]
cmd.append(freebsd_update_bin)
if server is not None:
cmd.extend(('-s', server))
if basedir is not None:
cmd.extend(('-b', basedir))
if workdir is not None:
cmd.extend(('-d', workdir))
if conffile is not None:
cmd.extend(('-f', conffile))
if force:
cmd.append('-F')
if key is not None:
cmd.extend(('-k', key))
if action == 'fetch_install':
cmd.extend(('fetch', 'install'))
else:
cmd.append(action)
try:
rc, out, err = module.run_command(cmd)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
database = 'freebsd-update'
dbtree = '_%s' % database
results = {dbtree: {}}
if rc == 0:
results[dbtree]['output'] = []
for line in out.splitlines():
results[dbtree]['output'].append(line)
results[dbtree]['command'] = cmd
changed = True
if 'No updates' in out:
changed = False
module.exit_json(ansible_facts=results, changed=changed)
else:
msg = ' '.join(cmd) + ' failed'
module.fail_json(msg=msg)
|
45,812 | def load_camera_data(file_name):
"""Loads the camera data using the sintel SDK and converts to torch.Tensor."""
if not os.path.isfile(file_name):
raise AssertionError("Invalid file {}".format(file_name))
import sintel_io
intrinsic, extrinsic = sintel_io.cam_read(file_name)
return intrinsic, extrinsic
| def load_camera_data(file_name):
"""Loads the camera data using the sintel SDK and converts to torch.Tensor."""
if not os.path.isfile(file_name):
raise FileExistsError("Invalid file {}".format(file_name))
import sintel_io
intrinsic, extrinsic = sintel_io.cam_read(file_name)
return intrinsic, extrinsic
|
31,755 | def build_body_request_for_update_user(old_user_data, new_user_data):
operations = []
for key, value in new_user_data.items():
operation = {
"op": "replace" if key in old_user_data.keys() else "add",
"path": key,
"value": [value] if key in ("emails", "phoneNumbers", "address") else value
}
operations.append(operation)
data = {
"schemas": ["urn:ietf:params:scim:api:messages:2.0:PatchOp"],
"Operations": operations
}
return data
| def build_body_request_for_update_user(old_user_data, new_user_data):
operations = []
for key, value in new_user_data.items():
operation = {
"op": "replace" if key in old_user_data.keys() else "add",
"path": key,
"value": [value] if key in ("emails", "phoneNumbers", "address") else value,
}
operations.append(operation)
data = {
"schemas": ["urn:ietf:params:scim:api:messages:2.0:PatchOp"],
"Operations": operations
}
return data
|
41,558 | def run_command(context, n_gif=0, thr_increment=None, resume_training=False):
"""Run main command.
This function is central in the ivadomed project as training / testing / evaluation commands
are run via this function. All the process parameters are defined in the config.
Args:
context (dict): Dictionary containing all parameters that are needed for a given process. See
:doc:`configuration_file` for more details.
n_gif (int): Generates a GIF during training if larger than zero, one frame per epoch for a given slice. The
parameter indicates the number of 2D slices used to generate GIFs, one GIF per slice. A GIF shows
predictions of a given slice from the validation sub-dataset. They are saved within the output path.
thr_increment (float): A threshold analysis is performed at the end of the training using the trained model and
the training + validation sub-dataset to find the optimal binarization threshold. The specified value
indicates the increment between 0 and 1 used during the ROC analysis (e.g. 0.1).
resume_training (bool): Load a saved model ("checkpoint.pth.tar" in the output directory specified with flag "--path-output" or via the config file "output_path" ' This training state is saved everytime a new best model is saved in the log
argument) for resume training directory.
Returns:
float or pandas.DataFrame or None:
* If "train" command: Returns floats: best loss score for both training and validation.
* If "test" command: Returns a pandas Dataframe: of metrics computed for each subject of
the testing sub-dataset and return the prediction metrics before evaluation.
* If "segment" command: No return value.
"""
command = copy.deepcopy(context["command"])
path_output = set_output_path(context)
# Create a log with the version of the Ivadomed software and the version of the Annexed dataset (if present)
create_dataset_and_ivadomed_version_log(context)
cuda_available, device = imed_utils.define_device(context['gpu_ids'][0])
# BACKWARDS COMPATIBILITY: If bids_path is string, assign to list - Do this here so it propagates to all functions
context['loader_parameters']['path_data'] = imed_utils.format_path_data(context['loader_parameters']['path_data'])
# Loader params
loader_params = set_loader_params(context, command == "train")
# Get transforms for each subdataset
transform_train_params, transform_valid_params, transform_test_params = \
imed_transforms.get_subdatasets_transforms(context["transformation"])
# MODEL PARAMETERS
model_params, loader_params = set_model_params(context, loader_params)
if command == 'segment':
run_segment_command(context, model_params)
return
# Get subject lists. "segment" command uses all participants of data path, hence no need to split
train_lst, valid_lst, test_lst = imed_loader_utils.get_subdatasets_subjects_list(context["split_dataset"],
context['loader_parameters']
['path_data'],
path_output,
context["loader_parameters"]
['subject_selection'])
# TESTING PARAMS
# Aleatoric uncertainty
if context['uncertainty']['aleatoric'] and context['uncertainty']['n_it'] > 0:
transformation_dict = transform_train_params
else:
transformation_dict = transform_test_params
undo_transforms = imed_transforms.UndoCompose(imed_transforms.Compose(transformation_dict, requires_undo=True))
testing_params = copy.deepcopy(context["training_parameters"])
testing_params.update({'uncertainty': context["uncertainty"]})
testing_params.update({'target_suffix': loader_params["target_suffix"], 'undo_transforms': undo_transforms,
'slice_axis': loader_params['slice_axis']})
if command == "train":
imed_utils.display_selected_transfoms(transform_train_params, dataset_type=["training"])
imed_utils.display_selected_transfoms(transform_valid_params, dataset_type=["validation"])
elif command == "test":
imed_utils.display_selected_transfoms(transformation_dict, dataset_type=["testing"])
# Check if multiple raters
check_multiple_raters(command != "train", loader_params)
if command == 'train':
# Get Validation dataset
ds_valid = get_dataset(loader_params, valid_lst, transform_valid_params, cuda_available, device, 'validation')
# Get Training dataset
ds_train = get_dataset(loader_params, train_lst, transform_train_params, cuda_available, device, 'training')
metric_fns = imed_metrics.get_metric_fns(ds_train.task)
# If FiLM, normalize data
if 'film_layers' in model_params and any(model_params['film_layers']):
model_params, ds_train, ds_valid, train_onehotencoder = \
film_normalize_data(context, model_params, ds_train, ds_valid, path_output)
# Model directory
create_path_model(context, model_params, ds_train, path_output, train_onehotencoder)
save_config_file(context, path_output)
# RUN TRAINING
best_training_dice, best_training_loss, best_validation_dice, best_validation_loss = imed_training.train(
model_params=model_params,
dataset_train=ds_train,
dataset_val=ds_valid,
training_params=context["training_parameters"],
path_output=path_output,
device=device,
cuda_available=cuda_available,
metric_fns=metric_fns,
n_gif=n_gif,
resume_training=resume_training,
debugging=context["debugging"])
if thr_increment:
# LOAD DATASET
if command != 'train': # If command == train, then ds_valid already load
# Get Validation dataset
ds_valid = get_dataset(loader_params, valid_lst, transform_valid_params, cuda_available, device, 'validation')
# Get Training dataset with no Data Augmentation
ds_train = get_dataset(loader_params, train_lst, transform_valid_params, cuda_available, device, 'training')
# Choice of optimisation metric
metric = "recall_specificity" if model_params["name"] in imed_utils.CLASSIFIER_LIST else "dice"
# Model path
model_path = os.path.join(path_output, "best_model.pt")
# Run analysis
thr = imed_testing.threshold_analysis(model_path=model_path,
ds_lst=[ds_train, ds_valid],
model_params=model_params,
testing_params=testing_params,
metric=metric,
increment=thr_increment,
fname_out=os.path.join(path_output, "roc.png"),
cuda_available=cuda_available)
# Update threshold in config file
context["postprocessing"]["binarize_prediction"] = {"thr": thr}
save_config_file(context, path_output)
if command == 'train':
return best_training_dice, best_training_loss, best_validation_dice, best_validation_loss
if command == 'test':
# LOAD DATASET
ds_test = imed_loader.load_dataset(**{**loader_params, **{'data_list': test_lst,
'transforms_params': transformation_dict,
'dataset_type': 'testing',
'requires_undo': True}}, device=device,
cuda_available=cuda_available)
metric_fns = imed_metrics.get_metric_fns(ds_test.task)
if 'film_layers' in model_params and any(model_params['film_layers']):
ds_test, model_params = update_model_params(context, ds_test, model_params, path_output)
# RUN INFERENCE
pred_metrics = imed_testing.test(model_params=model_params,
dataset_test=ds_test,
testing_params=testing_params,
path_output=path_output,
device=device,
cuda_available=cuda_available,
metric_fns=metric_fns,
postprocessing=context['postprocessing'])
# RUN EVALUATION
df_results = imed_evaluation.evaluate(path_data=loader_params['path_data'],
path_output=path_output,
target_suffix=loader_params["target_suffix"],
eval_params=context["evaluation_parameters"])
return df_results, pred_metrics
| def run_command(context, n_gif=0, thr_increment=None, resume_training=False):
"""Run main command.
This function is central in the ivadomed project as training / testing / evaluation commands
are run via this function. All the process parameters are defined in the config.
Args:
context (dict): Dictionary containing all parameters that are needed for a given process. See
:doc:`configuration_file` for more details.
n_gif (int): Generates a GIF during training if larger than zero, one frame per epoch for a given slice. The
parameter indicates the number of 2D slices used to generate GIFs, one GIF per slice. A GIF shows
predictions of a given slice from the validation sub-dataset. They are saved within the output path.
thr_increment (float): A threshold analysis is performed at the end of the training using the trained model and
the training + validation sub-dataset to find the optimal binarization threshold. The specified value
indicates the increment between 0 and 1 used during the ROC analysis (e.g. 0.1).
resume_training (bool): Load a saved model ("checkpoint.pth.tar" in the output directory specified with flag "--path-output" or via the config file "output_path" ' This training state is saved everytime a new best model is saved in the log
argument) for resume training directory.
Returns:
float or pandas.DataFrame or None:
* If "train" command: Returns floats: best loss score for both training and validation.
* If "test" command: Returns a pandas Dataframe: of metrics computed for each subject of
the testing sub-dataset and return the prediction metrics before evaluation.
* If "segment" command: No return value.
"""
command = copy.deepcopy(context["command"])
path_output = set_output_path(context)
# Create a log with the version of the Ivadomed software and the version of the Annexed dataset (if present)
create_dataset_and_ivadomed_version_log(context)
cuda_available, device = imed_utils.define_device(context['gpu_ids'][0])
# BACKWARDS COMPATIBILITY: If bids_path is string, assign to list - Do this here so it propagates to all functions
context['loader_parameters']['path_data'] = imed_utils.format_path_data(context['loader_parameters']['path_data'])
# Loader params
loader_params = set_loader_params(context, command == "train")
# Get transforms for each subdataset
transform_train_params, transform_valid_params, transform_test_params = \
imed_transforms.get_subdatasets_transforms(context["transformation"])
# MODEL PARAMETERS
model_params, loader_params = set_model_params(context, loader_params)
if command == 'segment':
run_segment_command(context, model_params)
return
# Get subject lists. "segment" command uses all participants of data path, hence no need to split
train_lst, valid_lst, test_lst = imed_loader_utils.get_subdatasets_subjects_list(context["split_dataset"],
context['loader_parameters']
['path_data'],
path_output,
context["loader_parameters"]
['subject_selection'])
# TESTING PARAMS
# Aleatoric uncertainty
if context['uncertainty']['aleatoric'] and context['uncertainty']['n_it'] > 0:
transformation_dict = transform_train_params
else:
transformation_dict = transform_test_params
undo_transforms = imed_transforms.UndoCompose(imed_transforms.Compose(transformation_dict, requires_undo=True))
testing_params = copy.deepcopy(context["training_parameters"])
testing_params.update({'uncertainty': context["uncertainty"]})
testing_params.update({'target_suffix': loader_params["target_suffix"], 'undo_transforms': undo_transforms,
'slice_axis': loader_params['slice_axis']})
if command == "train":
imed_utils.display_selected_transfoms(transform_train_params, dataset_type=["training"])
imed_utils.display_selected_transfoms(transform_valid_params, dataset_type=["validation"])
elif command == "test":
imed_utils.display_selected_transfoms(transformation_dict, dataset_type=["testing"])
# Check if multiple raters
check_multiple_raters(command != "train", loader_params)
if command == 'train':
# Get Validation dataset
ds_valid = get_dataset(loader_params, valid_lst, transform_valid_params, cuda_available, device, 'validation')
# Get Training dataset
ds_train = get_dataset(loader_params, train_lst, transform_train_params, cuda_available, device, 'training')
metric_fns = imed_metrics.get_metric_fns(ds_train.task)
# If FiLM, normalize data
if 'film_layers' in model_params and any(model_params['film_layers']):
model_params, ds_train, ds_valid, train_onehotencoder = \
film_normalize_data(context, model_params, ds_train, ds_valid, path_output)
# Model directory
create_path_model(context, model_params, ds_train, path_output, train_onehotencoder)
save_config_file(context, path_output)
# RUN TRAINING
best_training_dice, best_training_loss, best_validation_dice, best_validation_loss = imed_training.train(
model_params=model_params,
dataset_train=ds_train,
dataset_val=ds_valid,
training_params=context["training_parameters"],
path_output=path_output,
device=device,
cuda_available=cuda_available,
metric_fns=metric_fns,
n_gif=n_gif,
resume_training=resume_training,
debugging=context["debugging"])
if thr_increment:
# LOAD DATASET
if command != 'train': # If command == train, then ds_valid already load
# Get Validation dataset
ds_valid = get_dataset(loader_params, valid_lst, transform_valid_params, cuda_available, device, 'validation')
# Get Training dataset with no Data Augmentation
ds_train = get_dataset(loader_params, train_lst, transform_valid_params, cuda_available, device, 'training')
# Choice of optimisation metric
metric = "recall_specificity" if model_params["name"] in imed_utils.CLASSIFIER_LIST else "dice"
# Model path
model_path = os.path.join(path_output, "best_model.pt")
# Run analysis
thr = imed_testing.threshold_analysis(model_path=model_path,
ds_lst=[ds_train, ds_valid],
model_params=model_params,
testing_params=testing_params,
metric=metric,
increment=thr_increment,
fname_out=os.path.join(path_output, "roc.png"),
cuda_available=cuda_available)
# Update threshold in config file
context["postprocessing"]["binarize_prediction"] = {"thr": thr}
save_config_file(context, path_output)
if command == 'train':
return best_training_dice, best_training_loss, best_validation_dice, best_validation_loss
if command == 'test':
# LOAD DATASET
ds_test = imed_loader.load_dataset(**{**loader_params, **{'data_list': test_lst,
'transforms_params': transformation_dict,
'dataset_type': 'testing',
'requires_undo': True}}, device=device,
cuda_available=cuda_available)
metric_fns = imed_metrics.get_metric_fns(ds_test.task)
if 'film_layers' in model_params and any(model_params['film_layers']):
ds_test, model_params = update_film_model_params(context, ds_test, model_params, path_output)
# RUN INFERENCE
pred_metrics = imed_testing.test(model_params=model_params,
dataset_test=ds_test,
testing_params=testing_params,
path_output=path_output,
device=device,
cuda_available=cuda_available,
metric_fns=metric_fns,
postprocessing=context['postprocessing'])
# RUN EVALUATION
df_results = imed_evaluation.evaluate(path_data=loader_params['path_data'],
path_output=path_output,
target_suffix=loader_params["target_suffix"],
eval_params=context["evaluation_parameters"])
return df_results, pred_metrics
|
58,013 | def fetch_incidents(client, method, token, maxResults):
"""
Fetch alert details from server for creating incidents in XSOAR
:param client: instace of client to communicate with server
:param method: Requests method to be used
:param token: server access token
:param maxResults: limit for single fetch from server
:return: incidents from server
"""""
last_run = demisto.getLastRun()
if 'total_alert_count' not in last_run.keys():
last_run['total_alert_count'] = 0
if 'fetched_alert_count' not in last_run.keys():
last_run['fetched_alert_count'] = 0
if 'event_pull_start_date' not in last_run.keys():
last_run['event_pull_start_date'] = date.today().strftime("%Y/%m/%d")
params = {
'token': token,
'from': int(last_run['fetched_alert_count']),
'limit': int(MAX_EVENT_ITEMS) if maxResults > 50 else int(maxResults),
'start_date': last_run['event_pull_start_date'],
'end_date': date.today().strftime("%Y/%m/%d"),
'order_by': 'Ascending'
}
events_url = r'/api/v2/events/all'
result = client.get_alerts(method, events_url, params)
incidents: List[Dict[str, Any]] = []
if result is not None:
last_run['total_alert_count'] = result['total_count']
last_run['fetched_alert_count'] += len(result['data'])
eventTypes = get_event_types(client, "GET", token)
events = format_incidents(result, eventTypes)
try:
for eachinci in events:
inci = {
'name': eachinci['name'],
'severity': eachinci['severity'],
'occurred': eachinci['occurred'],
'rawJSON': json.dumps(eachinci)
}
incidents.append(inci)
except Exception as e:
demisto.error("Error formating incidents, {}".format(e))
if last_run['event_pull_start_date'] < date.today().strftime("%Y/%m/%d"):
last_run['event_pull_start_date'] = date.today().strftime("%Y/%m/%d")
last_run['total_alert_count'] = 0
last_run['fetched_alert_count'] = 0
demisto.setLastRun(last_run)
return incidents
| def fetch_incidents(client, method, token, maxResults):
"""
Fetch alert details from server for creating incidents in XSOAR
:param client: instace of client to communicate with server
:param method: Requests method to be used
:param token: server access token
:param maxResults: limit for single fetch from server
:return: incidents from server
"""""
last_run = demisto.getLastRun()
if 'total_alert_count' not in last_run.keys():
last_run['total_alert_count'] = 0
if 'fetched_alert_count' not in last_run.keys():
last_run['fetched_alert_count'] = 0
if 'event_pull_start_date' not in last_run.keys():
last_run['event_pull_start_date'] = date.today().strftime("%Y/%m/%d")
params = {
'token': token,
'from': int(last_run.get('fetched_alert_count', '0')),
'limit': int(MAX_EVENT_ITEMS) if maxResults > 50 else int(maxResults),
'start_date': last_run.get('event_pull_start_date', '0'),
'end_date': date.today().strftime("%Y/%m/%d"),
'order_by': 'Ascending'
}
events_url = r'/api/v2/events/all'
result = client.get_alerts(method, events_url, params)
incidents: List[Dict[str, Any]] = []
if result is not None:
last_run['total_alert_count'] = result['total_count']
last_run['fetched_alert_count'] += len(result['data'])
eventTypes = get_event_types(client, "GET", token)
events = format_incidents(result, eventTypes)
try:
for eachinci in events:
inci = {
'name': eachinci['name'],
'severity': eachinci['severity'],
'occurred': eachinci['occurred'],
'rawJSON': json.dumps(eachinci)
}
incidents.append(inci)
except Exception as e:
demisto.error("Error formating incidents, {}".format(e))
if last_run['event_pull_start_date'] < date.today().strftime("%Y/%m/%d"):
last_run['event_pull_start_date'] = date.today().strftime("%Y/%m/%d")
last_run['total_alert_count'] = 0
last_run['fetched_alert_count'] = 0
demisto.setLastRun(last_run)
return incidents
|
42,814 | def test_name_sep_no_names_to(df_checks):
"""Raise ValuError if names_sep and names_to is None."""
with pytest.raises(ValueError):
df_checks.pivot_longer(names_to=None, names_sep="_")
| def test_name_sep_no_names_to(df_checks):
"""Raise ValueError if names_sep and names_to is None."""
with pytest.raises(ValueError):
df_checks.pivot_longer(names_to=None, names_sep="_")
|
57,966 | def url_command(**kwargs):
"""
Execute url command.
Args:
params (dict): query parameters.
Returns:
CommandResults.
"""
url = demisto.args().get('url')
try:
url_information = query_url_information(url, kwargs.get('api_url'), kwargs.get('use_ssl')).json()
except UnicodeEncodeError:
return_results(CommandResults(
readable_output='Service Does not support special characters.',
))
return
return process_query_info(url_information, url, **kwargs)
| def url_command(**kwargs):
"""
Execute url command.
Args:
params (dict): query parameters.
Returns:
CommandResults.
"""
url = demisto.args().get('url')
try:
url_information = query_url_information(url, kwargs.get('api_url'), kwargs.get('use_ssl')).json()
except UnicodeEncodeError:
return CommandResults(
readable_output='Service Does not support special characters.',
)
return process_query_info(url_information, url, **kwargs)
|
43,935 | def _hermite_coulomb(t, u, v, n, p, dr):
"""Evaluate Hermite integral needed to compute the nuclear attraction and electron repulsion
integrals.
These integrals are computed recursively starting from the Boys function
[`Helgaker (1995) p817 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]:
.. math::
R_{000}^n = (-2p)^n F_n(pR_{CP}^2),
where :math:`F_n` is the Boys function, :math:`p` is computed from the exponents of the two
Gaussian functions as :math:`p = \alpha + \beta`, and :math:`R_{CP}` is the distance between the
center of the composite Gaussian centered at :math:`P` and the electrostatic potential at
:math:`C`. The following recursive equations are used to compute the evaluate the higher order
Hermite integrals
.. math::
R_{t+1, u, v}^n = t R_{t-1, u, v}^{n+1} + x R_{t, u, v}^{n+1}
R_{t, u+1, v}^n = u R_{t, u-1, v}^{n+1} + y R_{t, u, v}^{n+1}
R_{t, u, v+1}^n = v R_{t, u, v-1}^{n+1} + z R_{t, u, v}^{n+1}
where :math:`x`, :math:`y` and :math:`z` are the Cartesian components of :math:`R_{CP}`.
Args:
t (integer): order of Hermite derivative in x
u (integer): order of Hermite derivative in y
v (float): order of Hermite derivative in z
n (integer): order of the Boys function
p (float): sum of the Gaussian exponents
dr (array[float]): distance between the center of the composite Gaussian and the nucleus
Returns:
array[float]: value of the Hermite integral
"""
x, y, z = dr[0], dr[1], dr[2]
T = p * (dr ** 2).sum(axis=0)
r = 0
if t == u == v == 0:
f = []
for term in T.flatten():
f.append(_boys(n, term))
return ((-2 * p) ** n) * anp.array(f).reshape(T.shape)
if t == u == 0:
if v > 1:
r = r + (v - 1) * _hermite_coulomb(t, u, v - 2, n + 1, p, dr)
r = r + z * _hermite_coulomb(t, u, v - 1, n + 1, p, dr)
return r
if t == 0:
if u > 1:
r = r + (u - 1) * _hermite_coulomb(t, u - 2, v, n + 1, p, dr)
r = r + y * _hermite_coulomb(t, u - 1, v, n + 1, p, dr)
return r
if t > 1:
r = r + (t - 1) * _hermite_coulomb(t - 2, u, v, n + 1, p, dr)
r = r + x * _hermite_coulomb(t - 1, u, v, n + 1, p, dr)
return r
| def _hermite_coulomb(t, u, v, n, p, dr):
"""Evaluate Hermite integral needed to compute the nuclear attraction and electron repulsion
integrals.
These integrals are computed recursively starting from the Boys function
[`Helgaker (1995) p817 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]:
.. math::
R_{000}^n = (-2p)^n F_n(pR_{CP}^2),
where :math:`F_n` is the Boys function, :math:`p` is computed from the exponents of the two
Gaussian functions as :math:`p = \alpha + \beta`, and :math:`R_{CP}` is the distance between the
center of the composite Gaussian centered at :math:`P` and the electrostatic potential at
:math:`C`. The following recursive equations are used to compute the evaluate the higher order
Hermite integrals
.. math::
R_{t+1, u, v}^n = t R_{t-1, u, v}^{n+1} + x R_{t, u, v}^{n+1}
R_{t, u+1, v}^n = u R_{t, u-1, v}^{n+1} + y R_{t, u, v}^{n+1}
R_{t, u, v+1}^n = v R_{t, u, v-1}^{n+1} + z R_{t, u, v}^{n+1},
where :math:`x`, :math:`y` and :math:`z` are the Cartesian components of :math:`R_{CP}`.
Args:
t (integer): order of Hermite derivative in x
u (integer): order of Hermite derivative in y
v (float): order of Hermite derivative in z
n (integer): order of the Boys function
p (float): sum of the Gaussian exponents
dr (array[float]): distance between the center of the composite Gaussian and the nucleus
Returns:
array[float]: value of the Hermite integral
"""
x, y, z = dr[0], dr[1], dr[2]
T = p * (dr ** 2).sum(axis=0)
r = 0
if t == u == v == 0:
f = []
for term in T.flatten():
f.append(_boys(n, term))
return ((-2 * p) ** n) * anp.array(f).reshape(T.shape)
if t == u == 0:
if v > 1:
r = r + (v - 1) * _hermite_coulomb(t, u, v - 2, n + 1, p, dr)
r = r + z * _hermite_coulomb(t, u, v - 1, n + 1, p, dr)
return r
if t == 0:
if u > 1:
r = r + (u - 1) * _hermite_coulomb(t, u - 2, v, n + 1, p, dr)
r = r + y * _hermite_coulomb(t, u - 1, v, n + 1, p, dr)
return r
if t > 1:
r = r + (t - 1) * _hermite_coulomb(t - 2, u, v, n + 1, p, dr)
r = r + x * _hermite_coulomb(t - 1, u, v, n + 1, p, dr)
return r
|
41,494 | def import_root_histogram(rootdir, filename, path, name, filecache=None):
global __FILECACHE__
filecache = filecache or __FILECACHE__
# strip leading slashes as uproot doesn't use "/" for top-level
path = path or ''
path = path.strip('/')
fullpath = Path().joinpath(rootdir, filename).as_posix()
if not fullpath in filecache:
f = uproot.open(fullpath)
filecache[fullpath] = f
else:
f = filecache[fullpath]
try:
h = f[name]
except KeyError:
try:
h = f[Path().joinpath(path, name).as_posix()]
except KeyError:
raise KeyError(
f'Both {name} and {Path().joinpath(path, name)} were tried and not found'
f' in {Path().joinpath(rootdir, filename)}'
)
return h.numpy()[0].tolist(), extract_error(h)
| def import_root_histogram(rootdir, filename, path, name, filecache=None):
global __FILECACHE__
filecache = filecache or __FILECACHE__
# strip leading slashes as uproot doesn't use "/" for top-level
path = path or ''
path = path.strip('/')
fullpath = Path().joinpath(rootdir, filename).as_posix()
if not fullpath in filecache:
f = uproot.open(fullpath)
filecache[fullpath] = f
else:
f = filecache[fullpath]
try:
h = f[name]
except KeyError:
try:
h = f[Path(path).joinpath(name)]
except KeyError:
raise KeyError(
f'Both {name} and {Path().joinpath(path, name)} were tried and not found'
f' in {Path().joinpath(rootdir, filename)}'
)
return h.numpy()[0].tolist(), extract_error(h)
|
43,977 | def dipole(hf_file, core=None, active=None, mapping="jordan_wigner", cutoff=1.0e-12, wires=None):
r"""Computes the electric dipole moment operator in the Pauli basis.
The second quantized dipole moment operator :math:`\hat{D}` of a molecule is given by
.. math::
\hat{D} = \sum_{\alpha, \beta} \langle \alpha \vert {\bf r} \vert \beta \rangle
[\hat{c}_{\alpha\uparrow}^\dagger \hat{c}_{\beta\uparrow} +
\hat{c}_{\alpha\downarrow}^\dagger \hat{c}_{\beta\downarrow}] + \hat{D}_\mathrm{n}.
In the equation above, the indices :math:`\alpha, \beta` run over the basis of Hartree-Fock
molecular orbitals, the operators :math:`\hat{c}^\dagger` and :math:`\hat{c}` are the
electron creation and annihilation operators, respectively, and
:math:`\langle \alpha \vert {\bf r} \vert \beta \rangle` denotes
the matrix elements of the position operator :math:`\hat{{\bf r}`. These matrix elements
are calculated as
.. math::
\langle \alpha \vert \hat{{\bf r}} \vert \beta \rangle = \sum_{i, j} C_{\alpha i}^*C_{\beta j}
\langle i \vert {\bf r} \vert j \rangle,
where :math:`\vert i \rangle` is the wave function of the atomic orbitals and
:math:`C_{\alpha i}` and :math:`\langle i \vert \hat{{\bf r}} \vert j \rangle`
are the representations of the molecular orbitals and the operator
:math:`\hat{{\bf r}}` in the atomic basis.
The contribution of the nuclei to the dipole operator is given by
.. math::
\hat{D}_\mathrm{n} = -\sum_{i=1}^{N_\mathrm{atoms}} Z_i {\bf R}_i \hat{I},
where :math:`Z_i` and :math:`{\bf R}_i` are, respectively, the atomic number and the
position vector of the :math:`i`-th atom of the molecule.
Args:
hf_file (str): Absolute path to the hdf5-formatted file with the Hartree-Fock
electronic structure. This file can be generated using the
:func:`~.meanfield` function.
core (list): indices of core orbitals, i.e., the orbitals that are
not correlated in the many-body wave function
active (list): indices of active orbitals, i.e., the orbitals used to
build the correlated many-body wave function
mapping (str): Specifies the transformation to map the fermionic operator to the
Pauli basis. Input values can be ``'jordan_wigner'`` or ``'bravyi_kitaev'``.
cutoff (float): Cutoff value for including the matrix elements
:math:`\langle \alpha \vert \hat{{\bf r}} \vert \beta \rangle`. The matrix elements
with absolute value less than ``cutoff`` are neglected.
wires (Wires, list, tuple, dict): Custom wire mapping used to convert the qubit operator
to an observable measurable in a PennyLane ansatz.
For types Wires/list/tuple, each item in the iterable represents a wire label
corresponding to the qubit number equal to its index.
For type dict, only int-keyed dict (for qubit-to-wire conversion) is accepted.
If None, will use identity map (e.g. 0->0, 1->1, ...).
Returns:
list[pennylane.Hamiltonian]: the qubit observables corresponding to the components
:math:`\hat{D}_x`, :math:`\hat{D}_y` and :math:`\hat{D}_z` of the dipole operator in
atomic units (Bohr radii).
**Example**
>>> dipole_obs = dipole("./h3p.hdf5")
>>> print(dipole_obs)
[<Hamiltonian: terms=19, wires=[0, 1, 2, 3, 4, 5]>,
<Hamiltonian: terms=19, wires=[0, 1, 2, 3, 4, 5]>,
<Hamiltonian: terms=1, wires=[0]>]
>>> print(dipole_obs[0])
(-1.4861475511479285) [Z0]
+ (-1.4861475511479285) [Z1]
+ (-1.0207535180657459) [Z2]
+ (-1.0207535180657459) [Z3]
+ (-0.38409271341166346) [Z4]
+ (-0.38409271341166346) [Z5]
+ (2.9129875652506754) [I0]
+ (-1.0463884953059674) [Y0 Z1 Y2]
+ (-1.0463884953059674) [X0 Z1 X2]
+ (-1.0463884953059674) [Y1 Z2 Y3]
+ (-1.0463884953059674) [X1 Z2 X3]
+ (-0.2949628258407493) [Y2 Z3 Y4]
+ (-0.2949628258407493) [X2 Z3 X4]
+ (-0.2949628258407493) [Y3 Z4 Y5]
+ (-0.2949628258407493) [X3 Z4 X5]
+ (-0.10008920247855208) [Y0 Z1 Z2 Z3 Y4]
+ (-0.10008920247855208) [X0 Z1 Z2 Z3 X4]
+ (-0.10008920247855208) [Y1 Z2 Z3 Z4 Y5]
+ (-0.10008920247855208) [X1 Z2 Z3 Z4 X5]
"""
bohr_angs = 0.529177210903
atomic_numbers = {
"H": 1,
"He": 2,
"Li": 3,
"Be": 4,
"B": 5,
"C": 6,
"N": 7,
"O": 8,
"F": 9,
"Ne": 10,
}
hf = openfermion.MolecularData(filename=hf_file.strip())
if hf.multiplicity != 1:
raise ValueError(
"Currently, this functionality is constrained to closed-shell Hartree-Fock states;"
" got spin multiplicity 2S+1 = {}".format(hf.multiplicity)
)
for i in hf.geometry:
print(i[0])
if i[0] not in atomic_numbers:
raise ValueError(
"Currently, only first- or second-row elements of the periodic table are supported;"
" got element {}".format(i[0])
)
# Load dipole matrix elements in the atomic basis
# pylint: disable=import-outside-toplevel
from pyscf import gto
mol = gto.M(
atom=hf.geometry, basis=hf.basis, charge=hf.charge, spin=0.5 * (hf.multiplicity - 1)
)
dip_ao = mol.intor_symmetric("int1e_r", comp=3).real
# Transform dipole matrix elements to the MO basis
n_orbs = hf.n_orbitals
c_hf = hf.canonical_orbitals
dip_mo = np.zeros((3, n_orbs, n_orbs))
for comp in range(3):
for alpha in range(n_orbs):
for beta in range(alpha + 1):
dip_mo[comp, alpha, beta] = c_hf[alpha] @ dip_ao[comp] @ c_hf[beta]
dip_mo[comp] += dip_mo[comp].T - np.diag(np.diag(dip_mo[comp]))
# Compute the nuclear contribution
dip_n = np.zeros(3)
for comp in range(3):
for i in hf.geometry:
dip_n[comp] -= atomic_numbers[i[0]] * i[1][comp] / bohr_angs
# Build the observable
dip_obs = []
for i in range(3):
fermion_obs = one_particle(dip_mo[i], core=core, active=active, cutoff=cutoff)
dip_obs.append(observable([fermion_obs], init_term=dip_n[i], mapping=mapping, wires=wires))
return dip_obs
| def dipole(hf_file, core=None, active=None, mapping="jordan_wigner", cutoff=1.0e-12, wires=None):
r"""Computes the electric dipole moment operator in the Pauli basis.
The second quantized dipole moment operator :math:`\hat{D}` of a molecule is given by
.. math::
\hat{D} = \sum_{\alpha, \beta} \langle \alpha \vert {\bf r} \vert \beta \rangle
[\hat{c}_{\alpha\uparrow}^\dagger \hat{c}_{\beta\uparrow} +
\hat{c}_{\alpha\downarrow}^\dagger \hat{c}_{\beta\downarrow}] + \hat{D}_\mathrm{n}.
In the equation above, the indices :math:`\alpha, \beta` run over the basis of Hartree-Fock
molecular orbitals, the operators :math:`\hat{c}^\dagger` and :math:`\hat{c}` are the
electron creation and annihilation operators, respectively, and
:math:`\langle \alpha \vert {\bf r} \vert \beta \rangle` denotes
the matrix elements of the position operator :math:`\hat{{\bf r}`. These matrix elements
are calculated as
.. math::
\langle \alpha \vert \hat{{\bf r}} \vert \beta \rangle = \sum_{i, j} C_{\alpha i}^*C_{\beta j}
\langle i \vert {\bf r} \vert j \rangle,
where :math:`\vert i \rangle` is the wave function of the atomic orbitals and
:math:`C_{\alpha i}` and :math:`\langle i \vert \hat{{\bf r}} \vert j \rangle`
are the representations of the molecular orbitals and the operator
:math:`\hat{{\bf r}}` in the atomic basis.
The contribution of the nuclei to the dipole operator is given by
.. math::
\hat{D}_\mathrm{n} = -\sum_{i=1}^{N_\mathrm{atoms}} Z_i {\bf R}_i \hat{I},
where :math:`Z_i` and :math:`{\bf R}_i` are, respectively, the atomic number and the
position vector of the :math:`i`-th atom of the molecule.
Args:
hf_file (str): Absolute path to the hdf5-formatted file with the Hartree-Fock
electronic structure. This file can be generated using the
:func:`~.meanfield` function.
core (list): indices of core orbitals, i.e., the orbitals that are
not correlated in the many-body wave function
active (list): indices of active orbitals
mapping (str): Specifies the transformation to map the fermionic operator to the
Pauli basis. Input values can be ``'jordan_wigner'`` or ``'bravyi_kitaev'``.
cutoff (float): Cutoff value for including the matrix elements
:math:`\langle \alpha \vert \hat{{\bf r}} \vert \beta \rangle`. The matrix elements
with absolute value less than ``cutoff`` are neglected.
wires (Wires, list, tuple, dict): Custom wire mapping used to convert the qubit operator
to an observable measurable in a PennyLane ansatz.
For types Wires/list/tuple, each item in the iterable represents a wire label
corresponding to the qubit number equal to its index.
For type dict, only int-keyed dict (for qubit-to-wire conversion) is accepted.
If None, will use identity map (e.g. 0->0, 1->1, ...).
Returns:
list[pennylane.Hamiltonian]: the qubit observables corresponding to the components
:math:`\hat{D}_x`, :math:`\hat{D}_y` and :math:`\hat{D}_z` of the dipole operator in
atomic units (Bohr radii).
**Example**
>>> dipole_obs = dipole("./h3p.hdf5")
>>> print(dipole_obs)
[<Hamiltonian: terms=19, wires=[0, 1, 2, 3, 4, 5]>,
<Hamiltonian: terms=19, wires=[0, 1, 2, 3, 4, 5]>,
<Hamiltonian: terms=1, wires=[0]>]
>>> print(dipole_obs[0])
(-1.4861475511479285) [Z0]
+ (-1.4861475511479285) [Z1]
+ (-1.0207535180657459) [Z2]
+ (-1.0207535180657459) [Z3]
+ (-0.38409271341166346) [Z4]
+ (-0.38409271341166346) [Z5]
+ (2.9129875652506754) [I0]
+ (-1.0463884953059674) [Y0 Z1 Y2]
+ (-1.0463884953059674) [X0 Z1 X2]
+ (-1.0463884953059674) [Y1 Z2 Y3]
+ (-1.0463884953059674) [X1 Z2 X3]
+ (-0.2949628258407493) [Y2 Z3 Y4]
+ (-0.2949628258407493) [X2 Z3 X4]
+ (-0.2949628258407493) [Y3 Z4 Y5]
+ (-0.2949628258407493) [X3 Z4 X5]
+ (-0.10008920247855208) [Y0 Z1 Z2 Z3 Y4]
+ (-0.10008920247855208) [X0 Z1 Z2 Z3 X4]
+ (-0.10008920247855208) [Y1 Z2 Z3 Z4 Y5]
+ (-0.10008920247855208) [X1 Z2 Z3 Z4 X5]
"""
bohr_angs = 0.529177210903
atomic_numbers = {
"H": 1,
"He": 2,
"Li": 3,
"Be": 4,
"B": 5,
"C": 6,
"N": 7,
"O": 8,
"F": 9,
"Ne": 10,
}
hf = openfermion.MolecularData(filename=hf_file.strip())
if hf.multiplicity != 1:
raise ValueError(
"Currently, this functionality is constrained to closed-shell Hartree-Fock states;"
" got spin multiplicity 2S+1 = {}".format(hf.multiplicity)
)
for i in hf.geometry:
print(i[0])
if i[0] not in atomic_numbers:
raise ValueError(
"Currently, only first- or second-row elements of the periodic table are supported;"
" got element {}".format(i[0])
)
# Load dipole matrix elements in the atomic basis
# pylint: disable=import-outside-toplevel
from pyscf import gto
mol = gto.M(
atom=hf.geometry, basis=hf.basis, charge=hf.charge, spin=0.5 * (hf.multiplicity - 1)
)
dip_ao = mol.intor_symmetric("int1e_r", comp=3).real
# Transform dipole matrix elements to the MO basis
n_orbs = hf.n_orbitals
c_hf = hf.canonical_orbitals
dip_mo = np.zeros((3, n_orbs, n_orbs))
for comp in range(3):
for alpha in range(n_orbs):
for beta in range(alpha + 1):
dip_mo[comp, alpha, beta] = c_hf[alpha] @ dip_ao[comp] @ c_hf[beta]
dip_mo[comp] += dip_mo[comp].T - np.diag(np.diag(dip_mo[comp]))
# Compute the nuclear contribution
dip_n = np.zeros(3)
for comp in range(3):
for i in hf.geometry:
dip_n[comp] -= atomic_numbers[i[0]] * i[1][comp] / bohr_angs
# Build the observable
dip_obs = []
for i in range(3):
fermion_obs = one_particle(dip_mo[i], core=core, active=active, cutoff=cutoff)
dip_obs.append(observable([fermion_obs], init_term=dip_n[i], mapping=mapping, wires=wires))
return dip_obs
|
57,161 | def _compare_file_count(
first_dir_list: List[str], second_dir_list: List[str]) -> None:
"""Ensure that the total count of files in all directories in the first
list matches the count of files in all the directories in the second list.
Args:
first_dir_list: list(str). List of directories to compare.
second_dir_list: list(str). List of directories to compare.
Raises:
ValueError. The source directory list does not have the same file
count as the target directory list.
"""
file_counts = [0, 0]
for first_dir_path in first_dir_list:
file_counts[0] += get_file_count(first_dir_path)
for second_dir_path in second_dir_list:
file_counts[1] += get_file_count(second_dir_path)
if file_counts[0] != file_counts[1]:
print('Comparing %s vs %s' % (first_dir_list, second_dir_list))
raise ValueError(
'%s files in first dir list != %s files in second dir list' % (
file_counts[0], file_counts[1]))
| def _compare_file_count(
first_dir_list: List[str], second_dir_list: List[str]
) -> None:
"""Ensure that the total count of files in all directories in the first
list matches the count of files in all the directories in the second list.
Args:
first_dir_list: list(str). List of directories to compare.
second_dir_list: list(str). List of directories to compare.
Raises:
ValueError. The source directory list does not have the same file
count as the target directory list.
"""
file_counts = [0, 0]
for first_dir_path in first_dir_list:
file_counts[0] += get_file_count(first_dir_path)
for second_dir_path in second_dir_list:
file_counts[1] += get_file_count(second_dir_path)
if file_counts[0] != file_counts[1]:
print('Comparing %s vs %s' % (first_dir_list, second_dir_list))
raise ValueError(
'%s files in first dir list != %s files in second dir list' % (
file_counts[0], file_counts[1]))
|
33,054 | def read_chunked(blob: BinaryIO) -> bytes:
'''Read a file or binary blob in chunks'''
while(True):
data = blob.read(DEFAULT_BUFFER_SIZE)
if not data:
break
yield data
| def read_chunked(blob: BinaryIO) -> Iterator[bytes]:
'''Read a file or binary blob in chunks'''
while(True):
data = blob.read(DEFAULT_BUFFER_SIZE)
if not data:
break
yield data
|
30,661 | def file_command(client, args):
files = argToList(args.get('file'))
all_results = []
for file in files:
hash_type = get_hash_type(file)
if hash_type != "Unknown":
res = client.get_hash_reputation(hash_type, file)
analysis_info = {
hash_type.upper(): file,
'Found': res.get('found'),
'Verdict': res.get('verdict'),
'Score': res.get('score'),
'Malware-families': res.get('malware_families')
}
score = Common.DBotScore.NONE
malicious = None
if res["found"]:
if res["verdict"]:
score = Common.DBotScore.BAD
malicious = "TDS Polygon score: {}".format(res['score'])
if res.get('malware_families'):
malicious += ", {}".format(", ".join(res["malware_families"]))
else:
score = Common.DBotScore.GOOD
dbot_score = Common.DBotScore(
indicator=file,
indicator_type=DBotScoreType.FILE,
integration_name=INTEGRATION_NAME,
score=score,
malicious_description=malicious
)
indicator = Common.File(**{hash_type: file, "dbot_score": dbot_score})
result = CommandResults(
outputs_prefix="Polygon.Analysis",
outputs_key_field=hash_type.upper(),
outputs=analysis_info,
indicators=[indicator]
)
return_results(result)
all_results.append(result)
return all_results
| def file_command(client, args):
files = argToList(args.get('file'))
all_results = []
for file in files:
hash_type = get_hash_type(file)
if hash_type != "Unknown":
res = client.get_hash_reputation(hash_type, file)
analysis_info = {
hash_type.upper(): file,
'Found': res.get('found'),
'Verdict': res.get('verdict'),
'Score': res.get('score'),
'Malware-families': res.get('malware_families')
}
score = Common.DBotScore.NONE
malicious = None
if res.get("found"):
if res["verdict"]:
score = Common.DBotScore.BAD
malicious = "TDS Polygon score: {}".format(res['score'])
if res.get('malware_families'):
malicious += ", {}".format(", ".join(res["malware_families"]))
else:
score = Common.DBotScore.GOOD
dbot_score = Common.DBotScore(
indicator=file,
indicator_type=DBotScoreType.FILE,
integration_name=INTEGRATION_NAME,
score=score,
malicious_description=malicious
)
indicator = Common.File(**{hash_type: file, "dbot_score": dbot_score})
result = CommandResults(
outputs_prefix="Polygon.Analysis",
outputs_key_field=hash_type.upper(),
outputs=analysis_info,
indicators=[indicator]
)
return_results(result)
all_results.append(result)
return all_results
|
7,114 | def local_binary_pattern(image, P, R, method='default'):
"""Gray scale and rotation invariant LBP (Local Binary Patterns).
LBP is an invariant descriptor that can be used for texture classification.
Parameters
----------
image : (N, M) array
Graylevel image.
P : int
Number of circular symmetric neighbour set points (quantization of
the angular space).
R : float
Radius of circle (spatial resolution of the operator).
method : {'default', 'ror', 'uniform', 'var'}
Method to determine the pattern.
* 'default': original local binary pattern which is gray scale but not
rotation invariant.
* 'ror': extension of default implementation which is gray scale and
rotation invariant.
* 'uniform': improved rotation invariance with uniform patterns and
finer quantization of the angular space which is gray scale and
rotation invariant.
* 'nri_uniform': non rotation-invariant uniform patterns variant
which is only gray scale invariant [2]_.
* 'var': rotation invariant variance measures of the contrast of local
image texture which is rotation but not gray scale invariant.
Returns
-------
output : (N, M) array
LBP image.
References
----------
.. [1] Multiresolution Gray-Scale and Rotation Invariant Texture
Classification with Local Binary Patterns.
Timo Ojala, Matti Pietikainen, Topi Maenpaa.
http://www.ee.oulu.fi/research/mvmp/mvg/files/pdf/pdf_94.pdf, 2002.
.. [2] Face recognition with local binary patterns.
Timo Ahonen, Abdenour Hadid, Matti Pietikainen,
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.214.6851,
2004.
"""
assert_nD(image, 2)
methods = {
'default': ord('D'),
'ror': ord('R'),
'uniform': ord('U'),
'nri_uniform': ord('N'),
'var': ord('V')
}
image = np.ascontiguousarray(image, dtype=np.double)
output = _local_binary_pattern(image, P, R, methods[method.lower()])
return output
| def local_binary_pattern(image, P, R, method='default'):
"""Gray scale and rotation invariant LBP (Local Binary Patterns).
LBP is an invariant descriptor that can be used for texture classification.
Parameters
----------
image : (N, M) array
Graylevel image.
P : int
Number of circularly symmetric neighbour set points (quantization of
the angular space).
R : float
Radius of circle (spatial resolution of the operator).
method : {'default', 'ror', 'uniform', 'var'}
Method to determine the pattern.
* 'default': original local binary pattern which is gray scale but not
rotation invariant.
* 'ror': extension of default implementation which is gray scale and
rotation invariant.
* 'uniform': improved rotation invariance with uniform patterns and
finer quantization of the angular space which is gray scale and
rotation invariant.
* 'nri_uniform': non rotation-invariant uniform patterns variant
which is only gray scale invariant [2]_.
* 'var': rotation invariant variance measures of the contrast of local
image texture which is rotation but not gray scale invariant.
Returns
-------
output : (N, M) array
LBP image.
References
----------
.. [1] Multiresolution Gray-Scale and Rotation Invariant Texture
Classification with Local Binary Patterns.
Timo Ojala, Matti Pietikainen, Topi Maenpaa.
http://www.ee.oulu.fi/research/mvmp/mvg/files/pdf/pdf_94.pdf, 2002.
.. [2] Face recognition with local binary patterns.
Timo Ahonen, Abdenour Hadid, Matti Pietikainen,
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.214.6851,
2004.
"""
assert_nD(image, 2)
methods = {
'default': ord('D'),
'ror': ord('R'),
'uniform': ord('U'),
'nri_uniform': ord('N'),
'var': ord('V')
}
image = np.ascontiguousarray(image, dtype=np.double)
output = _local_binary_pattern(image, P, R, methods[method.lower()])
return output
|
43,463 | def AmplitudeEmbedding(features, wires, pad):
r"""Encodes :math:`2^n` features into the amplitude vector of :math:`n` qubits.
If the total number of features to embed are less than the :math:`2^n` available amplitudes, non-informative constants (zeros) can be padded to ``features``. To avail this, the argument ``pad`` should be set to ``True``.
The absolute square of all elements in ``features`` has to add up to one.
.. note::
AmplitudeEmbedding uses PennyLane's :class:`~pennylane.ops.QubitStateVector` and only works in conjunction with
devices that implement this function.
Args:
features (array): Input array of shape ``(2**n,)``
wires (Sequence[int]): sequence of qubit indices that the template acts on
pad (Boolean): controls the activation of the padding option
"""
if not isinstance(wires, Iterable):
raise ValueError("Wires needs to be a list of wires that the embedding uses; got {}.".format(wires))
if pad==True and 2**len(wires) != len(features):
features = np.pad(features, (0, 2**len(wires)-len(features)), 'constant')
if pad==False and 2**len(wires) != len(features):
raise ValueError("AmplitudeEmbedding with no padding requires a feature vector of size 2**len(wires), which is {}; "
"got {}.".format(2 ** len(wires), len(features)))
if np.linalg.norm(features,2) != 1:
raise ValueError("AmplitudeEmbedding requires a normalized feature vector.")
QubitStateVector(features, wires=wires)
| def AmplitudeEmbedding(features, wires, pad):
r"""Encodes :math:`2^n` features into the amplitude vector of :math:`n` qubits.
If the total number of features to embed are less than the :math:`2^n` available amplitudes, non-informative constants (zeros) can be padded to ``features``. To avail this, the argument ``pad`` should be set to ``True``.
The absolute square of all elements in ``features`` has to add up to one.
.. note::
AmplitudeEmbedding uses PennyLane's :class:`~pennylane.ops.QubitStateVector` and only works in conjunction with
devices that implement this function.
Args:
features (array): Input array of shape ``(2**n,)``
wires (Sequence[int]): sequence of qubit indices that the template acts on
pad (Boolean): controls the activation of the padding option
"""
if not isinstance(wires, Iterable):
raise ValueError("Wires needs to be a list of wires that the embedding uses; got {}.".format(wires))
if pad==True and 2**len(wires) != len(features):
features = np.pad(features, (0, 2**len(wires)-len(features)), 'constant')
if not pad and 2**len(wires) != len(features):
raise ValueError("AmplitudeEmbedding with no padding requires a feature vector of size 2**len(wires), which is {}; "
"got {}.".format(2 ** len(wires), len(features)))
if np.linalg.norm(features,2) != 1:
raise ValueError("AmplitudeEmbedding requires a normalized feature vector.")
QubitStateVector(features, wires=wires)
|
39,574 | def test_get_no_proxy_env():
"""
Test geting proxy configuration through environment with no_proxy modification.
"""
config = Mock()
config.no_proxy = None
config.proxy = None
config.base_url = "redhat.com"
connection = InsightsConnection(config)
with patch.dict(os_environ, {"HTTPS_PROXY": "env.proxy.example.com", "NO_PROXY": "*"}, clear=True):
connection.get_proxies()
assert connection.proxies is None
with patch.dict(os_environ, {"HTTPS_PROXY": "env.proxy.example.com", "NO_PROXY": "redhat.com"}, clear=True):
connection.get_proxies()
assert connection.proxies is None
with patch.dict(os_environ, {"HTTPS_PROXY": "env.proxy.example.com", "NO_PROXY": "redhat.com,example.com"}, clear=True):
connection.get_proxies()
assert connection.proxies is None
with patch.dict(os_environ, {"HTTPS_PROXY": "env.proxy.example.com", "NO_PROXY": "url.com,example.com"}, clear=True):
connection.get_proxies()
assert connection.proxies == {'https': 'env.proxy.example.com'}
with patch.dict(os_environ, {"HTTPS_PROXY": "env.proxy.example.com", "NO_PROXY": "url.com"}, clear=True):
connection.get_proxies()
assert connection.proxies == {'https': 'env.proxy.example.com'}
| def test_get_no_proxy_env():
"""
Test geting proxy configuration through environment with no_proxy modification.
"""
config = Mock()
config.no_proxy = None
config.proxy = None
config.base_url = "redhat.com"
connection = InsightsConnection(config)
with patch.dict(os_environ, {"HTTPS_PROXY": "env.proxy.example.com", "NO_PROXY": "*"}, clear=True):
connection.get_proxies()
assert connection.proxies is None
with patch.dict(os_environ, {"HTTPS_PROXY": "env.proxy.example.com", "NO_PROXY": "redhat.com"}, clear=True):
connection.get_proxies()
assert connection.proxies is None
with patch.dict(os_environ, {"HTTPS_PROXY": "env.proxy.example.com", "NO_PROXY": "example.com,redhat.com"}, clear=True):
connection.get_proxies()
assert connection.proxies is None
with patch.dict(os_environ, {"HTTPS_PROXY": "env.proxy.example.com", "NO_PROXY": "url.com,example.com"}, clear=True):
connection.get_proxies()
assert connection.proxies == {'https': 'env.proxy.example.com'}
with patch.dict(os_environ, {"HTTPS_PROXY": "env.proxy.example.com", "NO_PROXY": "url.com"}, clear=True):
connection.get_proxies()
assert connection.proxies == {'https': 'env.proxy.example.com'}
|
19,628 | def build_info_files_json_v1(m, prefix, files, files_with_prefix):
log = utils.get_logger(__name__)
no_link_files = m.get_value('build/no_link')
files_json = []
files_inodes = get_inodes(files, prefix)
for fi in sorted(files):
prefix_placeholder, file_mode = has_prefix(fi, files_with_prefix)
path = os.path.join(prefix, fi)
short_path = get_short_path(m, fi)
if short_path:
short_path = short_path.replace('\\', '/').replace('\\\\', '/')
file_info = {
"_path": short_path,
"sha256": utils.sha256_checksum(path),
"path_type": path_type(path),
}
if file_info["path_type"] == PathType.hardlink:
file_info["size_in_bytes"] = os.stat(path).st_size
elif not isfile(path):
# this is a softlink that points to nowhere, so is zero bytes
file_info["size_in_bytes"] = 0
log.warn('file %s is a symlink with no target', path)
else:
# softlink that points somewhere
file_info["size_in_bytes"] = os.stat(path).st_size
no_link = is_no_link(no_link_files, fi)
if no_link:
file_info["no_link"] = no_link
if prefix_placeholder and file_mode:
file_info["prefix_placeholder"] = prefix_placeholder
file_info["file_mode"] = file_mode
if file_info.get("path_type") == PathType.hardlink and CrossPlatformStLink.st_nlink(
path) > 1:
target_short_path_inode = get_inode(path)
inode_paths = [files[index] for index, ino in enumerate(files_inodes) if ino == target_short_path_inode]
file_info["inode_paths"] = inode_paths
files_json.append(file_info)
return files_json
| def build_info_files_json_v1(m, prefix, files, files_with_prefix):
log = utils.get_logger(__name__)
no_link_files = m.get_value('build/no_link')
files_json = []
files_inodes = get_inodes(files, prefix)
for fi in sorted(files):
prefix_placeholder, file_mode = has_prefix(fi, files_with_prefix)
path = os.path.join(prefix, fi)
short_path = get_short_path(m, fi)
if short_path:
short_path = short_path.replace('\\', '/').replace('\\\\', '/')
file_info = {
"_path": short_path,
"sha256": utils.sha256_checksum(path),
"path_type": path_type(path),
}
if file_info["path_type"] == PathType.hardlink:
file_info["size_in_bytes"] = os.stat(path).st_size
elif not isfile(path):
# this is a softlink that points to nowhere, so is zero bytes
file_info["size_in_bytes"] = 0
warnings.warn('file %s is a symlink with no target' % path, UserWarning)
else:
# softlink that points somewhere
file_info["size_in_bytes"] = os.stat(path).st_size
no_link = is_no_link(no_link_files, fi)
if no_link:
file_info["no_link"] = no_link
if prefix_placeholder and file_mode:
file_info["prefix_placeholder"] = prefix_placeholder
file_info["file_mode"] = file_mode
if file_info.get("path_type") == PathType.hardlink and CrossPlatformStLink.st_nlink(
path) > 1:
target_short_path_inode = get_inode(path)
inode_paths = [files[index] for index, ino in enumerate(files_inodes) if ino == target_short_path_inode]
file_info["inode_paths"] = inode_paths
files_json.append(file_info)
return files_json
|
39,622 | def process_set_as_subquery(
ir_set: irast.Set, stmt: pgast.SelectStmt, *,
ctx: context.CompilerContextLevel) -> SetRVars:
is_objtype_path = ir_set.path_id.is_objtype_path()
expr = ir_set.expr
assert isinstance(expr, irast.Stmt)
ir_source: Optional[irast.Set]
if ir_set.rptr is not None:
ir_source = ir_set.rptr.source
if not is_objtype_path:
source_is_visible = True
else:
# Non-scalar computable pointer. Check if path source is
# visible in the outer scope.
outer_fence = ctx.scope_tree.parent_branch
assert outer_fence is not None
source_is_visible = outer_fence.is_visible(ir_source.path_id)
if source_is_visible:
get_set_rvar(ir_source, ctx=ctx)
# Force a source rvar so that trivial computed pointers
# on erroneous objects (like a bad array deref) fail.
# (Most sensible computables will end up requiring the
# source rvar anyway.)
ensure_source_rvar(ir_source, ctx.rel, ctx=ctx)
else:
ir_source = None
source_is_visible = False
with ctx.new() as newctx:
inner_set = expr.result
outer_id = ir_set.path_id
inner_id = inner_set.path_id
semi_join = False
if ir_source is not None:
if ir_source.path_id != ctx.current_insert_path_id:
# This is a computable pointer. In order to ensure that
# the volatile functions in the pointer expression are called
# the necessary number of times, we must inject a
# "volatility reference" into function expressions.
# The volatility_ref is the identity of the pointer source.
# If the source is an insert that we are in the middle
# of doing, we don't have a volatility ref to add, so
# skip it based on the current_insert_path_id check.
path_id = ir_source.path_id
newctx.volatility_ref += (
lambda xctx: relctx.maybe_get_path_var(
stmt, path_id=path_id, aspect='identity',
ctx=xctx),)
if is_objtype_path and not source_is_visible:
# Non-scalar computable semi-join.
semi_join = True
# We need to compile the source and include it in,
# since we need to do the semi-join deduplication here
# on the outside, and not when the source is used in a
# path inside the computable.
# (See test_edgeql_scope_computables_09 for an example.)
with newctx.subrel() as _, _.newscope() as subctx:
get_set_rvar(ir_source, ctx=subctx)
subrvar = relctx.rvar_for_rel(subctx.rel, ctx=subctx)
# Force a source rvar. See above.
ensure_source_rvar(ir_source, subctx.rel, ctx=subctx)
relctx.include_rvar(
stmt, subrvar, ir_source.path_id, ctx=ctx)
# If we are looking at a materialized computable, running
# get_set_rvar on the source above may have made it show
# up. So try to lookup the rvar again, and if we find it,
# skip compiling the computable.
if ir_source and (new_rvar := _lookup_set_rvar(ir_set, ctx=ctx)):
if semi_join:
# We need to use DISTINCT, instead of doing an actual
# semi-join, unfortunately: we need to extract data
# out from stmt, which we can't do with a semi-join.
value_var = pathctx.get_rvar_path_var(
new_rvar, outer_id, aspect='value', env=ctx.env)
stmt.distinct_clause = (
pathctx.get_rvar_output_var_as_col_list(
subrvar, value_var, aspect='value', env=ctx.env))
return _new_subquery_stmt_set_rvar(ir_set, stmt, ctx=ctx)
if inner_id != outer_id:
pathctx.put_path_id_map(ctx.rel, outer_id, inner_id)
if (isinstance(ir_set.expr, irast.MutatingStmt)
and ir_set.expr in ctx.dml_stmts):
# The DML table-routing logic may result in the same
# DML subquery to be visited twice, such as in the case
# of a nested INSERT declaring link properties, so guard
# against generating a duplicate DML CTE.
with newctx.substmt() as subrelctx:
dml_cte = ctx.dml_stmts[ir_set.expr]
dml.wrap_dml_cte(ir_set.expr, dml_cte, ctx=subrelctx)
else:
dispatch.visit(ir_set.expr, ctx=newctx)
if semi_join:
set_rvar = relctx.new_root_rvar(ir_set, ctx=newctx)
tgt_ref = pathctx.get_rvar_path_identity_var(
set_rvar, ir_set.path_id, env=ctx.env)
pathctx.get_path_identity_output(
stmt, ir_set.path_id, env=ctx.env)
cond_expr = astutils.new_binop(tgt_ref, stmt, 'IN')
# Make a new stmt, join in the new root, and semi join on
# the original statement.
stmt = pgast.SelectStmt()
relctx.include_rvar(stmt, set_rvar, ir_set.path_id, ctx=ctx)
stmt.where_clause = astutils.extend_binop(
stmt.where_clause, cond_expr)
rvars = _new_subquery_stmt_set_rvar(ir_set, stmt, ctx=ctx)
# If the inner set also exposes a pointer path souce, we need to
# also expose a pointer path source. See tests like
# test_edgeql_select_linkprop_rebind_01
if pathctx.maybe_get_path_rvar(
stmt, inner_id.ptr_path(), aspect='source', env=ctx.env):
rvars.new.append(
SetRVar(
rvars.main.rvar,
outer_id.ptr_path(),
aspects=('source',),
)
)
return rvars
| def process_set_as_subquery(
ir_set: irast.Set, stmt: pgast.SelectStmt, *,
ctx: context.CompilerContextLevel) -> SetRVars:
is_objtype_path = ir_set.path_id.is_objtype_path()
expr = ir_set.expr
assert isinstance(expr, irast.Stmt)
ir_source: Optional[irast.Set]
if ir_set.rptr is not None:
ir_source = ir_set.rptr.source
if not is_objtype_path:
source_is_visible = True
else:
# Non-scalar computable pointer. Check if path source is
# visible in the outer scope.
outer_fence = ctx.scope_tree.parent_branch
assert outer_fence is not None
source_is_visible = outer_fence.is_visible(ir_source.path_id)
if source_is_visible:
get_set_rvar(ir_source, ctx=ctx)
# Force a source rvar so that trivial computed pointers
# on erroneous objects (like a bad array deref) fail.
# (Most sensible computables will end up requiring the
# source rvar anyway.)
ensure_source_rvar(ir_source, ctx.rel, ctx=ctx)
else:
ir_source = None
source_is_visible = False
with ctx.new() as newctx:
inner_set = expr.result
outer_id = ir_set.path_id
inner_id = inner_set.path_id
semi_join = False
if ir_source is not None:
if ir_source.path_id != ctx.current_insert_path_id:
# This is a computable pointer. In order to ensure that
# the volatile functions in the pointer expression are called
# the necessary number of times, we must inject a
# "volatility reference" into function expressions.
# The volatility_ref is the identity of the pointer source.
# If the source is an insert that we are in the middle
# of doing, we don't have a volatility ref to add, so
# skip it based on the current_insert_path_id check.
path_id = ir_source.path_id
newctx.volatility_ref += (
lambda xctx: relctx.maybe_get_path_var(
stmt, path_id=path_id, aspect='identity',
ctx=xctx),)
if is_objtype_path and not source_is_visible:
# Non-scalar computable semi-join.
semi_join = True
# We need to compile the source and include it in,
# since we need to do the semi-join deduplication here
# on the outside, and not when the source is used in a
# path inside the computable.
# (See test_edgeql_scope_computables_09 for an example.)
with newctx.subrel() as _, _.newscope() as subctx:
get_set_rvar(ir_source, ctx=subctx)
subrvar = relctx.rvar_for_rel(subctx.rel, ctx=subctx)
# Force a source rvar. See above.
ensure_source_rvar(ir_source, subctx.rel, ctx=subctx)
relctx.include_rvar(
stmt, subrvar, ir_source.path_id, ctx=ctx)
# If we are looking at a materialized computable, running
# get_set_rvar on the source above may have made it show
# up. So try to lookup the rvar again, and if we find it,
# skip compiling the computable.
if ir_source and (new_rvar := _lookup_set_rvar(ir_set, ctx=ctx)):
if semi_join:
# We need to use DISTINCT, instead of doing an actual
# semi-join, unfortunately: we need to extract data
# out from stmt, which we can't do with a semi-join.
value_var = pathctx.get_rvar_path_var(
new_rvar, outer_id, aspect='value', env=ctx.env)
stmt.distinct_clause = (
pathctx.get_rvar_output_var_as_col_list(
subrvar, value_var, aspect='value', env=ctx.env))
return _new_subquery_stmt_set_rvar(ir_set, stmt, ctx=ctx)
if inner_id != outer_id:
pathctx.put_path_id_map(ctx.rel, outer_id, inner_id)
if (isinstance(ir_set.expr, irast.MutatingStmt)
and ir_set.expr in ctx.dml_stmts):
# The DML table-routing logic may result in the same
# DML subquery to be visited twice, such as in the case
# of a nested INSERT declaring link properties, so guard
# against generating a duplicate DML CTE.
with newctx.substmt() as subrelctx:
dml_cte = ctx.dml_stmts[ir_set.expr]
dml.wrap_dml_cte(ir_set.expr, dml_cte, ctx=subrelctx)
else:
dispatch.visit(ir_set.expr, ctx=newctx)
if semi_join:
set_rvar = relctx.new_root_rvar(ir_set, ctx=newctx)
tgt_ref = pathctx.get_rvar_path_identity_var(
set_rvar, ir_set.path_id, env=ctx.env)
pathctx.get_path_identity_output(
stmt, ir_set.path_id, env=ctx.env)
cond_expr = astutils.new_binop(tgt_ref, stmt, 'IN')
# Make a new stmt, join in the new root, and semi join on
# the original statement.
stmt = pgast.SelectStmt()
relctx.include_rvar(stmt, set_rvar, ir_set.path_id, ctx=ctx)
stmt.where_clause = astutils.extend_binop(
stmt.where_clause, cond_expr)
rvars = _new_subquery_stmt_set_rvar(ir_set, stmt, ctx=ctx)
# If the inner set also exposes a pointer path source, we need to
# also expose a pointer path source. See tests like
# test_edgeql_select_linkprop_rebind_01
if pathctx.maybe_get_path_rvar(
stmt, inner_id.ptr_path(), aspect='source', env=ctx.env):
rvars.new.append(
SetRVar(
rvars.main.rvar,
outer_id.ptr_path(),
aspects=('source',),
)
)
return rvars
|
30,292 | def get_indicator_severity(indicator):
"""
Extracts and returns severity value from indicator's nested key.
In case the severity value was not found in indicator dictionary,
the severity value will be low.
"""
try:
severity = indicator['meta']['severity']
except KeyError:
severity = 'low'
finally:
return severity
| def get_indicator_severity(indicator):
"""
Extracts and returns severity value from indicator's nested key.
In case the severity value was not found in indicator dictionary,
the severity value will be low.
"""
try:
return indicator['meta'].get('severity', 'low')
except KeyError:
severity = 'low'
finally:
return severity
|
2,820 | def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between X and Y.
Distances are calculated between (X[0], Y[0]), (X[1], Y[1]), ... .
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
An array where each row is a sample and each column is a feature.
Y : array-like of shape (n_samples, n_features)
An array where each row is a sample and each column is a feature.
Returns
-------
distances : ndarray of shape (n_samples,)
L1 distances between the row vectors of `X` and the row vectors
of `Y`.
Examples
--------
>>> from sklearn.metrics.pairwise import paired_manhattan_distances
>>> import numpy as np
>>> X = np.array([[1, 1, 0], [0, 1, 0], [0, 0, 1]])
>>> Y = np.eye(3, k=1)
>>> paired_manhattan_distances(X, Y)
array([1., 2., 1.])
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
| def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between X and Y.
Distances are calculated between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
An array where each row is a sample and each column is a feature.
Y : array-like of shape (n_samples, n_features)
An array where each row is a sample and each column is a feature.
Returns
-------
distances : ndarray of shape (n_samples,)
L1 distances between the row vectors of `X` and the row vectors
of `Y`.
Examples
--------
>>> from sklearn.metrics.pairwise import paired_manhattan_distances
>>> import numpy as np
>>> X = np.array([[1, 1, 0], [0, 1, 0], [0, 0, 1]])
>>> Y = np.eye(3, k=1)
>>> paired_manhattan_distances(X, Y)
array([1., 2., 1.])
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
|
42,865 | def bloch_messiah(S, tol=10):
r""" Performs the Bloch-Messiah decomposition of a symplectic matrix in terms of
two symplectic unitaries and squeezing transformation.
It automatically sorts the squeezers so that they respect the canonical symplectic form.
Note that it is assumed that the symplectic form is
..math:: \Omega = \begin{bmatrix}0&I\\-I&0\end{bmatrix}
where :math:`I` is the identity matrix and :math:`0` is the zero matrix.
For more info see:
https://math.stackexchange.com/questions/1886038/finding-euler-decomposition-of-a-symplectic-matrix
Args:
S (array): A symplectic matrix S
tol (int): the number of decimal places to use when determining if the matrix is symplectic
Returns:
tuple[array]: Returns the tuple ``(ut1, st1, vt1)``. ``ut1`` and ``vt1`` are symplectic unitaries,
and ``st1`` is diagonal and of the form :math:`= \text{diag}(s1,\dots,s_n, 1/s_1,\dots,1/s_n)`
such that :math:`S = ut1 st1 v1`
"""
(n, m) = S.shape
if n != m:
raise ValueError("The input matrix is not square")
if n%2 != 0:
raise ValueError("The input matrix must have an even number of rows/columns")
n = n//2
omega = sympmat(n)
if np.round(np.linalg.norm(np.transpose(S) @ omega @ S - omega), tol) != 0.0:
raise ValueError("The input matrix is not symplectic")
u, sigma = polar(S, side='left')
ss, uss = takagi(sigma)
## Apply a permutation matrix so that the squeezers appear in the order
## s_1,...,s_n, 1/s_1,...1/s_n
perm = np.array(list(range(0, n)) + list(reversed(range(n, 2*n))))
pmat = np.identity(2*n)[perm, :]
ut = uss @ pmat
## Apply a second permutation matrix to permute s
## (and their corresonding inverses) to get the canonical symplectic form
qomega = np.transpose(ut) @ (omega) @ ut
st = pmat @ np.diag(ss) @ pmat
# Identifying degenrate subspaces
result = []
for _k, g in groupby(np.diag(st)[:n]):
result.append(list(g))
stop_is = list(np.cumsum([len(res) for res in result]))
start_is = [0] + stop_is[:-1]
# Rotation matrices (not permutations) based on svd.
# See Appending B2 of Serafini's book for more details.
u_list, v_list = [], []
for start_i, stop_i in zip(start_is, stop_is):
x = qomega[start_i: stop_i, n + start_i: n + stop_i].real
u_svd, _s_svd, v_svd = np.linalg.svd(x)
u_list = u_list + [u_svd]
v_list = v_list + [v_svd.T]
pmat1 = block_diag(*(u_list + v_list))
st1 = pmat1.T @ pmat @ np.diag(ss) @ pmat @ pmat1
ut1 = uss @ pmat @ pmat1
v1 = np.transpose(ut1) @ u
return ut1, st1, v1
| def bloch_messiah(S, tol=10):
r""" Performs the Bloch-Messiah decomposition of a symplectic matrix in terms of
two symplectic unitaries and squeezing transformation.
It automatically sorts the squeezers so that they respect the canonical symplectic form.
Note that it is assumed that the symplectic form is
..math:: \Omega = \begin{bmatrix}0&I\\-I&0\end{bmatrix}
where :math:`I` is the identity matrix and :math:`0` is the zero matrix.
For more info see:
https://math.stackexchange.com/questions/1886038/finding-euler-decomposition-of-a-symplectic-matrix
Args:
S (array): A symplectic matrix S
tol (int): the number of decimal places to use when determining if the matrix is symplectic
Returns:
tuple[array]: Returns the tuple ``(ut1, st1, vt1)``. ``ut1`` and ``vt1`` are symplectic unitaries,
and ``st1`` is diagonal and of the form :math:`= \text{diag}(s1,\dots,s_n, 1/s_1,\dots,1/s_n)`
such that :math:`S = ut1 st1 v1`
"""
(n, m) = S.shape
if n != m:
raise ValueError("The input matrix is not square")
if n%2 != 0:
raise ValueError("The input matrix must have an even number of rows/columns")
n = n//2
omega = sympmat(n)
if np.round(np.linalg.norm(np.transpose(S) @ omega @ S - omega), tol) != 0.0:
raise ValueError("The input matrix is not symplectic")
u, sigma = polar(S, side='left')
ss, uss = takagi(sigma)
## Apply a permutation matrix so that the squeezers appear in the order
## s_1,...,s_n, 1/s_1,...1/s_n
perm = np.array(list(range(0, n)) + list(reversed(range(n, 2*n))))
pmat = np.identity(2*n)[perm, :]
ut = uss @ pmat
## Apply a second permutation matrix to permute s
## (and their corresonding inverses) to get the canonical symplectic form
qomega = np.transpose(ut) @ (omega) @ ut
st = pmat @ np.diag(ss) @ pmat
# Identifying degenrate subspaces
result = []
for _k, g in groupby(np.diag(st)[:n]):
result.append(list(g))
stop_is = list(np.cumsum([len(res) for res in result]))
start_is = [0] + stop_is[:-1]
# Rotation matrices (not permutations) based on svd.
# See Appendix B2 of Serafini's book for more details.
u_list, v_list = [], []
for start_i, stop_i in zip(start_is, stop_is):
x = qomega[start_i: stop_i, n + start_i: n + stop_i].real
u_svd, _s_svd, v_svd = np.linalg.svd(x)
u_list = u_list + [u_svd]
v_list = v_list + [v_svd.T]
pmat1 = block_diag(*(u_list + v_list))
st1 = pmat1.T @ pmat @ np.diag(ss) @ pmat @ pmat1
ut1 = uss @ pmat @ pmat1
v1 = np.transpose(ut1) @ u
return ut1, st1, v1
|
2,604 | def setup_package():
# TODO: Require Python 3.8 for PyPy when PyPy3.8 is ready
# https://github.com/conda-forge/conda-forge-pinning-feedstock/issues/2089
if platform.python_implementation() == "PyPy":
python_requires = ">=3.7"
required_python_version = (3, 7)
else:
python_requires = ">=3.8"
required_python_version = (3, 8)
metadata = dict(
name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL,
project_urls=PROJECT_URLS,
version=VERSION,
long_description=LONG_DESCRIPTION,
classifiers=[
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved",
"Programming Language :: C",
"Programming Language :: Python",
"Topic :: Software Development",
"Topic :: Scientific/Engineering",
"Development Status :: 5 - Production/Stable",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
cmdclass=cmdclass,
python_requires=python_requires,
install_requires=min_deps.tag_to_packages["install"],
package_data={"": ["*.pxd"]},
**extra_setuptools_args,
)
commands = [arg for arg in sys.argv[1:] if not arg.startswith("-")]
if all(
command in ("egg_info", "dist_info", "clean", "check") for command in commands
):
# These actions are required to succeed without Numpy for example when
# pip is used to install Scikit-learn when Numpy is not yet present in
# the system.
# These commands use setup from setuptools
from setuptools import setup
metadata["version"] = VERSION
else:
if sys.version_info < required_python_version:
required_version = "%d.%d" % required_python_version
raise RuntimeError(
"Scikit-learn requires Python %s or later. The current"
" Python version is %s installed in %s."
% (required_version, platform.python_version(), sys.executable)
)
check_package_status("numpy", min_deps.NUMPY_MIN_VERSION)
check_package_status("scipy", min_deps.SCIPY_MIN_VERSION)
# These commands require the setup from numpy.distutils because they
# may use numpy.distutils compiler classes.
from numpy.distutils.core import setup
# Monkeypatchs CCompiler.spawn to prevent random wheel build errors on Windows
# The build errors on Windows was because msvccompiler spawn was not threadsafe
# https://github.com/pypa/distutils/issues/5
# https://github.com/scikit-learn/scikit-learn/issues/22310
# https://github.com/numpy/numpy/pull/20640
from numpy.distutils.ccompiler import replace_method
from distutils.ccompiler import CCompiler
from sklearn.externals._numpy_complier_patch import CCompiler_spawn # noqa
replace_method(CCompiler, "spawn", CCompiler_spawn)
metadata["configuration"] = configuration
setup(**metadata)
| def setup_package():
# TODO: Require Python 3.8 for PyPy when PyPy3.8 is ready
# https://github.com/conda-forge/conda-forge-pinning-feedstock/issues/2089
if platform.python_implementation() == "PyPy":
python_requires = ">=3.7"
required_python_version = (3, 7)
else:
python_requires = ">=3.8"
required_python_version = (3, 8)
metadata = dict(
name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
download_url=DOWNLOAD_URL,
project_urls=PROJECT_URLS,
version=VERSION,
long_description=LONG_DESCRIPTION,
classifiers=[
"Intended Audience :: Science/Research",
"Intended Audience :: Developers",
"License :: OSI Approved",
"Programming Language :: C",
"Programming Language :: Python",
"Topic :: Software Development",
"Topic :: Scientific/Engineering",
"Development Status :: 5 - Production/Stable",
"Operating System :: Microsoft :: Windows",
"Operating System :: POSIX",
"Operating System :: Unix",
"Operating System :: MacOS",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
],
cmdclass=cmdclass,
python_requires=python_requires,
install_requires=min_deps.tag_to_packages["install"],
package_data={"": ["*.pxd"]},
**extra_setuptools_args,
)
commands = [arg for arg in sys.argv[1:] if not arg.startswith("-")]
if all(
command in ("egg_info", "dist_info", "clean", "check") for command in commands
):
# These actions are required to succeed without Numpy for example when
# pip is used to install Scikit-learn when Numpy is not yet present in
# the system.
# These commands use setup from setuptools
from setuptools import setup
metadata["version"] = VERSION
else:
if sys.version_info < required_python_version:
required_version = "%d.%d" % required_python_version
raise RuntimeError(
"Scikit-learn requires Python %s or later. The current"
" Python version is %s installed in %s."
% (required_version, platform.python_version(), sys.executable)
)
check_package_status("numpy", min_deps.NUMPY_MIN_VERSION)
check_package_status("scipy", min_deps.SCIPY_MIN_VERSION)
# These commands require the setup from numpy.distutils because they
# may use numpy.distutils compiler classes.
from numpy.distutils.core import setup
# Monkeypatchs CCompiler.spawn to prevent random wheel build errors on Windows
# The build errors on Windows was because msvccompiler spawn was not threadsafe
# https://github.com/pypa/distutils/issues/5
# https://github.com/scikit-learn/scikit-learn/issues/22310
# https://github.com/numpy/numpy/pull/20640
from numpy.distutils.ccompiler import replace_method
from distutils.ccompiler import CCompiler
from sklearn.externals._numpy_compiler_patch import CCompiler_spawn # noqa
replace_method(CCompiler, "spawn", CCompiler_spawn)
metadata["configuration"] = configuration
setup(**metadata)
|
5,648 | def test_ncx2_gh12731():
# test that gh-12731 is resolved; previously these were all 0.5
nc = 10**np.arange(5,10)
assert_allclose(stats.ncx2.cdf(1e4, df=1, nc=nc), 0)
| def test_ncx2_gh12731():
# test that gh-12731 is resolved; previously these were all 0.5
nc = 10**np.arange(5,10)
assert_equal(stats.ncx2.cdf(1e4, df=1, nc=nc), 0)
|
46,141 | def modify_document(self, doc):
from bokeh.io.doc import set_curdoc as bk_set_curdoc
from ..config import config
if config.autoreload:
path = self._runner.path
argv = self._runner._argv
handler = type(self)(filename=path, argv=argv)
self._runner = handler._runner
module = self._runner.new_module()
# If no module was returned it means the code runner has some permanent
# unfixable problem, e.g. the configured source code has a syntax error
if module is None:
return
# One reason modules are stored is to prevent the module
# from being gc'd before the document is. A symptom of a
# gc'd module is that its globals become None. Additionally
# stored modules are used to provide correct paths to
# custom models resolver.
sys.modules[module.__name__] = module
doc._modules.append(module)
old_doc = curdoc()
bk_set_curdoc(doc)
old_io = self._monkeypatch_io()
if config.autoreload:
set_curdoc(doc)
state.onload(autoreload_watcher)
try:
def post_check():
newdoc = curdoc()
# script is supposed to edit the doc not replace it
if newdoc is not doc:
raise RuntimeError("%s at '%s' replaced the output document" % (self._origin, self._runner.path))
self._runner.run(module, post_check)
finally:
self._unmonkeypatch_io(old_io)
set_curdoc(old_doc)
| def modify_document(self, doc):
from bokeh.io.doc import set_curdoc as bk_set_curdoc
from ..config import config
if config.autoreload:
path = self._runner.path
argv = self._runner._argv
handler = type(self)(filename=path, argv=argv)
self._runner = handler._runner
module = self._runner.new_module()
# If no module was returned it means the code runner has some permanent
# unfixable problem, e.g. the configured source code has a syntax error
if module is None:
return
# One reason modules are stored is to prevent the module
# from being gc'd before the document is. A symptom of a
# gc'd module is that its globals become None. Additionally
# stored modules are used to provide correct paths to
# custom models resolver.
sys.modules[module.__name__] = module
doc._modules.append(module)
old_doc = curdoc()
bk_set_curdoc(doc)
old_io = self._monkeypatch_io()
if config.autoreload:
set_curdoc(doc)
state.onload(autoreload_watcher)
try:
def post_check():
newdoc = curdoc()
# script is supposed to edit the doc not replace it
if newdoc is not doc:
raise RuntimeError("%s at '%s' replaced the output document" % (self._origin, self._runner.path))
self._runner.run(module, post_check)
finally:
self._unmonkeypatch_io(old_io)
bk_set_curdoc(old_doc)
|
1,699 | def _set_check_estimator_ids(obj):
"""Create pytest ids for checks.
When `obj` is an estimator, this returns the pprint version of the
estimator (with `print_changed_only=True`). When `obj` is a function, the
name of the function is returned with its keyworld arguments.
`_set_check_estimator_ids` is designed to be used as the `id` in
`pytest.mark.parametrize` where `check_estimator(..., generate_only=True)`
is yielding estimators and checks.
Parameters
----------
obj : estimator or function
Items generated by `check_estimator`
Returns
-------
id : string or None
See also
--------
check_estimator
"""
if callable(obj):
if not isinstance(obj, partial):
return obj.__name__
if not obj.keywords:
return obj.func.__name__
kwstring = ",".join(["{}={}".format(k, v)
for k, v in obj.keywords.items()])
return "{}({})".format(obj.func.__name__, kwstring)
if hasattr(obj, "get_params"):
with config_context(print_changed_only=True):
return re.sub(r"\s", "", str(obj))
| def _set_check_estimator_ids(obj):
"""Create pytest ids for checks.
When `obj` is an estimator, this returns the pprint version of the
estimator (with `print_changed_only=True`). When `obj` is a function, the
name of the function is returned with its keyworld arguments.
`_set_check_estimator_ids` is designed to be used as the `id` in
`pytest.mark.parametrize` where `check_estimator(..., generate_only=True)`
is yielding estimators and checks.
Parameters
----------
obj : estimator or function
Items generated by `check_estimator`
Returns
-------
id : string or None
See also
--------
check_estimator
"""
if callable(obj):
if not isinstance(obj, partial):
return obj.__name__
if not obj.keywords:
return obj.func.__name__
kwstring = ", ".join(["{}={}".format(k, v)
for k, v in obj.keywords.items()])
return "{}({})".format(obj.func.__name__, kwstring)
if hasattr(obj, "get_params"):
with config_context(print_changed_only=True):
return re.sub(r"\s", "", str(obj))
|
35,292 | def tensor_train_OI(data_tensor, rank, n_iter = 1, trajectory = False, return_errors = True, **context):
""" Perform tensor-train orthogonal iteration (TTOI) for tensor train decomposition
Reference paper: Zhou Y, Zhang AR, Zheng L, Wang Y. "Optimal high-order tensor svd via tensor-train orthogonal iteration."
Parameters
----------
data_tensor: tl.tensor
observed tensor data
rank : tuple
rank of the TT decomposition
must verify rank[0] == rank[-1] == 1 (boundary conditions)
and len(rank) == len(tl.shape(data_tensor))+1
n_iter : int
half the number of iterations
trajectory : bool, optional, default is False
if True, the output of each iteration of TTOI is returned: 2*n_iter outputs
otherwise, only the output of the last iteration is returned
return_errors : bool, optional, default is True
if True, the approximation/reconstruction error of each iteration of TTOI is returned: 2*n_iter outputs
Returns
-------
factors : list of n_iter tensors or one tensor
* n_iter tensors (if `trajectory` is True) : each list contains the output of each iteration, one full tensor and list of tensor factors
* one tensor (otherwise): output of the last iteration, one full tensor and list of tensor factors
full_tensor : list of n_iter tensors or one tensor
* n_iter tensors (if `trajectory` is True) : each list contains the output of each iteration, one full tensor and list of tensor factoros
* one tensor (otherwise): output of the last iteration, one full tensor and list of tensor factors
"""
context = tl.context(data_tensor)
shape = tl.shape(data_tensor)
n_dim = len(shape)
rank = validate_tt_rank(shape, rank)
# Make sure it's not a tuple but a list
rank = list(rank)
# Add two one-dimensional mode to data_tensor
data_tensor_extended = tl.reshape(data_tensor,(1, ) + shape + (1, ))
if trajectory:
factors = list()
full_tensor = list()
if return_errors:
error_list = list()
# perform TTOI for n_iter iterations
for n in range(n_iter):
# first perform forward update
# U_arr will be a list including estimated left singular spaces at the current iteration
U_arr = list()
# initialize R_tilde_arr (sequential unfolding of data_tensor multiplied by U_arr sequentially on the left, useful for backward update to obtain V_arr)
R_tilde_arr = list()
# estimate the first left singular spaces
# Here, R_tmp is the first sequential unfolding compressed on the right by previous updated V_arr (if exists)
R_tmp_l = data_tensor_extended
if n == 0:
R_tmp = R_tmp_l
else:
R_tmp = sequential_prod(R_tmp_l,V_arr,"right")
U_tmp = tl.partial_svd(tl.reshape(R_tmp,(shape[0],-1)),rank[1])[0]
U_arr.append(tl.reshape(U_tmp,(rank[0],shape[0],rank[1])))
# estimate the 2nd to (d-1)th left singular spaces
for k in range(n_dim-2):
# compress the (k+2)th sequential unfolding of data_tensor from the left
R_tmp_l = sequential_prod(R_tmp_l,[U_arr[k]],"left")
# R_tmp_l will be useful for backward update
R_tilde_arr.append(R_tmp_l)
# compress the (k+2)th sequential unfolding of data_tensor from the right (if n>0)
if n == 0:
R_tmp = R_tmp_l
else:
R_tmp = sequential_prod(R_tmp_l,V_arr[0:(n_dim-k-2)],"right")
U_tmp = tl.partial_svd(tl.reshape(R_tmp,(rank[k+1]*shape[k+1],-1)),rank[k+2])[0]
U_arr.append(tl.reshape(U_tmp,(rank[k+1],shape[k+1],rank[k+2])))
# forward update is done; output the final residual
R_tilde_arr.append(sequential_prod(R_tmp_l,[U_arr[n_dim-2]],"left"))
if trajectory or return_errors:
factors_list_tmp = list()
for k in range(n_dim-1):
factors_list_tmp.append(tl.tensor(U_arr[k],**context))
factors_list_tmp.append(tl.tensor(R_tilde_arr[n_dim-2],**context))
full_tensor_tmp = tl.tensor(tt_to_tensor(factors_list_tmp),**context)
if return_errors:
error_list.append(tl.norm(full_tensor_tmp-data_tensor,2))
if trajectory:
factors.append(factors_list_tmp)
full_tensor.append(full_tensor_tmp)
# perform backward update
# initialize V_arr: V_arr will be a list of estimated right singular spaces at the current or previous iteration
V_arr = list()
V_tmp = tl.transpose(tl.partial_svd(tl.reshape(R_tilde_arr[n_dim-2],(rank[n_dim-1],shape[n_dim-1])),rank[n_dim-1])[2])
V_arr.append(tl.reshape(V_tmp,(rank[n_dim],shape[n_dim-1],rank[n_dim-1])))
# estimate the 2nd to (d-1)th right singular spaces
for k in range(n_dim-2):
# compress R_tilde_arr from the right
R_tmp_r = sequential_prod(R_tilde_arr[n_dim-k-3],V_arr[0:(k+1)],"right")
V_tmp = tl.transpose(tl.partial_svd(tl.reshape(R_tmp_r,(rank[n_dim-k-2],shape[n_dim-k-2]*rank[n_dim-k-1])),rank[n_dim-k-2])[2])
V_arr.append(tl.reshape(V_tmp,(rank[n_dim-k-1],shape[n_dim-k-2],rank[n_dim-k-2])))
Residual_right = sequential_prod(data_tensor_extended,V_arr,"right")
if trajectory or return_errors or n==n_iter-1:
factors_list_tmp = list()
factors_list_tmp.append(tl.tensor(Residual_right,**context))
for k in range(n_dim-1):
factors_list_tmp.append(tl.tensor(tl.transpose(V_arr[n_dim-k-2]),**context))
full_tensor_tmp = tl.tensor(tt_to_tensor(factors_list_tmp),**context)
if return_errors:
error_list.append(tl.norm(full_tensor_tmp-data_tensor,2))
if trajectory:
factors.append(factors_list_tmp)
full_tensor.append(full_tensor_tmp)
if n == n_iter-1:
factors = factors_list_tmp
full_tensor = full_tensor_tmp
# return final results
if return_errors:
return factors, full_tensor, error_list
else:
return factors, full_tensor
| def tensor_train_OI(data_tensor, rank, n_iter = 1, trajectory = False, return_errors = True, **context):
""" Perform tensor-train orthogonal iteration (TTOI) for tensor train decomposition
Reference paper: Zhou Y, Zhang AR, Zheng L, Wang Y. "Optimal high-order tensor svd via tensor-train orthogonal iteration."
Parameters
----------
data_tensor: tl.tensor
observed tensor data
rank : tuple
rank of the TT decomposition
must verify rank[0] == rank[-1] == 1 (boundary conditions)
and len(rank) == len(tl.shape(data_tensor))+1
n_iter : int
half the number of iterations
trajectory : bool, optional, default is False
if True, the output of each iteration of TTOI is returned: 2*n_iter outputs
otherwise, only the output of the last iteration is returned
return_errors : bool, optional, default is True
if True, the approximation/reconstruction error of each iteration of TTOI is returned: 2*n_iter outputs
Returns
-------
factors : list of n_iter tensors or one tensor
* n_iter tensors (if `trajectory` is True) : each list contains the output of each iteration, one full tensor and list of tensor factors
* one tensor (otherwise): output of the last iteration, one full tensor and list of tensor factors
full_tensor : list of n_iter tensors or one tensor
* n_iter tensors (if `trajectory` is True) : each list contains the output of each iteration, one full tensor and list of tensor factoros
* one tensor (otherwise): output of the last iteration, one full tensor and list of tensor factors
"""
context = tl.context(data_tensor)
shape = tl.shape(data_tensor)
n_dim = len(shape)
rank = validate_tt_rank(shape, rank)
# Make sure it's not a tuple but a list
rank = list(rank)
# Add two one-dimensional mode to data_tensor
data_tensor_extended = tl.reshape(data_tensor,(1, ) + shape + (1, ))
if trajectory:
factors = list()
full_tensor = list()
if return_errors:
error_list = list()
# perform TTOI for n_iter iterations
for n in range(n_iter):
# first perform forward update
# U_arr will be a list including estimated left singular spaces at the current iteration
U_arr = list()
# initialize R_tilde_arr (sequential unfolding of data_tensor multiplied by U_arr sequentially on the left, useful for backward update to obtain V_arr)
R_tilde_arr = list()
# estimate the first left singular spaces
# Here, R_tmp is the first sequential unfolding compressed on the right by previous updated V_arr (if exists)
R_tmp_l = data_tensor_extended
if n == 0:
R_tmp = R_tmp_l
else:
R_tmp = sequential_prod(R_tmp_l,V_arr,"right")
U_tmp = tl.partial_svd(tl.reshape(R_tmp,(shape[0],-1)),rank[1])[0]
U_arr.append(tl.reshape(U_tmp,(rank[0],shape[0],rank[1])))
# estimate the 2nd to (d-1)th left singular spaces
for mode in range(n_dim-2):
# compress the (k+2)th sequential unfolding of data_tensor from the left
R_tmp_l = sequential_prod(R_tmp_l,[U_arr[k]],"left")
# R_tmp_l will be useful for backward update
R_tilde_arr.append(R_tmp_l)
# compress the (k+2)th sequential unfolding of data_tensor from the right (if n>0)
if n == 0:
R_tmp = R_tmp_l
else:
R_tmp = sequential_prod(R_tmp_l,V_arr[0:(n_dim-k-2)],"right")
U_tmp = tl.partial_svd(tl.reshape(R_tmp,(rank[k+1]*shape[k+1],-1)),rank[k+2])[0]
U_arr.append(tl.reshape(U_tmp,(rank[k+1],shape[k+1],rank[k+2])))
# forward update is done; output the final residual
R_tilde_arr.append(sequential_prod(R_tmp_l,[U_arr[n_dim-2]],"left"))
if trajectory or return_errors:
factors_list_tmp = list()
for k in range(n_dim-1):
factors_list_tmp.append(tl.tensor(U_arr[k],**context))
factors_list_tmp.append(tl.tensor(R_tilde_arr[n_dim-2],**context))
full_tensor_tmp = tl.tensor(tt_to_tensor(factors_list_tmp),**context)
if return_errors:
error_list.append(tl.norm(full_tensor_tmp-data_tensor,2))
if trajectory:
factors.append(factors_list_tmp)
full_tensor.append(full_tensor_tmp)
# perform backward update
# initialize V_arr: V_arr will be a list of estimated right singular spaces at the current or previous iteration
V_arr = list()
V_tmp = tl.transpose(tl.partial_svd(tl.reshape(R_tilde_arr[n_dim-2],(rank[n_dim-1],shape[n_dim-1])),rank[n_dim-1])[2])
V_arr.append(tl.reshape(V_tmp,(rank[n_dim],shape[n_dim-1],rank[n_dim-1])))
# estimate the 2nd to (d-1)th right singular spaces
for k in range(n_dim-2):
# compress R_tilde_arr from the right
R_tmp_r = sequential_prod(R_tilde_arr[n_dim-k-3],V_arr[0:(k+1)],"right")
V_tmp = tl.transpose(tl.partial_svd(tl.reshape(R_tmp_r,(rank[n_dim-k-2],shape[n_dim-k-2]*rank[n_dim-k-1])),rank[n_dim-k-2])[2])
V_arr.append(tl.reshape(V_tmp,(rank[n_dim-k-1],shape[n_dim-k-2],rank[n_dim-k-2])))
Residual_right = sequential_prod(data_tensor_extended,V_arr,"right")
if trajectory or return_errors or n==n_iter-1:
factors_list_tmp = list()
factors_list_tmp.append(tl.tensor(Residual_right,**context))
for k in range(n_dim-1):
factors_list_tmp.append(tl.tensor(tl.transpose(V_arr[n_dim-k-2]),**context))
full_tensor_tmp = tl.tensor(tt_to_tensor(factors_list_tmp),**context)
if return_errors:
error_list.append(tl.norm(full_tensor_tmp-data_tensor,2))
if trajectory:
factors.append(factors_list_tmp)
full_tensor.append(full_tensor_tmp)
if n == n_iter-1:
factors = factors_list_tmp
full_tensor = full_tensor_tmp
# return final results
if return_errors:
return factors, full_tensor, error_list
else:
return factors, full_tensor
|
43,673 | def _qubit_operator_to_terms(qubit_operator, wires=None):
r"""Converts OpenFermion ``QubitOperator`` to a 2-tuple of coefficients and
PennyLane Pauli observables.
**Example usage:**
>>> q_op = 0.1*QubitOperator('X0') + 0.2*QubitOperator('Y0 Z2')
>>> q_op
0.1 [X0] +
0.2 [Y0 Z2]
>>> _qubit_operator_to_terms(q_op, wires=['w0','w1','w2','extra_wire'])
(array([0.1, 0.2]), [Tensor(PauliX(wires=['w0'])), Tensor(PauliY(wires=['w0']), PauliZ(wires=['w2']))])
Args:
qubit_operator (QubitOperator): Fermionic-to-qubit transformed operator in terms of
Pauli matrices
wires (Wires, list, tuple, dict): Custom wire mapping for connecting to Pennylane ansatz.
For types Wires/list/tuple, each item in the iterable represents a wire label
corresponding to the qubit number equal to its index.
For type dict, only int-keyed dict (for qubit-to-wire conversion) is accepted.
If None, will use identiy map. Defaults to None.
Returns:
tuple[array[float], Iterable[pennylane.operation.Observable]]: coefficients and their
corresponding PennyLane observables in the Pauli basis
"""
n_wires = (
1 + max([max([i for i, _ in t]) if t else 1 for t in qubit_operator.terms])
if qubit_operator.terms
else 1
)
wires = _proc_wires(wires, n_wires=n_wires)
if not qubit_operator.terms: # added since can't unpack empty zip to (coeffs, ops) below
return np.array([0.0]), [qml.operation.Tensor(qml.Identity(wires[0]))]
xyz2pauli = {"X": qml.PauliX, "Y": qml.PauliY, "Z": qml.PauliZ}
coeffs, ops = zip(
*[
(
coef,
qml.operation.Tensor(*[xyz2pauli[q[1]](wires=wires[q[0]]) for q in term])
if term
else qml.operation.Tensor(qml.Identity(wires[0]))
# example term: ((0,'X'), (2,'Z'), (3,'Y'))
)
for term, coef in qubit_operator.terms.items()
]
)
return np.real(np.array(coeffs)), list(ops)
| def _qubit_operator_to_terms(qubit_operator, wires=None):
r"""Converts OpenFermion ``QubitOperator`` to a 2-tuple of coefficients and
PennyLane Pauli observables.
**Example**
>>> q_op = 0.1*QubitOperator('X0') + 0.2*QubitOperator('Y0 Z2')
>>> q_op
0.1 [X0] +
0.2 [Y0 Z2]
>>> _qubit_operator_to_terms(q_op, wires=['w0','w1','w2','extra_wire'])
(array([0.1, 0.2]), [Tensor(PauliX(wires=['w0'])), Tensor(PauliY(wires=['w0']), PauliZ(wires=['w2']))])
Args:
qubit_operator (QubitOperator): Fermionic-to-qubit transformed operator in terms of
Pauli matrices
wires (Wires, list, tuple, dict): Custom wire mapping for connecting to Pennylane ansatz.
For types Wires/list/tuple, each item in the iterable represents a wire label
corresponding to the qubit number equal to its index.
For type dict, only int-keyed dict (for qubit-to-wire conversion) is accepted.
If None, will use identiy map. Defaults to None.
Returns:
tuple[array[float], Iterable[pennylane.operation.Observable]]: coefficients and their
corresponding PennyLane observables in the Pauli basis
"""
n_wires = (
1 + max([max([i for i, _ in t]) if t else 1 for t in qubit_operator.terms])
if qubit_operator.terms
else 1
)
wires = _proc_wires(wires, n_wires=n_wires)
if not qubit_operator.terms: # added since can't unpack empty zip to (coeffs, ops) below
return np.array([0.0]), [qml.operation.Tensor(qml.Identity(wires[0]))]
xyz2pauli = {"X": qml.PauliX, "Y": qml.PauliY, "Z": qml.PauliZ}
coeffs, ops = zip(
*[
(
coef,
qml.operation.Tensor(*[xyz2pauli[q[1]](wires=wires[q[0]]) for q in term])
if term
else qml.operation.Tensor(qml.Identity(wires[0]))
# example term: ((0,'X'), (2,'Z'), (3,'Y'))
)
for term, coef in qubit_operator.terms.items()
]
)
return np.real(np.array(coeffs)), list(ops)
|
2,607 | def ndcg_score(y_true, y_score, *, k=None, sample_weight=None, ignore_ties=False):
"""Compute Normalized Discounted Cumulative Gain.
Sum the true scores ranked in the order induced by the predicted scores,
after applying a logarithmic discount. Then divide by the best possible
score (Ideal DCG, obtained for a perfect ranking) to obtain a score between
0 and 1.
This ranking metric returns a high value if true labels are ranked high by
``y_score``.
Parameters
----------
y_true : ndarray of shape (n_samples, n_labels)
True targets of multilabel classification, or true scores of entities
to be ranked. Negative values in y_true may result in an output
that is not between 0 and 1. These negative values are deprecated, and
may cause an error in the future.
y_score : ndarray of shape (n_samples, n_labels)
Target scores, can either be probability estimates, confidence values,
or non-thresholded measure of decisions (as returned by
"decision_function" on some classifiers).
k : int, default=None
Only consider the highest k scores in the ranking. If `None`, use all
outputs.
sample_weight : ndarray of shape (n_samples,), default=None
Sample weights. If `None`, all samples are given the same weight.
ignore_ties : bool, default=False
Assume that there are no ties in y_score (which is likely to be the
case if y_score is continuous) for efficiency gains.
Returns
-------
normalized_discounted_cumulative_gain : float in [0., 1.]
The averaged NDCG scores for all samples.
See Also
--------
dcg_score : Discounted Cumulative Gain (not normalized).
References
----------
`Wikipedia entry for Discounted Cumulative Gain
<https://en.wikipedia.org/wiki/Discounted_cumulative_gain>`_
Jarvelin, K., & Kekalainen, J. (2002).
Cumulated gain-based evaluation of IR techniques. ACM Transactions on
Information Systems (TOIS), 20(4), 422-446.
Wang, Y., Wang, L., Li, Y., He, D., Chen, W., & Liu, T. Y. (2013, May).
A theoretical analysis of NDCG ranking measures. In Proceedings of the 26th
Annual Conference on Learning Theory (COLT 2013)
McSherry, F., & Najork, M. (2008, March). Computing information retrieval
performance measures efficiently in the presence of tied scores. In
European conference on information retrieval (pp. 414-421). Springer,
Berlin, Heidelberg.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import ndcg_score
>>> # we have groud-truth relevance of some answers to a query:
>>> true_relevance = np.asarray([[10, 0, 0, 1, 5]])
>>> # we predict some scores (relevance) for the answers
>>> scores = np.asarray([[.1, .2, .3, 4, 70]])
>>> ndcg_score(true_relevance, scores)
0.69...
>>> scores = np.asarray([[.05, 1.1, 1., .5, .0]])
>>> ndcg_score(true_relevance, scores)
0.49...
>>> # we can set k to truncate the sum; only top k answers contribute.
>>> ndcg_score(true_relevance, scores, k=4)
0.35...
>>> # the normalization takes k into account so a perfect answer
>>> # would still get 1.0
>>> ndcg_score(true_relevance, true_relevance, k=4)
1.0
>>> # now we have some ties in our prediction
>>> scores = np.asarray([[1, 0, 0, 0, 1]])
>>> # by default ties are averaged, so here we get the average (normalized)
>>> # true relevance of our top predictions: (10 / 10 + 5 / 10) / 2 = .75
>>> ndcg_score(true_relevance, scores, k=1)
0.75
>>> # we can choose to ignore ties for faster results, but only
>>> # if we know there aren't ties in our scores, otherwise we get
>>> # wrong results:
>>> ndcg_score(true_relevance,
... scores, k=1, ignore_ties=True)
0.5
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
_check_dcg_target_type(y_true)
gain = _ndcg_sample_scores(y_true, y_score, k=k, ignore_ties=ignore_ties)
if (isinstance(y_true, np.ndarray)):
if (y_true.min() < 0):
warnings.warn(
"ndcg_score should not use negative y_true values",
DeprecationWarning,
)
else:
for value in y_true:
if (value < 0):
warnings.warn(
"ndcg_score should not use negative y_true values",
DeprecationWarning,
)
return np.average(gain, weights=sample_weight)
| def ndcg_score(y_true, y_score, *, k=None, sample_weight=None, ignore_ties=False):
"""Compute Normalized Discounted Cumulative Gain.
Sum the true scores ranked in the order induced by the predicted scores,
after applying a logarithmic discount. Then divide by the best possible
score (Ideal DCG, obtained for a perfect ranking) to obtain a score between
0 and 1.
This ranking metric returns a high value if true labels are ranked high by
``y_score``.
Parameters
----------
y_true : ndarray of shape (n_samples, n_labels)
True targets of multilabel classification, or true scores of entities
to be ranked. Negative values in `y_true` may result in an output
that is not between 0 and 1. These negative values are deprecated, and
may cause an error in the future.
y_score : ndarray of shape (n_samples, n_labels)
Target scores, can either be probability estimates, confidence values,
or non-thresholded measure of decisions (as returned by
"decision_function" on some classifiers).
k : int, default=None
Only consider the highest k scores in the ranking. If `None`, use all
outputs.
sample_weight : ndarray of shape (n_samples,), default=None
Sample weights. If `None`, all samples are given the same weight.
ignore_ties : bool, default=False
Assume that there are no ties in y_score (which is likely to be the
case if y_score is continuous) for efficiency gains.
Returns
-------
normalized_discounted_cumulative_gain : float in [0., 1.]
The averaged NDCG scores for all samples.
See Also
--------
dcg_score : Discounted Cumulative Gain (not normalized).
References
----------
`Wikipedia entry for Discounted Cumulative Gain
<https://en.wikipedia.org/wiki/Discounted_cumulative_gain>`_
Jarvelin, K., & Kekalainen, J. (2002).
Cumulated gain-based evaluation of IR techniques. ACM Transactions on
Information Systems (TOIS), 20(4), 422-446.
Wang, Y., Wang, L., Li, Y., He, D., Chen, W., & Liu, T. Y. (2013, May).
A theoretical analysis of NDCG ranking measures. In Proceedings of the 26th
Annual Conference on Learning Theory (COLT 2013)
McSherry, F., & Najork, M. (2008, March). Computing information retrieval
performance measures efficiently in the presence of tied scores. In
European conference on information retrieval (pp. 414-421). Springer,
Berlin, Heidelberg.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import ndcg_score
>>> # we have groud-truth relevance of some answers to a query:
>>> true_relevance = np.asarray([[10, 0, 0, 1, 5]])
>>> # we predict some scores (relevance) for the answers
>>> scores = np.asarray([[.1, .2, .3, 4, 70]])
>>> ndcg_score(true_relevance, scores)
0.69...
>>> scores = np.asarray([[.05, 1.1, 1., .5, .0]])
>>> ndcg_score(true_relevance, scores)
0.49...
>>> # we can set k to truncate the sum; only top k answers contribute.
>>> ndcg_score(true_relevance, scores, k=4)
0.35...
>>> # the normalization takes k into account so a perfect answer
>>> # would still get 1.0
>>> ndcg_score(true_relevance, true_relevance, k=4)
1.0
>>> # now we have some ties in our prediction
>>> scores = np.asarray([[1, 0, 0, 0, 1]])
>>> # by default ties are averaged, so here we get the average (normalized)
>>> # true relevance of our top predictions: (10 / 10 + 5 / 10) / 2 = .75
>>> ndcg_score(true_relevance, scores, k=1)
0.75
>>> # we can choose to ignore ties for faster results, but only
>>> # if we know there aren't ties in our scores, otherwise we get
>>> # wrong results:
>>> ndcg_score(true_relevance,
... scores, k=1, ignore_ties=True)
0.5
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
_check_dcg_target_type(y_true)
gain = _ndcg_sample_scores(y_true, y_score, k=k, ignore_ties=ignore_ties)
if (isinstance(y_true, np.ndarray)):
if (y_true.min() < 0):
warnings.warn(
"ndcg_score should not use negative y_true values",
DeprecationWarning,
)
else:
for value in y_true:
if (value < 0):
warnings.warn(
"ndcg_score should not use negative y_true values",
DeprecationWarning,
)
return np.average(gain, weights=sample_weight)
|
13,593 | def rand_QB(A, target_rank=None, distribution='normal', oversampling=0, powerIterations=0):
"""
randomisierte QB-Zerlegung
See Algorithm 3.1 in [EMKB19]_.
Parameters
----------
A :
The |VectorArray| for which the randomized QB Decomposition is to be computed.
target_rank : int
The desired rank for the decomposition. If None rank = len(A).
distribution : str
Distribution used for the random projectionmatrix Omega. (`'normal'` or `'uniform'`)
oversampling : int
Oversamplingparameter. Number of extra columns of the projectionmatrix.
powerIterations : int
Number of power Iterations.
Returns
-------
Q :
|VectorArray| containig an approximate optimal Basis for the Image of the Inputmatrix A.
len(Q) = target_rank
B :
Numpy Array. Projection of the Input Matrix into the lower dimensional subspace.
"""
assert isinstance(A, VectorArray)
assert target_rank is None or target_rank <= len(A)
assert distribution in ('normal', 'uniform')
if A.dim == 0 or len(A) == 0:
return A.space.zeros(), np.zeros((target_rank, len(A)))
rank = len(A) if target_rank is None else target_rank + oversampling
target_rank = len(A) if target_rank is None else target_rank
Omega = np.random.normal(0, 1, (rank, len(A))) if distribution == 'normal' else np.random.rand(rank, len(A))
Y = A.lincomb(Omega)[:target_rank]
# Power Iterations
if(powerIterations > 0):
for i in range(powerIterations):
Q = gram_schmidt(Y)[:target_rank]
Z, _ = spla.qr(A.inner(Q))
Y = A.lincomb(Z)[:target_rank]
Q = gram_schmidt(Y)[:target_rank]
B = Q.inner(A)
return Q, B
| def rand_QB(A, target_rank=None, distribution='normal', oversampling=0, powerIterations=0):
"""
randomisierte QB-Zerlegung
See Algorithm 3.1 in [EMKB19]_.
Parameters
----------
A :
The |VectorArray| for which the randomized QB Decomposition is to be computed.
target_rank : int
The desired rank for the decomposition. If None rank = len(A).
distribution : str
Distribution used for the random projectionmatrix Omega. (`'normal'` or `'uniform'`)
oversampling : int
Oversamplingparameter. Number of extra columns of the projectionmatrix.
powerIterations : int
Number of power Iterations.
Returns
-------
Q :
|VectorArray| containig an approximate basis for the image of the input matrix A.
len(Q) = target_rank
B :
Numpy Array. Projection of the Input Matrix into the lower dimensional subspace.
"""
assert isinstance(A, VectorArray)
assert target_rank is None or target_rank <= len(A)
assert distribution in ('normal', 'uniform')
if A.dim == 0 or len(A) == 0:
return A.space.zeros(), np.zeros((target_rank, len(A)))
rank = len(A) if target_rank is None else target_rank + oversampling
target_rank = len(A) if target_rank is None else target_rank
Omega = np.random.normal(0, 1, (rank, len(A))) if distribution == 'normal' else np.random.rand(rank, len(A))
Y = A.lincomb(Omega)[:target_rank]
# Power Iterations
if(powerIterations > 0):
for i in range(powerIterations):
Q = gram_schmidt(Y)[:target_rank]
Z, _ = spla.qr(A.inner(Q))
Y = A.lincomb(Z)[:target_rank]
Q = gram_schmidt(Y)[:target_rank]
B = Q.inner(A)
return Q, B
|
12,159 | def pack(obj, compress=True):
if bypass_serialization is True:
assert(any([obj[:len(pattern)] == pattern
for pattern in decode_lookup]))
return obj
blob = b"mYm\0"
blob += pack_obj(obj)
if compress:
compressed = b'ZL123\0' + np.uint64(len(blob)).tostring() + zlib.compress(blob)
if len(compressed) < len(blob):
blob = compressed
return blob
| def pack(obj, compress=True):
if bypass_serialization:
assert(any([obj[:len(pattern)] == pattern
for pattern in decode_lookup]))
return obj
blob = b"mYm\0"
blob += pack_obj(obj)
if compress:
compressed = b'ZL123\0' + np.uint64(len(blob)).tostring() + zlib.compress(blob)
if len(compressed) < len(blob):
blob = compressed
return blob
|
43,820 | def apply_controlled_Q(fn, wires, target_wire, control_wire, work_wires):
r"""Provides the circuit to apply a controlled version of the :math:`\mathcal{Q}` unitary
defined in `this <https://arxiv.org/abs/1805.00109>`__ paper.
Given a callable ``fn`` input corresponding to the :math:`\mathcal{F}` unitary in the above
paper, this function transforms the circuit into a controlled-version of the :math:`\mathcal{Q}`
unitary which forms part of the quantum Monte Carlo algorithm. In this algorithm, one of the
wires acted upon by :math:`\mathcal{F}`, specified by ``target_wire``, is used to embed a
Monte Carlo estimation problem. The :math:`\mathcal{Q}` unitary is then designed to encode the
target expectation value as a phase in one of its eigenvalues.
This function transforms to a controlled version of :math:`\mathcal{Q}` that is compatible with
quantum phase estimation (see :class:`~.QuantumPhaseEstimation` for more details).
Args:
fn (Callable): a quantum function that applies quantum operations according to the
:math:`\mathcal{F}` unitary used as part of quantum Monte Carlo estimation
wires (Union[Wires or Sequence[int]]): the wires acted upon by the ``fn`` circuit
target_wire (Union[Wires, int]): The wire in which the expectation value is encoded. Must be
contained within ``wires``.
control_wire (Union[Wires, int]): the control wire from the register of phase estimation
qubits
work_wires (Union[Wires, Sequence[int], or int]): additional work wires used when
decomposing :math:`\mathcal{Q}`
Returns:
function: The input function transformed to the :math:`\mathcal{Q}` unitary
Raises:
ValueError: if ``target_wire`` is not in ``wires``
"""
fn_inv = adjoint(fn)
wires = Wires(wires)
target_wire = Wires(target_wire)
control_wire = Wires(control_wire)
work_wires = Wires(work_wires)
if not wires.contains_wires(target_wire):
raise ValueError("The target wire must be contained within wires")
@wraps(fn)
def wrapper(*args, **kwargs):
_apply_controlled_v(target_wire=target_wire, control_wire=control_wire)
fn_inv(*args, **kwargs)
_apply_controlled_z(wires=wires, control_wire=control_wire, work_wires=work_wires)
fn(*args, **kwargs)
_apply_controlled_v(target_wire=target_wire, control_wire=control_wire)
fn_inv(*args, **kwargs)
_apply_controlled_z(wires=wires, control_wire=control_wire, work_wires=work_wires)
fn(*args, **kwargs)
return wrapper
| def apply_controlled_Q(fn, wires, target_wire, control_wire, work_wires):
r"""Provides the circuit to apply a controlled version of the :math:`\mathcal{Q}` unitary
defined in `this <https://arxiv.org/abs/1805.00109>`__ paper.
Given a callable ``fn`` input corresponding to the :math:`\mathcal{F}` unitary in the above
paper, this function transforms the circuit into a controlled version of the :math:`\mathcal{Q}`
unitary which forms part of the quantum Monte Carlo algorithm. In this algorithm, one of the
wires acted upon by :math:`\mathcal{F}`, specified by ``target_wire``, is used to embed a
Monte Carlo estimation problem. The :math:`\mathcal{Q}` unitary is then designed to encode the
target expectation value as a phase in one of its eigenvalues.
This function transforms to a controlled version of :math:`\mathcal{Q}` that is compatible with
quantum phase estimation (see :class:`~.QuantumPhaseEstimation` for more details).
Args:
fn (Callable): a quantum function that applies quantum operations according to the
:math:`\mathcal{F}` unitary used as part of quantum Monte Carlo estimation
wires (Union[Wires or Sequence[int]]): the wires acted upon by the ``fn`` circuit
target_wire (Union[Wires, int]): The wire in which the expectation value is encoded. Must be
contained within ``wires``.
control_wire (Union[Wires, int]): the control wire from the register of phase estimation
qubits
work_wires (Union[Wires, Sequence[int], or int]): additional work wires used when
decomposing :math:`\mathcal{Q}`
Returns:
function: The input function transformed to the :math:`\mathcal{Q}` unitary
Raises:
ValueError: if ``target_wire`` is not in ``wires``
"""
fn_inv = adjoint(fn)
wires = Wires(wires)
target_wire = Wires(target_wire)
control_wire = Wires(control_wire)
work_wires = Wires(work_wires)
if not wires.contains_wires(target_wire):
raise ValueError("The target wire must be contained within wires")
@wraps(fn)
def wrapper(*args, **kwargs):
_apply_controlled_v(target_wire=target_wire, control_wire=control_wire)
fn_inv(*args, **kwargs)
_apply_controlled_z(wires=wires, control_wire=control_wire, work_wires=work_wires)
fn(*args, **kwargs)
_apply_controlled_v(target_wire=target_wire, control_wire=control_wire)
fn_inv(*args, **kwargs)
_apply_controlled_z(wires=wires, control_wire=control_wire, work_wires=work_wires)
fn(*args, **kwargs)
return wrapper
|
1,209 | def check_arr_seq_view(seq_view, seq):
assert seq_view._is_view is True
assert (seq_view is not seq) is True
assert (np.may_share_memory(seq_view._data, seq._data)) is True
assert seq_view._offsets is not seq._offsets
assert seq_view._lengths is not seq._lengths
| def check_arr_seq_view(seq_view, seq):
assert seq_view._is_view is True
assert (seq_view is not seq) is True
assert np.may_share_memory(seq_view._data, seq._data)
assert seq_view._offsets is not seq._offsets
assert seq_view._lengths is not seq._lengths
|
10,494 | def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'build-dep', 'fixed', 'latest', 'present']),
update_cache=dict(type='bool', aliases=['update-cache']),
update_cache_retries=dict(type='int', default=5),
update_cache_retry_max_delay=dict(type='int', default=12),
cache_valid_time=dict(type='int', default=0),
purge=dict(type='bool', default=False),
package=dict(type='list', elements='str', aliases=['pkg', 'name']),
deb=dict(type='path'),
default_release=dict(type='str', aliases=['default-release']),
install_recommends=dict(type='bool', aliases=['install-recommends']),
force=dict(type='bool', default=False),
upgrade=dict(type='str', choices=['dist', 'full', 'no', 'safe', 'yes']),
dpkg_options=dict(type='str', default=DPKG_OPTIONS),
autoremove=dict(type='bool', default=False),
autoclean=dict(type='bool', default=False),
no_remove=dict(type='bool', default=False, aliases=['no-remove']),
policy_rc_d=dict(type='int', default=None),
only_upgrade=dict(type='bool', default=False),
force_apt_get=dict(type='bool', default=False),
allow_unauthenticated=dict(type='bool', default=False, aliases=['allow-unauthenticated']),
),
mutually_exclusive=[['deb', 'package', 'upgrade']],
required_one_of=[['autoremove', 'deb', 'package', 'update_cache', 'upgrade']],
supports_check_mode=True,
)
module.run_command_environ_update = APT_ENV_VARS
if not HAS_PYTHON_APT:
if module.check_mode:
module.fail_json(msg="%s must be installed to use check mode. "
"If run normally this module can auto-install it." % PYTHON_APT)
try:
# We skip cache update in auto install the dependency if the
# user explicitly declared it with update_cache=no.
if module.params.get('update_cache') is False:
module.warn("Auto-installing missing dependency without updating cache: %s" % PYTHON_APT)
else:
module.warn("Updating cache and auto-installing missing dependency: %s" % PYTHON_APT)
module.run_command(['apt-get', 'update'], check_rc=True)
module.run_command(['apt-get', 'install', '--no-install-recommends', PYTHON_APT, '-y', '-q'], check_rc=True)
global apt, apt_pkg
import apt
import apt.debfile
import apt_pkg
except ImportError:
module.fail_json(msg="Could not import python modules: apt, apt_pkg. "
"Please install %s package." % PYTHON_APT)
global APTITUDE_CMD
APTITUDE_CMD = module.get_bin_path("aptitude", False)
global APT_GET_CMD
APT_GET_CMD = module.get_bin_path("apt-get")
p = module.params
if p['upgrade'] == 'no':
p['upgrade'] = None
use_apt_get = p['force_apt_get']
if not use_apt_get and not APTITUDE_CMD:
use_apt_get = True
updated_cache = False
updated_cache_time = 0
install_recommends = p['install_recommends']
allow_unauthenticated = p['allow_unauthenticated']
dpkg_options = expand_dpkg_options(p['dpkg_options'])
autoremove = p['autoremove']
no_remove = p['no_remove']
autoclean = p['autoclean']
# Get the cache object
cache = get_cache(module)
try:
if p['default_release']:
try:
apt_pkg.config['APT::Default-Release'] = p['default_release']
except AttributeError:
apt_pkg.Config['APT::Default-Release'] = p['default_release']
# reopen cache w/ modified config
cache.open(progress=None)
mtimestamp, updated_cache_time = get_updated_cache_time()
# Cache valid time is default 0, which will update the cache if
# needed and `update_cache` was set to true
updated_cache = False
if p['update_cache'] or p['cache_valid_time']:
now = datetime.datetime.now()
tdelta = datetime.timedelta(seconds=p['cache_valid_time'])
if not mtimestamp + tdelta >= now:
# Retry to update the cache with exponential backoff
err = ''
update_cache_retries = module.params.get('update_cache_retries')
update_cache_retry_max_delay = module.params.get('update_cache_retry_max_delay')
randomize = random.randint(0, 1000) / 1000.0
for retry in range(update_cache_retries):
try:
cache.update()
break
except apt.cache.FetchFailedException as e:
err = to_native(e)
# Use exponential backoff plus a little bit of randomness
delay = 2 ** retry + randomize
if delay > update_cache_retry_max_delay:
delay = update_cache_retry_max_delay + randomize
time.sleep(delay)
else:
module.fail_json(msg='Failed to update apt cache: %s' % (err if err else 'unknown reason'))
cache.open(progress=None)
mtimestamp, post_cache_update_time = get_updated_cache_time()
if updated_cache_time != post_cache_update_time:
updated_cache = True
updated_cache_time = post_cache_update_time
# If there is nothing else to do exit. This will set state as
# changed based on if the cache was updated.
if not p['package'] and not p['upgrade'] and not p['deb']:
module.exit_json(
changed=updated_cache,
cache_updated=updated_cache,
cache_update_time=updated_cache_time
)
force_yes = p['force']
if p['upgrade']:
upgrade(module, p['upgrade'], force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, no_remove, allow_unauthenticated)
if p['deb']:
if p['state'] != 'present':
module.fail_json(msg="deb only supports state=present")
if '://' in p['deb']:
p['deb'] = fetch_file(module, p['deb'])
install_deb(module, p['deb'], cache,
install_recommends=install_recommends,
allow_unauthenticated=allow_unauthenticated,
force=force_yes, no_remove=no_remove, dpkg_options=p['dpkg_options'])
unfiltered_packages = p['package'] or ()
packages = [package.strip() for package in unfiltered_packages if package != '*']
all_installed = '*' in unfiltered_packages
latest = p['state'] == 'latest'
if latest and all_installed:
if packages:
module.fail_json(msg='unable to install additional packages when upgrading all installed packages')
upgrade(module, 'yes', force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, no_remove, allow_unauthenticated)
if packages:
for package in packages:
if package.count('=') > 1:
module.fail_json(msg="invalid package spec: %s" % package)
if latest and '=' in package:
module.fail_json(msg='version number inconsistent with state=latest: %s' % package)
if not packages:
if autoclean:
cleanup(module, p['purge'], force=force_yes, operation='autoclean', dpkg_options=dpkg_options)
if autoremove:
cleanup(module, p['purge'], force=force_yes, operation='autoremove', dpkg_options=dpkg_options)
if p['state'] in ('latest', 'present', 'build-dep', 'fixed'):
state_upgrade = False
state_builddep = False
state_fixed = False
if p['state'] == 'latest':
state_upgrade = True
if p['state'] == 'build-dep':
state_builddep = True
if p['state'] == 'fixed':
state_fixed = True
success, retvals = install(
module,
packages,
cache,
upgrade=state_upgrade,
default_release=p['default_release'],
install_recommends=install_recommends,
force=force_yes,
dpkg_options=dpkg_options,
build_dep=state_builddep,
fixed=state_fixed,
autoremove=autoremove,
no_remove=no_remove,
only_upgrade=p['only_upgrade'],
allow_unauthenticated=allow_unauthenticated
)
# Store if the cache has been updated
retvals['cache_updated'] = updated_cache
# Store when the update time was last
retvals['cache_update_time'] = updated_cache_time
if success:
module.exit_json(**retvals)
else:
module.fail_json(**retvals)
elif p['state'] == 'absent':
remove(module, packages, cache, p['purge'], force=force_yes, dpkg_options=dpkg_options, autoremove=autoremove)
except apt.cache.LockFailedException:
module.fail_json(msg="Failed to lock apt for exclusive operation")
except apt.cache.FetchFailedException:
module.fail_json(msg="Could not fetch updated apt files")
| def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'build-dep', 'fixed', 'latest', 'present']),
update_cache=dict(type='bool', aliases=['update-cache']),
update_cache_retries=dict(type='int', default=5),
update_cache_retry_max_delay=dict(type='int', default=12),
cache_valid_time=dict(type='int', default=0),
purge=dict(type='bool', default=False),
package=dict(type='list', elements='str', aliases=['pkg', 'name']),
deb=dict(type='path'),
default_release=dict(type='str', aliases=['default-release']),
install_recommends=dict(type='bool', aliases=['install-recommends']),
force=dict(type='bool', default=False),
upgrade=dict(type='str', choices=['dist', 'full', 'no', 'safe', 'yes']),
dpkg_options=dict(type='str', default=DPKG_OPTIONS),
autoremove=dict(type='bool', default=False),
autoclean=dict(type='bool', default=False),
fail_on_autoremove=dict(type='bool', default=False),
policy_rc_d=dict(type='int', default=None),
only_upgrade=dict(type='bool', default=False),
force_apt_get=dict(type='bool', default=False),
allow_unauthenticated=dict(type='bool', default=False, aliases=['allow-unauthenticated']),
),
mutually_exclusive=[['deb', 'package', 'upgrade']],
required_one_of=[['autoremove', 'deb', 'package', 'update_cache', 'upgrade']],
supports_check_mode=True,
)
module.run_command_environ_update = APT_ENV_VARS
if not HAS_PYTHON_APT:
if module.check_mode:
module.fail_json(msg="%s must be installed to use check mode. "
"If run normally this module can auto-install it." % PYTHON_APT)
try:
# We skip cache update in auto install the dependency if the
# user explicitly declared it with update_cache=no.
if module.params.get('update_cache') is False:
module.warn("Auto-installing missing dependency without updating cache: %s" % PYTHON_APT)
else:
module.warn("Updating cache and auto-installing missing dependency: %s" % PYTHON_APT)
module.run_command(['apt-get', 'update'], check_rc=True)
module.run_command(['apt-get', 'install', '--no-install-recommends', PYTHON_APT, '-y', '-q'], check_rc=True)
global apt, apt_pkg
import apt
import apt.debfile
import apt_pkg
except ImportError:
module.fail_json(msg="Could not import python modules: apt, apt_pkg. "
"Please install %s package." % PYTHON_APT)
global APTITUDE_CMD
APTITUDE_CMD = module.get_bin_path("aptitude", False)
global APT_GET_CMD
APT_GET_CMD = module.get_bin_path("apt-get")
p = module.params
if p['upgrade'] == 'no':
p['upgrade'] = None
use_apt_get = p['force_apt_get']
if not use_apt_get and not APTITUDE_CMD:
use_apt_get = True
updated_cache = False
updated_cache_time = 0
install_recommends = p['install_recommends']
allow_unauthenticated = p['allow_unauthenticated']
dpkg_options = expand_dpkg_options(p['dpkg_options'])
autoremove = p['autoremove']
no_remove = p['no_remove']
autoclean = p['autoclean']
# Get the cache object
cache = get_cache(module)
try:
if p['default_release']:
try:
apt_pkg.config['APT::Default-Release'] = p['default_release']
except AttributeError:
apt_pkg.Config['APT::Default-Release'] = p['default_release']
# reopen cache w/ modified config
cache.open(progress=None)
mtimestamp, updated_cache_time = get_updated_cache_time()
# Cache valid time is default 0, which will update the cache if
# needed and `update_cache` was set to true
updated_cache = False
if p['update_cache'] or p['cache_valid_time']:
now = datetime.datetime.now()
tdelta = datetime.timedelta(seconds=p['cache_valid_time'])
if not mtimestamp + tdelta >= now:
# Retry to update the cache with exponential backoff
err = ''
update_cache_retries = module.params.get('update_cache_retries')
update_cache_retry_max_delay = module.params.get('update_cache_retry_max_delay')
randomize = random.randint(0, 1000) / 1000.0
for retry in range(update_cache_retries):
try:
cache.update()
break
except apt.cache.FetchFailedException as e:
err = to_native(e)
# Use exponential backoff plus a little bit of randomness
delay = 2 ** retry + randomize
if delay > update_cache_retry_max_delay:
delay = update_cache_retry_max_delay + randomize
time.sleep(delay)
else:
module.fail_json(msg='Failed to update apt cache: %s' % (err if err else 'unknown reason'))
cache.open(progress=None)
mtimestamp, post_cache_update_time = get_updated_cache_time()
if updated_cache_time != post_cache_update_time:
updated_cache = True
updated_cache_time = post_cache_update_time
# If there is nothing else to do exit. This will set state as
# changed based on if the cache was updated.
if not p['package'] and not p['upgrade'] and not p['deb']:
module.exit_json(
changed=updated_cache,
cache_updated=updated_cache,
cache_update_time=updated_cache_time
)
force_yes = p['force']
if p['upgrade']:
upgrade(module, p['upgrade'], force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, no_remove, allow_unauthenticated)
if p['deb']:
if p['state'] != 'present':
module.fail_json(msg="deb only supports state=present")
if '://' in p['deb']:
p['deb'] = fetch_file(module, p['deb'])
install_deb(module, p['deb'], cache,
install_recommends=install_recommends,
allow_unauthenticated=allow_unauthenticated,
force=force_yes, no_remove=no_remove, dpkg_options=p['dpkg_options'])
unfiltered_packages = p['package'] or ()
packages = [package.strip() for package in unfiltered_packages if package != '*']
all_installed = '*' in unfiltered_packages
latest = p['state'] == 'latest'
if latest and all_installed:
if packages:
module.fail_json(msg='unable to install additional packages when upgrading all installed packages')
upgrade(module, 'yes', force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, no_remove, allow_unauthenticated)
if packages:
for package in packages:
if package.count('=') > 1:
module.fail_json(msg="invalid package spec: %s" % package)
if latest and '=' in package:
module.fail_json(msg='version number inconsistent with state=latest: %s' % package)
if not packages:
if autoclean:
cleanup(module, p['purge'], force=force_yes, operation='autoclean', dpkg_options=dpkg_options)
if autoremove:
cleanup(module, p['purge'], force=force_yes, operation='autoremove', dpkg_options=dpkg_options)
if p['state'] in ('latest', 'present', 'build-dep', 'fixed'):
state_upgrade = False
state_builddep = False
state_fixed = False
if p['state'] == 'latest':
state_upgrade = True
if p['state'] == 'build-dep':
state_builddep = True
if p['state'] == 'fixed':
state_fixed = True
success, retvals = install(
module,
packages,
cache,
upgrade=state_upgrade,
default_release=p['default_release'],
install_recommends=install_recommends,
force=force_yes,
dpkg_options=dpkg_options,
build_dep=state_builddep,
fixed=state_fixed,
autoremove=autoremove,
no_remove=no_remove,
only_upgrade=p['only_upgrade'],
allow_unauthenticated=allow_unauthenticated
)
# Store if the cache has been updated
retvals['cache_updated'] = updated_cache
# Store when the update time was last
retvals['cache_update_time'] = updated_cache_time
if success:
module.exit_json(**retvals)
else:
module.fail_json(**retvals)
elif p['state'] == 'absent':
remove(module, packages, cache, p['purge'], force=force_yes, dpkg_options=dpkg_options, autoremove=autoremove)
except apt.cache.LockFailedException:
module.fail_json(msg="Failed to lock apt for exclusive operation")
except apt.cache.FetchFailedException:
module.fail_json(msg="Could not fetch updated apt files")
|
31,465 | def stix_to_indicator(stix_obj, tags: list = [], tlp_color: Optional[str] = None):
indicator: Dict[str, Any] = {}
try:
ext_obj = stix_obj.get("external_references", [])
ext_id = ""
if ext_obj[0]:
ext_id = ext_obj[0].get("external_id")
event_obj = stix_obj.get("x_sixgill_info", {}).get("event", {})
nvd_obj = stix_obj.get("x_sixgill_info", {}).get("nvd", {})
score_obj = stix_obj.get("x_sixgill_info", {}).get("score", {})
fields = create_fields(stix_obj, event_obj, nvd_obj, score_obj, ext_id)
fields = get_description(fields)
indicator["value"] = ext_id
indicator["type"] = "CVE"
indicator["rawJSON"] = {"value": ext_id, "type": "CVE"}
indicator["rawJSON"].update(stix_obj)
indicator["score"] = "3"
indicator["fields"] = fields
if tlp_color:
indicator["fields"]["trafficlightprotocol"] = str(tlp_color)
if tags:
indicator["fields"]["tags"] = ",".join(list(set(tags)))
except Exception as err:
demisto.error(err)
demisto.error(traceback.format_exc())
return indicator
| def stix_to_indicator(stix_obj, tags: list = [], tlp_color: Optional[str] = None):
indicator: Dict[str, Any] = {}
try:
ext_obj = stix_obj.get("external_references", [])
ext_id = ""
if ext_obj and ext_obj[0]:
ext_id = ext_obj[0].get("external_id")
event_obj = stix_obj.get("x_sixgill_info", {}).get("event", {})
nvd_obj = stix_obj.get("x_sixgill_info", {}).get("nvd", {})
score_obj = stix_obj.get("x_sixgill_info", {}).get("score", {})
fields = create_fields(stix_obj, event_obj, nvd_obj, score_obj, ext_id)
fields = get_description(fields)
indicator["value"] = ext_id
indicator["type"] = "CVE"
indicator["rawJSON"] = {"value": ext_id, "type": "CVE"}
indicator["rawJSON"].update(stix_obj)
indicator["score"] = "3"
indicator["fields"] = fields
if tlp_color:
indicator["fields"]["trafficlightprotocol"] = str(tlp_color)
if tags:
indicator["fields"]["tags"] = ",".join(list(set(tags)))
except Exception as err:
demisto.error(err)
demisto.error(traceback.format_exc())
return indicator
|
5,935 | def main(args: (Optional[List[str]]) = None) -> int:
"""This is an internal API only meant for use by pip's own console scripts.
For additional details, see https://github.com/pypa/pip/issues/7498.
"""
from pip._internal.utils.entrypoints import _wrapper
return _wrapper(args)
| def main(args: Optional[List[str]] = None) -> int:
"""This is an internal API only meant for use by pip's own console scripts.
For additional details, see https://github.com/pypa/pip/issues/7498.
"""
from pip._internal.utils.entrypoints import _wrapper
return _wrapper(args)
|
952 | def piecewise_canonical(expr, *, skip_nan=False):
"""
Return a canonical Piecewise, i.e., where exactly one codition is True.
SymPy normally represents the condition in an "if-elif"-fashion, which
leads to that more than one condition can be True. This is sometimes not
wanted when representing the Piecewise mathematically.
Note that further manipulation of the resulting Piecewise, e.g. simplifying
it, will most likely make it non-canonical. Hence, this is primarily a
function to be used in conjunction with printing the Piecewise.
``piecewise_canonical`` will also explicitly add a final NaN segment to the
Piecewise, unless all cases are covered. This can be avoided by passing
``skip_nan=True`` as a final argument. It can also be used in some
situations where SymPy cannot determine that all cases are covered.
Examples
========
>>> from sympy import piecewise_canonical, Symbol, Piecewise, S
>>> x = Symbol('x', real=True)
>>> p = Piecewise((0, x < 0), (S.Half, x <= 0), (1, True))
>>> piecewise_canonical(p)
Piecewise((0, x < 0), (1/2, Eq(x, 0)), (1, x > 0))
>>> piecewise_canonical(Piecewise((2, x > 1)))
Piecewise((2, x > 1), (nan, x <= 1))
>>> piecewise_canonical(Piecewise((2, x > 1)), skip_nan=True)
Piecewise((2, x > 1))
"""
if not expr.has(Piecewise):
return expr
if isinstance(expr, Piecewise):
cumcond = false
newargs = []
for arg in expr.args:
cancond = And(arg.cond, Not(cumcond)).simplify()
cumcond = Or(arg.cond, cumcond).simplify()
newargs.append(
ExprCondPair(piecewise_canonical(arg.expr, skip_nan=skip_nan),
cancond))
if not skip_nan and cumcond is not true:
newargs.append(ExprCondPair(Undefined, Not(cumcond).simplify()))
return Piecewise(*newargs, evaluate=False)
return expr.func(*[piecewise_canonical(arg, skip_nan=skip_nan)
for arg in expr.args],
evaluate=False)
| def piecewise_canonical(expr, *, skip_nan=False):
"""
Return a canonical Piecewise, i.e., where exactly one condition is True.
SymPy normally represents the condition in an "if-elif"-fashion, which
leads to that more than one condition can be True. This is sometimes not
wanted when representing the Piecewise mathematically.
Note that further manipulation of the resulting Piecewise, e.g. simplifying
it, will most likely make it non-canonical. Hence, this is primarily a
function to be used in conjunction with printing the Piecewise.
``piecewise_canonical`` will also explicitly add a final NaN segment to the
Piecewise, unless all cases are covered. This can be avoided by passing
``skip_nan=True`` as a final argument. It can also be used in some
situations where SymPy cannot determine that all cases are covered.
Examples
========
>>> from sympy import piecewise_canonical, Symbol, Piecewise, S
>>> x = Symbol('x', real=True)
>>> p = Piecewise((0, x < 0), (S.Half, x <= 0), (1, True))
>>> piecewise_canonical(p)
Piecewise((0, x < 0), (1/2, Eq(x, 0)), (1, x > 0))
>>> piecewise_canonical(Piecewise((2, x > 1)))
Piecewise((2, x > 1), (nan, x <= 1))
>>> piecewise_canonical(Piecewise((2, x > 1)), skip_nan=True)
Piecewise((2, x > 1))
"""
if not expr.has(Piecewise):
return expr
if isinstance(expr, Piecewise):
cumcond = false
newargs = []
for arg in expr.args:
cancond = And(arg.cond, Not(cumcond)).simplify()
cumcond = Or(arg.cond, cumcond).simplify()
newargs.append(
ExprCondPair(piecewise_canonical(arg.expr, skip_nan=skip_nan),
cancond))
if not skip_nan and cumcond is not true:
newargs.append(ExprCondPair(Undefined, Not(cumcond).simplify()))
return Piecewise(*newargs, evaluate=False)
return expr.func(*[piecewise_canonical(arg, skip_nan=skip_nan)
for arg in expr.args],
evaluate=False)
|
23,122 | def test_to_dataframe_optimize_graph():
x = db.from_sequence(
[{"name": "test1", "v1": 1}, {"name": "test2", "v1": 2}], npartitions=2
)
# linear operations will be fused by graph optimization
y = x.map(lambda a: dict(**a, v2=a["v1"] + 1))
y = y.map(lambda a: dict(**a, v3=a["v2"] + 1))
# with optimizations
d = y.to_dataframe()["v3"]
assert len([k for k in d.dask if k[0].startswith("getitem")]) == 2
# no optimizations
d2 = y.to_dataframe(optimize_graph=False)["v3"]
# due to fusing the unoptimized graph will be larger
assert len(dict(d2.dask)) > len(dict(d.dask))
assert (d.compute() == d2.compute()).all()
| def test_to_dataframe_optimize_graph():
x = db.from_sequence(
[{"name": "test1", "v1": 1}, {"name": "test2", "v1": 2}], npartitions=2
)
# linear operations will be fused by graph optimization
y = x.map(lambda a: dict(**a, v2=a["v1"] + 1))
y = y.map(lambda a: dict(**a, v3=a["v2"] + 1))
# with optimizations
d = y.to_dataframe()["v3"]
assert len([k for k in d.dask if k[0].startswith("getitem")]) == 2
# no optimizations
d2 = y.to_dataframe(optimize_graph=False)["v3"]
# due to fusing the unoptimized graph will be larger
assert len(dict(d2.dask)) > len(dict(d.dask))
assert_eq(d, d2)
|
24,962 | def integer_sum(a: int, b: int):
"""Returns sum of two integers
:param a: first integer
:param b: second integer
:raises ValueError: One of parameters is not an integer.
"""
if not (isinstance(a, int) and isinstance(b, int)):
raise ValueError('Function supports only integer parameters.')
return a + b
| def integer_sum(a: int, b: int):
"""Returns sum of two integers
:param a: first integer
:param b: second integer
:raises ValueError: One of the parameters is not an integer.
"""
if not (isinstance(a, int) and isinstance(b, int)):
raise ValueError('Function supports only integer parameters.')
return a + b
|
11,975 | def main(argv, session):
args = docopt(__doc__, argv=argv)
ERRORS = False
# Validate args.
s = Schema({
str: Use(bool),
'<identifier>': Or(None, And(str, validate_s3_identifier,
error=('<identifier> should be between 3 and 80 characters in length, and '
'can only contain alphanumeric characters, periods ".", '
'underscores "_", or dashes "-". However, <identifier> cannot begin '
'with periods, underscores, or dashes.'))),
'<file>': And(
And(lambda f: all(os.path.exists(x) for x in f if x != '-'),
error='<file> should be a readable file or directory.'),
And(lambda f: False if f == ['-'] and not args['--remote-name'] else True,
error='--remote-name must be provided when uploading from stdin.')),
'--remote-name': Or(None, str),
'--spreadsheet': Or(None, os.path.isfile,
error='--spreadsheet should be a readable file.'),
'--file-metadata': Or(None, os.path.isfile,
error='--file-metadata should be a readable file.'),
'--metadata': Or(None, And(Use(get_args_dict), dict),
error='--metadata must be formatted as --metadata="key:value"'),
'--header': Or(None, And(Use(get_args_dict), dict),
error='--header must be formatted as --header="key:value"'),
'--retries': Use(lambda x: int(x[0]) if x else 0),
'--sleep': Use(lambda l: int(l[0]), error='--sleep value must be an integer.'),
'--size-hint': Or(Use(lambda l: str(l[0]) if l else None), int, None,
error='--size-hint value must be an integer.'),
'--status-check': bool,
})
try:
args = s.validate(args)
except SchemaError as exc:
print(f'{exc}\n{printable_usage(__doc__)}', file=sys.stderr)
sys.exit(1)
# Make sure the collection being uploaded to exists.
collection_id = args['--metadata'].get('collection')
if collection_id and not args['--no-collection-check'] and not args['--status-check']:
if isinstance(collection_id, list):
collection_id = collection_id[0]
collection = session.get_item(collection_id)
if not collection.exists:
print('You must upload to a collection that exists. '
f'"{collection_id}" does not exist.\n{printable_usage(__doc__)}',
file=sys.stderr)
sys.exit(1)
# Status check.
if args['<identifier>']:
item = session.get_item(args['<identifier>'])
if args['--status-check']:
if session.s3_is_overloaded(identifier=args['<identifier>'], access_key=session.access_key):
print(f'warning: {args["<identifier>"]} is over limit, and not accepting requests. '
'Expect 503 SlowDown errors.',
file=sys.stderr)
sys.exit(1)
elif item.item_size >= MAX_ITEM_SIZE:
print(f'warning: {args["<identifier>"]} is exceeding the maximum item size '
'and not accepting uploads.', file=sys.stderr)
sys.exit(1)
else:
print(f'success: {args["<identifier>"]} is accepting requests.')
sys.exit()
# Upload keyword arguments.
if args['--size-hint']:
args['--header']['x-archive-size-hint'] = args['--size-hint']
# Upload with backups turned on by default.
if not args['--header'].get('x-archive-keep-old-version') and not args['--no-backup']:
args['--header']['x-archive-keep-old-version'] = '1'
queue_derive = True if args['--no-derive'] is False else False
verbose = True if args['--quiet'] is False else False
if args['--file-metadata']:
try:
with open(args['--file-metadata']) as fh:
args['<file>'] = json.load(fh)
except JSONDecodeError:
args['<file>'] = []
with open(args['--file-metadata']) as fh:
for line in fh:
j = json.loads(line.strip())
args['<file>'].append(j)
upload_kwargs = {
'metadata': args['--metadata'],
'headers': args['--header'],
'debug': args['--debug'],
'queue_derive': queue_derive,
'verbose': verbose,
'verify': args['--verify'],
'checksum': args['--checksum'],
'retries': args['--retries'],
'retries_sleep': args['--sleep'],
'delete': args['--delete'],
'validate_identifier': True,
}
# Upload files.
if not args['--spreadsheet']:
if args['-']:
local_file = TemporaryFile()
# sys.stdin normally has the buffer attribute which returns bytes.
# However, this might not always be the case, e.g. on mocking for test purposes.
# Fall back to reading as str and encoding back to bytes.
# Note that the encoding attribute might also be None. In that case, fall back to
# locale.getpreferredencoding, the default of io.TextIOWrapper and open().
if hasattr(sys.stdin, 'buffer'):
def read():
return sys.stdin.buffer.read(1048576)
else:
encoding = sys.stdin.encoding or getpreferredencoding(False)
def read():
return sys.stdin.read(1048576).encode(encoding)
while True:
data = read()
if not data:
break
local_file.write(data)
local_file.seek(0)
else:
local_file = args['<file>']
if isinstance(local_file, (list, tuple, set)) and args['--remote-name']:
local_file = local_file[0]
if args['--remote-name']:
files = {args['--remote-name']: local_file}
elif args['--keep-directories']:
files = {f: f for f in local_file}
else:
files = local_file
for _r in _upload_files(item, files, upload_kwargs):
if args['--debug']:
break
if (not _r.status_code) or (not _r.ok):
ERRORS = True
else:
if args['--open-after-upload']:
url = f'{session.protocol}//{session.host}/details/{item.identifier}'
webbrowser.open_new_tab(url)
# Bulk upload using spreadsheet.
else:
# Use the same session for each upload request.
with open(args['--spreadsheet'], 'r', newline='', encoding='utf-8-sig') as csvfp:
spreadsheet = csv.DictReader(csvfp)
prev_identifier = None
for row in spreadsheet:
for metadata_key in row:
if not is_valid_metadata_key(metadata_key):
print(f'error: "{metadata_key}" is not a valid metadata key.',
file=sys.stderr)
sys.exit(1)
upload_kwargs_copy = deepcopy(upload_kwargs)
if row.get('REMOTE_NAME'):
local_file = {row['REMOTE_NAME']: row['file']}
del row['REMOTE_NAME']
elif args['--keep-directories']:
local_file = {row['file']: row['file']}
else:
local_file = row['file']
identifier = row.get('item', row.get('identifier'))
if not identifier:
if not prev_identifier:
print('error: no identifier column on spreadsheet.',
file=sys.stderr)
sys.exit(1)
identifier = prev_identifier
del row['file']
if 'identifier' in row:
del row['identifier']
if 'item' in row:
del row['item']
item = session.get_item(identifier)
# TODO: Clean up how indexed metadata items are coerced
# into metadata.
md_args = [f'{k.lower()}:{v}' for (k, v) in row.items() if v]
metadata = get_args_dict(md_args)
upload_kwargs_copy['metadata'].update(metadata)
r = _upload_files(item, local_file, upload_kwargs_copy, prev_identifier,
session)
for _r in r:
if args['--debug']:
break
if (not _r.status_code) or (not _r.ok):
ERRORS = True
else:
if args['--open-after-upload']:
url = f'{session.protocol}//{session.host}/details/{identifier}'
webbrowser.open_new_tab(url)
prev_identifier = identifier
if ERRORS:
sys.exit(1)
| def main(argv, session):
args = docopt(__doc__, argv=argv)
ERRORS = False
# Validate args.
s = Schema({
str: Use(bool),
'<identifier>': Or(None, And(str, validate_s3_identifier,
error=('<identifier> should be between 3 and 80 characters in length, and '
'can only contain alphanumeric characters, periods ".", '
'underscores "_", or dashes "-". However, <identifier> cannot begin '
'with periods, underscores, or dashes.'))),
'<file>': And(
And(lambda f: all(os.path.exists(x) for x in f if x != '-'),
error='<file> should be a readable file or directory.'),
And(lambda f: False if f == ['-'] and not args['--remote-name'] else True,
error='--remote-name must be provided when uploading from stdin.')),
'--remote-name': Or(None, str),
'--spreadsheet': Or(None, os.path.isfile,
error='--spreadsheet should be a readable file.'),
'--file-metadata': Or(None, os.path.isfile,
error='--file-metadata should be a readable file.'),
'--metadata': Or(None, And(Use(get_args_dict), dict),
error='--metadata must be formatted as --metadata="key:value"'),
'--header': Or(None, And(Use(get_args_dict), dict),
error='--header must be formatted as --header="key:value"'),
'--retries': Use(lambda x: int(x[0]) if x else 0),
'--sleep': Use(lambda l: int(l[0]), error='--sleep value must be an integer.'),
'--size-hint': Or(Use(lambda l: str(l[0]) if l else None), int, None,
error='--size-hint value must be an integer.'),
'--status-check': bool,
})
try:
args = s.validate(args)
except SchemaError as exc:
print(f'{exc}\n{printable_usage(__doc__)}', file=sys.stderr)
sys.exit(1)
# Make sure the collection being uploaded to exists.
collection_id = args['--metadata'].get('collection')
if collection_id and not args['--no-collection-check'] and not args['--status-check']:
if isinstance(collection_id, list):
collection_id = collection_id[0]
collection = session.get_item(collection_id)
if not collection.exists:
print('You must upload to a collection that exists. '
f'"{collection_id}" does not exist.\n{printable_usage(__doc__)}',
file=sys.stderr)
sys.exit(1)
# Status check.
if args['<identifier>']:
item = session.get_item(args['<identifier>'])
if args['--status-check']:
if session.s3_is_overloaded(identifier=args['<identifier>'], access_key=session.access_key):
print(f'warning: {args["<identifier>"]} is over limit, and not accepting requests. '
'Expect 503 SlowDown errors.',
file=sys.stderr)
sys.exit(1)
elif item.item_size > MAX_ITEM_SIZE:
print(f'warning: {args["<identifier>"]} is exceeding the maximum item size '
'and not accepting uploads.', file=sys.stderr)
sys.exit(1)
else:
print(f'success: {args["<identifier>"]} is accepting requests.')
sys.exit()
# Upload keyword arguments.
if args['--size-hint']:
args['--header']['x-archive-size-hint'] = args['--size-hint']
# Upload with backups turned on by default.
if not args['--header'].get('x-archive-keep-old-version') and not args['--no-backup']:
args['--header']['x-archive-keep-old-version'] = '1'
queue_derive = True if args['--no-derive'] is False else False
verbose = True if args['--quiet'] is False else False
if args['--file-metadata']:
try:
with open(args['--file-metadata']) as fh:
args['<file>'] = json.load(fh)
except JSONDecodeError:
args['<file>'] = []
with open(args['--file-metadata']) as fh:
for line in fh:
j = json.loads(line.strip())
args['<file>'].append(j)
upload_kwargs = {
'metadata': args['--metadata'],
'headers': args['--header'],
'debug': args['--debug'],
'queue_derive': queue_derive,
'verbose': verbose,
'verify': args['--verify'],
'checksum': args['--checksum'],
'retries': args['--retries'],
'retries_sleep': args['--sleep'],
'delete': args['--delete'],
'validate_identifier': True,
}
# Upload files.
if not args['--spreadsheet']:
if args['-']:
local_file = TemporaryFile()
# sys.stdin normally has the buffer attribute which returns bytes.
# However, this might not always be the case, e.g. on mocking for test purposes.
# Fall back to reading as str and encoding back to bytes.
# Note that the encoding attribute might also be None. In that case, fall back to
# locale.getpreferredencoding, the default of io.TextIOWrapper and open().
if hasattr(sys.stdin, 'buffer'):
def read():
return sys.stdin.buffer.read(1048576)
else:
encoding = sys.stdin.encoding or getpreferredencoding(False)
def read():
return sys.stdin.read(1048576).encode(encoding)
while True:
data = read()
if not data:
break
local_file.write(data)
local_file.seek(0)
else:
local_file = args['<file>']
if isinstance(local_file, (list, tuple, set)) and args['--remote-name']:
local_file = local_file[0]
if args['--remote-name']:
files = {args['--remote-name']: local_file}
elif args['--keep-directories']:
files = {f: f for f in local_file}
else:
files = local_file
for _r in _upload_files(item, files, upload_kwargs):
if args['--debug']:
break
if (not _r.status_code) or (not _r.ok):
ERRORS = True
else:
if args['--open-after-upload']:
url = f'{session.protocol}//{session.host}/details/{item.identifier}'
webbrowser.open_new_tab(url)
# Bulk upload using spreadsheet.
else:
# Use the same session for each upload request.
with open(args['--spreadsheet'], 'r', newline='', encoding='utf-8-sig') as csvfp:
spreadsheet = csv.DictReader(csvfp)
prev_identifier = None
for row in spreadsheet:
for metadata_key in row:
if not is_valid_metadata_key(metadata_key):
print(f'error: "{metadata_key}" is not a valid metadata key.',
file=sys.stderr)
sys.exit(1)
upload_kwargs_copy = deepcopy(upload_kwargs)
if row.get('REMOTE_NAME'):
local_file = {row['REMOTE_NAME']: row['file']}
del row['REMOTE_NAME']
elif args['--keep-directories']:
local_file = {row['file']: row['file']}
else:
local_file = row['file']
identifier = row.get('item', row.get('identifier'))
if not identifier:
if not prev_identifier:
print('error: no identifier column on spreadsheet.',
file=sys.stderr)
sys.exit(1)
identifier = prev_identifier
del row['file']
if 'identifier' in row:
del row['identifier']
if 'item' in row:
del row['item']
item = session.get_item(identifier)
# TODO: Clean up how indexed metadata items are coerced
# into metadata.
md_args = [f'{k.lower()}:{v}' for (k, v) in row.items() if v]
metadata = get_args_dict(md_args)
upload_kwargs_copy['metadata'].update(metadata)
r = _upload_files(item, local_file, upload_kwargs_copy, prev_identifier,
session)
for _r in r:
if args['--debug']:
break
if (not _r.status_code) or (not _r.ok):
ERRORS = True
else:
if args['--open-after-upload']:
url = f'{session.protocol}//{session.host}/details/{identifier}'
webbrowser.open_new_tab(url)
prev_identifier = identifier
if ERRORS:
sys.exit(1)
|
42,789 | def test_str_lower():
"""Test string converts to lowercase"""
df = pd.DataFrame(
{
"codes": range(1, 7),
"names": [
"Graham Chapman",
"John Cleese",
"Terry Gilliam",
"Eric Idle",
"Terry Jones",
"Michael Palin",
],
}
)
expected = pd.DataFrame(
{
"codes": range(1, 7),
"names": [
"graham chapman",
"john cleese",
"terry gilliam",
"eric idle",
"terry jones",
"michael palin",
],
}
)
result = process_text(df, column="names", string_function="lower")
assert_frame_equal(result, expected)
| def test_str_lower():
"""Test string conversion to lowercase using ``.str.lower()``"""
df = pd.DataFrame(
{
"codes": range(1, 7),
"names": [
"Graham Chapman",
"John Cleese",
"Terry Gilliam",
"Eric Idle",
"Terry Jones",
"Michael Palin",
],
}
)
expected = pd.DataFrame(
{
"codes": range(1, 7),
"names": [
"graham chapman",
"john cleese",
"terry gilliam",
"eric idle",
"terry jones",
"michael palin",
],
}
)
result = process_text(df, column="names", string_function="lower")
assert_frame_equal(result, expected)
|
37,809 | def build(options: BuildOptions) -> None:
temp_dir = Path(tempfile.mkdtemp(prefix="cibuildwheel"))
built_wheel_dir = temp_dir / "built_wheel"
repaired_wheel_dir = temp_dir / "repaired_wheel"
try:
if options.before_all:
log.step("Running before_all...")
env = options.environment.as_dictionary(prev_environment=os.environ)
before_all_prepared = prepare_command(
options.before_all, project=".", package=options.package_dir
)
shell(before_all_prepared, env=env)
python_configurations = get_python_configurations(
options.build_selector, options.architectures
)
for config in python_configurations:
log.build_start(config.identifier)
dependency_constraint_flags: Sequence[PathOrStr] = []
if options.dependency_constraints:
dependency_constraint_flags = [
"-c",
options.dependency_constraints.get_for_python_version(config.version),
]
# install Python
env = setup_python(
config,
dependency_constraint_flags,
options.environment,
options.build_frontend,
)
# run the before_build command
if options.before_build:
log.step("Running before_build...")
before_build_prepared = prepare_command(
options.before_build, project=".", package=options.package_dir
)
shell(before_build_prepared, env=env)
log.step("Building wheel...")
if built_wheel_dir.exists():
shutil.rmtree(built_wheel_dir)
built_wheel_dir.mkdir(parents=True)
verbosity_flags = get_build_verbosity_extra_flags(options.build_verbosity)
if options.build_frontend == "pip":
# Path.resolve() is needed. Without it pip wheel may try to fetch package from pypi.org
# see https://github.com/pypa/cibuildwheel/pull/369
call(
[
"python",
"-m",
"pip",
"wheel",
options.package_dir.resolve(),
f"--wheel-dir={built_wheel_dir}",
"--no-deps",
*get_build_verbosity_extra_flags(options.build_verbosity),
],
env=env,
)
elif options.build_frontend == "build":
config_setting = " ".join(verbosity_flags)
build_env = env.copy()
if options.dependency_constraints:
constr = options.dependency_constraints.get_for_python_version(config.version)
build_env["PIP_CONSTRAINT"] = f"'{constr}'"
build_env["VIRTUALENV_PIP"] = get_pip_version(env)
call(
[
"python",
"-m",
"build",
options.package_dir,
"--wheel",
f"--outdir={built_wheel_dir}",
f"--config-setting={config_setting}",
],
env=build_env,
)
else:
assert_never(options.build_frontend)
built_wheel = next(built_wheel_dir.glob("*.whl"))
# repair the wheel
if repaired_wheel_dir.exists():
shutil.rmtree(repaired_wheel_dir)
repaired_wheel_dir.mkdir(parents=True)
if built_wheel.name.endswith("none-any.whl"):
raise NonPlatformWheelError()
if options.repair_command:
log.step("Repairing wheel...")
repair_command_prepared = prepare_command(
options.repair_command, wheel=built_wheel, dest_dir=repaired_wheel_dir
)
shell(repair_command_prepared, env=env)
else:
shutil.move(str(built_wheel), repaired_wheel_dir)
repaired_wheel = next(repaired_wheel_dir.glob("*.whl"))
if options.test_command and options.test_selector(config.identifier):
log.step("Testing wheel...")
# set up a virtual environment to install and test from, to make sure
# there are no dependencies that were pulled in at build time.
call(["pip", "install", "virtualenv", *dependency_constraint_flags], env=env)
venv_dir = Path(tempfile.mkdtemp())
# Use --no-download to ensure determinism by using seed libraries
# built into virtualenv
call(["python", "-m", "virtualenv", "--no-download", venv_dir], env=env)
virtualenv_env = env.copy()
virtualenv_env["PATH"] = os.pathsep.join(
[
str(venv_dir / "Scripts"),
virtualenv_env["PATH"],
]
)
# check that we are using the Python from the virtual environment
call(["where", "python"], env=virtualenv_env)
if options.before_test:
before_test_prepared = prepare_command(
options.before_test,
project=".",
package=options.package_dir,
)
shell(before_test_prepared, env=virtualenv_env)
# install the wheel
call(
["pip", "install", str(repaired_wheel) + options.test_extras],
env=virtualenv_env,
)
# test the wheel
if options.test_requires:
call(["pip", "install"] + options.test_requires, env=virtualenv_env)
# run the tests from c:\, with an absolute path in the command
# (this ensures that Python runs the tests against the installed wheel
# and not the repo code)
test_command_prepared = prepare_command(
options.test_command,
project=Path(".").resolve(),
package=options.package_dir.resolve(),
)
shell(test_command_prepared, cwd="c:\\", env=virtualenv_env)
# clean up
shutil.rmtree(venv_dir)
# we're all done here; move it to output (remove if already exists)
shutil.move(str(repaired_wheel), options.output_dir)
log.build_end()
except subprocess.CalledProcessError as error:
log.step_end_with_error(
f"Command {error.cmd} failed with code {error.returncode}. {error.stdout}"
)
sys.exit(1)
| def build(options: BuildOptions) -> None:
temp_dir = Path(tempfile.mkdtemp(prefix="cibuildwheel"))
built_wheel_dir = temp_dir / "built_wheel"
repaired_wheel_dir = temp_dir / "repaired_wheel"
try:
if options.before_all:
log.step("Running before_all...")
env = options.environment.as_dictionary(prev_environment=os.environ)
before_all_prepared = prepare_command(
options.before_all, project=".", package=options.package_dir
)
shell(before_all_prepared, env=env)
python_configurations = get_python_configurations(
options.build_selector, options.architectures
)
for config in python_configurations:
log.build_start(config.identifier)
dependency_constraint_flags: Sequence[PathOrStr] = []
if options.dependency_constraints:
dependency_constraint_flags = [
"-c",
options.dependency_constraints.get_for_python_version(config.version),
]
# install Python
env = setup_python(
config,
dependency_constraint_flags,
options.environment,
options.build_frontend,
)
# run the before_build command
if options.before_build:
log.step("Running before_build...")
before_build_prepared = prepare_command(
options.before_build, project=".", package=options.package_dir
)
shell(before_build_prepared, env=env)
log.step("Building wheel...")
if built_wheel_dir.exists():
shutil.rmtree(built_wheel_dir)
built_wheel_dir.mkdir(parents=True)
verbosity_flags = get_build_verbosity_extra_flags(options.build_verbosity)
if options.build_frontend == "pip":
# Path.resolve() is needed. Without it pip wheel may try to fetch package from pypi.org
# see https://github.com/pypa/cibuildwheel/pull/369
call(
[
"python",
"-m",
"pip",
"wheel",
options.package_dir.resolve(),
f"--wheel-dir={built_wheel_dir}",
"--no-deps",
*get_build_verbosity_extra_flags(options.build_verbosity),
],
env=env,
)
elif options.build_frontend == "build":
config_setting = " ".join(verbosity_flags)
build_env = env.copy()
if options.dependency_constraints:
constr = options.dependency_constraints.get_for_python_version(config.version)
build_env["PIP_CONSTRAINT"] = f'"{constr}"'
build_env["VIRTUALENV_PIP"] = get_pip_version(env)
call(
[
"python",
"-m",
"build",
options.package_dir,
"--wheel",
f"--outdir={built_wheel_dir}",
f"--config-setting={config_setting}",
],
env=build_env,
)
else:
assert_never(options.build_frontend)
built_wheel = next(built_wheel_dir.glob("*.whl"))
# repair the wheel
if repaired_wheel_dir.exists():
shutil.rmtree(repaired_wheel_dir)
repaired_wheel_dir.mkdir(parents=True)
if built_wheel.name.endswith("none-any.whl"):
raise NonPlatformWheelError()
if options.repair_command:
log.step("Repairing wheel...")
repair_command_prepared = prepare_command(
options.repair_command, wheel=built_wheel, dest_dir=repaired_wheel_dir
)
shell(repair_command_prepared, env=env)
else:
shutil.move(str(built_wheel), repaired_wheel_dir)
repaired_wheel = next(repaired_wheel_dir.glob("*.whl"))
if options.test_command and options.test_selector(config.identifier):
log.step("Testing wheel...")
# set up a virtual environment to install and test from, to make sure
# there are no dependencies that were pulled in at build time.
call(["pip", "install", "virtualenv", *dependency_constraint_flags], env=env)
venv_dir = Path(tempfile.mkdtemp())
# Use --no-download to ensure determinism by using seed libraries
# built into virtualenv
call(["python", "-m", "virtualenv", "--no-download", venv_dir], env=env)
virtualenv_env = env.copy()
virtualenv_env["PATH"] = os.pathsep.join(
[
str(venv_dir / "Scripts"),
virtualenv_env["PATH"],
]
)
# check that we are using the Python from the virtual environment
call(["where", "python"], env=virtualenv_env)
if options.before_test:
before_test_prepared = prepare_command(
options.before_test,
project=".",
package=options.package_dir,
)
shell(before_test_prepared, env=virtualenv_env)
# install the wheel
call(
["pip", "install", str(repaired_wheel) + options.test_extras],
env=virtualenv_env,
)
# test the wheel
if options.test_requires:
call(["pip", "install"] + options.test_requires, env=virtualenv_env)
# run the tests from c:\, with an absolute path in the command
# (this ensures that Python runs the tests against the installed wheel
# and not the repo code)
test_command_prepared = prepare_command(
options.test_command,
project=Path(".").resolve(),
package=options.package_dir.resolve(),
)
shell(test_command_prepared, cwd="c:\\", env=virtualenv_env)
# clean up
shutil.rmtree(venv_dir)
# we're all done here; move it to output (remove if already exists)
shutil.move(str(repaired_wheel), options.output_dir)
log.build_end()
except subprocess.CalledProcessError as error:
log.step_end_with_error(
f"Command {error.cmd} failed with code {error.returncode}. {error.stdout}"
)
sys.exit(1)
|
34,962 | def _should_print_backtrace():
in_pytest = "PYTEST_CURRENT_TEST" in os.environ
tvm_backtrace = os.environ.get("TVM_BACKTRACE", "0")
try:
tvm_backtrace = bool(int(tvm_backtrace))
except ValueError:
raise ValueError(
"invalid value for TVM_BACKTRACE `{tvm_backtrace}`, please set to 0 or 1.")
return in_pytest or tvm_backtrace
| def _should_print_backtrace():
in_pytest = "PYTEST_CURRENT_TEST" in os.environ
tvm_backtrace = os.environ.get("TVM_BACKTRACE", "0")
try:
tvm_backtrace = bool(int(tvm_backtrace))
except ValueError:
raise ValueError(
f"invalid value for TVM_BACKTRACE `{tvm_backtrace}`, please set to 0 or 1.")
return in_pytest or tvm_backtrace
|
13,170 | def test__main__():
# From Python 3 documentation on "Importing a source file directly"
# https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
main_py_path = os.path.join(
os.path.abspath(os.path.dirname(gitlab.__file__)), "__main__.py"
)
# Make the `name` be `__main__` so the if condition will be met
spec = importlib.util.spec_from_file_location(
name="__main__", location=main_py_path
)
module = importlib.util.module_from_spec(spec=spec)
sys.modules["gitlab.__main__"] = module
with pytest.raises(SystemExit) as exc:
spec.loader.exec_module(module)
caught_exception = exc.value
assert caught_exception.code == 2
| def test__main__():
# From Python 3 documentation on "Importing a source file directly"
# https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
main_py_path = os.path.join(
os.path.abspath(os.path.dirname(gitlab.__file__)), "__main__.py"
)
# Make the `name` be `__main__` so the if condition will be met
spec = importlib.util.spec_from_file_location(
name="__main__", location=main_py_path
)
module = importlib.util.module_from_spec(spec=spec)
sys.modules["gitlab.__main__"] = module
with pytest.raises(SystemExit) as exc:
spec.loader.exec_module(module)
assert exc.value.code == 2
|
32,284 | def panorama_query_logs_command(args: dict):
"""
Query logs
"""
log_type = args.get('log-type')
number_of_logs = args.get('number_of_logs')
query = args.get('query')
address_src = args.get('addr-src')
address_dst = args.get('addr-dst')
ip_ = args.get('ip')
zone_src = args.get('zone-src')
zone_dst = args.get('zone-dst')
time_generated = args.get('time-generated')
action = args.get('action')
port_dst = args.get('port-dst')
rule = args.get('rule')
filedigest = args.get('filedigest')
url = args.get('url')
use_polling = args.get('polling', 'false') == 'true'
job_id = args.get('job_id')
cmd = demisto.command()
interval_in_seconds = int(args.get('interval_in_seconds', 60))
timeout = int(args.get('timeout', 600))
script_results = []
if query and (address_src or address_dst or zone_src or zone_dst
or time_generated or action or port_dst or rule or url or filedigest):
raise Exception('Use the free query argument or the fixed search parameters arguments to build your query.')
if use_polling:
ScheduledCommand.raise_error_if_not_supported()
if not job_id:
# create new search
result = panorama_query_logs(log_type, number_of_logs, query, address_src, address_dst, ip_,
zone_src, zone_dst, time_generated, action,
port_dst, rule, url, filedigest)
if result['response']['@status'] == 'error':
if 'msg' in result['response'] and 'line' in result['response']['msg']:
message = '. Reason is: ' + result['response']['msg']['line']
raise Exception('Query logs failed' + message)
else:
raise Exception('Query logs failed.')
if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result']:
raise Exception('Missing JobID in response.')
job_id = result['response']['result']['job']
polling_args = {
'job_id': job_id,
**args
}
scheduled_command = ScheduledCommand(
command=cmd,
next_run_in_seconds=interval_in_seconds,
args=polling_args,
timeout_in_seconds=timeout
)
readable_output = f"Panorama log query search created successfully (Job ID: {job_id})"
script_results.append(CommandResults(
readable_output=readable_output,
scheduled_command=scheduled_command
))
else:
result = panorama_get_traffic_logs(job_id)
if result['response']['@status'] == 'error':
if 'msg' in result['response'] and 'line' in result['response']['msg']:
message = '. Reason is: ' + result['response']['msg']['line']
raise Exception('Query logs failed' + message)
else:
raise Exception('Query logs failed.')
if result['response']['result']['job']['status'] != "FIN":
polling_args = {
'job_id': job_id,
**args
}
scheduled_command = ScheduledCommand(
command=cmd,
next_run_in_seconds=interval_in_seconds,
args=polling_args,
timeout_in_seconds=timeout
)
script_results.append(CommandResults(
scheduled_command=scheduled_command
))
else:
result = panorama_get_traffic_logs(job_id)
if result['response']['@status'] == 'error':
if 'msg' in result['response'] and 'line' in result['response']['msg']:
message = '. Reason is: ' + result['response']['msg']['line']
raise Exception('Query logs failed' + message)
else:
raise Exception('Query logs failed.')
query_logs_output = {
'JobID': job_id,
'Status': 'Complete'
}
if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result'] \
or 'status' not in result['response']['result']['job']:
raise Exception('Missing JobID status in response.')
if 'response' not in result or 'result' not in result['response'] or 'log' not in result['response'][
'result'] \
or 'logs' not in result['response']['result']['log']:
raise Exception('Missing logs in response.')
logs = result['response']['result']['log']['logs']
if logs['@count'] == '0':
human_readable = f'No {log_type} logs matched the query.'
else:
pretty_logs = prettify_logs(logs['entry'])
query_logs_output['Logs'] = pretty_logs
human_readable = tableToMarkdown(f'Query {log_type} Logs:', query_logs_output['Logs'],
['TimeGenerated', 'SourceAddress', 'DestinationAddress', 'Application',
'Action', 'Rule', 'URLOrFilename'], removeNull=True)
script_results.append(CommandResults(
outputs_prefix='Panorama.Monitor',
outputs_key_field='JobID',
outputs=result,
readable_output=human_readable,
ignore_auto_extract=True))
else:
result = panorama_query_logs(log_type, number_of_logs, query, address_src, address_dst, ip_,
zone_src, zone_dst, time_generated, action,
port_dst, rule, url, filedigest)
if result['response']['@status'] == 'error':
if 'msg' in result['response'] and 'line' in result['response']['msg']:
raise Exception(f"Query logs failed. Reason is: {result['response']['msg']['line']}")
else:
raise Exception('Query logs failed.')
if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result']:
raise Exception('Missing JobID in response.')
query_logs_output = {
'JobID': result['response']['result']['job'],
'Status': 'Pending',
'LogType': log_type,
'Message': result['response']['result']['msg']['line']
}
script_results.append({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': result,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Query Logs:', query_logs_output, ['JobID', 'Status'], removeNull=True),
'EntryContext': {"Panorama.Monitor(val.JobID == obj.JobID)": query_logs_output}
})
return_results(script_results)
| def panorama_query_logs_command(args: dict):
"""
Query logs
"""
log_type = args.get('log-type')
number_of_logs = args.get('number_of_logs')
query = args.get('query')
address_src = args.get('addr-src')
address_dst = args.get('addr-dst')
ip_ = args.get('ip')
zone_src = args.get('zone-src')
zone_dst = args.get('zone-dst')
time_generated = args.get('time-generated')
action = args.get('action')
port_dst = args.get('port-dst')
rule = args.get('rule')
filedigest = args.get('filedigest')
url = args.get('url')
use_polling = args.get('polling', 'false') == 'true'
job_id = args.get('job_id')
cmd = demisto.command()
interval_in_seconds = int(args.get('interval_in_seconds', 60))
timeout = int(args.get('timeout', 600))
script_results = []
if query and (address_src or address_dst or zone_src or zone_dst
or time_generated or action or port_dst or rule or url or filedigest):
raise Exception('Use the free query argument or the fixed search parameters arguments to build your query.')
if use_polling:
ScheduledCommand.raise_error_if_not_supported()
if not job_id:
# create new search
result = panorama_query_logs(log_type, number_of_logs, query, address_src, address_dst, ip_,
zone_src, zone_dst, time_generated, action,
port_dst, rule, url, filedigest)
if result['response']['@status'] == 'error':
if 'msg' in result['response'] and 'line' in result['response']['msg']:
message = '. Reason is: ' + result['response']['msg']['line']
raise Exception('Query logs failed' + message)
else:
raise Exception('Query logs failed.')
if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result']:
raise Exception('Missing JobID in response.')
job_id = result['response']['result']['job']
polling_args = {
'job_id': job_id,
**args
}
scheduled_command = ScheduledCommand(
command=cmd,
next_run_in_seconds=interval_in_seconds,
args=polling_args,
timeout_in_seconds=timeout
)
readable_output = f"Panorama log query search created successfully (Job ID: {job_id})"
script_results.append(CommandResults(
readable_output=readable_output,
scheduled_command=scheduled_command
))
else:
result = panorama_get_traffic_logs(job_id)
if result['response']['@status'] == 'error':
if 'msg' in result['response'] and 'line' in result['response']['msg']:
message = '. Reason is: ' + result['response']['msg']['line']
raise Exception('Query logs failed' + message)
else:
raise Exception('Query logs failed.')
if result['response']['result']['job']['status'] != "FIN":
polling_args = {
'job_id': job_id,
**args
}
scheduled_command = ScheduledCommand(
command=cmd,
next_run_in_seconds=interval_in_seconds,
args=polling_args,
timeout_in_seconds=timeout
)
script_results.append(CommandResults(
scheduled_command=scheduled_command
))
else:
result = panorama_get_traffic_logs(job_id)
if result['response']['@status'] == 'error':
if 'msg' in result['response'] and 'line' in result['response']['msg']:
message = '. Reason is: ' + result['response']['msg']['line']
raise Exception('Query logs failed' + message)
else:
raise Exception('Query logs failed.')
query_logs_output = {
'JobID': job_id,
'Status': 'Complete'
}
if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result'] \
or 'status' not in result['response']['result']['job']:
raise Exception('Missing JobID status in response {result["response"]}')
if 'response' not in result or 'result' not in result['response'] or 'log' not in result['response'][
'result'] \
or 'logs' not in result['response']['result']['log']:
raise Exception('Missing logs in response.')
logs = result['response']['result']['log']['logs']
if logs['@count'] == '0':
human_readable = f'No {log_type} logs matched the query.'
else:
pretty_logs = prettify_logs(logs['entry'])
query_logs_output['Logs'] = pretty_logs
human_readable = tableToMarkdown(f'Query {log_type} Logs:', query_logs_output['Logs'],
['TimeGenerated', 'SourceAddress', 'DestinationAddress', 'Application',
'Action', 'Rule', 'URLOrFilename'], removeNull=True)
script_results.append(CommandResults(
outputs_prefix='Panorama.Monitor',
outputs_key_field='JobID',
outputs=result,
readable_output=human_readable,
ignore_auto_extract=True))
else:
result = panorama_query_logs(log_type, number_of_logs, query, address_src, address_dst, ip_,
zone_src, zone_dst, time_generated, action,
port_dst, rule, url, filedigest)
if result['response']['@status'] == 'error':
if 'msg' in result['response'] and 'line' in result['response']['msg']:
raise Exception(f"Query logs failed. Reason is: {result['response']['msg']['line']}")
else:
raise Exception('Query logs failed.')
if 'response' not in result or 'result' not in result['response'] or 'job' not in result['response']['result']:
raise Exception('Missing JobID in response.')
query_logs_output = {
'JobID': result['response']['result']['job'],
'Status': 'Pending',
'LogType': log_type,
'Message': result['response']['result']['msg']['line']
}
script_results.append({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': result,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Query Logs:', query_logs_output, ['JobID', 'Status'], removeNull=True),
'EntryContext': {"Panorama.Monitor(val.JobID == obj.JobID)": query_logs_output}
})
return_results(script_results)
|
35,670 | def main(args):
if args.weights and PM is None:
raise ImportError("The prototype module couldn't be found. Please install the latest torchvision nightly.")
if args.output_dir:
utils.mkdir(args.output_dir)
utils.init_distributed_mode(args)
print(args)
print("torch version: ", torch.__version__)
print("torchvision version: ", torchvision.__version__)
device = torch.device(args.device)
torch.backends.cudnn.benchmark = True
# Data loading code
print("Loading data")
traindir = os.path.join(args.data_path, args.train_dir)
valdir = os.path.join(args.data_path, args.val_dir)
print("Loading training data")
st = time.time()
cache_path = _get_cache_path(traindir)
transform_train = presets.VideoClassificationPresetTrain((128, 171), (112, 112))
if args.cache_dataset and os.path.exists(cache_path):
print(f"Loading dataset_train from {cache_path}")
dataset, _ = torch.load(cache_path)
dataset.transform = transform_train
else:
if args.distributed:
print("It is recommended to pre-compute the dataset cache on a single-gpu first, as it will be faster")
dataset = torchvision.datasets.Kinetics400(
traindir,
frames_per_clip=args.clip_len,
step_between_clips=1,
transform=transform_train,
frame_rate=15,
extensions=(
"avi",
"mp4",
),
)
if args.cache_dataset:
print(f"Saving dataset_train to {cache_path}")
utils.mkdir(os.path.dirname(cache_path))
utils.save_on_master((dataset, traindir), cache_path)
print("Took", time.time() - st)
print("Loading validation data")
cache_path = _get_cache_path(valdir)
if not args.weights:
transform_test = presets.VideoClassificationPresetEval((128, 171), (112, 112))
else:
weights = PM.get_weight(args.weights)
transform_test = weights.transforms()
if args.cache_dataset and os.path.exists(cache_path):
print(f"Loading dataset_test from {cache_path}")
dataset_test, _ = torch.load(cache_path)
dataset_test.transform = transform_test
else:
if args.distributed:
print("It is recommended to pre-compute the dataset cache on a single-gpu first, as it will be faster")
dataset_test = torchvision.datasets.Kinetics400(
valdir,
frames_per_clip=args.clip_len,
step_between_clips=1,
transform=transform_test,
frame_rate=15,
extensions=(
"avi",
"mp4",
),
)
if args.cache_dataset:
print(f"Saving dataset_test to {cache_path}")
utils.mkdir(os.path.dirname(cache_path))
utils.save_on_master((dataset_test, valdir), cache_path)
print("Creating data loaders")
train_sampler = RandomClipSampler(dataset.video_clips, args.clips_per_video)
test_sampler = UniformClipSampler(dataset_test.video_clips, args.clips_per_video)
if args.distributed:
train_sampler = DistributedSampler(train_sampler)
test_sampler = DistributedSampler(test_sampler)
data_loader = torch.utils.data.DataLoader(
dataset,
batch_size=args.batch_size,
sampler=train_sampler,
num_workers=args.workers,
pin_memory=True,
collate_fn=collate_fn,
)
data_loader_test = torch.utils.data.DataLoader(
dataset_test,
batch_size=args.batch_size,
sampler=test_sampler,
num_workers=args.workers,
pin_memory=True,
collate_fn=collate_fn,
)
print("Creating model")
if not args.weights:
model = torchvision.models.video.__dict__[args.model](pretrained=args.pretrained)
else:
model = PM.video.__dict__[args.model](weights=args.weights)
model.to(device)
if args.distributed and args.sync_bn:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
criterion = nn.CrossEntropyLoss()
lr = args.lr * args.world_size
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=args.momentum, weight_decay=args.weight_decay)
scaler = torch.cuda.amp.GradScaler() if args.amp else None
# convert scheduler to be per iteration, not per epoch, for warmup that lasts
# between different epochs
iters_per_epoch = len(data_loader)
lr_milestones = [iters_per_epoch * (m - args.lr_warmup_epochs) for m in args.lr_milestones]
main_lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=lr_milestones, gamma=args.lr_gamma)
if args.lr_warmup_epochs > 0:
warmup_iters = iters_per_epoch * args.lr_warmup_epochs
args.lr_warmup_method = args.lr_warmup_method.lower()
if args.lr_warmup_method == "linear":
warmup_lr_scheduler = torch.optim.lr_scheduler.LinearLR(
optimizer, start_factor=args.lr_warmup_decay, total_iters=warmup_iters
)
elif args.lr_warmup_method == "constant":
warmup_lr_scheduler = torch.optim.lr_scheduler.ConstantLR(
optimizer, factor=args.lr_warmup_decay, total_iters=warmup_iters
)
else:
raise RuntimeError(
f"Invalid warmup lr method '{args.lr_warmup_method}'. Only linear and constant are supported."
)
lr_scheduler = torch.optim.lr_scheduler.SequentialLR(
optimizer, schedulers=[warmup_lr_scheduler, main_lr_scheduler], milestones=[warmup_iters]
)
else:
lr_scheduler = main_lr_scheduler
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
if args.resume:
checkpoint = torch.load(args.resume, map_location="cpu")
model_without_ddp.load_state_dict(checkpoint["model"])
optimizer.load_state_dict(checkpoint["optimizer"])
lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
args.start_epoch = checkpoint["epoch"] + 1
if args.amp:
scaler.load_state_dict(checkpoint["scaler"])
if args.test_only:
evaluate(model, criterion, data_loader_test, device=device)
return
print("Start training")
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
train_one_epoch(
model, criterion, optimizer, lr_scheduler, data_loader, device, epoch, args.print_freq, args.amp
)
evaluate(model, criterion, data_loader_test, device=device)
if args.output_dir:
checkpoint = {
"model": model_without_ddp.state_dict(),
"optimizer": optimizer.state_dict(),
"lr_scheduler": lr_scheduler.state_dict(),
"epoch": epoch,
"args": args,
}
if args.amp:
checkpoint["scaler"] = scaler.state_dict()
utils.save_on_master(checkpoint, os.path.join(args.output_dir, f"model_{epoch}.pth"))
utils.save_on_master(checkpoint, os.path.join(args.output_dir, "checkpoint.pth"))
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print(f"Training time {total_time_str}")
| def main(args):
if args.weights and PM is None:
raise ImportError("The prototype module couldn't be found. Please install the latest torchvision nightly.")
if args.output_dir:
utils.mkdir(args.output_dir)
utils.init_distributed_mode(args)
print(args)
print("torch version: ", torch.__version__)
print("torchvision version: ", torchvision.__version__)
device = torch.device(args.device)
torch.backends.cudnn.benchmark = True
# Data loading code
print("Loading data")
traindir = os.path.join(args.data_path, args.train_dir)
valdir = os.path.join(args.data_path, args.val_dir)
print("Loading training data")
st = time.time()
cache_path = _get_cache_path(traindir)
transform_train = presets.VideoClassificationPresetTrain((128, 171), (112, 112))
if args.cache_dataset and os.path.exists(cache_path):
print(f"Loading dataset_train from {cache_path}")
dataset, _ = torch.load(cache_path)
dataset.transform = transform_train
else:
if args.distributed:
print("It is recommended to pre-compute the dataset cache on a single-gpu first, as it will be faster")
dataset = torchvision.datasets.Kinetics400(
traindir,
frames_per_clip=args.clip_len,
step_between_clips=1,
transform=transform_train,
frame_rate=15,
extensions=(
"avi",
"mp4",
),
)
if args.cache_dataset:
print(f"Saving dataset_train to {cache_path}")
utils.mkdir(os.path.dirname(cache_path))
utils.save_on_master((dataset, traindir), cache_path)
print("Took", time.time() - st)
print("Loading validation data")
cache_path = _get_cache_path(valdir)
if not args.weights:
transform_test = presets.VideoClassificationPresetEval((128, 171), (112, 112))
else:
weights = PM.get_weight(args.weights)
transform_test = weights.transforms()
if args.cache_dataset and os.path.exists(cache_path):
print(f"Loading dataset_test from {cache_path}")
dataset_test, _ = torch.load(cache_path)
dataset_test.transform = transform_test
else:
if args.distributed:
print("It is recommended to pre-compute the dataset cache on a single-gpu first, as it will be faster")
dataset_test = torchvision.datasets.Kinetics400(
valdir,
frames_per_clip=args.clip_len,
step_between_clips=1,
transform=transform_test,
frame_rate=15,
extensions=(
"avi",
"mp4",
),
)
if args.cache_dataset:
print(f"Saving dataset_test to {cache_path}")
utils.mkdir(os.path.dirname(cache_path))
utils.save_on_master((dataset_test, valdir), cache_path)
print("Creating data loaders")
train_sampler = RandomClipSampler(dataset.video_clips, args.clips_per_video)
test_sampler = UniformClipSampler(dataset_test.video_clips, args.clips_per_video)
if args.distributed:
train_sampler = DistributedSampler(train_sampler)
test_sampler = DistributedSampler(test_sampler)
data_loader = torch.utils.data.DataLoader(
dataset,
batch_size=args.batch_size,
sampler=train_sampler,
num_workers=args.workers,
pin_memory=True,
collate_fn=collate_fn,
)
data_loader_test = torch.utils.data.DataLoader(
dataset_test,
batch_size=args.batch_size,
sampler=test_sampler,
num_workers=args.workers,
pin_memory=True,
collate_fn=collate_fn,
)
print("Creating model")
if not args.weights:
model = torchvision.models.video.__dict__[args.model](pretrained=args.pretrained)
else:
model = PM.video.__dict__[args.model](weights=args.weights)
model.to(device)
if args.distributed and args.sync_bn:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
criterion = nn.CrossEntropyLoss()
lr = args.lr * args.world_size
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=args.momentum, weight_decay=args.weight_decay)
scaler = torch.cuda.amp.GradScaler() if args.amp else None
# convert scheduler to be per iteration, not per epoch, for warmup that lasts
# between different epochs
iters_per_epoch = len(data_loader)
lr_milestones = [iters_per_epoch * (m - args.lr_warmup_epochs) for m in args.lr_milestones]
main_lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=lr_milestones, gamma=args.lr_gamma)
if args.lr_warmup_epochs > 0:
warmup_iters = iters_per_epoch * args.lr_warmup_epochs
args.lr_warmup_method = args.lr_warmup_method.lower()
if args.lr_warmup_method == "linear":
warmup_lr_scheduler = torch.optim.lr_scheduler.LinearLR(
optimizer, start_factor=args.lr_warmup_decay, total_iters=warmup_iters
)
elif args.lr_warmup_method == "constant":
warmup_lr_scheduler = torch.optim.lr_scheduler.ConstantLR(
optimizer, factor=args.lr_warmup_decay, total_iters=warmup_iters
)
else:
raise RuntimeError(
f"Invalid warmup lr method '{args.lr_warmup_method}'. Only linear and constant are supported."
)
lr_scheduler = torch.optim.lr_scheduler.SequentialLR(
optimizer, schedulers=[warmup_lr_scheduler, main_lr_scheduler], milestones=[warmup_iters]
)
else:
lr_scheduler = main_lr_scheduler
model_without_ddp = model
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
model_without_ddp = model.module
if args.resume:
checkpoint = torch.load(args.resume, map_location="cpu")
model_without_ddp.load_state_dict(checkpoint["model"])
optimizer.load_state_dict(checkpoint["optimizer"])
lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
args.start_epoch = checkpoint["epoch"] + 1
if args.amp:
scaler.load_state_dict(checkpoint["scaler"])
if args.test_only:
evaluate(model, criterion, data_loader_test, device=device)
return
print("Start training")
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
train_one_epoch(
model, criterion, optimizer, lr_scheduler, data_loader, device, epoch, args.print_freq, scaler
)
evaluate(model, criterion, data_loader_test, device=device)
if args.output_dir:
checkpoint = {
"model": model_without_ddp.state_dict(),
"optimizer": optimizer.state_dict(),
"lr_scheduler": lr_scheduler.state_dict(),
"epoch": epoch,
"args": args,
}
if args.amp:
checkpoint["scaler"] = scaler.state_dict()
utils.save_on_master(checkpoint, os.path.join(args.output_dir, f"model_{epoch}.pth"))
utils.save_on_master(checkpoint, os.path.join(args.output_dir, "checkpoint.pth"))
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print(f"Training time {total_time_str}")
|
27,863 | def _backprop(outputs, inputs, grad_required, retain_grad, grads, loss_scale):
candidate_funcs, push_candidate, pop_candidate = _get_ordered_func_heap()
for y in outputs:
creator = y.creator_node
if creator is not None:
push_candidate(creator)
input_nodes = set(x.node for x in inputs)
ret_dict = {}
base_hooks = chainer.get_function_hooks().values()
while candidate_funcs:
func = pop_candidate()
# Collect the gradients w.r.t. the outputs
ys = [y() for y in func.outputs] # access via weak ref
gys = tuple([grads.pop(y) for y in ys])
for node, gy in six.moves.zip(ys, gys):
if node is not None:
if node in input_nodes:
ret_dict[node] = gy
if retain_grad:
y = node.get_variable_or_none()
if y is not None:
y.grad_var = gy
y._loss_scale = loss_scale
# Collect the gradients w.r.t. the inputs
input_indexes = []
x_grads = collections.OrderedDict()
for i, x in enumerate(func.inputs):
if x not in grad_required:
continue
input_indexes.append(i)
if x not in x_grads:
x_grads[x] = grads.get_as_list(x)
if not input_indexes:
continue
input_indexes = tuple(input_indexes)
# Do backward
# Call pre-backward hooks
if func._n_local_function_hooks != 0:
local_hooks = collections.OrderedDict(chainer.get_function_hooks())
local_hooks.update(func.local_function_hooks)
hooks = local_hooks.values() # avoid six for performance
else:
hooks = base_hooks
in_data = [x.data for x in func.inputs]
out_grad_data = [None if g is None else g.data for g in gys]
with cuda.get_device_from_array(*in_data):
for hook in hooks:
hook.backward_preprocess(
func, tuple(in_data), tuple(out_grad_data))
_backprop_utils.backprop_step(func, input_indexes, gys, x_grads)
# Call post-backward hooks
for hook in hooks:
hook.backward_postprocess(
func, tuple(in_data), tuple(out_grad_data))
# Update grads
for node, g in x_grads.items():
if not g: # gradient == None
continue
creator = node.creator_node
if creator is not None:
push_candidate(creator)
for x in input_nodes:
if x not in ret_dict:
ret_dict[x] = grads.pop(x)
return ret_dict
| def _backprop(outputs, inputs, grad_required, retain_grad, grads, loss_scale):
candidate_funcs, push_candidate, pop_candidate = _get_ordered_func_heap()
for y in outputs:
creator = y.creator_node
if creator is not None:
push_candidate(creator)
input_nodes = set(x.node for x in inputs)
ret_dict = {}
base_hooks = chainer.get_function_hooks().values()
while candidate_funcs:
func = pop_candidate()
# Collect the gradients w.r.t. the outputs
ys = [y() for y in func.outputs] # access via weak ref
gys = tuple([grads.pop(y) for y in ys])
for node, gy in six.moves.zip(ys, gys):
if node is not None:
if node in input_nodes:
ret_dict[node] = gy
if retain_grad:
y = node.get_variable_or_none()
if y is not None:
y.grad_var = gy
y._loss_scale = loss_scale
# Collect the gradients w.r.t. the inputs
input_indexes = []
x_grads = collections.OrderedDict()
for i, x in enumerate(func.inputs):
if x not in grad_required:
continue
input_indexes.append(i)
if x not in x_grads:
x_grads[x] = grads.get_as_list(x)
if not input_indexes:
continue
input_indexes = tuple(input_indexes)
# Do backward
# Call pre-backward hooks
if func._n_local_function_hooks != 0:
local_hooks = collections.OrderedDict(chainer.get_function_hooks())
local_hooks.update(func.local_function_hooks)
hooks = local_hooks.values() # avoid six for performance
else:
hooks = base_hooks
in_data = [x.data for x in func.inputs]
out_grad_data = [None if g is None else g.data for g in gys]
out_grad_data = [None if g is None else g.array for g in gys]
with cuda.get_device_from_array(*in_data):
for hook in hooks:
hook.backward_preprocess(
func, tuple(in_data), tuple(out_grad_data))
_backprop_utils.backprop_step(func, input_indexes, gys, x_grads)
# Call post-backward hooks
for hook in hooks:
hook.backward_postprocess(
func, tuple(in_data), tuple(out_grad_data))
# Update grads
for node, g in x_grads.items():
if not g: # gradient == None
continue
creator = node.creator_node
if creator is not None:
push_candidate(creator)
for x in input_nodes:
if x not in ret_dict:
ret_dict[x] = grads.pop(x)
return ret_dict
|
24,873 | def check_config(machine, old_conf, new_conf):
"""Example code that will trigger the message
Given an if-elif construct
When the body of the if ends with an elif
Then the message confusing-consecutive-elif must be triggered.
"""
if old_conf:
if not new_conf:
machine.disable()
elif old_conf.value != new_conf.value:
machine.disable()
machine.enable(new_conf.value)
elif new_conf: # [confusing-consecutive-elif]
machine.enable(new_conf.value)
| def triggered_if_if_block_ends_with_elif(machine, old_conf, new_conf):
"""Example code that will trigger the message
Given an if-elif construct
When the body of the if ends with an elif
Then the message confusing-consecutive-elif must be triggered.
"""
if old_conf:
if not new_conf:
machine.disable()
elif old_conf.value != new_conf.value:
machine.disable()
machine.enable(new_conf.value)
elif new_conf: # [confusing-consecutive-elif]
machine.enable(new_conf.value)
|
20,544 | def main(argv=None):
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = complete_test = arguments.complete
set_loglevel(verbose=verbose)
print("SCT info:")
print("- version: {}".format(__version__))
print("- path: {0}".format(__sct_dir__))
# initialization
install_software = 0
e = 0
os_running = 'not identified'
# complete test
if complete_test:
print(run_proc('date', verbose))
print(run_proc('whoami', verbose))
print(run_proc('pwd', verbose))
bash_profile = os.path.expanduser("~/.bash_profile")
if os.path.isfile(bash_profile):
with io.open(bash_profile, "r") as f:
print(f.read())
bashrc = os.path.expanduser("~/.bashrc")
if os.path.isfile(bashrc):
with io.open(bashrc, "r") as f:
print(f.read())
# check OS
platform_running = sys.platform
if platform_running.find('darwin') != -1:
os_running = 'osx'
elif platform_running.find('linux') != -1:
os_running = 'linux'
print('OS: ' + os_running + ' (' + platform.platform() + ')')
print('CPU cores: Available: {}, Used by ITK functions: {}'.format(psutil.cpu_count(), int(os.getenv('ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS', 0))))
ram = psutil.virtual_memory()
factor_MB = 1024 * 1024
print('RAM: Total: {}MB, Used: {}MB, Available: {}MB'.format(ram.total // factor_MB, ram.used // factor_MB, ram.available // factor_MB))
if arguments.short:
sys.exit()
# check if Python path is within SCT path
print_line('Check Python executable')
path_python = sys.executable
if __sct_dir__ in path_python:
print_ok()
print(' Using bundled python {} at {}'.format(sys.version, path_python))
else:
print_warning()
print(' Using system python which is unsupported: {}'.format(path_python))
# check if data folder is empty
print_line('Check if data are installed')
if os.path.isdir(__data_dir__):
print_ok()
else:
print_fail()
for dep_pkg, dep_ver_spec in get_dependencies():
if dep_ver_spec is None:
print_line('Check if %s is installed' % (dep_pkg))
else:
print_line('Check if %s (%s) is installed' % (dep_pkg, dep_ver_spec))
try:
module_name, suppress_stderr = resolve_module(dep_pkg)
module = module_import(module_name, suppress_stderr)
version = get_version(module)
if dep_ver_spec is not None and version is not None and dep_ver_spec != version:
print_warning(more=(" (%s != %s mandated version))" % (version, dep_ver_spec)))
elif version is not None:
print_ok(more=(" (%s)" % version))
else:
print_ok()
except Exception as err:
print_fail()
print(f"An error occured while importing module {dep_pkg} -> {err}")
print(f"Full traceback: {traceback.format_exc()}")
install_software = 1
print_line('Check if spinalcordtoolbox is installed')
try:
importlib.import_module('spinalcordtoolbox')
print_ok()
except ImportError:
print_fail()
print("Unable to import spinalcordtoolbox module!?")
install_software = 1
# Check ANTs integrity
print_line('Check ANTs compatibility with OS ')
cmd = 'isct_test_ants'
status, output = run_proc(cmd, verbose=0, raise_exception=False)
if status == 0:
print_ok()
else:
print_fail()
print(output)
e = 1
if complete_test:
print('>> ' + cmd)
print((status, output), '\n')
# check PropSeg compatibility with OS
print_line('Check PropSeg compatibility with OS ')
status, output = run_proc('isct_propseg', verbose=0, raise_exception=False, is_sct_binary=True)
if status in (0, 1):
print_ok()
else:
print_fail()
print(output)
e = 1
if complete_test:
print((status, output), '\n')
print_line('Check if figure can be opened with matplotlib')
try:
import matplotlib
import matplotlib.pyplot as plt
# If matplotlib is using a GUI backend, the default 'show()` function will be overridden
# See: https://github.com/matplotlib/matplotlib/issues/20281#issuecomment-846467732
fig = plt.figure()
if getattr(fig.canvas.manager.show, "__func__", None) != matplotlib.backend_bases.FigureManagerBase.show:
print_ok(f" (Using GUI backend: '{matplotlib.get_backend()}')")
else:
print_fail(f" (Using non-GUI backend '{matplotlib.get_backend()}')")
except Exception as err:
print_fail()
print(err)
print_line('Check if figure can be opened with PyQt')
if sys.platform == "linux" and 'DISPLAY' not in os.environ:
print_fail(" ($DISPLAY not set on X11-supporting system)")
else:
try:
from PyQt5.QtWidgets import QApplication, QLabel
_ = QApplication([])
label = QLabel('Hello World!')
label.show()
label.close()
print_ok()
except Exception as err:
print_fail()
print(err)
# Check version of FSLeyes
print_line('Check FSLeyes version')
cmd = 'fsleyes --version'
status, output = run_proc(cmd, verbose=0, raise_exception=False)
# Exit code 0 - command has run successfully
if status == 0:
# Fetch only version number (full output of 'fsleyes --version' is 'fsleyes/FSLeyes version 0.34.2')
fsleyes_version = output.split()[2]
print_ok(more=(" (%s)" % fsleyes_version))
# Exit code 126 - Command invoked cannot execute (permission problem or command is not an executable)
elif status == 126:
print('Command not executable. Please check permissions of fsleyes command.')
# Exit code 127 - Command not found (possible problem with $PATH)
elif status == 127:
print('Command not found. If you installed FSLeyes as part of FSL package, please check that FSL is included '
'in $PATH variable. If you installed FSLeyes using conda environment, make sure that the environment is '
'activated. If you do not have FSLeyes installed, consider its installation to easily visualize '
'processing outputs and/or to use SCT within FSLeyes. More info at: '
'https://spinalcordtoolbox.com/en/latest/user_section/fsleyes.html')
# All other exit codes
else:
print(f'Exit code {status} occurred. Please report this issue on SCT GitHub: '
f'https://github.com/spinalcordtoolbox/spinalcordtoolbox/issues')
if complete_test:
print(output)
print('')
sys.exit(e + install_software)
| def main(argv=None):
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = complete_test = arguments.complete
set_loglevel(verbose=verbose)
print("SCT info:")
print("- version: {}".format(__version__))
print("- path: {0}".format(__sct_dir__))
# initialization
install_software = 0
e = 0
os_running = 'not identified'
# complete test
if complete_test:
print(run_proc('date', verbose))
print(run_proc('whoami', verbose))
print(run_proc('pwd', verbose))
bash_profile = os.path.expanduser("~/.bash_profile")
if os.path.isfile(bash_profile):
with io.open(bash_profile, "r") as f:
print(f.read())
bashrc = os.path.expanduser("~/.bashrc")
if os.path.isfile(bashrc):
with io.open(bashrc, "r") as f:
print(f.read())
# check OS
platform_running = sys.platform
if platform_running.find('darwin') != -1:
os_running = 'osx'
elif platform_running.find('linux') != -1:
os_running = 'linux'
print('OS: ' + os_running + ' (' + platform.platform() + ')')
print('CPU cores: Available: {}, Used by ITK functions: {}'.format(psutil.cpu_count(), int(os.getenv('ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS', 0))))
ram = psutil.virtual_memory()
factor_MB = 1024 * 1024
print('RAM: Total: {}MB, Used: {}MB, Available: {}MB'.format(ram.total // factor_MB, ram.used // factor_MB, ram.available // factor_MB))
if arguments.short:
sys.exit()
# check if Python path is within SCT path
print_line('Check Python executable')
path_python = sys.executable
if __sct_dir__ in path_python:
print_ok()
print(' Using bundled python {} at {}'.format(sys.version, path_python))
else:
print_warning()
print(' Using system python which is unsupported: {}'.format(path_python))
# check if data folder is empty
print_line('Check if data are installed')
if os.path.isdir(__data_dir__):
print_ok()
else:
print_fail()
for dep_pkg, dep_ver_spec in get_dependencies():
if dep_ver_spec is None:
print_line('Check if %s is installed' % (dep_pkg))
else:
print_line('Check if %s (%s) is installed' % (dep_pkg, dep_ver_spec))
try:
module_name, suppress_stderr = resolve_module(dep_pkg)
module = module_import(module_name, suppress_stderr)
version = get_version(module)
if dep_ver_spec is not None and version is not None and dep_ver_spec != version:
print_warning(more=(" (%s != %s mandated version))" % (version, dep_ver_spec)))
elif version is not None:
print_ok(more=(" (%s)" % version))
else:
print_ok()
except Exception as err:
print_fail()
print(f"An error occured while importing module {dep_pkg} -> {err}")
print(f"Full traceback: {traceback.format_exc()}")
install_software = 1
print_line('Check if spinalcordtoolbox is installed')
try:
importlib.import_module('spinalcordtoolbox')
print_ok()
except ImportError:
print_fail("Unable to import spinalcordtoolbox module.")
install_software = 1
# Check ANTs integrity
print_line('Check ANTs compatibility with OS ')
cmd = 'isct_test_ants'
status, output = run_proc(cmd, verbose=0, raise_exception=False)
if status == 0:
print_ok()
else:
print_fail()
print(output)
e = 1
if complete_test:
print('>> ' + cmd)
print((status, output), '\n')
# check PropSeg compatibility with OS
print_line('Check PropSeg compatibility with OS ')
status, output = run_proc('isct_propseg', verbose=0, raise_exception=False, is_sct_binary=True)
if status in (0, 1):
print_ok()
else:
print_fail()
print(output)
e = 1
if complete_test:
print((status, output), '\n')
print_line('Check if figure can be opened with matplotlib')
try:
import matplotlib
import matplotlib.pyplot as plt
# If matplotlib is using a GUI backend, the default 'show()` function will be overridden
# See: https://github.com/matplotlib/matplotlib/issues/20281#issuecomment-846467732
fig = plt.figure()
if getattr(fig.canvas.manager.show, "__func__", None) != matplotlib.backend_bases.FigureManagerBase.show:
print_ok(f" (Using GUI backend: '{matplotlib.get_backend()}')")
else:
print_fail(f" (Using non-GUI backend '{matplotlib.get_backend()}')")
except Exception as err:
print_fail()
print(err)
print_line('Check if figure can be opened with PyQt')
if sys.platform == "linux" and 'DISPLAY' not in os.environ:
print_fail(" ($DISPLAY not set on X11-supporting system)")
else:
try:
from PyQt5.QtWidgets import QApplication, QLabel
_ = QApplication([])
label = QLabel('Hello World!')
label.show()
label.close()
print_ok()
except Exception as err:
print_fail()
print(err)
# Check version of FSLeyes
print_line('Check FSLeyes version')
cmd = 'fsleyes --version'
status, output = run_proc(cmd, verbose=0, raise_exception=False)
# Exit code 0 - command has run successfully
if status == 0:
# Fetch only version number (full output of 'fsleyes --version' is 'fsleyes/FSLeyes version 0.34.2')
fsleyes_version = output.split()[2]
print_ok(more=(" (%s)" % fsleyes_version))
# Exit code 126 - Command invoked cannot execute (permission problem or command is not an executable)
elif status == 126:
print('Command not executable. Please check permissions of fsleyes command.')
# Exit code 127 - Command not found (possible problem with $PATH)
elif status == 127:
print('Command not found. If you installed FSLeyes as part of FSL package, please check that FSL is included '
'in $PATH variable. If you installed FSLeyes using conda environment, make sure that the environment is '
'activated. If you do not have FSLeyes installed, consider its installation to easily visualize '
'processing outputs and/or to use SCT within FSLeyes. More info at: '
'https://spinalcordtoolbox.com/en/latest/user_section/fsleyes.html')
# All other exit codes
else:
print(f'Exit code {status} occurred. Please report this issue on SCT GitHub: '
f'https://github.com/spinalcordtoolbox/spinalcordtoolbox/issues')
if complete_test:
print(output)
print('')
sys.exit(e + install_software)
|
45,079 | def stream_flow_run_logs(
flow_run_id: str, max_duration: timedelta = timedelta(hours=12)
) -> None:
"""
Basic wrapper for `watch_flow_run` to print the logs of the run
Args:
- flow_run_id: The flow run to stream logs from
- max_duration: Duration to wait for flow run to complete. Defaults to 12 hours
"""
for log in watch_flow_run(flow_run_id, max_duration=max_duration):
level_name = logging.getLevelName(log.level)
timestamp = log.timestamp.in_tz(tz="local")
# Uses `print` instead of the logger to prevent duplicate timestamps
print(
f"{timestamp:%H:%M:%S} | {level_name:<7} | {log.message}",
)
| def stream_flow_run_logs(
flow_run_id: str, max_duration: timedelta = timedelta(hours=12)
) -> None:
"""
Basic wrapper for `watch_flow_run` to print the logs of the run
Args:
- flow_run_id: The flow run to stream logs from
- max_duration: Duration to wait for flow run to complete. Defaults to 12 hours.
"""
for log in watch_flow_run(flow_run_id, max_duration=max_duration):
level_name = logging.getLevelName(log.level)
timestamp = log.timestamp.in_tz(tz="local")
# Uses `print` instead of the logger to prevent duplicate timestamps
print(
f"{timestamp:%H:%M:%S} | {level_name:<7} | {log.message}",
)
|
27,718 | def _raw_skip_reason(rep):
assert rep.skipped
assert len(rep.longrepr) == 3
_, _, reason = rep.longrepr
if reason.startswith("Skipped: "):
reason = reason[9:]
return reason
| def _get_raw_skip_reason(rep):
assert rep.skipped
assert len(rep.longrepr) == 3
_, _, reason = rep.longrepr
if reason.startswith("Skipped: "):
reason = reason[9:]
return reason
|
30,316 | def return_warning(message, exit=True, warning='', outputs=None, ignore_auto_extract=False):
"""
Returns error entry with given message and exits the script
:type message: ``str``
:param message: The message to return in the entry (required)
:type exit: ``bool``
:param exit: Determines if the program will terminate after the command. Default is False.
:type warning: ``str``
:param warning: The raw warning message to log (optional)
:type outputs: ``dict or None``
:param outputs: the outputs that will be returned to playbook/investigation context (optional)
:type ignore_auto_extract: ``bool``
:param ignore_auto_extract: Determines if the war-room entry will be auto enriched. Default is false.
:return: Error entry object
:rtype: ``dict``
"""
LOG(message)
if warning:
LOG(warning)
LOG.print_log()
demisto.results({
'Type': entryTypes['error'],
'ContentsFormat': formats['text'],
'IgnoreAutoExtract': ignore_auto_extract,
'Contents': str(message),
"EntryContext": outputs
})
if exit:
sys.exit(0)
| def return_warning(message, exit=False, warning='', outputs=None, ignore_auto_extract=False):
"""
Returns error entry with given message and exits the script
:type message: ``str``
:param message: The message to return in the entry (required)
:type exit: ``bool``
:param exit: Determines if the program will terminate after the command. Default is False.
:type warning: ``str``
:param warning: The raw warning message to log (optional)
:type outputs: ``dict or None``
:param outputs: the outputs that will be returned to playbook/investigation context (optional)
:type ignore_auto_extract: ``bool``
:param ignore_auto_extract: Determines if the war-room entry will be auto enriched. Default is false.
:return: Error entry object
:rtype: ``dict``
"""
LOG(message)
if warning:
LOG(warning)
LOG.print_log()
demisto.results({
'Type': entryTypes['error'],
'ContentsFormat': formats['text'],
'IgnoreAutoExtract': ignore_auto_extract,
'Contents': str(message),
"EntryContext": outputs
})
if exit:
sys.exit(0)
|
831 | def drm(n):
"""Return the multiplicative digital root of integer n which is
a value < 10 obtained by mutiplying the digits of n together
(and repeating with the product) until a single digit is obtained.
e.g. 345 -> 3*4*5 = 60 -> 6*0 = 0.
Examples
========
>>> from sympy.ntheory.factor_ import drm
>>> drm(345)
0
"""
m = abs(as_int(n))
while m > 9:
p = 1
while m:
m, d = divmod(m, 10)
p *= d
m = p
return m
| def drm(n):
"""Return the multiplicative digital root of integer n which is
a value < 10 obtained by multiplying the digits of n together
(and repeating with the product) until a single digit is obtained.
e.g. 345 -> 3*4*5 = 60 -> 6*0 = 0.
Examples
========
>>> from sympy.ntheory.factor_ import drm
>>> drm(345)
0
"""
m = abs(as_int(n))
while m > 9:
p = 1
while m:
m, d = divmod(m, 10)
p *= d
m = p
return m
|
42,040 | def test_multiprocess_with_progbar(
capsys: _pytest.capture.CaptureFixture, storage_url: str
) -> None:
with capsys.disabled():
n_workers = 8
study_name = _STUDY_NAME
optuna.create_study(storage=storage_url, study_name=study_name)
with Pool(n_workers) as pool:
ret = pool.map(run_optimize, [(study_name, storage_url, 1, True)] * n_workers)
comp_cnt = 0
for i in range(n_workers):
if "20/20" in ret[i] and "100%" in ret[i]:
comp_cnt += 1
assert comp_cnt == n_workers
study = optuna.load_study(study_name=study_name, storage=storage_url)
trials = study.trials
assert len(trials) == n_workers * 20
_check_trials(trials)
| def test_multiprocess_with_progbar(
capsys: _pytest.capture.CaptureFixture, storage_url: str, n_jobs: int
) -> None:
with capsys.disabled():
n_workers = 8
study_name = _STUDY_NAME
optuna.create_study(storage=storage_url, study_name=study_name)
with Pool(n_workers) as pool:
ret = pool.map(run_optimize, [(study_name, storage_url, 1, True)] * n_workers)
comp_cnt = 0
for i in range(n_workers):
if "20/20" in ret[i] and "100%" in ret[i]:
comp_cnt += 1
assert comp_cnt == n_workers
study = optuna.load_study(study_name=study_name, storage=storage_url)
trials = study.trials
assert len(trials) == n_workers * 20
_check_trials(trials)
|
58,094 | def get_analysis_iocs_command(intezer_api: IntezerApi, args: dict) -> CommandResults:
analysis_id = args.get('analysis_id')
try:
analysis = FileAnalysis.from_analysis_id(analysis_id, api=intezer_api)
except HTTPError as error:
if error.response.status_code == HTTPStatus.CONFLICT:
return _get_analysis_running_result(analysis_id=str(analysis_id))
raise
if not analysis:
return _get_missing_analysis_result(analysis_id=str(analysis_id))
iocs = analysis.iocs
readable_output = ''
if iocs:
network_iocs = iocs.get('network')
if network_iocs:
readable_output += tableToMarkdown('Network IOCs', network_iocs)
files_iocs = iocs.get('files')
if files_iocs:
readable_output += tableToMarkdown('Files IOCs', files_iocs)
else:
readable_output = 'No IOCs found'
context_json = {
'Intezer.Analysis(obj.ID == val.ID)': {
'ID': analysis_id,
'IOCs': iocs
}
}
return CommandResults(
readable_output=readable_output,
outputs=context_json,
raw_response=iocs
)
| def get_analysis_iocs_command(intezer_api: IntezerApi, args: dict) -> CommandResults:
analysis_id = args.get('analysis_id')
try:
analysis = FileAnalysis.from_analysis_id(analysis_id, api=intezer_api)
except HTTPError as error:
if error.response.status_code == HTTPStatus.CONFLICT:
return _get_analysis_running_result(analysis_id=str(analysis_id))
raise
if not analysis:
return _get_missing_analysis_result(analysis_id=str(analysis_id))
iocs = analysis.iocs
readable_output = ''
if iocs:
if network_iocs := iocs.get('network'):
readable_output += tableToMarkdown('Network IOCs', network_iocs)
if files_iocs := iocs.get('files'):
readable_output += tableToMarkdown('Files IOCs', files_iocs)
else:
readable_output = 'No IOCs found'
context_json = {
'Intezer.Analysis(obj.ID == val.ID)': {
'ID': analysis_id,
'IOCs': iocs
}
}
return CommandResults(
readable_output=readable_output,
outputs=context_json,
raw_response=iocs
)
|
46,090 | def save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
| def _save_response_content(response, destination):
CHUNK_SIZE = 32768
with open(destination, "wb") as f:
for chunk in response.iter_content(CHUNK_SIZE):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
|
26,468 | def get_new_command(command):
missing_module = re.findall(MISSING_MODULE, command.output)[0]
return shell.and_("pip install {}".format(missing_module), command.script)
| def get_new_command(command):
missing_module = re.findall(MISSING_MODULE, command.output)[0]
return shell.and_(u"pip install {}".format(missing_module), command.script)
|
44,845 | def download_artifacts(
artifact_uri: Optional[str] = None,
run_id: Optional[str] = None,
artifact_path: Optional[str] = None,
dst_path: Optional[str] = None,
tracking_uri: Optional[str] = None,
) -> str:
"""
Download an artifact file or directory to a local directory.
:param artifact_uri: URI pointing to the artifacts, such as
``"runs:/500cf58bee2b40a4a82861cc31a617b1/my_model.pkl"``,
``"models:/my_model/Production"``, or ``"s3://my_bucket/my/file.txt"``.
Exactly one of ``artifact_uri`` or ``run_id`` must be specified.
:param run_id: ID of the MLflow Run containing the artifacts. Exactly one of ``run_id`` or
``artifact_uri`` must be specified.
:param artifact_path: (For use with ``run_id``) If specified, a path relative to the MLflow
Run's root directory containing the artifacts to download.
:param dst_path: Path of the local filesystem destination directory to which to download the
specified artifacts. If the directory does not exist, it is created. If
unspecified, the artifacts are downloaded to a new uniquely-named directory on
the local filesystem, unless the artifacts already exist on the local
filesystem, in which case their local path is returned directly.
:param tracking_uri: The tracking URI to be used when downloading artifacts.
:return: The location of the artifact file or directory on the local filesystem.
"""
if (run_id, artifact_uri).count(None) != 1:
raise MlflowException(
message="Exactly one of `run_id` or `artifact_uri` must be specified",
error_code=INVALID_PARAMETER_VALUE,
)
elif artifact_uri is not None and artifact_path is not None:
raise MlflowException(
message="`artifact_path` cannot be specified if `artifact_uri` is specified",
error_code=INVALID_PARAMETER_VALUE,
)
if dst_path is not None:
pathlib.Path(dst_path).mkdir(exist_ok=True, parents=True)
if artifact_uri is not None:
return _download_artifact_from_uri(artifact_uri, output_path=dst_path)
artifact_path = artifact_path if artifact_path is not None else ""
if tracking_uri is not None:
with _use_tracking_uri(tracking_uri):
store = _get_store()
else:
store = _get_store()
artifact_uri = store.get_run(run_id).info.artifact_uri
artifact_repo = get_artifact_repository(artifact_uri)
artifact_location = artifact_repo.download_artifacts(artifact_path, dst_path=dst_path)
return artifact_location
| def download_artifacts(
artifact_uri: Optional[str] = None,
run_id: Optional[str] = None,
artifact_path: Optional[str] = None,
dst_path: Optional[str] = None,
tracking_uri: Optional[str] = None,
) -> str:
"""
Download an artifact file or directory to a local directory.
:param artifact_uri: URI pointing to the artifacts, such as
``"runs:/500cf58bee2b40a4a82861cc31a617b1/my_model.pkl"``,
``"models:/my_model/Production"``, or ``"s3://my_bucket/my/file.txt"``.
Exactly one of ``artifact_uri`` or ``run_id`` must be specified.
:param run_id: ID of the MLflow Run containing the artifacts. Exactly one of ``run_id`` or
``artifact_uri`` must be specified.
:param artifact_path: (For use with ``run_id``) If specified, a path relative to the MLflow
Run's root directory containing the artifacts to download.
:param dst_path: Path of the local filesystem destination directory to which to download the
specified artifacts. If the directory does not exist, it is created. If
unspecified, the artifacts are downloaded to a new uniquely-named directory on
the local filesystem, unless the artifacts already exist on the local
filesystem, in which case their local path is returned directly.
:param tracking_uri: The tracking URI to be used when downloading artifacts.
:return: The location of the artifact file or directory on the local filesystem.
"""
if (run_id, artifact_uri).count(None) != 1:
raise MlflowException(
message="Exactly one of `run_id` or `artifact_uri` must be specified",
error_code=INVALID_PARAMETER_VALUE,
)
elif artifact_uri is not None and artifact_path is not None:
raise MlflowException(
message="`artifact_path` cannot be specified if `artifact_uri` is specified",
error_code=INVALID_PARAMETER_VALUE,
)
if dst_path is not None:
pathlib.Path(dst_path).mkdir(exist_ok=True, parents=True)
if artifact_uri is not None:
return _download_artifact_from_uri(artifact_uri, output_path=dst_path)
artifact_path = artifact_path if artifact_path is not None else ""
store = _get_store(tracking_uri)
artifact_uri = store.get_run(run_id).info.artifact_uri
artifact_repo = get_artifact_repository(artifact_uri)
artifact_location = artifact_repo.download_artifacts(artifact_path, dst_path=dst_path)
return artifact_location
|
31,081 | def get_file_type(indicator):
"""
The function gets a file indicator data and returns it's subtype.
"""
indicator_type = indicator.get('subtype', '')
return indicator_type
| def get_file_type(file_indicator):
"""
The function gets a file indicator data and returns it's subtype.
"""
indicator_type = indicator.get('subtype', '')
return indicator_type
|
44,067 | def fragment_graph(graph: MultiDiGraph) -> Tuple[Tuple[MultiDiGraph], MultiDiGraph]:
"""
Fragments a cut graph into a collection of subgraphs as well as returning
the communication/quotient graph.
Args:
graph (MultiDiGraph): directed multigraph containing measure and prepare
nodes at cut locations
Returns:
subgraphs, communication_graph (Tuple[Tuple[MultiDiGraph], MultiDiGraph]):
the subgraphs of the cut graph and the communication graph where each
node represents a fragment and edges denote the flow of qubits between
fragments
**Example**
Consider the following circuit with the manually-placed wire cuts:
.. code-block:: python
from pennylane.transforms import qcut
wire_cut_0 = qml.WireCut(wires=0)
wire_cut_1 = qml.WireCut(wires=1)
multi_wire_cut = qml.WireCut(wires=[0, 1])
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.apply(wire_cut_0)
qml.RY(0.5, wires=0)
qml.apply(wire_cut_1)
qml.CNOT(wires=[0, 1])
qml.apply(multi_wire_cut)
qml.RZ(0.6, wires=1)
qml.expval(qml.PauliZ(0))
We can find the corresponding graph, remove all the wire cut nodes, and
find the subgraphs and communication graph by using:
>>> graph = qcut.tape_to_graph(tape)
>>> qcut.replace_wire_cut_nodes(graph)
>>> qcut.fragment_graph(graph)
((<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311940>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311c10>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e2820>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e27f0>),
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e26a0>)
"""
edges = list(graph.edges)
cut_edges = []
for node1, node2, _ in edges:
if isinstance(node1, MeasureNode):
assert isinstance(node2, PrepareNode)
cut_edges.append((node1, node2))
graph.remove_edge(node1, node2)
subgraph_nodes = weakly_connected_components(graph)
subgraphs = tuple(graph.subgraph(n) for n in subgraph_nodes)
communication_graph = MultiDiGraph()
communication_graph.add_nodes_from(range(len(subgraphs)))
for node1, node2 in cut_edges:
for i, subgraph in enumerate(subgraphs):
if subgraph.has_node(node1):
start_fragment = i
if subgraph.has_node(node2):
end_fragment = i
communication_graph.add_edge(start_fragment, end_fragment, pair=(node1, node2))
return subgraphs, communication_graph
| def fragment_graph(graph: MultiDiGraph) -> Tuple[Tuple[MultiDiGraph], MultiDiGraph]:
"""
Fragments a cut graph into a collection of subgraphs as well as returning
the communication/[quotient](https://en.wikipedia.org/wiki/Quotient_graph) graph.
Args:
graph (MultiDiGraph): directed multigraph containing measure and prepare
nodes at cut locations
Returns:
subgraphs, communication_graph (Tuple[Tuple[MultiDiGraph], MultiDiGraph]):
the subgraphs of the cut graph and the communication graph where each
node represents a fragment and edges denote the flow of qubits between
fragments
**Example**
Consider the following circuit with the manually-placed wire cuts:
.. code-block:: python
from pennylane.transforms import qcut
wire_cut_0 = qml.WireCut(wires=0)
wire_cut_1 = qml.WireCut(wires=1)
multi_wire_cut = qml.WireCut(wires=[0, 1])
with qml.tape.QuantumTape() as tape:
qml.RX(0.4, wires=0)
qml.apply(wire_cut_0)
qml.RY(0.5, wires=0)
qml.apply(wire_cut_1)
qml.CNOT(wires=[0, 1])
qml.apply(multi_wire_cut)
qml.RZ(0.6, wires=1)
qml.expval(qml.PauliZ(0))
We can find the corresponding graph, remove all the wire cut nodes, and
find the subgraphs and communication graph by using:
>>> graph = qcut.tape_to_graph(tape)
>>> qcut.replace_wire_cut_nodes(graph)
>>> qcut.fragment_graph(graph)
((<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311940>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b2311c10>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e2820>,
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e27f0>),
<networkx.classes.multidigraph.MultiDiGraph object at 0x7fb3b23e26a0>)
"""
edges = list(graph.edges)
cut_edges = []
for node1, node2, _ in edges:
if isinstance(node1, MeasureNode):
assert isinstance(node2, PrepareNode)
cut_edges.append((node1, node2))
graph.remove_edge(node1, node2)
subgraph_nodes = weakly_connected_components(graph)
subgraphs = tuple(graph.subgraph(n) for n in subgraph_nodes)
communication_graph = MultiDiGraph()
communication_graph.add_nodes_from(range(len(subgraphs)))
for node1, node2 in cut_edges:
for i, subgraph in enumerate(subgraphs):
if subgraph.has_node(node1):
start_fragment = i
if subgraph.has_node(node2):
end_fragment = i
communication_graph.add_edge(start_fragment, end_fragment, pair=(node1, node2))
return subgraphs, communication_graph
|
21,211 | def update_requirements(bench_path='.'):
print('Updating Python libraries...')
pip = os.path.join(bench_path, 'env', 'bin', 'pip')
exec_cmd("{pip} install --upgrade pip".format(pip=pip))
apps_dir = os.path.join(bench_path, 'apps')
# Update bench requirements
bench_req_file = os.path.join(os.path.dirname(bench.__path__[0]), 'requirements.txt')
install_requirements(pip, bench_req_file)
from .app import install_app
for app in os.listdir(apps_dir):
install_app(app, bench_path=bench_path)
| def update_requirements(bench_path='.'):
print('Updating Python libraries...')
pip = os.path.join(bench_path, 'env', 'bin', 'pip')
exec_cmd("{pip} install --upgrade pip".format(pip=pip))
apps_dir = os.path.join(bench_path, 'apps')
# Update bench requirements
bench_req_file = os.path.join(os.path.dirname(bench.__path__[0]), 'requirements.txt')
install_requirements(pip, bench_req_file)
from bench.app import install_app
for app in os.listdir(apps_dir):
install_app(app, bench_path=bench_path)
|
44,081 | def gaussian_moment(la, lb, ra, rb, alpha, beta, e, rc):
r"""Compute one-dimensional multipole moment integral for two primitive Gaussian functions.
The multipole moment integral in one dimension is defined as
.. math::
S_{ij}^e = \left \langle G_i | q_C^e | G_j \right \rangle,
where :math:`G` is a Gaussian function at dimension :math:`q = x, y, z` of the Cartesian
coordinates system, :math:`e` is the multipole moment order and :math:`C` is the origin of the
Cartesian coordinates. The integrals can be evauated as
[`Helgaker (1995) p803 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]
.. math::
S_{ij}^e = \sum_{t=0}^{\mathrm{min}(i+j, \ e)} E_t^{ij} M_t^e,
where :math:`E` and :math:`M` are the Hermite Gaussian expansion coefficient and the Hermite
moment integral, respectively, that can be computed recursively.
Args:
la (integer): angular momentum for the first Gaussian function
lb (integer): angular momentum for the second Gaussian function
ra (float): position of the first Gaussian function
rb (float): position of the second Gaussian function
alpha (array[float]): exponent of the first Gaussian function
beta (array[float]): exponent of the second Gaussian function
e (integer): order of the multipole moment
rc (array[float]): distance between the center of the Hermite Gaussian and the origin
Returns:
array[float]: one-dimensional multipole moment integral between primitive Gaussian functions
**Example**
>>> la, lb = 0, 0
>>> ra, rb = np.array([2.0]), np.array([2.0])
>>> alpha = np.array([3.42525091])
>>> beta = np.array([3.42525091])
>>> e = 1
>>> rc = 1.5
>>> gaussian_moment(la, lb, ra, rb, alpha, beta, e, rc)
array([1.0157925])
"""
s = 0.0
for t in range(min(la + lb + 1, e + 1)):
s = s + expansion(la, lb, ra, rb, alpha, beta, t) * _hermite_moment(alpha, beta, t, e, rc)
return s
| def gaussian_moment(la, lb, ra, rb, alpha, beta, e, rc):
r"""Compute the one-dimensional multipole moment integral for two primitive Gaussian functions.
The multipole moment integral in one dimension is defined as
.. math::
S_{ij}^e = \left \langle G_i | q_C^e | G_j \right \rangle,
where :math:`G` is a Gaussian function at dimension :math:`q = x, y, z` of the Cartesian
coordinates system, :math:`e` is the multipole moment order and :math:`C` is the origin of the
Cartesian coordinates. The integrals can be evauated as
[`Helgaker (1995) p803 <https://www.worldscientific.com/doi/abs/10.1142/9789812832115_0001>`_]
.. math::
S_{ij}^e = \sum_{t=0}^{\mathrm{min}(i+j, \ e)} E_t^{ij} M_t^e,
where :math:`E` and :math:`M` are the Hermite Gaussian expansion coefficient and the Hermite
moment integral, respectively, that can be computed recursively.
Args:
la (integer): angular momentum for the first Gaussian function
lb (integer): angular momentum for the second Gaussian function
ra (float): position of the first Gaussian function
rb (float): position of the second Gaussian function
alpha (array[float]): exponent of the first Gaussian function
beta (array[float]): exponent of the second Gaussian function
e (integer): order of the multipole moment
rc (array[float]): distance between the center of the Hermite Gaussian and the origin
Returns:
array[float]: one-dimensional multipole moment integral between primitive Gaussian functions
**Example**
>>> la, lb = 0, 0
>>> ra, rb = np.array([2.0]), np.array([2.0])
>>> alpha = np.array([3.42525091])
>>> beta = np.array([3.42525091])
>>> e = 1
>>> rc = 1.5
>>> gaussian_moment(la, lb, ra, rb, alpha, beta, e, rc)
array([1.0157925])
"""
s = 0.0
for t in range(min(la + lb + 1, e + 1)):
s = s + expansion(la, lb, ra, rb, alpha, beta, t) * _hermite_moment(alpha, beta, t, e, rc)
return s
|
35,062 | def hexagon(cpu_ver="v66", **kwargs):
"""Returns a Hexagon target.
Parameters
----------
cpu_ver : str (default: "v66")
CPU version used for code generation. Not all allowed cpu str
will be valid, LLVM will throw an error.
Recognized keyword parameters
-----------------------------
hvx : int (default: 128)
Size of HVX vector in bytes. Value of 0 disables HVX codegen.
sim_options : str or list of str (default: None)
User defined sim arguments. CPU version defaults to cpu_ver.
Otherwise, separate versions are used for codegen and sim. Not
all allowed cpu strings will be valid, simulator will throw an
error if invalid. Does not affect codegen.
llvm_options : str or list of str (default: None)
User defined compiler arguments.
"""
# Some of the target parameters correspond to target kind attributes
# listed in src/target/target_kind.cc. For those parameters, their
# names follow the attribute names with the exception of '_' being used
# in place of '-'.
# Example compiler arguments
# llvm -mtriple=hexagon -mcpu=hexagonv66 -mattr=+hvxv66,+hvx-length128b
# Check for valid codegen cpu
valid_hex = ["v60", "v62", "v65", "v66", "v67", "v67t", "v68"]
try:
cpu_ver = cpu_ver[cpu_ver.index("v") :].lower()
assert cpu_ver in valid_hex
except:
msg = "{} is not a valid Hexagon version\nvalid versions include {}"
raise ValueError(msg.format(cpu_ver, valid_hex)) from None
args = {
"hvx": 128,
"sim_options": None,
"llvm_options": None,
}
args.update(kwargs)
# Warn about obsolete parameter names.
if args.get("sim_args"):
msg = "The keyword parameter 'sim_args' is deprecated, use 'sim_options' instead"
warnings.warn(msg, stacklevel=2)
args.update({"sim_options": args["sim_args"]})
if args.get("llvm_args"):
msg = "The keyword parameter 'llvm_args' is deprecated, use 'llvm_options' instead"
warnings.warn(msg, stacklevel=2)
args.update({"llvm_options": args["llvm_args"]})
# LLVM target string
def create_llvm_target(cpu_ver, args):
""" Create LLVM target string. """
target = " -mtriple=hexagon"
mcpu = " -mcpu=hexagon" + cpu_ver
# Process the options that affect target features and return the
# target feature string.
def create_target_features(args):
tfs = []
if args["hvx"] > 0:
valid_hvx = [0, 64, 128]
if not args["hvx"] in valid_hvx:
raise ValueError("Invalid hvx value, should be one of " + str(valid_hvx))
tfs += ["+hvx" + cpu_ver, "+hvx-length" + str(args["hvx"]) + "b"]
else:
tfs += ["-hvx"]
return "-mattr=" + ",".join(tfs) if tfs else ""
return target + mcpu + " " + create_target_features(args)
# Simulator options string
def create_sim_options(cpu_ver, args):
""" Create simulator option string. """
def validate_hvx_length(codegen_hvx, sim_options):
if sim_options and "--hvx_length" in sim_options:
# If --hvx_length was specified, check HVX length of sim
# vs codegen
i = sim_options.index("hvx_length") + len("hvx_length") + 1
sim_hvx = sim_options[i : i + 3]
if sim_hvx != str(codegen_hvx):
msg = "sim hvx {} and codegen hvx {} mismatch!".format(sim_hvx, codegen_hvx)
# Set the stacklevel to the tvm.target.hexagon() call.
warnings.warn(msg, stacklevel=4)
elif codegen_hvx != 0:
# If --hvx_length was not given, add it if HVX is enabled
sim_options = sim_options + " " if isinstance(sim_options, str) else ""
sim_options += "--hvx_length " + str(codegen_hvx)
return sim_options or ""
hvx = args["hvx"]
sim_options = args["sim_options"]
if not sim_options:
return cpu_ver + " " + validate_hvx_length(hvx, sim_options)
sim_cpu = cpu_ver + " "
# Add user defined args
if isinstance(sim_options, list):
sim_options = " ".join(sim_options)
# Check for supplied sim cpu version
if "v6" in sim_options:
sim_cpu = ""
# Regex match for allowed cpus
valid_cpu_str_regex = (
r"(?P<pre>--.*\s)?(--m)?"
+ r"(?P<base_version>v6[25678])(?P<sub_version>[a-z])?"
+ r"(?P<l2_size>_[0-9]+)?(?P<rev>_rev[0-9])?\s?(?P<post>--.*)?"
)
m = re.match(valid_cpu_str_regex, sim_options.lower())
if not m:
raise ValueError('Invalid simulator argument string "{}"'.format(sim_options))
# Parse options into correct order
cpu_attr = {x: str(m.groupdict()[x] or "") for x in m.groupdict()}
sim_options = (
cpu_attr["base_version"]
+ cpu_attr["sub_version"]
+ cpu_attr["l2_size"]
+ cpu_attr["rev"]
+ " "
+ cpu_attr["pre"]
+ cpu_attr["post"]
)
return sim_cpu + " " + validate_hvx_length(hvx, sim_options)
# LLVM options string
def create_llvm_options(cpu_ver, args): # pylint: disable=unused-argument
""" Create LLVM options string. """
llvm_options = args["llvm_options"]
# TVM's option parser doesn't allow '=' in values, but '=' can
# appear in LLVM flags. Replace it with '@', since it's unlikely
# that '@' will be used in another context.
if llvm_options is None or len(llvm_options.replace(" ", "")) == 0:
return ""
args = [s.replace("=", "@") for s in llvm_options.split()]
return "--llvm-options=" + ",".join(args)
# Sim args
os.environ["HEXAGON_SIM_ARGS"] = create_sim_options(cpu_ver, args)
target_str = create_llvm_target(cpu_ver, args)
llvm_str = create_llvm_options(cpu_ver, args)
args_list = target_str.split() + llvm_str.split()
return Target(" ".join(["hexagon"] + args_list))
| def hexagon(cpu_ver="v66", *, sim_options=None, llvm_options=None, hvx=128, sim_args=None, llvm_args=None):
"""Returns a Hexagon target.
Parameters
----------
cpu_ver : str (default: "v66")
CPU version used for code generation. Not all allowed cpu str
will be valid, LLVM will throw an error.
Recognized keyword parameters
-----------------------------
hvx : int (default: 128)
Size of HVX vector in bytes. Value of 0 disables HVX codegen.
sim_options : str or list of str (default: None)
User defined sim arguments. CPU version defaults to cpu_ver.
Otherwise, separate versions are used for codegen and sim. Not
all allowed cpu strings will be valid, simulator will throw an
error if invalid. Does not affect codegen.
llvm_options : str or list of str (default: None)
User defined compiler arguments.
"""
# Some of the target parameters correspond to target kind attributes
# listed in src/target/target_kind.cc. For those parameters, their
# names follow the attribute names with the exception of '_' being used
# in place of '-'.
# Example compiler arguments
# llvm -mtriple=hexagon -mcpu=hexagonv66 -mattr=+hvxv66,+hvx-length128b
# Check for valid codegen cpu
valid_hex = ["v60", "v62", "v65", "v66", "v67", "v67t", "v68"]
try:
cpu_ver = cpu_ver[cpu_ver.index("v") :].lower()
assert cpu_ver in valid_hex
except:
msg = "{} is not a valid Hexagon version\nvalid versions include {}"
raise ValueError(msg.format(cpu_ver, valid_hex)) from None
args = {
"hvx": 128,
"sim_options": None,
"llvm_options": None,
}
args.update(kwargs)
# Warn about obsolete parameter names.
if args.get("sim_args"):
msg = "The keyword parameter 'sim_args' is deprecated, use 'sim_options' instead"
warnings.warn(msg, stacklevel=2)
args.update({"sim_options": args["sim_args"]})
if args.get("llvm_args"):
msg = "The keyword parameter 'llvm_args' is deprecated, use 'llvm_options' instead"
warnings.warn(msg, stacklevel=2)
args.update({"llvm_options": args["llvm_args"]})
# LLVM target string
def create_llvm_target(cpu_ver, args):
""" Create LLVM target string. """
target = " -mtriple=hexagon"
mcpu = " -mcpu=hexagon" + cpu_ver
# Process the options that affect target features and return the
# target feature string.
def create_target_features(args):
tfs = []
if args["hvx"] > 0:
valid_hvx = [0, 64, 128]
if not args["hvx"] in valid_hvx:
raise ValueError("Invalid hvx value, should be one of " + str(valid_hvx))
tfs += ["+hvx" + cpu_ver, "+hvx-length" + str(args["hvx"]) + "b"]
else:
tfs += ["-hvx"]
return "-mattr=" + ",".join(tfs) if tfs else ""
return target + mcpu + " " + create_target_features(args)
# Simulator options string
def create_sim_options(cpu_ver, args):
""" Create simulator option string. """
def validate_hvx_length(codegen_hvx, sim_options):
if sim_options and "--hvx_length" in sim_options:
# If --hvx_length was specified, check HVX length of sim
# vs codegen
i = sim_options.index("hvx_length") + len("hvx_length") + 1
sim_hvx = sim_options[i : i + 3]
if sim_hvx != str(codegen_hvx):
msg = "sim hvx {} and codegen hvx {} mismatch!".format(sim_hvx, codegen_hvx)
# Set the stacklevel to the tvm.target.hexagon() call.
warnings.warn(msg, stacklevel=4)
elif codegen_hvx != 0:
# If --hvx_length was not given, add it if HVX is enabled
sim_options = sim_options + " " if isinstance(sim_options, str) else ""
sim_options += "--hvx_length " + str(codegen_hvx)
return sim_options or ""
hvx = args["hvx"]
sim_options = args["sim_options"]
if not sim_options:
return cpu_ver + " " + validate_hvx_length(hvx, sim_options)
sim_cpu = cpu_ver + " "
# Add user defined args
if isinstance(sim_options, list):
sim_options = " ".join(sim_options)
# Check for supplied sim cpu version
if "v6" in sim_options:
sim_cpu = ""
# Regex match for allowed cpus
valid_cpu_str_regex = (
r"(?P<pre>--.*\s)?(--m)?"
+ r"(?P<base_version>v6[25678])(?P<sub_version>[a-z])?"
+ r"(?P<l2_size>_[0-9]+)?(?P<rev>_rev[0-9])?\s?(?P<post>--.*)?"
)
m = re.match(valid_cpu_str_regex, sim_options.lower())
if not m:
raise ValueError('Invalid simulator argument string "{}"'.format(sim_options))
# Parse options into correct order
cpu_attr = {x: str(m.groupdict()[x] or "") for x in m.groupdict()}
sim_options = (
cpu_attr["base_version"]
+ cpu_attr["sub_version"]
+ cpu_attr["l2_size"]
+ cpu_attr["rev"]
+ " "
+ cpu_attr["pre"]
+ cpu_attr["post"]
)
return sim_cpu + " " + validate_hvx_length(hvx, sim_options)
# LLVM options string
def create_llvm_options(cpu_ver, args): # pylint: disable=unused-argument
""" Create LLVM options string. """
llvm_options = args["llvm_options"]
# TVM's option parser doesn't allow '=' in values, but '=' can
# appear in LLVM flags. Replace it with '@', since it's unlikely
# that '@' will be used in another context.
if llvm_options is None or len(llvm_options.replace(" ", "")) == 0:
return ""
args = [s.replace("=", "@") for s in llvm_options.split()]
return "--llvm-options=" + ",".join(args)
# Sim args
os.environ["HEXAGON_SIM_ARGS"] = create_sim_options(cpu_ver, args)
target_str = create_llvm_target(cpu_ver, args)
llvm_str = create_llvm_options(cpu_ver, args)
args_list = target_str.split() + llvm_str.split()
return Target(" ".join(["hexagon"] + args_list))
|
31,571 | def search_logs_command(client, args):
query = args.get('query')
time_range = args.get('time_range') if args.get('time_range') else 'Last 5 minutes'
limit = args.get('limit') if args.get('limit') else 100
repos = argToList(args.get('repos')) if args.get('repos') else []
if limit:
try:
limit = int(limit)
except ValueError:
raise DemistoException(f"The provided argument '{limit}' for limit is not a valid integer.")
result = client.get_search_id(query, time_range, limit, repos)
if not result.get('success'):
raise DemistoException(result['message'])
search_id = result.get('search_id')
search_result = client.get_search_results(search_id)
if not search_result.get('success'):
raise DemistoException(search_result['message'])
rows = search_result.get('rows', [])
display_title = f"Found {len(rows)} logs"
markdown = tableToMarkdown(display_title, rows, headers=None)
return CommandResults(
readable_output=markdown,
outputs_prefix='LogPoint.SearchLogs',
outputs=rows
)
| def search_logs_command(client, args):
query = args.get('query')
time_range = args.get('time_range') if args.get('time_range') else 'Last 5 minutes'
limit = args.get('limit', '100')
repos = argToList(args.get('repos')) if args.get('repos') else []
if limit:
try:
limit = int(limit)
except ValueError:
raise DemistoException(f"The provided argument '{limit}' for limit is not a valid integer.")
result = client.get_search_id(query, time_range, limit, repos)
if not result.get('success'):
raise DemistoException(result['message'])
search_id = result.get('search_id')
search_result = client.get_search_results(search_id)
if not search_result.get('success'):
raise DemistoException(search_result['message'])
rows = search_result.get('rows', [])
display_title = f"Found {len(rows)} logs"
markdown = tableToMarkdown(display_title, rows, headers=None)
return CommandResults(
readable_output=markdown,
outputs_prefix='LogPoint.SearchLogs',
outputs=rows
)
|
12,127 | def _block2event(block, seed_map, id_default, ph2comp):
"""
Read HypoDD event block
"""
lines = block.strip().splitlines()
yr, mo, dy, hr, mn, sc, la, lo, dp, mg, eh, ez, rms, id_ = lines[0].split()
time = UTCDateTime('{}-{}-{} {}-{}-{}'.format(yr, mo, dy, hr, mn, sc))
picks = []
arrivals = []
for line in lines[1:]:
sta, reltime, weight, phase = line.split()
comp = ph2comp.get(phase, '')
wid = seed_map.get(sta, id_default)
_waveform_id = WaveformStreamID(seed_string=wid.format(sta, comp))
pick = Pick(waveform_id=_waveform_id, phase_hint=phase,
time=time + float(reltime))
arrival = Arrival(phase=phase, pick_id=pick.resource_id,
time_weight=float(weight))
picks.append(pick)
arrivals.append(arrival)
qu = None if rms == '0.0' else OriginQuality(standard_error=float(rms))
origin = Origin(arrivals=arrivals,
quality=qu,
latitude=float(la),
longitude=float(lo),
depth=1000 * float(dp),
time=time)
magnitude = Magnitude(mag=mg)
event = Event(resource_id=id_,
picks=picks,
origins=[origin],
magnitudes=[magnitude],
preferred_origin_id=origin.resource_id,
preferred_magnitude_id=magnitude.resource_id)
return event
| def _block2event(block, seed_map, id_default, ph2comp):
"""
Read HypoDD event block
"""
lines = block.strip().splitlines()
yr, mo, dy, hr, mn, sc, la, lo, dp, mg, eh, ez, rms, id_ = lines[0].split()
time = UTCDateTime(int(yr), int(mo), int(dy), int(hr), int(mn), float(sc))
picks = []
arrivals = []
for line in lines[1:]:
sta, reltime, weight, phase = line.split()
comp = ph2comp.get(phase, '')
wid = seed_map.get(sta, id_default)
_waveform_id = WaveformStreamID(seed_string=wid.format(sta, comp))
pick = Pick(waveform_id=_waveform_id, phase_hint=phase,
time=time + float(reltime))
arrival = Arrival(phase=phase, pick_id=pick.resource_id,
time_weight=float(weight))
picks.append(pick)
arrivals.append(arrival)
qu = None if rms == '0.0' else OriginQuality(standard_error=float(rms))
origin = Origin(arrivals=arrivals,
quality=qu,
latitude=float(la),
longitude=float(lo),
depth=1000 * float(dp),
time=time)
magnitude = Magnitude(mag=mg)
event = Event(resource_id=id_,
picks=picks,
origins=[origin],
magnitudes=[magnitude],
preferred_origin_id=origin.resource_id,
preferred_magnitude_id=magnitude.resource_id)
return event
|
8,717 | def find_entrypoints_plugins(group='sopel.plugins'):
"""List plugins from a setuptools entry point group
:param str group: setuptools entry point group to look for
(default to ``sopel.plugins``)
:return: Yield instance of :class:`~.handlers.EntrypointPluginHandler`
created from setuptools entry point given ``group``
"""
for entrypoint in pkg_resources.iter_entry_points(group):
yield handlers.EntrypointPluginHandler(entrypoint)
| def find_entrypoints_plugins(group='sopel.plugins'):
"""List plugins from a setuptools entry point group
:param str group: setuptools entry point group to look for
(default to ``sopel.plugins``)
:return: Yield instance of :class:`~.handlers.EntrypointPluginHandler`
created from setuptools entry point given ``group``
"""
for entry_point in pkg_resources.iter_entry_points(group):
yield handlers.EntrypointPluginHandler(entrypoint)
|
23,066 | def stack(seq, axis=0, allow_unknown_chunksizes=False):
"""
Stack arrays along a new axis
Given a sequence of dask arrays, form a new dask array by stacking them
along a new dimension (axis=0 by default)
Parameters
----------
seq: list of dask.arrays
axis: int
Dimension along which to align all of the arrays
allow_unknown_chunksizes: bool
Allow unknown chunksizes, such as come from converting from dask
dataframes. Dask.array is unable to verify that chunks line up. If
data comes from differently aligned sources then this can cause
unexpected results.
Examples
--------
Create slices
>>> import dask.array as da
>>> import numpy as np
>>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))
... for i in range(3)]
>>> x = da.stack(data, axis=0)
>>> x.shape
(3, 4, 4)
>>> da.stack(data, axis=1).shape
(4, 3, 4)
>>> da.stack(data, axis=-1).shape
(4, 4, 3)
Result is a new dask Array
See Also
--------
concatenate
"""
from . import wrap
seq = [asarray(a) for a in seq]
if not seq:
raise ValueError("Need array(s) to stack")
if not allow_unknown_chunksizes and not all(x.shape == seq[0].shape for x in seq):
idx = [x for x in filter(lambda x: x[1].shape != seq[0].shape, enumerate(seq))]
raise ValueError(
"Stacked arrays must have the same shape. "
"The first array had shape {0}, while array "
"{1} has shape {2}".format(seq[0].shape, idx[0][0], idx[0][1].shape)
)
meta = np.stack([meta_from_array(a) for a in seq], axis=axis)
seq = [x.astype(meta.dtype) for x in seq]
ndim = meta.ndim - 1
if axis < 0:
axis = ndim + axis + 1
shape = tuple(
len(seq)
if i == axis
else (seq[0].shape[i] if i < axis else seq[0].shape[i - 1])
for i in range(meta.ndim)
)
seq2 = [a for a in seq if a.size]
if not seq2:
seq2 = seq
n = len(seq2)
if n == 0:
try:
return wrap.empty_like(meta, shape=shape, chunks=shape, dtype=meta.dtype)
except TypeError:
return wrap.empty(shape, chunks=shape, dtype=meta.dtype)
ind = list(range(ndim))
uc_args = list(concat((x, ind) for x in seq2))
_, seq2 = unify_chunks(*uc_args)
assert len(set(a.chunks for a in seq2)) == 1 # same chunks
chunks = seq2[0].chunks[:axis] + ((1,) * n,) + seq2[0].chunks[axis:]
names = [a.name for a in seq2]
name = "stack-" + tokenize(names, axis)
keys = list(product([name], *[range(len(bd)) for bd in chunks]))
inputs = [
(names[key[axis + 1]],) + key[1 : axis + 1] + key[axis + 2 :] for key in keys
]
values = [
(
getitem,
inp,
(slice(None, None, None),) * axis
+ (None,)
+ (slice(None, None, None),) * (ndim - axis),
)
for inp in inputs
]
layer = dict(zip(keys, values))
graph = HighLevelGraph.from_collections(name, layer, dependencies=seq2)
return Array(graph, name, chunks, meta=meta)
| def stack(seq, axis=0, allow_unknown_chunksizes=False):
"""
Stack arrays along a new axis
Given a sequence of dask arrays, form a new dask array by stacking them
along a new dimension (axis=0 by default)
Parameters
----------
seq: list of dask.arrays
axis: int
Dimension along which to align all of the arrays
allow_unknown_chunksizes: bool
Allow unknown chunksizes, such as come from converting from dask
dataframes. Dask.array is unable to verify that chunks line up. If
data comes from differently aligned sources then this can cause
unexpected results.
Examples
--------
Create slices
>>> import dask.array as da
>>> import numpy as np
>>> data = [from_array(np.ones((4, 4)), chunks=(2, 2))
... for i in range(3)]
>>> x = da.stack(data, axis=0)
>>> x.shape
(3, 4, 4)
>>> da.stack(data, axis=1).shape
(4, 3, 4)
>>> da.stack(data, axis=-1).shape
(4, 4, 3)
Result is a new dask Array
See Also
--------
concatenate
"""
from . import wrap
seq = [asarray(a) for a in seq]
if not seq:
raise ValueError("Need array(s) to stack")
if not allow_unknown_chunksizes and not all(x.shape == seq[0].shape for x in seq):
idx = [x for x in filter(lambda x: x[1].shape != seq[0].shape, enumerate(seq))]
raise ValueError(
"Stacked arrays must have the same shape. "
"The first array had shape {0}, while array "
"{1} has shape {2}".format(seq[0].shape, idx + 1, seq[idx].shape)
)
meta = np.stack([meta_from_array(a) for a in seq], axis=axis)
seq = [x.astype(meta.dtype) for x in seq]
ndim = meta.ndim - 1
if axis < 0:
axis = ndim + axis + 1
shape = tuple(
len(seq)
if i == axis
else (seq[0].shape[i] if i < axis else seq[0].shape[i - 1])
for i in range(meta.ndim)
)
seq2 = [a for a in seq if a.size]
if not seq2:
seq2 = seq
n = len(seq2)
if n == 0:
try:
return wrap.empty_like(meta, shape=shape, chunks=shape, dtype=meta.dtype)
except TypeError:
return wrap.empty(shape, chunks=shape, dtype=meta.dtype)
ind = list(range(ndim))
uc_args = list(concat((x, ind) for x in seq2))
_, seq2 = unify_chunks(*uc_args)
assert len(set(a.chunks for a in seq2)) == 1 # same chunks
chunks = seq2[0].chunks[:axis] + ((1,) * n,) + seq2[0].chunks[axis:]
names = [a.name for a in seq2]
name = "stack-" + tokenize(names, axis)
keys = list(product([name], *[range(len(bd)) for bd in chunks]))
inputs = [
(names[key[axis + 1]],) + key[1 : axis + 1] + key[axis + 2 :] for key in keys
]
values = [
(
getitem,
inp,
(slice(None, None, None),) * axis
+ (None,)
+ (slice(None, None, None),) * (ndim - axis),
)
for inp in inputs
]
layer = dict(zip(keys, values))
graph = HighLevelGraph.from_collections(name, layer, dependencies=seq2)
return Array(graph, name, chunks, meta=meta)
|
1,319 | def check_estimator(Estimator, verbose=False):
"""Check if estimator adheres to scikit-learn conventions.
This estimator will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
This test can be applied to classes or instances.
Classes currently have some additional tests that related to construction,
while passing instances allows the testing of multiple options.
Parameters
----------
estimator : estimator object or class
Estimator to check. Estimator is a class object or instance.
verbose : boolean
Flag to specify in order to see a progress bar.
"""
if verbose == True:
raise NotImplementedError
if isinstance(Estimator, type):
# got a class
name = Estimator.__name__
estimator = Estimator()
check_parameters_default_constructible(name, Estimator)
check_no_attributes_set_in_init(name, estimator)
else:
# got an instance
estimator = Estimator
name = type(estimator).__name__
for check in _yield_all_checks(name, estimator):
try:
check(name, estimator)
except SkipTest as exception:
# the only SkipTest thrown currently results from not
# being able to import pandas.
warnings.warn(str(exception), SkipTestWarning)
| def check_estimator(Estimator, verbose=False):
"""Check if estimator adheres to scikit-learn conventions.
This estimator will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
This test can be applied to classes or instances.
Classes currently have some additional tests that related to construction,
while passing instances allows the testing of multiple options.
Parameters
----------
estimator : estimator object or class
Estimator to check. Estimator is a class object or instance.
verbose : boolean
Flag to specify in order to see a progress bar.
"""
if verbose:
raise NotImplementedError
if isinstance(Estimator, type):
# got a class
name = Estimator.__name__
estimator = Estimator()
check_parameters_default_constructible(name, Estimator)
check_no_attributes_set_in_init(name, estimator)
else:
# got an instance
estimator = Estimator
name = type(estimator).__name__
for check in _yield_all_checks(name, estimator):
try:
check(name, estimator)
except SkipTest as exception:
# the only SkipTest thrown currently results from not
# being able to import pandas.
warnings.warn(str(exception), SkipTestWarning)
|
4,395 | def _check_dict_keys(user_dict, valid_keys,
dict_name="Channel name(s)", valid_name="info"):
"""Check that the keys in dictionary are valid against a set list.
Return the input dictionary if it is valid,
otherwise raise a ValueError with a readable error message.
Parameters
----------
user_dict : dict
The name of the parameter to check. This is used in the error message.
valid_keys : list
All possible valid key names.
Raises
------
ValueError
When the key of the dict is not one of the valid options.
Returns
-------
user_dict
When the keys are deemed acceptable the dictionary is returned.
"""
sorted_dict = sorted(list(user_dict))
missing = [val not in valid_keys for val in sorted_dict]
if any(missing):
raise ValueError(
f"{dict_name} is missing from {valid_name}: "
f"{np.array(sorted_dict)[np.array(missing)]}")
return user_dict
| def _check_dict_keys(user_dict, valid_keys,
dict_name="Channel name(s)", valid_name="info"):
"""Check that the keys in dictionary are valid against a set list.
Return the input dictionary if it is valid,
otherwise raise a ValueError with a readable error message.
Parameters
----------
user_dict : dict
The name of the parameter to check. This is used in the error message.
valid_keys : list
All possible valid key names.
Raises
------
ValueError
When the key of the dict is not one of the valid options.
Returns
-------
user_dict
When the keys are deemed acceptable the dictionary is returned.
"""
sorted_dict = sorted(user_dict)
missing = [val not in valid_keys for val in sorted_dict]
if any(missing):
raise ValueError(
f"{dict_name} is missing from {valid_name}: "
f"{np.array(sorted_dict)[np.array(missing)]}")
return user_dict
|
57,962 | def wrapper_panorama_security_policy_match(destinations: list, sources: list, destination_ports: list, args: dict):
results = []
for source in sources:
args['source'] = source
for destination in destinations:
args['destination'] = destination
if destination_ports:
for port in destination_ports:
args['destination-port'] = port
res = panorama_security_policy_match(args)
results.extend(res)
else:
res = panorama_security_policy_match(args)
results.extend(res)
return results
| def wrapper_panorama_security_policy_match(destinations: list, sources: list, destination_ports: list, args: dict):
results = []
for source in sources:
args['source'] = source
for destination in destinations:
args['destination'] = destination
for port in destination_ports:
args['destination-port'] = port
res = panorama_security_policy_match(args)
results.extend(res)
else:
res = panorama_security_policy_match(args)
results.extend(res)
return results
|
35,574 | def to_pil_image(pic, mode=None):
"""Convert a tensor or an ndarray to PIL Image.
See :class:`~torchvision.transforms.ToPILImage` for more details.
Args:
pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
Returns:
PIL Image: Image converted to PIL Image.
"""
if not(isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):
raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))
elif isinstance(pic, torch.Tensor):
if pic.ndimension() not in {2, 3}:
raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndimension()))
elif pic.ndimension() == 2:
# if 2D image, add channel dimension (CHW)
pic = pic.unsqueeze(0)
elif isinstance(pic, np.ndarray):
if pic.ndim not in {2, 3}:
raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))
elif pic.ndim == 2:
# if 2D image, add channel dimension (HWC)
pic = np.expand_dims(pic, 2)
npimg = pic
if pic.is_floating_point() and mode != 'F':
pic = pic.mul(255).byte()
if isinstance(pic, torch.Tensor):
if pic.device != torch.device("cpu"):
pic = pic.cpu()
npimg = np.transpose(pic.numpy(), (1, 2, 0))
if not isinstance(npimg, np.ndarray):
raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' +
'not {}'.format(type(npimg)))
if npimg.shape[2] == 1:
expected_mode = None
npimg = npimg[:, :, 0]
if npimg.dtype == np.uint8:
expected_mode = 'L'
elif npimg.dtype == np.int16:
expected_mode = 'I;16'
elif npimg.dtype == np.int32:
expected_mode = 'I'
elif npimg.dtype == np.float32:
expected_mode = 'F'
if mode is not None and mode != expected_mode:
raise ValueError("Incorrect mode ({}) supplied for input type {}. Should be {}"
.format(mode, np.dtype, expected_mode))
mode = expected_mode
elif npimg.shape[2] == 2:
permitted_2_channel_modes = ['LA']
if mode is not None and mode not in permitted_2_channel_modes:
raise ValueError("Only modes {} are supported for 2D inputs".format(permitted_2_channel_modes))
if mode is None and npimg.dtype == np.uint8:
mode = 'LA'
elif npimg.shape[2] == 4:
permitted_4_channel_modes = ['RGBA', 'CMYK', 'RGBX']
if mode is not None and mode not in permitted_4_channel_modes:
raise ValueError("Only modes {} are supported for 4D inputs".format(permitted_4_channel_modes))
if mode is None and npimg.dtype == np.uint8:
mode = 'RGBA'
else:
permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV']
if mode is not None and mode not in permitted_3_channel_modes:
raise ValueError("Only modes {} are supported for 3D inputs".format(permitted_3_channel_modes))
if mode is None and npimg.dtype == np.uint8:
mode = 'RGB'
if mode is None:
raise TypeError('Input type {} is not supported'.format(npimg.dtype))
return Image.fromarray(npimg, mode=mode)
| def to_pil_image(pic, mode=None):
"""Convert a tensor or an ndarray to PIL Image.
See :class:`~torchvision.transforms.ToPILImage` for more details.
Args:
pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
.. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
Returns:
PIL Image: Image converted to PIL Image.
"""
if not(isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)):
raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))
elif isinstance(pic, torch.Tensor):
if pic.ndimension() not in {2, 3}:
raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndimension()))
elif pic.ndimension() == 2:
# if 2D image, add channel dimension (CHW)
pic = pic.unsqueeze(0)
elif isinstance(pic, np.ndarray):
if pic.ndim not in {2, 3}:
raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim))
elif pic.ndim == 2:
# if 2D image, add channel dimension (HWC)
pic = np.expand_dims(pic, 2)
npimg = pic
if pic.is_floating_point() and mode != 'F':
pic = pic.mul(255).byte()
if isinstance(pic, torch.Tensor):
pic = pic.cpu()
npimg = np.transpose(pic.numpy(), (1, 2, 0))
if not isinstance(npimg, np.ndarray):
raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' +
'not {}'.format(type(npimg)))
if npimg.shape[2] == 1:
expected_mode = None
npimg = npimg[:, :, 0]
if npimg.dtype == np.uint8:
expected_mode = 'L'
elif npimg.dtype == np.int16:
expected_mode = 'I;16'
elif npimg.dtype == np.int32:
expected_mode = 'I'
elif npimg.dtype == np.float32:
expected_mode = 'F'
if mode is not None and mode != expected_mode:
raise ValueError("Incorrect mode ({}) supplied for input type {}. Should be {}"
.format(mode, np.dtype, expected_mode))
mode = expected_mode
elif npimg.shape[2] == 2:
permitted_2_channel_modes = ['LA']
if mode is not None and mode not in permitted_2_channel_modes:
raise ValueError("Only modes {} are supported for 2D inputs".format(permitted_2_channel_modes))
if mode is None and npimg.dtype == np.uint8:
mode = 'LA'
elif npimg.shape[2] == 4:
permitted_4_channel_modes = ['RGBA', 'CMYK', 'RGBX']
if mode is not None and mode not in permitted_4_channel_modes:
raise ValueError("Only modes {} are supported for 4D inputs".format(permitted_4_channel_modes))
if mode is None and npimg.dtype == np.uint8:
mode = 'RGBA'
else:
permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV']
if mode is not None and mode not in permitted_3_channel_modes:
raise ValueError("Only modes {} are supported for 3D inputs".format(permitted_3_channel_modes))
if mode is None and npimg.dtype == np.uint8:
mode = 'RGB'
if mode is None:
raise TypeError('Input type {} is not supported'.format(npimg.dtype))
return Image.fromarray(npimg, mode=mode)
|
42,447 | def compute(
input_data: Union[Dict[str, Any], "AtomicInput"],
program: str,
raise_error: bool = False,
local_options: Optional[Dict[str, Any]] = None,
return_dict: bool = False,
) -> Union["AtomicResult", "FailedOperation", Dict[str, Any]]:
"""Executes a single CMS program given a QCSchema input.
The full specification can be found at:
http://molssi-qc-schema.readthedocs.io/en/latest/index.html#
Parameters
----------
input_data
A QCSchema input specification in dictionary or model from QCElemental.models
program
The CMS program with which to execute the input.
raise_error
Determines if compute should raise an error or not.
retries : int, optional
The number of random tries to retry for.
local_options
A dictionary of local configuration options corresponding to a TaskConfig object.
return_dict
Returns a dict instead of qcelemental.models.AtomicResult
Returns
-------
result
AtomicResult, FailedOperation, or Dict representation of either object type
A QC Schema representation of the requested output, type depends on return_dict key.
"""
output_data = input_data.copy() # lgtm [py/multiple-definition]
with compute_wrapper(capture_output=False, raise_error=raise_error) as metadata:
# Grab the executor and build the input model
executor = get_program(program)
# Build the model and validate
input_data = model_wrapper(input_data, AtomicInput)
# Build out local options
if local_options is None:
local_options = {}
input_engine_options = input_data.extras.pop("_qcengine_local_config", {})
local_options = {**local_options, **input_engine_options}
config = get_config(local_options=local_options)
# Set environment parameters and execute
with environ_context(config=config):
# Handle optional retries
for x in range(config.retries + 1):
try:
output_data = executor.compute(input_data, config)
break
except RandomError as e:
if x == config.retries:
raise e
else:
metadata["retries"] += 1
except:
raise
return handle_output_metadata(output_data, metadata, raise_error=raise_error, return_dict=return_dict)
| def compute(
input_data: Union[Dict[str, Any], "AtomicInput"],
program: str,
raise_error: bool = False,
local_options: Optional[Dict[str, Any]] = None,
return_dict: bool = False,
) -> Union["AtomicResult", "FailedOperation", Dict[str, Any]]:
"""Executes a single CMS program given a QCSchema input.
The full specification can be found at:
http://molssi-qc-schema.readthedocs.io/en/latest/index.html#
Parameters
----------
input_data
A QCSchema input specification in dictionary or model from QCElemental.models
program
The CMS program with which to execute the input.
raise_error
Determines if compute should raise an error or not.
retries : int, optional
The number of random tries to retry for.
local_options
A dictionary of local configuration options corresponding to a TaskConfig object.
return_dict
Returns a dict instead of qcelemental.models.AtomicResult
Returns
-------
result
AtomicResult, FailedOperation, or Dict representation of either object type
A QCSchema representation of the requested output, type depends on return_dict key.
"""
output_data = input_data.copy() # lgtm [py/multiple-definition]
with compute_wrapper(capture_output=False, raise_error=raise_error) as metadata:
# Grab the executor and build the input model
executor = get_program(program)
# Build the model and validate
input_data = model_wrapper(input_data, AtomicInput)
# Build out local options
if local_options is None:
local_options = {}
input_engine_options = input_data.extras.pop("_qcengine_local_config", {})
local_options = {**local_options, **input_engine_options}
config = get_config(local_options=local_options)
# Set environment parameters and execute
with environ_context(config=config):
# Handle optional retries
for x in range(config.retries + 1):
try:
output_data = executor.compute(input_data, config)
break
except RandomError as e:
if x == config.retries:
raise e
else:
metadata["retries"] += 1
except:
raise
return handle_output_metadata(output_data, metadata, raise_error=raise_error, return_dict=return_dict)
|
58,827 | def nrm2(x, out=None):
"""Computes the Euclidean norm of vector x."""
if x.ndim != 1:
raise ValueError('x must be a 1D array (actual: {})'.format(x.ndim))
dtype = x.dtype.char
if dtype == 'f':
func = cublas.snrm2
elif dtype == 'd':
func = cublas.dnrm2
elif dtype == 'F':
func = cublas.scnrm2
elif dtype == 'D':
func = cublas.dznrm2
else:
raise TypeError('invalid dtype')
handle = device.get_cublas_handle()
result_dtype = dtype.lower()
result_ptr, result, mode = _setup_result_ptr(handle, out, result_dtype)
func(handle, x.size, x.data.ptr, 1, result_ptr)
cublas.setPointerMode(handle, mode)
if out is None:
out = result
elif out.dtype != result_dtype:
out[...] = result
return out
| def nrm2(x, out=None):
"""Computes the Euclidean norm of vector x."""
if x.ndim != 1:
raise ValueError('x must be a 1D array (actual: {})'.format(x.ndim))
dtype = x.dtype.char
if dtype == 'f':
func = cublas.snrm2
elif dtype == 'd':
func = cublas.dnrm2
elif dtype == 'F':
func = cublas.scnrm2
elif dtype == 'D':
func = cublas.dznrm2
else:
raise TypeError('invalid dtype')
handle = device.get_cublas_handle()
result_dtype = dtype.lower()
result_ptr, result, orig_mode = _setup_result_ptr(handle, out, result_dtype)
func(handle, x.size, x.data.ptr, 1, result_ptr)
cublas.setPointerMode(handle, orig_mode)
if out is None:
out = result
elif out.dtype != result_dtype:
out[...] = result
return out
|
32,050 | def relationships_manager(client: Client, entity_a: str, entity_a_type: str, indicator_type: str,
indicator: str, field_for_passive_dns_rs: str, feed_indicator_type_for_passive_dns_rs: str):
"""
manage the relationships creation
Args:
client: Client object with request
entity_a: str the first entity of the relationship
entity_a_type: str the type of the first entity
indicator_type: str the indicator type to get the related information by
entity_b_type: str the indicator to get the related information by
:returns:
a list of the relationships that were created
"""
relationships: list = []
if client.max_indicator_relationships != 0:
params = {'limit': str(client.max_indicator_relationships)}
_, _, urls_raw_response = alienvault_get_related_urls_by_indicator_command(client, indicator_type, indicator, params)
urls_raw_response = delete_duplicated_relationships(dict_safe_get(urls_raw_response, ['url_list'], ['']), 'url')
relationships += create_relationships(client, urls_raw_response, entity_a, entity_a_type, 'url', FeedIndicatorType.URL)
_, _, hash_raw_response = alienvault_get_related_hashes_by_indicator_command(client, indicator_type, indicator, params)
hash_raw_response = delete_duplicated_relationships(dict_safe_get(hash_raw_response, ['data'], ['']), 'hash')
relationships += create_relationships(client, hash_raw_response, entity_a, entity_a_type, 'hash', FeedIndicatorType.File)
_, _, passive_dns_raw_response = alienvault_get_passive_dns_data_by_indicator_command(client, indicator_type,
indicator, params)
if len(dict_safe_get(passive_dns_raw_response, ['passive_dns'], [''])) > client.max_indicator_relationships:
passive_dns_raw_response = delete_duplicated_relationships(passive_dns_raw_response.get('passive_dns')
[0:client.max_indicator_relationships],
field_for_passive_dns_rs)
else:
passive_dns_raw_response = delete_duplicated_relationships(dict_safe_get(passive_dns_raw_response, ['passive_dns'],
['']), field_for_passive_dns_rs)
passive_dns_raw_response = validate_string_is_not_url(passive_dns_raw_response, field_for_passive_dns_rs)
relationships += create_relationships(client, passive_dns_raw_response, entity_a,
entity_a_type, field_for_passive_dns_rs, feed_indicator_type_for_passive_dns_rs)
return relationships
| def relationships_manager(client: Client, entity_a: str, entity_a_type: str, indicator_type: str,
indicator: str, field_for_passive_dns_rs: str, feed_indicator_type_for_passive_dns_rs: str):
"""
manage the relationships creation
Args:
client: Client object with request
entity_a: str the first entity of the relationship
entity_a_type: str the type of the first entity
indicator_type: str the indicator type to get the related information by
entity_b_type: str the indicator to get the related information by
:returns:
a list of the relationships that were created
"""
relationships: list = []
if client.max_indicator_relationships > 0:
params = {'limit': str(client.max_indicator_relationships)}
_, _, urls_raw_response = alienvault_get_related_urls_by_indicator_command(client, indicator_type, indicator, params)
urls_raw_response = delete_duplicated_relationships(dict_safe_get(urls_raw_response, ['url_list'], ['']), 'url')
relationships += create_relationships(client, urls_raw_response, entity_a, entity_a_type, 'url', FeedIndicatorType.URL)
_, _, hash_raw_response = alienvault_get_related_hashes_by_indicator_command(client, indicator_type, indicator, params)
hash_raw_response = delete_duplicated_relationships(dict_safe_get(hash_raw_response, ['data'], ['']), 'hash')
relationships += create_relationships(client, hash_raw_response, entity_a, entity_a_type, 'hash', FeedIndicatorType.File)
_, _, passive_dns_raw_response = alienvault_get_passive_dns_data_by_indicator_command(client, indicator_type,
indicator, params)
if len(dict_safe_get(passive_dns_raw_response, ['passive_dns'], [''])) > client.max_indicator_relationships:
passive_dns_raw_response = delete_duplicated_relationships(passive_dns_raw_response.get('passive_dns')
[0:client.max_indicator_relationships],
field_for_passive_dns_rs)
else:
passive_dns_raw_response = delete_duplicated_relationships(dict_safe_get(passive_dns_raw_response, ['passive_dns'],
['']), field_for_passive_dns_rs)
passive_dns_raw_response = validate_string_is_not_url(passive_dns_raw_response, field_for_passive_dns_rs)
relationships += create_relationships(client, passive_dns_raw_response, entity_a,
entity_a_type, field_for_passive_dns_rs, feed_indicator_type_for_passive_dns_rs)
return relationships
|
44,229 | def quantum_fisher(qnode, *args, **kwargs):
r"""Returns a function that computes the quantum fisher information matrix (QFIM) of a given :class:`.QNode`.
Given a parametrized quantum state :math:`|\psi(\bm{\theta})\rangle`, the quantum fisher information matrix (QFIM) quantifies how changes to the parameters :math:`\bm{\theta}`
are reflected in the quantum state. The metric used to induce the QFIM is the fidelity :math:`f = |\langle \psi | \psi' \rangle|^2` between two (pure) quantum states.
This leads to the following definition of the QFIM (see eq. (27) in `arxiv:2103.15191 <https://arxiv.org/abs/2103.15191>`_):
.. math::
\text{QFIM}_{i, j} = 4 \text{Re}\left[ \langle \partial_i \psi(\bm{\theta}) | \partial_j \psi(\bm{\theta}) \rangle
- \langle \partial_i \psi(\bm{\theta}) | \psi(\bm{\theta}) \rangle \langle \psi(\bm{\theta}) | \partial_j \psi(\bm{\theta}) \rangle \right]
with short notation :math:`| \partial_j \psi(\bm{\theta}) \rangle := \frac{\partial}{\partial \theta_j}| \psi(\bm{\theta}) \rangle`.
.. seealso::
:func:`~.pennylane.metric_tensor`, :func:`~.pennylane.adjoint_metric_tensor`, :func:`~.pennylane.qinfo.transforms.classical_fisher`
Args:
qnode (:class:`.QNode`): A :class:`.QNode` that may have arbitrary return types.
args: In case finite shots are used, further arguments according to :func:`~.pennylane.metric_tensor` may be passed.
Returns:
func: The function that computes the quantum fisher information matrix.
.. note::
``quantum_fisher`` coincides with the ``metric_tensor`` with a prefactor of :math:`4`. In case a device with finite shots is used, the hardware compatible transform :func:`~.pennylane.metric_tensor` is used.
In case of a device with ``shots=None``, :func:`~.pennylane.adjoint_metric_tensor` is used. Please refer to their respective documentations for details on the arguments.
**Example**
The quantum Fisher information matrix (QIFM) can be used to compute the `natural` gradient for `Quantum Natural Gradient Descent <https://arxiv.org/abs/1909.02108>`_.
A typical scenario is optimizing the expectation value of a Hamiltonian:
.. code-block:: python
n_wires = 2
dev = qml.device("default.qubit", wires=n_wires)
H = 1.*qml.PauliX(0) @ qml.PauliX(1) - 0.5 * qml.PauliZ(1)
@qml.qnode(dev)
def circ(params):
qml.RY(params[0], wires=1)
qml.CNOT(wires=(1,0))
qml.RY(params[1], wires=1)
qml.RZ(params[2], wires=1)
return qml.expval(H)
params = pnp.array([0.5, 1., 0.2], requires_grad=True)
The natural gradient is then simply the QFIM multiplied by the gradient:
>>> grad = qml.grad(circ)(params)
[ 0.59422561, -0.02615095, -0.05146226]
>>> qfim = qml.qinfo.quantum_fisher(circ)(params)
np.diag([1., 1., 0.77517241])
>>> q_nat_grad = qfim @ grad
[ 0.59422561 -0.02615095 -0.03989212]
When using real hardware or finite shots, ``quantum_fisher`` is internally calling :func:`~.pennylane.metric_tensor`.
To obtain the full QFIM, we need an auxilary wire to perform the Hadamard test.
>>> dev = qml.device("default.qubit", wires=n_wires+1, shots=1000)
>>> @qml.qnode(dev)
... def circ(params):
... qml.RY(params[0], wires=1)
... qml.CNOT(wires=(1,0))
... qml.RY(params[1], wires=1)
... qml.RZ(params[2], wires=1)
... return qml.expval(H)
>>> qfim = qml.qinfo.quantum_fisher(circ)(params)
Alternatively, we can fall back on the block-diagonal QFIM without the additional wire.
>>> qfim = qml.qinfo.quantum_fisher(circ, approx="block-diag")(params)
"""
if qnode.device.shots is not None:
def wrapper(*args0, **kwargs0):
return 4 * metric_tensor(qnode, *args, **kwargs)(*args0, **kwargs0)
else:
def wrapper(*args0, **kwargs0):
return 4 * adjoint_metric_tensor(qnode, *args, **kwargs)(*args0, **kwargs0)
return wrapper
| def quantum_fisher(qnode, *args, **kwargs):
r"""Returns a function that computes the quantum fisher information matrix (QFIM) of a given :class:`.QNode`.
Given a parametrized quantum state :math:`|\psi(\bm{\theta})\rangle`, the quantum fisher information matrix (QFIM) quantifies how changes to the parameters :math:`\bm{\theta}`
are reflected in the quantum state. The metric used to induce the QFIM is the fidelity :math:`f = |\langle \psi | \psi' \rangle|^2` between two (pure) quantum states.
This leads to the following definition of the QFIM (see eq. (27) in `arxiv:2103.15191 <https://arxiv.org/abs/2103.15191>`_):
.. math::
\text{QFIM}_{i, j} = 4 \text{Re}\left[ \langle \partial_i \psi(\bm{\theta}) | \partial_j \psi(\bm{\theta}) \rangle
- \langle \partial_i \psi(\bm{\theta}) | \psi(\bm{\theta}) \rangle \langle \psi(\bm{\theta}) | \partial_j \psi(\bm{\theta}) \rangle \right]
with short notation :math:`| \partial_j \psi(\bm{\theta}) \rangle := \frac{\partial}{\partial \theta_j}| \psi(\bm{\theta}) \rangle`.
.. seealso::
:func:`~.pennylane.metric_tensor`, :func:`~.pennylane.adjoint_metric_tensor`, :func:`~.pennylane.qinfo.transforms.classical_fisher`
Args:
qnode (:class:`.QNode`): A :class:`.QNode` that may have arbitrary return types.
*args: In case finite shots are used, further arguments according to :func:`~.pennylane.metric_tensor` may be passed.
Returns:
func: The function that computes the quantum fisher information matrix.
.. note::
``quantum_fisher`` coincides with the ``metric_tensor`` with a prefactor of :math:`4`. In case a device with finite shots is used, the hardware compatible transform :func:`~.pennylane.metric_tensor` is used.
In case of a device with ``shots=None``, :func:`~.pennylane.adjoint_metric_tensor` is used. Please refer to their respective documentations for details on the arguments.
**Example**
The quantum Fisher information matrix (QIFM) can be used to compute the `natural` gradient for `Quantum Natural Gradient Descent <https://arxiv.org/abs/1909.02108>`_.
A typical scenario is optimizing the expectation value of a Hamiltonian:
.. code-block:: python
n_wires = 2
dev = qml.device("default.qubit", wires=n_wires)
H = 1.*qml.PauliX(0) @ qml.PauliX(1) - 0.5 * qml.PauliZ(1)
@qml.qnode(dev)
def circ(params):
qml.RY(params[0], wires=1)
qml.CNOT(wires=(1,0))
qml.RY(params[1], wires=1)
qml.RZ(params[2], wires=1)
return qml.expval(H)
params = pnp.array([0.5, 1., 0.2], requires_grad=True)
The natural gradient is then simply the QFIM multiplied by the gradient:
>>> grad = qml.grad(circ)(params)
[ 0.59422561, -0.02615095, -0.05146226]
>>> qfim = qml.qinfo.quantum_fisher(circ)(params)
np.diag([1., 1., 0.77517241])
>>> q_nat_grad = qfim @ grad
[ 0.59422561 -0.02615095 -0.03989212]
When using real hardware or finite shots, ``quantum_fisher`` is internally calling :func:`~.pennylane.metric_tensor`.
To obtain the full QFIM, we need an auxilary wire to perform the Hadamard test.
>>> dev = qml.device("default.qubit", wires=n_wires+1, shots=1000)
>>> @qml.qnode(dev)
... def circ(params):
... qml.RY(params[0], wires=1)
... qml.CNOT(wires=(1,0))
... qml.RY(params[1], wires=1)
... qml.RZ(params[2], wires=1)
... return qml.expval(H)
>>> qfim = qml.qinfo.quantum_fisher(circ)(params)
Alternatively, we can fall back on the block-diagonal QFIM without the additional wire.
>>> qfim = qml.qinfo.quantum_fisher(circ, approx="block-diag")(params)
"""
if qnode.device.shots is not None:
def wrapper(*args0, **kwargs0):
return 4 * metric_tensor(qnode, *args, **kwargs)(*args0, **kwargs0)
else:
def wrapper(*args0, **kwargs0):
return 4 * adjoint_metric_tensor(qnode, *args, **kwargs)(*args0, **kwargs0)
return wrapper
|
8,208 | def read(filepath, **kwargs):
"""
Reads a JPEG2000 file.
Parameters
----------
filepath : `str`
The file to be read.
Returns
-------
pairs : `list`
A list of (data, header) tuples.
"""
# NOTE: This can be removed after support for file-obj in `glymur.Jp2k`.
if isinstance(filepath, io.IOBase):
filepath = filepath.name # Extracting path from the file-obj
# Put import here to speed up sunpy.io import time
from glymur import Jp2k
header = get_header(filepath)
data = Jp2k(filepath)[...]
# For some reason Jp2k doesn't like [::-1], so do directly on the array
data = data[::-1]
return [HDPair(data, header[0])]
| def read(filepath, **kwargs):
"""
Reads a JPEG2000 file.
Parameters
----------
filepath : `str`
The file to be read.
Returns
-------
pairs : `list`
A list of (data, header) tuples.
"""
# NOTE: This can be removed after support for file-obj in `glymur.Jp2k`.
if isinstance(filepath, io.IOBase):
filepath = filepath.name
# Put import here to speed up sunpy.io import time
from glymur import Jp2k
header = get_header(filepath)
data = Jp2k(filepath)[...]
# For some reason Jp2k doesn't like [::-1], so do directly on the array
data = data[::-1]
return [HDPair(data, header[0])]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.