id (int64, 11-59.9k) | original (string, lengths 33-150k) | modified (string, lengths 37-150k) |
---|---|---|
46,521 |
def get_min_basic_value(typ: str) -> Any:
if typ == 'bool':
return False
if typ[:4] == 'uint':
size = int(typ[4:])
assert size in (8, 16, 32, 64, 128, 256)
return 0
if typ == 'byte':
return 0x00
else:
raise ValueError("Not a basic type")
|
def get_min_basic_value(typ: str) -> Any:
if typ == 'bool':
return False
if typ[:4] == 'uint':
size = int(typ[4:])
assert size in UINT_SIZES
return 0
if typ == 'byte':
return 0x00
else:
raise ValueError("Not a basic type")
|
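The modified version relies on a module-level UINT_SIZES constant; below is a minimal sketch of that constant plus a couple of calls, where the constant's value is an assumption that simply mirrors the tuple hard-coded in the original.
from typing import Any  # needed by the function's return annotation above

# Assumed constant; mirrors the sizes the original version hard-coded.
UINT_SIZES = (8, 16, 32, 64, 128, 256)

assert get_min_basic_value('bool') is False
assert get_min_basic_value('uint64') == 0
assert get_min_basic_value('byte') == 0x00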
52,124 |
def rgb2gray(rgb_img):
"""Convert image from RGB colorspace to Gray.
Inputs:
rgb_img = RGB image data
Returns:
gray = grayscale image
:param rgb_img: numpy.ndarray
:return gray: numpy.ndarray
"""
gray = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
_debug(visual=gray, filename=os.path.join(params.debug_outdir, str(params.device) + "_gray.png"))
return gray
|
def rgb2gray(rgb_img):
"""Convert image from RGB colorspace to Gray.
Inputs:
rgb_img = RGB image data
Returns:
gray = grayscale image
:param rgb_img: numpy.ndarray
:return gray: numpy.ndarray
"""
gray = cv2.cvtColor(rgb_img, cv2.COLOR_BGR2GRAY)
_debug(visual=gray, filename=os.path.join(params.debug_outdir, str(params.device) + "_gray.png"))
return gray
|
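For context, a minimal usage sketch of the function above, assuming the surrounding PlantCV-style module (cv2, params, _debug) is available; the file name is illustrative.
import cv2

# Hypothetical input file; cv2.imread returns a BGR numpy.ndarray.
img = cv2.imread("leaf.png")
gray = rgb2gray(rgb_img=img)
print(gray.shape, gray.dtype)  # two-dimensional array, same height/width as the input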
36,388 |
def fd_count():
"""Count the number of open file descriptors.
"""
if sys.platform.startswith(('linux', 'freebsd')):
try:
names = os.listdir("/proc/self/fd")
# Subtract one because listdir() opens internally a file
# descriptor to list the content of the /proc/self/fd/ directory.
return len(names) - 1
except FileNotFoundError:
pass
MAXFD = 256
if hasattr(os, 'sysconf'):
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except OSError:
pass
old_modes = None
if sys.platform == 'win32':
# bpo-25306, bpo-31009: Call CrtSetReportMode() to not kill the process
# on invalid file descriptor if Python is compiled in debug mode
try:
import msvcrt
msvcrt.CrtSetReportMode
except (AttributeError, ImportError):
# no msvcrt or a release build
pass
else:
old_modes = {}
for report_type in (msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT):
old_modes[report_type] = msvcrt.CrtSetReportMode(report_type, 0)
try:
count = 0
for fd in range(MAXFD):
try:
# Prefer dup() over fstat(). fstat() can require input/output
# whereas dup() doesn't.
fd2 = os.dup(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
else:
os.close(fd2)
count += 1
finally:
if old_modes is not None:
for report_type in (msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT):
msvcrt.CrtSetReportMode(report_type, old_modes[report_type])
return count
|
def fd_count():
"""Count the number of open file descriptors.
"""
if sys.platform.startswith(('linux', 'freebsd')):
try:
names = os.listdir("/proc/self/fd")
# Subtract one because listdir() internally opens a file
# descriptor to list the content of the /proc/self/fd/ directory.
return len(names) - 1
except FileNotFoundError:
pass
MAXFD = 256
if hasattr(os, 'sysconf'):
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except OSError:
pass
old_modes = None
if sys.platform == 'win32':
# bpo-25306, bpo-31009: Call CrtSetReportMode() to not kill the process
# on invalid file descriptor if Python is compiled in debug mode
try:
import msvcrt
msvcrt.CrtSetReportMode
except (AttributeError, ImportError):
# no msvcrt or a release build
pass
else:
old_modes = {}
for report_type in (msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT):
old_modes[report_type] = msvcrt.CrtSetReportMode(report_type, 0)
try:
count = 0
for fd in range(MAXFD):
try:
# Prefer dup() over fstat(). fstat() can require input/output
# whereas dup() doesn't.
fd2 = os.dup(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
else:
os.close(fd2)
count += 1
finally:
if old_modes is not None:
for report_type in (msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT):
msvcrt.CrtSetReportMode(report_type, old_modes[report_type])
return count
|
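A short sketch of how a helper like this is typically used in tests to spot file-descriptor leaks; purely illustrative.
before = fd_count()
with open(__file__) as f:
    # Opening a file consumes one descriptor while the block is active.
    assert fd_count() == before + 1
# The descriptor is released when the with-block exits.
assert fd_count() == before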
27,911 |
def observe_value(observation_key, target_func):
"""Returns a trainer extension to continuously record a value.
Args:
observation_key (str): Key of observation to record.
target_func (function): Function that returns the value to record.
It must take one argument: :class:~chainer.training.Trainer object.
Returns:
The extension function.
This extension is triggered every 1 epoch by default.
To change this, specify ``trigger`` argument to
:meth:`Trainer.extend() <chainer.training.Trainer.extend>` method.
"""
@extension.make_extension(
trigger=(1, 'epoch'), priority=extension.PRIORITY_WRITER)
def _observe_value(trainer):
trainer.observation[observation_key] = target_func(trainer)
return _observe_value
|
def observe_value(observation_key, target_func):
"""Returns a trainer extension to continuously record a value.
Args:
observation_key (str): Key of observation to record.
target_func (function): Function that returns the value to record.
It must take one argument: :class:`~chainer.training.Trainer` object.
Returns:
The extension function.
This extension is triggered every 1 epoch by default.
To change this, use the ``trigger`` argument with the
:meth:`Trainer.extend() <chainer.training.Trainer.extend>` method.
"""
@extension.make_extension(
trigger=(1, 'epoch'), priority=extension.PRIORITY_WRITER)
def _observe_value(trainer):
trainer.observation[observation_key] = target_func(trainer)
return _observe_value
|
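For illustration, a hedged sketch of registering the extension with an existing Chainer trainer; the optimizer name 'main' and the lr attribute are assumptions about the training setup.
# Record the current learning rate under the key 'lr' once per epoch.
trainer.extend(
    observe_value('lr',
                  lambda trainer: trainer.updater.get_optimizer('main').lr),
    trigger=(1, 'epoch'))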
26,196 |
def test_connection_failure(aggregator):
c = GnatsdCheck(CHECK_NAME, {}, {}, [CONNECTION_FAILURE])
with pytest.raises(Exception):
c.check(CONNECTION_FAILURE)
aggregator.assert_service_check('gnatsd.can_connect', status=GnatsdCheck.CRITICAL, count=1)
|
def test_connection_failure(aggregator):
c = GnatsdCheck(CHECK_NAME, {}, [CONNECTION_FAILURE])
with pytest.raises(Exception):
c.check(CONNECTION_FAILURE)
aggregator.assert_service_check('gnatsd.can_connect', status=GnatsdCheck.CRITICAL, count=1)
|
9,050 |
def rate_channel(
rate: int,
message: typing.Optional[str] = None,
) -> typing.Callable:
"""Decorate a function to be rate-limited for a channel.
:param rate: seconds between permitted calls of this function in the same
channel, regardless of triggering user
:param message: optional; message send as notice when a user hits the limit
This decorator can be used alone or with the :func:`rate` decorator, as it
will always take precedence::
@rate(10, 10, 10)
@rate_channel(5, 'You hit the channel rate limit for this function.')
# channel limit will be set to 5, other to 10
# will send a NOTICE only when a user hits the channel limit
# as other rate limit don't have any message set
If you don't provide a message, the default message set (if any) by
:func:`rate` will be used instead.
.. versionadded:: 8.0
"""
def add_attribute(function):
function.channel_rate = rate
function.channel_rate_message = message
return function
return add_attribute
|
def rate_channel(
rate: int,
message: typing.Optional[str] = None,
) -> typing.Callable:
"""Decorate a function to be rate-limited for a channel.
:param rate: seconds between permitted calls of this function in the same
channel, regardless of triggering user
:param message: optional; message sent as a notice when a user hits the limit
This decorator can be used alone or with the :func:`rate` decorator, as it
will always take precedence::
@rate(10, 10, 10)
@rate_channel(5, 'You hit the channel rate limit for this function.')
# channel limit will be set to 5, other to 10
# will send a NOTICE only when a user hits the channel limit
# as other rate limits don't have any message set
If you don't provide a message, the default message set (if any) by
:func:`rate` will be used instead.
.. versionadded:: 8.0
"""
def add_attribute(function):
function.channel_rate = rate
function.channel_rate_message = message
return function
return add_attribute
|
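A minimal sketch of applying the decorator in a Sopel-style plugin; the callback body is illustrative, while the channel_rate and channel_rate_message attributes are exactly what the decorator sets.
@rate_channel(5, 'You hit the channel rate limit for this command.')
def hello(bot, trigger):
    bot.say('Hello, %s!' % trigger.nick)

# The decorator only annotates the callable; the bot's rate limiter reads these attributes.
assert hello.channel_rate == 5
assert hello.channel_rate_message.startswith('You hit')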
10,234 |
def main():
result = {}
category_list = []
module = AnsibleModule(
argument_spec=dict(
category=dict(type='list', default=['Systems']),
command=dict(type='list'),
baseuri=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
timeout=dict(type='int', default=10)
),
supports_check_mode=False
)
is_old_facts = module._name == 'redfish_facts'
if is_old_facts:
module.deprecate("The 'redfish_facts' module has been renamed to 'redfish_info', "
"and the renamed one no longer returns ansible_facts", version='2.13')
# admin credentials used for authentication
creds = {'user': module.params['username'],
'pswd': module.params['password']}
# timeout
timeout = module.params['timeout']
# Build root URI
root_uri = "https://" + module.params['baseuri']
rf_utils = RedfishUtils(creds, root_uri, timeout, module)
# Build Category list
if "all" in module.params['category']:
for entry in CATEGORY_COMMANDS_ALL:
category_list.append(entry)
else:
# one or more categories specified
category_list = module.params['category']
for category in category_list:
command_list = []
# Build Command list for each Category
if category in CATEGORY_COMMANDS_ALL:
if not module.params['command']:
# True if we don't specify a command --> use default
command_list.append(CATEGORY_COMMANDS_DEFAULT[category])
elif "all" in module.params['command']:
for entry in range(len(CATEGORY_COMMANDS_ALL[category])):
command_list.append(CATEGORY_COMMANDS_ALL[category][entry])
# one or more commands
else:
command_list = module.params['command']
# Verify that all commands are valid
for cmd in command_list:
# Fail if even one command given is invalid
if cmd not in CATEGORY_COMMANDS_ALL[category]:
module.fail_json(msg="Invalid Command: %s" % cmd)
else:
# Fail if even one category given is invalid
module.fail_json(msg="Invalid Category: %s" % category)
# Organize by Categories / Commands
if category == "Systems":
# execute only if we find a Systems resource
resource = rf_utils._find_systems_resource()
if resource['ret'] is False:
module.fail_json(msg=resource['msg'])
for command in command_list:
if command == "GetSystemInventory":
result["system"] = rf_utils.get_multi_system_inventory()
elif command == "GetCpuInventory":
result["cpu"] = rf_utils.get_multi_cpu_inventory()
elif command == "GetMemoryInventory":
result["memory"] = rf_utils.get_multi_memory_inventory()
elif command == "GetNicInventory":
result["nic"] = rf_utils.get_multi_nic_inventory(category)
elif command == "GetStorageControllerInventory":
result["storage_controller"] = rf_utils.get_multi_storage_controller_inventory()
elif command == "GetDiskInventory":
result["disk"] = rf_utils.get_multi_disk_inventory()
elif command == "GetVolumeInventory":
result["volume"] = rf_utils.get_multi_volume_inventory()
elif command == "GetBiosAttributes":
result["bios_attribute"] = rf_utils.get_multi_bios_attributes()
elif command == "GetBootOrder":
result["boot_order"] = rf_utils.get_multi_boot_order()
elif command == "GetBootOverride":
result["boot_override"] = rf_utils.get_multi_boot_override()
elif category == "Chassis":
# execute only if we find Chassis resource
resource = rf_utils._find_chassis_resource()
if resource['ret'] is False:
module.fail_json(msg=resource['msg'])
for command in command_list:
if command == "GetFanInventory":
result["fan"] = rf_utils.get_fan_inventory()
elif command == "GetPsuInventory":
result["psu"] = rf_utils.get_psu_inventory()
elif command == "GetChassisThermals":
result["thermals"] = rf_utils.get_chassis_thermals()
elif command == "GetChassisPower":
result["chassis_power"] = rf_utils.get_chassis_power()
elif command == "GetChassisInventory":
result["chassis"] = rf_utils.get_chassis_inventory()
elif category == "Accounts":
# execute only if we find an Account service resource
resource = rf_utils._find_accountservice_resource()
if resource['ret'] is False:
module.fail_json(msg=resource['msg'])
for command in command_list:
if command == "ListUsers":
result["user"] = rf_utils.list_users()
elif category == "Update":
# execute only if we find UpdateService resources
resource = rf_utils._find_updateservice_resource()
if resource['ret'] is False:
module.fail_json(msg=resource['msg'])
for command in command_list:
if command == "GetFirmwareInventory":
result["firmware"] = rf_utils.get_firmware_inventory()
elif command == "GetFirmwareUpdateCapabilities":
result["firmware_update_capabilities"] = rf_utils.get_firmware_update_capabilities()
elif category == "Sessions":
# execute only if we find SessionService resources
resource = rf_utils._find_sessionservice_resource()
if resource['ret'] is False:
module.fail_json(msg=resource['msg'])
for command in command_list:
if command == "GetSessions":
result["session"] = rf_utils.get_sessions()
elif category == "Manager":
# execute only if we find a Manager service resource
resource = rf_utils._find_managers_resource()
if resource['ret'] is False:
module.fail_json(msg=resource['msg'])
for command in command_list:
if command == "GetManagerNicInventory":
result["manager_nics"] = rf_utils.get_multi_nic_inventory(category)
elif command == "GetVirtualMedia":
result["virtual_media"] = rf_utils.get_multi_virtualmedia()
elif command == "GetLogs":
result["log"] = rf_utils.get_logs()
elif command == "GetManagerServices":
result["manager_services"] = rf_utils.get_manager_services()
# Return data back
if is_old_facts:
module.exit_json(ansible_facts=dict(redfish_facts=result))
else:
module.exit_json(redfish_facts=result)
|
def main():
result = {}
category_list = []
module = AnsibleModule(
argument_spec=dict(
category=dict(type='list', default=['Systems']),
command=dict(type='list'),
baseuri=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
timeout=dict(type='int', default=10)
),
supports_check_mode=False
)
is_old_facts = module._name == 'redfish_facts'
if is_old_facts:
module.deprecate("The 'redfish_facts' module has been renamed to 'redfish_info', "
"and the renamed one no longer returns ansible_facts", version='2.13')
# admin credentials used for authentication
creds = {'user': module.params['username'],
'pswd': module.params['password']}
# timeout
timeout = module.params['timeout']
# Build root URI
root_uri = "https://" + module.params['baseuri']
rf_utils = RedfishUtils(creds, root_uri, timeout, module)
# Build Category list
if "all" in module.params['category']:
for entry in CATEGORY_COMMANDS_ALL:
category_list.append(entry)
else:
# one or more categories specified
category_list = module.params['category']
for category in category_list:
command_list = []
# Build Command list for each Category
if category in CATEGORY_COMMANDS_ALL:
if not module.params['command']:
# True if we don't specify a command --> use default
command_list.append(CATEGORY_COMMANDS_DEFAULT[category])
elif "all" in module.params['command']:
for entry in range(len(CATEGORY_COMMANDS_ALL[category])):
command_list.append(CATEGORY_COMMANDS_ALL[category][entry])
# one or more commands
else:
command_list = module.params['command']
# Verify that all commands are valid
for cmd in command_list:
# Fail if even one command given is invalid
if cmd not in CATEGORY_COMMANDS_ALL[category]:
module.fail_json(msg="Invalid Command: %s" % cmd)
else:
# Fail if even one category given is invalid
module.fail_json(msg="Invalid Category: %s" % category)
# Organize by Categories / Commands
if category == "Systems":
# execute only if we find a Systems resource
resource = rf_utils._find_systems_resource()
if resource['ret'] is False:
module.fail_json(msg=resource['msg'])
for command in command_list:
if command == "GetSystemInventory":
result["system"] = rf_utils.get_multi_system_inventory()
elif command == "GetCpuInventory":
result["cpu"] = rf_utils.get_multi_cpu_inventory()
elif command == "GetMemoryInventory":
result["memory"] = rf_utils.get_multi_memory_inventory()
elif command == "GetNicInventory":
result["nic"] = rf_utils.get_multi_nic_inventory(category)
elif command == "GetStorageControllerInventory":
result["storage_controller"] = rf_utils.get_multi_storage_controller_inventory()
elif command == "GetDiskInventory":
result["disk"] = rf_utils.get_multi_disk_inventory()
elif command == "GetVolumeInventory":
result["volume"] = rf_utils.get_multi_volume_inventory()
elif command == "GetBiosAttributes":
result["bios_attribute"] = rf_utils.get_multi_bios_attributes()
elif command == "GetBootOrder":
result["boot_order"] = rf_utils.get_multi_boot_order()
elif command == "GetBootOverride":
result["boot_override"] = rf_utils.get_multi_boot_override()
elif category == "Chassis":
# execute only if we find Chassis resource
resource = rf_utils._find_chassis_resource()
if resource['ret'] is False:
module.fail_json(msg=resource['msg'])
for command in command_list:
if command == "GetFanInventory":
result["fan"] = rf_utils.get_fan_inventory()
elif command == "GetPsuInventory":
result["psu"] = rf_utils.get_psu_inventory()
elif command == "GetChassisThermals":
result["thermals"] = rf_utils.get_chassis_thermals()
elif command == "GetChassisPower":
result["chassis_power"] = rf_utils.get_chassis_power()
elif command == "GetChassisInventory":
result["chassis"] = rf_utils.get_chassis_inventory()
elif category == "Accounts":
# execute only if we find an Account service resource
resource = rf_utils._find_accountservice_resource()
if resource['ret'] is False:
module.fail_json(msg=resource['msg'])
for command in command_list:
if command == "ListUsers":
result["user"] = rf_utils.list_users()
elif category == "Update":
# execute only if we find UpdateService resources
resource = rf_utils._find_updateservice_resource()
if resource['ret'] is False:
module.fail_json(msg=resource['msg'])
for command in command_list:
if command == "GetFirmwareInventory":
result["firmware"] = rf_utils.get_firmware_inventory()
elif command == "GetFirmwareUpdateCapabilities":
result["firmware_update_capabilities"] = rf_utils.get_firmware_update_capabilities()
elif category == "Sessions":
# execute only if we find SessionService resources
resource = rf_utils._find_sessionservice_resource()
if resource['ret'] is False:
module.fail_json(msg=resource['msg'])
for command in command_list:
if command == "GetSessions":
result["session"] = rf_utils.get_sessions()
elif category == "Manager":
# execute only if we find a Manager service resource
resource = rf_utils._find_managers_resource()
if resource['ret'] is False:
module.fail_json(msg=resource['msg'])
for command in command_list:
if command == "GetManagerNicInventory":
result["manager_nics"] = rf_utils.get_multi_nic_inventory(category)
elif command == "GetVirtualMedia":
result["virtual_media"] = rf_utils.get_multi_virtualmedia()
elif command == "GetLogs":
result["log"] = rf_utils.get_logs()
elif command == "GetManagerServices":
result["network_protocols"] = rf_utils.get_network_protocols()
# Return data back
if is_old_facts:
module.exit_json(ansible_facts=dict(redfish_facts=result))
else:
module.exit_json(redfish_facts=result)
|
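Both versions above iterate over CATEGORY_COMMANDS_ALL and CATEGORY_COMMANDS_DEFAULT; a hedged sketch of the shape those tables take is shown below, with contents that are illustrative rather than the module's real values.
# Illustrative structure only; the real module defines many more categories/commands.
CATEGORY_COMMANDS_ALL = {
    "Systems": ["GetSystemInventory", "GetCpuInventory", "GetNicInventory"],
    "Chassis": ["GetFanInventory", "GetPsuInventory"],
    "Accounts": ["ListUsers"],
}
CATEGORY_COMMANDS_DEFAULT = {
    "Systems": "GetSystemInventory",
    "Chassis": "GetFanInventory",
    "Accounts": "ListUsers",
}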
3,318 |
def populate_connected_event_scenario_2(
react_project: Project, python_project: Project, quick=False
):
"""
This function populates a set of four related events with the same trace id:
- Front-end transaction
- Back-end transaction
Occurrance times and durations are randomized
"""
react_transaction = get_event_from_file("scen2/react_transaction.json")
python_transaction = get_event_from_file("scen2/python_transaction.json")
log_extra = {
"organization_slug": react_project.organization.slug,
"quick": quick,
}
logger.info("populate_connected_event_scenario_2.start", extra=log_extra)
for (timestamp, day) in iter_timestamps(2, quick):
transaction_user = generate_user(quick)
trace_id = uuid4().hex
release = get_release_from_time(react_project.organization_id, timestamp)
release_sha = release.version
old_span_id = react_transaction["contexts"]["trace"]["span_id"]
frontend_root_span_id = uuid4().hex[:16]
frontend_duration = random_normal(2000 - 50 * day, 250, 1000) / 1000.0
frontend_context = {
"trace": {
"type": "trace",
"trace_id": trace_id,
"span_id": frontend_root_span_id,
}
}
# React transaction
local_event = copy.deepcopy(react_transaction)
local_event.update(
project=react_project,
platform=react_project.platform,
event_id=uuid4().hex,
user=transaction_user,
release=release_sha,
timestamp=timestamp,
# start_timestamp decreases based on day so that there's a trend
start_timestamp=timestamp - timedelta(seconds=frontend_duration),
measurements=gen_measurements(day),
contexts=frontend_context,
)
fix_transaction_event(local_event, old_span_id)
safe_send_event(local_event, quick)
# note picking the 0th span is arbitrary
backend_parent_id = local_event["spans"][0]["span_id"]
# python transaction
old_span_id = python_transaction["contexts"]["trace"]["span_id"]
backend_duration = random_normal(1500 + 50 * day, 250, 500)
backend_context = {
"trace": {
"type": "trace",
"trace_id": trace_id,
"span_id": uuid4().hex[:16],
"parent_span_id": backend_parent_id,
}
}
local_event = copy.deepcopy(python_transaction)
local_event.update(
project=python_project,
platform=python_project.platform,
timestamp=timestamp,
start_timestamp=timestamp - timedelta(milliseconds=backend_duration),
user=transaction_user,
release=release_sha,
contexts=backend_context,
)
fix_transaction_event(local_event, old_span_id)
safe_send_event(local_event, quick)
logger.info("populate_connected_event_scenario_2.finished", extra=log_extra)
|
def populate_connected_event_scenario_2(
react_project: Project, python_project: Project, quick=False
):
"""
This function populates a set of two related events with the same trace id:
- Front-end transaction
- Back-end transaction
Occurrence times and durations are randomized.
"""
react_transaction = get_event_from_file("scen2/react_transaction.json")
python_transaction = get_event_from_file("scen2/python_transaction.json")
log_extra = {
"organization_slug": react_project.organization.slug,
"quick": quick,
}
logger.info("populate_connected_event_scenario_2.start", extra=log_extra)
for (timestamp, day) in iter_timestamps(2, quick):
transaction_user = generate_user(quick)
trace_id = uuid4().hex
release = get_release_from_time(react_project.organization_id, timestamp)
release_sha = release.version
old_span_id = react_transaction["contexts"]["trace"]["span_id"]
frontend_root_span_id = uuid4().hex[:16]
frontend_duration = random_normal(2000 - 50 * day, 250, 1000) / 1000.0
frontend_context = {
"trace": {
"type": "trace",
"trace_id": trace_id,
"span_id": frontend_root_span_id,
}
}
# React transaction
local_event = copy.deepcopy(react_transaction)
local_event.update(
project=react_project,
platform=react_project.platform,
event_id=uuid4().hex,
user=transaction_user,
release=release_sha,
timestamp=timestamp,
# start_timestamp decreases based on day so that there's a trend
start_timestamp=timestamp - timedelta(seconds=frontend_duration),
measurements=gen_measurements(day),
contexts=frontend_context,
)
fix_transaction_event(local_event, old_span_id)
safe_send_event(local_event, quick)
# note picking the 0th span is arbitrary
backend_parent_id = local_event["spans"][0]["span_id"]
# python transaction
old_span_id = python_transaction["contexts"]["trace"]["span_id"]
backend_duration = random_normal(1500 + 50 * day, 250, 500)
backend_context = {
"trace": {
"type": "trace",
"trace_id": trace_id,
"span_id": uuid4().hex[:16],
"parent_span_id": backend_parent_id,
}
}
local_event = copy.deepcopy(python_transaction)
local_event.update(
project=python_project,
platform=python_project.platform,
timestamp=timestamp,
start_timestamp=timestamp - timedelta(milliseconds=backend_duration),
user=transaction_user,
release=release_sha,
contexts=backend_context,
)
fix_transaction_event(local_event, old_span_id)
safe_send_event(local_event, quick)
logger.info("populate_connected_event_scenario_2.finished", extra=log_extra)
|
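The helpers used above (random_normal, iter_timestamps, safe_send_event, and so on) come from the surrounding module; as one example, here is a hedged sketch of what a clamped random_normal could look like, an assumption rather than the project's actual implementation.
import random

def random_normal(mean, std, floor):
    # Assumed behaviour: draw from a normal distribution, but never go below `floor`.
    return max(random.normalvariate(mean, std), floor)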
32,175 |
def get_indicator_with_dbotscore_unknown(indicator, indicator_type, reliability=None,
context_prefix=None, address_type=None):
'''
Used for cases where the api response to an indicator is not found,
returns CommandResults with readable_output generic in this case, and indicator with DBotScore unknown
:type indicator: ``str``
:param name: The value of the indicator
:type indicator_type: ``DBotScoreType``
:param indicator_type: use DBotScoreType class [Unsupport in types CVE and ATTACKPATTERN]
:type reliability: ``DBotScoreReliability``
:param reliability: use DBotScoreReliability class
:type context_prefix: ``str``
:param context_prefix: Use only in case that the indicator is CustomIndicator
:type address_type: ``str``
:param address_type: Use only in case that the indicator is Cryptocurrency
:rtype: ``CommandResults``
:return: CommandResults
'''
if not DBotScoreType.is_valid_type(indicator_type) and not context_prefix or indicator_type is DBotScoreType.CUSTOM and not context_prefix:
raise ValueError('indicator type is invalid')
if indicator_type in [DBotScoreType.CVE, DBotScoreType.ATTACKPATTERN]:
msg_error = 'DBotScoreType.{} is unsupported'.format(indicator_type.upper())
raise ValueError(msg_error)
dbot_score = Common.DBotScore(indicator=indicator,
indicator_type=indicator_type
if DBotScoreType.is_valid_type(indicator_type) else DBotScoreType.CUSTOM,
score=Common.DBotScore.NONE,
reliability=reliability,
message='No results found')
integration_name = dbot_score.integration_name or 'Results'
indicator_ = None # type: Any
if indicator_type is DBotScoreType.FILE:
if sha1Regex.match(indicator):
indicator_ = Common.File(dbot_score=dbot_score, sha1=indicator)
indicator_type = 'sha1'
elif sha256Regex.match(indicator):
indicator_ = Common.File(dbot_score=dbot_score, sha256=indicator)
indicator_type = 'sha256'
elif sha512Regex.match(indicator):
indicator_ = Common.File(dbot_score=dbot_score, sha512=indicator)
indicator_type = 'sha512'
elif md5Regex.match(indicator):
indicator_ = Common.File(dbot_score=dbot_score, md5=indicator)
indicator_type = 'md5'
elif indicator_type is DBotScoreType.IP:
indicator_ = Common.IP(ip=indicator, dbot_score=dbot_score)
elif indicator_type is DBotScoreType.URL:
indicator_ = Common.URL(url=indicator, dbot_score=dbot_score)
elif indicator_type is DBotScoreType.DOMAIN:
indicator_ = Common.Domain(domain=indicator, dbot_score=dbot_score)
elif indicator_type is DBotScoreType.EMAIL:
indicator_ = Common.EMAIL(address=indicator, dbot_score=dbot_score)
elif indicator_type is DBotScoreType.CERTIFICATE:
indicator_ = Common.Certificate(subject_dn=indicator, dbot_score=dbot_score)
elif indicator_type is DBotScoreType.ACCOUNT:
indicator_ = Common.Account(id=indicator, dbot_score=dbot_score)
elif indicator_type is DBotScoreType.CRYPTOCURRENCY:
if not address_type:
raise ValueError('Missing address_type parameter')
indicator_ = Common.Cryptocurrency(address=indicator, address_type=address_type, dbot_score=dbot_score)
indicator_type = address_type
else:
indicator_ = Common.CustomIndicator(indicator_type=indicator_type,
value=indicator,
dbot_score=dbot_score,
data={},
context_prefix=context_prefix)
indicator_type = indicator_type.upper()
readable_output = tableToMarkdown(name='{}:'.format(integration_name),
t={indicator_type: indicator, 'Result': 'Not found'},
headers=[indicator_type, 'Result'])
return CommandResults(readable_output=readable_output, indicator=indicator_)
|
def get_indicator_with_dbotscore_unknown(indicator, indicator_type, reliability=None,
context_prefix=None, address_type=None):
'''
Used for cases where the API response for an indicator is not found;
returns CommandResults with a generic readable_output and an indicator with an unknown DBotScore.
:type indicator: ``str``
:param indicator: The value of the indicator
:type indicator_type: ``DBotScoreType``
:param indicator_type: use the DBotScoreType class [unsupported for the CVE and ATTACKPATTERN types]
:type reliability: ``DBotScoreReliability``
:param reliability: use DBotScoreReliability class
:type context_prefix: ``str``
:param context_prefix: Use only in case that the indicator is CustomIndicator
:type address_type: ``str``
:param address_type: Use only in case that the indicator is Cryptocurrency
:rtype: ``CommandResults``
:return: CommandResults
'''
if not DBotScoreType.is_valid_type(indicator_type) and not context_prefix or indicator_type is DBotScoreType.CUSTOM and not context_prefix:
raise ValueError('indicator type is invalid')
if indicator_type in [DBotScoreType.CVE, DBotScoreType.ATTACKPATTERN]:
msg_error = 'DBotScoreType.{} is unsupported'.format(indicator_type.upper())
raise ValueError(msg_error)
dbot_score = Common.DBotScore(indicator=indicator,
indicator_type=indicator_type
if DBotScoreType.is_valid_type(indicator_type) else DBotScoreType.CUSTOM,
score=Common.DBotScore.NONE,
reliability=reliability,
message='No results found.')
integration_name = dbot_score.integration_name or 'Results'
indicator_ = None # type: Any
if indicator_type is DBotScoreType.FILE:
if sha1Regex.match(indicator):
indicator_ = Common.File(dbot_score=dbot_score, sha1=indicator)
indicator_type = 'sha1'
elif sha256Regex.match(indicator):
indicator_ = Common.File(dbot_score=dbot_score, sha256=indicator)
indicator_type = 'sha256'
elif sha512Regex.match(indicator):
indicator_ = Common.File(dbot_score=dbot_score, sha512=indicator)
indicator_type = 'sha512'
elif md5Regex.match(indicator):
indicator_ = Common.File(dbot_score=dbot_score, md5=indicator)
indicator_type = 'md5'
elif indicator_type is DBotScoreType.IP:
indicator_ = Common.IP(ip=indicator, dbot_score=dbot_score)
elif indicator_type is DBotScoreType.URL:
indicator_ = Common.URL(url=indicator, dbot_score=dbot_score)
elif indicator_type is DBotScoreType.DOMAIN:
indicator_ = Common.Domain(domain=indicator, dbot_score=dbot_score)
elif indicator_type is DBotScoreType.EMAIL:
indicator_ = Common.EMAIL(address=indicator, dbot_score=dbot_score)
elif indicator_type is DBotScoreType.CERTIFICATE:
indicator_ = Common.Certificate(subject_dn=indicator, dbot_score=dbot_score)
elif indicator_type is DBotScoreType.ACCOUNT:
indicator_ = Common.Account(id=indicator, dbot_score=dbot_score)
elif indicator_type is DBotScoreType.CRYPTOCURRENCY:
if not address_type:
raise ValueError('Missing address_type parameter')
indicator_ = Common.Cryptocurrency(address=indicator, address_type=address_type, dbot_score=dbot_score)
indicator_type = address_type
else:
indicator_ = Common.CustomIndicator(indicator_type=indicator_type,
value=indicator,
dbot_score=dbot_score,
data={},
context_prefix=context_prefix)
indicator_type = indicator_type.upper()
readable_output = tableToMarkdown(name='{}:'.format(integration_name),
t={indicator_type: indicator, 'Result': 'Not found'},
headers=[indicator_type, 'Result'])
return CommandResults(readable_output=readable_output, indicator=indicator_)
|
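A brief usage sketch in an XSOAR-style integration, assuming CommonServerPython is in scope; the reliability value is illustrative.
results = get_indicator_with_dbotscore_unknown(
    indicator='8.8.8.8',
    indicator_type=DBotScoreType.IP,
    reliability=DBotScoreReliability.C,
)
# Renders a small "Not found" table and attaches an indicator with an Unknown DBotScore.
return_results(results)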
27,972 |
def parse_options(compilation_db_entry,
compiler_info_file=None,
keep_gcc_include_fixed=False,
keep_gcc_intrin=False,
get_clangsa_version_func=None,
env=None):
"""
This function parses a GCC compilation action and returns a BuildAction
object which can be the input of Clang analyzer tools.
compilation_db_entry -- An entry from a valid compilation database JSON
file, i.e. a dictionary with the compilation
command, the compiled file and the current working
directory.
compiler_info_file -- Contains the path to a compiler info file.
keep_gcc_include_fixed -- There are some implicit include paths which are
only used by GCC (include-fixed). This flag
determines whether these should be kept among
the implicit include paths.
keep_gcc_intrin -- There are some implicit include paths which contain
GCC-specific header files (those which end with
intrin.h). This flag determines whether these should be
kept among the implicit include paths. Use this flag if
Clang analysis fails with error message related to
__builtin symbols.
get_clangsa_version_func -- Is a function which should return the
version information for a clang compiler.
It requires the compiler binary and an env.
get_clangsa_version_func(compiler_binary, env)
Should return false for a non clang compiler.
env -- Is the environment where a subprocess call should be executed.
"""
details = {
'analyzer_options': [],
'compiler_includes': defaultdict(dict), # For each language c/cpp.
'compiler_standard': defaultdict(dict), # For each language c/cpp.
'analyzer_type': -1,
'original_command': '',
'directory': '',
'output': '',
'lang': None,
'arch': '', # Target in the compile command set by -arch.
'target': defaultdict(dict),
'source': ''}
if 'arguments' in compilation_db_entry:
gcc_command = compilation_db_entry['arguments']
details['original_command'] = ' '.join(gcc_command)
elif 'command' in compilation_db_entry:
details['original_command'] = compilation_db_entry['command']
gcc_command = shlex.split(compilation_db_entry['command'])
else:
raise KeyError("No valid 'command' or 'arguments' entry found!")
details['directory'] = compilation_db_entry['directory']
details['action_type'] = None
details['compiler'] =\
determine_compiler(gcc_command,
ImplicitCompilerInfo.is_executable_compiler)
if '++' in os.path.basename(details['compiler']):
details['lang'] = 'c++'
# Source files are skipped first so they are not collected
# with the other compiler flags together. Source file is handled
# separately from the compile command json.
clang_flag_collectors = [
__skip_sources,
__skip_clang,
__collect_transform_xclang_opts,
__get_output,
__determine_action_type,
__get_arch,
__get_language,
__collect_transform_include_opts,
__collect_clang_compile_opts
]
gcc_flag_transformers = [
__skip_gcc,
__replace,
__collect_compile_opts,
__collect_transform_include_opts,
__determine_action_type,
__skip_sources,
__get_arch,
__get_language,
__get_output]
flag_processors = gcc_flag_transformers
compiler_version_info = \
ImplicitCompilerInfo.compiler_versions.get(
details['compiler'], False)
if not compiler_version_info and get_clangsa_version_func:
# did not find in the cache yet
try:
compiler_version_info = \
get_clangsa_version_func(details['compiler'], env)
except subprocess.CalledProcessError as cerr:
LOG.error('Failed to get and parse clang version: %s',
details['compiler'])
LOG.error(cerr)
compiler_version_info = False
ImplicitCompilerInfo.compiler_versions[details['compiler']] \
= compiler_version_info
using_clang_to_compile_and_analyze = False
if ImplicitCompilerInfo.compiler_versions[details['compiler']]:
# Based on the version information the compiler is clang.
using_clang_to_compile_and_analyze = True
flag_processors = clang_flag_collectors
for it in OptionIterator(gcc_command[1:]):
for flag_processor in flag_processors:
if flag_processor(it, details):
break
else:
pass
# print('Unhandled argument: ' + it.item)
if details['action_type'] is None:
details['action_type'] = BuildAction.COMPILE
details['source'] = compilation_db_entry['file']
# In case the file attribute in the entry is empty.
if details['source'] == '.':
details['source'] = ''
lang = get_language(os.path.splitext(details['source'])[1])
if lang:
if details['lang'] is None:
details['lang'] = lang
else:
details['action_type'] = BuildAction.LINK
# Option parser detects target architecture but does not know about the
# language during parsing. Set the collected compilation target for the
# language detected language.
details['target'][lang] = details['arch']
# With gcc-toolchain a non default compiler toolchain can be set. Clang
# will search for include paths and libraries based on the gcc-toolchain
# parameter. Detecting extra include paths from the host compiler could
# conflict with this.
# For example if the compiler in the compile command is clang and
# gcc-toolchain is set we will get the include paths for clang and not for
# the compiler set in gcc-toolchain. This can cause missing headers during
# the analysis.
toolchain = \
gcc_toolchain.toolchain_in_args(details['analyzer_options'])
# Store the compiler built in include paths and defines.
# If clang compiler is used for compilation and analysis,
# do not collect the implicit include paths.
if (not toolchain and not using_clang_to_compile_and_analyze) or \
(compiler_info_file and os.path.exists(compiler_info_file)):
ImplicitCompilerInfo.set(details, compiler_info_file)
if not keep_gcc_include_fixed:
for lang, includes in details['compiler_includes'].items():
details['compiler_includes'][lang] = \
filter(__is_not_include_fixed, includes)
if not keep_gcc_intrin:
for lang, includes in details['compiler_includes'].items():
details['compiler_includes'][lang] = \
filter(__contains_no_intrinsic_headers, includes)
# filter out intrin directories
aop_without_itrin = []
analyzer_options = iter(details['analyzer_options'])
for aopt in analyzer_options:
m = INCLUDE_OPTIONS_MERGED.match(aopt)
if m:
flag = m.group(0)
together = len(flag) != len(aopt)
if together:
value = aopt[len(flag):]
else:
flag = aopt
value = analyzer_options.next()
if os.path.isdir(value) and __contains_no_intrinsic_headers(
value) or not os.path.isdir(value):
if together:
aop_without_itrin.append(aopt)
else:
aop_without_itrin.append(flag)
aop_without_itrin.append(value)
else:
# no match
aop_without_itrin.append(aopt)
details['analyzer_options'] = aop_without_itrin
return BuildAction(**details)
|
def parse_options(compilation_db_entry,
compiler_info_file=None,
keep_gcc_include_fixed=False,
keep_gcc_intrin=False,
get_clangsa_version_func=None,
env=None):
"""
This function parses a GCC compilation action and returns a BuildAction
object which can be the input of Clang analyzer tools.
compilation_db_entry -- An entry from a valid compilation database JSON
file, i.e. a dictionary with the compilation
command, the compiled file and the current working
directory.
compiler_info_file -- Contains the path to a compiler info file.
keep_gcc_include_fixed -- There are some implicit include paths which are
only used by GCC (include-fixed). This flag
determines whether these should be kept among
the implicit include paths.
keep_gcc_intrin -- There are some implicit include paths which contain
GCC-specific header files (those which end with
intrin.h). This flag determines whether these should be
kept among the implicit include paths. Use this flag if
Clang analysis fails with error message related to
__builtin symbols.
get_clangsa_version_func -- Is a function which should return the
version information for a clang compiler.
It requires the compiler binary and an env.
get_clangsa_version_func(compiler_binary, env)
Should return false for a non clang compiler.
env -- Is the environment where a subprocess call should be executed.
"""
details = {
'analyzer_options': [],
'compiler_includes': defaultdict(dict), # For each language c/cpp.
'compiler_standard': defaultdict(dict), # For each language c/cpp.
'analyzer_type': -1,
'original_command': '',
'directory': '',
'output': '',
'lang': None,
'arch': '', # Target in the compile command set by -arch.
'target': defaultdict(dict),
'source': ''}
if 'arguments' in compilation_db_entry:
gcc_command = compilation_db_entry['arguments']
details['original_command'] = ' '.join(gcc_command)
elif 'command' in compilation_db_entry:
details['original_command'] = compilation_db_entry['command']
gcc_command = shlex.split(compilation_db_entry['command'])
else:
raise KeyError("No valid 'command' or 'arguments' entry found!")
details['directory'] = compilation_db_entry['directory']
details['action_type'] = None
details['compiler'] =\
determine_compiler(gcc_command,
ImplicitCompilerInfo.is_executable_compiler)
if '++' in os.path.basename(details['compiler']):
details['lang'] = 'c++'
# Source files are skipped first so they are not collected
# with the other compiler flags together. Source file is handled
# separately from the compile command json.
clang_flag_collectors = [
__skip_sources,
__skip_clang,
__collect_transform_xclang_opts,
__get_output,
__determine_action_type,
__get_arch,
__get_language,
__collect_transform_include_opts,
__collect_clang_compile_opts
]
gcc_flag_transformers = [
__skip_gcc,
__replace,
__collect_compile_opts,
__collect_transform_include_opts,
__determine_action_type,
__skip_sources,
__get_arch,
__get_language,
__get_output]
flag_processors = gcc_flag_transformers
compiler_version_info = \
ImplicitCompilerInfo.compiler_versions.get(
details['compiler'], False)
if not compiler_version_info and get_clangsa_version_func:
# did not find in the cache yet
try:
compiler_version_info = \
get_clangsa_version_func(details['compiler'], env)
except subprocess.CalledProcessError as cerr:
LOG.error('Failed to get and parse clang version: %s',
details['compiler'])
LOG.error(cerr)
compiler_version_info = False
ImplicitCompilerInfo.compiler_versions[details['compiler']] \
= compiler_version_info
using_clang_to_compile_and_analyze = False
if ImplicitCompilerInfo.compiler_versions[details['compiler']]:
# Based on the version information the compiler is clang.
using_clang_to_compile_and_analyze = True
flag_processors = clang_flag_collectors
for it in OptionIterator(gcc_command[1:]):
for flag_processor in flag_processors:
if flag_processor(it, details):
break
else:
pass
# print('Unhandled argument: ' + it.item)
if details['action_type'] is None:
details['action_type'] = BuildAction.COMPILE
details['source'] = compilation_db_entry['file']
# In case the file attribute in the entry is empty.
if details['source'] == '.':
details['source'] = ''
lang = get_language(os.path.splitext(details['source'])[1])
if lang:
if details['lang'] is None:
details['lang'] = lang
else:
details['action_type'] = BuildAction.LINK
# Option parser detects target architecture but does not know about the
# language during parsing. Set the collected compilation target for the
# detected language.
details['target'][lang] = details['arch']
# With gcc-toolchain a non default compiler toolchain can be set. Clang
# will search for include paths and libraries based on the gcc-toolchain
# parameter. Detecting extra include paths from the host compiler could
# conflict with this.
# For example if the compiler in the compile command is clang and
# gcc-toolchain is set we will get the include paths for clang and not for
# the compiler set in gcc-toolchain. This can cause missing headers during
# the analysis.
toolchain = \
gcc_toolchain.toolchain_in_args(details['analyzer_options'])
# Store the compiler built in include paths and defines.
# If clang compiler is used for compilation and analysis,
# do not collect the implicit include paths.
if (not toolchain and not using_clang_to_compile_and_analyze) or \
(compiler_info_file and os.path.exists(compiler_info_file)):
ImplicitCompilerInfo.set(details, compiler_info_file)
if not keep_gcc_include_fixed:
for lang, includes in details['compiler_includes'].items():
details['compiler_includes'][lang] = \
filter(__is_not_include_fixed, includes)
if not keep_gcc_intrin:
for lang, includes in details['compiler_includes'].items():
details['compiler_includes'][lang] = \
filter(__contains_no_intrinsic_headers, includes)
# filter out intrin directories
aop_without_intrin = []
analyzer_options = iter(details['analyzer_options'])
for aopt in analyzer_options:
m = INCLUDE_OPTIONS_MERGED.match(aopt)
if m:
flag = m.group(0)
together = len(flag) != len(aopt)
if together:
value = aopt[len(flag):]
else:
flag = aopt
value = analyzer_options.next()
if os.path.isdir(value) and __contains_no_intrinsic_headers(
value) or not os.path.isdir(value):
if together:
aop_without_intrin.append(aopt)
else:
aop_without_intrin.append(flag)
aop_without_intrin.append(value)
else:
# no match
aop_without_intrin.append(aopt)
details['analyzer_options'] = aop_without_intrin
return BuildAction(**details)
|
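A hedged sketch of feeding a single compilation-database entry to the parser; the paths and flags are illustrative, and the surrounding module is assumed to be importable.
entry = {
    "directory": "/home/user/project",
    "command": "gcc -c -Iinclude -O2 main.c -o main.o",
    "file": "main.c",
}
action = parse_options(entry)
# The resulting BuildAction carries the source file, detected language, options, etc.
print(action.source, action.lang)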
29,363 |
def compile_and_check_typescript(config_path: str) -> None:
"""Compiles typescript files and checks the compilation errors.
Args:
config_path: str. The config that should be used to run the typescript
checks.
"""
os.environ['PATH'] = '%s/bin:' % common.NODE_PATH + os.environ['PATH']
validate_compiled_js_dir()
if os.path.exists(COMPILED_JS_DIR):
shutil.rmtree(COMPILED_JS_DIR)
print('Compiling and testing typescript...')
cmd = ['./node_modules/typescript/bin/tsc', '--project', config_path]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, encoding='utf-8')
if os.path.exists(COMPILED_JS_DIR):
shutil.rmtree(COMPILED_JS_DIR)
# The value of `process.stdout` should not be None since we passed
# the `stdout=subprocess.PIPE` argument to `Popen`.
assert process.stdout is not None
error_messages = list(iter(process.stdout.readline, ''))
if config_path == STRICT_TSCONFIG_FILEPATH:
# Generate file names from the error messages.
errors = [x.strip() for x in error_messages]
# Remove the empty lines and error explanation lines.
prefixes = ('core', 'extension', 'typings')
errors = [x for x in errors if x.startswith(prefixes)]
# Remove error explanation lines.
errors = [x.split('(', 1)[0] for x in errors]
# Remove the dublin core prefixes.
errors = list(dict.fromkeys(errors))
files_with_errors = sorted(errors)
# List of missing files that are neither strict typed nor present in
# NOT_FULLY_TYPE_STRICT_TSCONFIG_FILEPATH.
files_not_type_strict = []
for filename in files_with_errors:
if filename not in NOT_FULLY_TYPE_STRICT_TSCONFIG_FILEPATH:
files_not_type_strict.append(filename)
# Add "typings" folder to get global imports while compiling.
files_not_type_strict.append('typings')
# Update "include" field of tsconfig-strict.json with files that are
# neither strict typed nor present in
# NOT_FULLY_TYPE_STRICT_TSCONFIG_FILEPATH.
# Example: List "files_not_type_strict".
file_name = os.path.join(os.getcwd(), 'tsconfig-strict.json')
with open(file_name, 'r', encoding='utf-8') as f:
tsconfig_strict_json_dict = yaml.safe_load(f)
tsconfig_strict_json_dict['include'] = files_not_type_strict
tsconfig_strict_json_dict = (
json.dumps(tsconfig_strict_json_dict, indent=2, sort_keys=True))
with open(file_name, 'w', encoding='utf-8') as f:
f.write(tsconfig_strict_json_dict + '\n')
# Compile tsconfig-strict.json with updated "include" property.
os.environ['PATH'] = '%s/bin:' % common.NODE_PATH + os.environ['PATH']
validate_compiled_js_dir()
if os.path.exists(COMPILED_JS_DIR):
shutil.rmtree(COMPILED_JS_DIR)
cmd = ['./node_modules/typescript/bin/tsc', '--project', config_path]
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, encoding='utf-8')
if os.path.exists(COMPILED_JS_DIR):
shutil.rmtree(COMPILED_JS_DIR)
# The value of `process.stdout` should not be None since we passed
# the `stdout=subprocess.PIPE` argument to `Popen`.
assert process.stdout is not None
error_messages = list(iter(process.stdout.readline, ''))
# Update tsconfig-strict.json and set to its intial "include" state
# example "include": ["core", "extensions", "typings"].
with open(file_name, 'r', encoding='utf-8') as f:
tsconfig_strict_json_dict = yaml.safe_load(f)
tsconfig_strict_json_dict['include'] = (
['core', 'extensions', 'typings'])
tsconfig_strict_json_dict = (
json.dumps(tsconfig_strict_json_dict, indent=2, sort_keys=True))
with open(file_name, 'w', encoding='utf-8') as f:
f.write(tsconfig_strict_json_dict + '\n')
if error_messages:
print('\n' + '\n'.join(error_messages))
print(
str(len([x for x in error_messages if x.startswith(prefixes)]))
+ ' Errors found during compilation.\n')
sys.exit(1)
else:
print('Compilation successful!')
else:
if error_messages:
print('Errors found during compilation\n')
print('\n'.join(error_messages))
sys.exit(1)
else:
print('Compilation successful!')
|
def compile_and_check_typescript(config_path: str) -> None:
"""Compiles typescript files and checks the compilation errors.
Args:
config_path: str. The config that should be used to run the typescript
checks.
"""
os.environ['PATH'] = '%s/bin:' % common.NODE_PATH + os.environ['PATH']
validate_compiled_js_dir()
if os.path.exists(COMPILED_JS_DIR):
shutil.rmtree(COMPILED_JS_DIR)
print('Compiling and testing typescript...')
cmd = ['./node_modules/typescript/bin/tsc', '--project', config_path]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, encoding='utf-8')
if os.path.exists(COMPILED_JS_DIR):
shutil.rmtree(COMPILED_JS_DIR)
# The value of `process.stdout` should not be None since we passed
# the `stdout=subprocess.PIPE` argument to `Popen`.
assert process.stdout is not None
error_messages = list(iter(process.stdout.readline, ''))
if config_path == STRICT_TSCONFIG_FILEPATH:
# Generate file names from the error messages.
errors = [x.strip() for x in error_messages]
# Remove the empty lines and error explanation lines.
prefixes = ('core', 'extension', 'typings')
errors = [x for x in errors if x.startswith(prefixes)]
# Remove error explanation lines.
errors = [x.split('(', 1)[0] for x in errors]
# Remove duplicate entries while preserving order.
errors = list(dict.fromkeys(errors))
files_with_errors = sorted(errors)
# List of files that are neither strictly typed nor present in
# NOT_FULLY_TYPE_STRICT_TSCONFIG_FILEPATH.
files_not_type_strict = []
for filename in files_with_errors:
if filename not in NOT_FULLY_TYPE_STRICT_TSCONFIG_FILEPATH:
files_not_type_strict.append(filename)
# Add "typings" folder to get global imports while compiling.
files_not_type_strict.append('typings')
# Update "include" field of tsconfig-strict.json with files that are
# neither strictly typed nor present in
# NOT_FULLY_TYPE_STRICT_TSCONFIG_FILEPATH.
# Example: List "files_not_type_strict".
file_name = os.path.join(os.getcwd(), 'tsconfig-strict.json')
with open(file_name, 'r', encoding='utf-8') as f:
tsconfig_strict_json_dict = yaml.safe_load(f)
tsconfig_strict_json_dict['include'] = files_not_type_strict
tsconfig_strict_json_dict = (
json.dumps(tsconfig_strict_json_dict, indent=2, sort_keys=True))
with open(file_name, 'w', encoding='utf-8') as f:
f.write(tsconfig_strict_json_dict + '\n')
# Compile tsconfig-strict.json with updated "include" property.
os.environ['PATH'] = '%s/bin:' % common.NODE_PATH + os.environ['PATH']
validate_compiled_js_dir()
if os.path.exists(COMPILED_JS_DIR):
shutil.rmtree(COMPILED_JS_DIR)
cmd = ['./node_modules/typescript/bin/tsc', '--project', config_path]
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, encoding='utf-8')
if os.path.exists(COMPILED_JS_DIR):
shutil.rmtree(COMPILED_JS_DIR)
# The value of `process.stdout` should not be None since we passed
# the `stdout=subprocess.PIPE` argument to `Popen`.
assert process.stdout is not None
error_messages = list(iter(process.stdout.readline, ''))
# Update tsconfig-strict.json and reset it to its initial "include" state,
# for example "include": ["core", "extensions", "typings"].
with open(file_name, 'r', encoding='utf-8') as f:
tsconfig_strict_json_dict = yaml.safe_load(f)
tsconfig_strict_json_dict['include'] = (
['core', 'extensions', 'typings'])
tsconfig_strict_json_dict = (
json.dumps(tsconfig_strict_json_dict, indent=2, sort_keys=True))
with open(file_name, 'w', encoding='utf-8') as f:
f.write(tsconfig_strict_json_dict + '\n')
if error_messages:
print('\n' + '\n'.join(error_messages))
print(
str(len([x for x in error_messages if x.startswith(prefixes)]))
+ ' Errors found during compilation.\n')
sys.exit(1)
else:
print('Compilation successful!')
else:
if error_messages:
print('Errors found during compilation\n')
print('\n'.join(error_messages))
sys.exit(1)
else:
print('Compilation successful!')
|
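A minimal sketch of invoking the check from the surrounding script; STRICT_TSCONFIG_FILEPATH appears in the code above, while the non-strict config path used here is a hypothetical name.
if __name__ == '__main__':
    # Hypothetical non-strict config path, followed by the strict config used above.
    compile_and_check_typescript('tsconfig.json')
    compile_and_check_typescript(STRICT_TSCONFIG_FILEPATH)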
34,704 |
def test_md_format_message_using_long_entity_syntax_no_start_end():
formatted = format_message(
"I am from Berlin.",
intent="location",
entities=[
{"start": 10, "end": 16, "entity": "city", "value": "Berlin"},
{"entity": "country", "value": "Germany", "role": "destination",},
],
)
assert formatted == """I am from [Berlin](city)."""
|
def test_md_format_message_using_long_entity_syntax_no_start_end():
formatted = format_message(
"I am from Berlin.",
intent="location",
entities=[
{"start": 10, "end": 16, "entity": "city", "value": "Berlin"},
{"entity": "country", "value": "Germany", "role": "destination",},
],
)
assert formatted == "I am from [Berlin](city)."
|
59,394 |
def test_artifact_metadata_save(wandb_init):
# Test artifact metadata sucessfully saved for len(numpy) > 32
dummy_metadata = np.array([0] * 33)
run = wandb_init()
artifact = wandb.Artifact(name="art", type="dataset")
run.log_artifact(artifact)
artifact.wait().metadata.update(dummy_metadata=dummy_metadata)
artifact.save()
saved_artifact = run.use_artifact("art:latest")
assert "dummy_metadata" in saved_artifact.metadata
assert saved_artifact.metadata["dummy_metadata"]["_type"] == "histogram"
run.finish()
|
def test_artifact_metadata_save(wandb_init):
# Test that artifact metadata is successfully saved for len(numpy) > 32
dummy_metadata = np.array([0] * 33)
run = wandb_init()
artifact = wandb.Artifact(name="art", type="dataset", metadata={"dummy_metadata": dummy_metadata})
run.log_artifact(artifact).wait()
saved_artifact = run.use_artifact("art:latest")
assert "dummy_metadata" in saved_artifact.metadata
assert saved_artifact.metadata["dummy_metadata"]["_type"] == "histogram"
run.finish()
|
37,498 |
def marginal_counts(result, indices=None, inplace=False, format_marginal=False):
"""Marginalize counts from an experiment over some indices of interest.
Args:
result (dict or Result): result to be marginalized
(a Result object or a dict(str, int) of counts).
indices (set(int) or None): The bit positions of interest. Bit indices not
spcified will be ignored. If ``None`` (default), do not marginalize at all,
this is equivalent to providing a set of all bit positions.
inplace (bool): Default: False. Operates on the original Result
argument if True, leading to loss of original Job Result.
It has no effect if ``result`` is a dict.
format_marginal (bool): Default: False. If True, takes the output of
marginalize and formats it with placeholders between cregs and
for non-indices.
Returns:
Result or dict(str, int): A Result object or a dictionary with
the observed counts, marginalized to only account for frequency
of observations of bits of interest.
Raises:
QiskitError: in case of invalid indices to marginalize over.
"""
if indices is None:
return result
if isinstance(result, Result):
if not inplace:
result = deepcopy(result)
for i, experiment_result in enumerate(result.results):
counts = result.get_counts(i)
new_counts = _marginalize(counts, indices)
new_counts_hex = {}
for k, v in new_counts.items():
new_counts_hex[_bin_to_hex(k)] = v
experiment_result.data.counts = new_counts_hex
experiment_result.header.memory_slots = len(indices)
csize = experiment_result.header.creg_sizes
experiment_result.header.creg_sizes = _adjust_creg_sizes(csize, indices)
return result
else:
marg_counts = _marginalize(result, indices)
if format_marginal and indices is not None:
marg_counts = _format_marginal(result, marg_counts, indices)
return marg_counts
|
def marginal_counts(result, indices=None, inplace=False, format_marginal=False):
"""Marginalize counts from an experiment over some indices of interest.
Args:
result (dict or Result): result to be marginalized
(a Result object or a dict(str, int) of counts).
indices (set(int) or None): The bit positions of interest. Bit indices not
specified will be ignored. If ``None`` (default), do not marginalize at all,
this is equivalent to providing a set of all bit positions.
inplace (bool): Default: False. Operates on the original Result
argument if True, leading to loss of original Job Result.
It has no effect if ``result`` is a dict.
format_marginal (bool): Default: False. If True, takes the output of
marginalize and formats it with placeholders between cregs and
for non-indices.
Returns:
Result or dict(str, int): A Result object or a dictionary with
the observed counts, marginalized to only account for frequency
of observations of bits of interest.
Raises:
QiskitError: in case of invalid indices to marginalize over.
"""
if indices is None:
return result
if isinstance(result, Result):
if not inplace:
result = deepcopy(result)
for i, experiment_result in enumerate(result.results):
counts = result.get_counts(i)
new_counts = _marginalize(counts, indices)
new_counts_hex = {}
for k, v in new_counts.items():
new_counts_hex[_bin_to_hex(k)] = v
experiment_result.data.counts = new_counts_hex
experiment_result.header.memory_slots = len(indices)
csize = experiment_result.header.creg_sizes
experiment_result.header.creg_sizes = _adjust_creg_sizes(csize, indices)
return result
else:
marg_counts = _marginalize(result, indices)
if format_marginal and indices is not None:
marg_counts = _format_marginal(result, marg_counts, indices)
return marg_counts
|
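A quick usage sketch with a plain counts dictionary; the numbers are illustrative, and bit positions follow the usual little-endian convention where index 0 is the rightmost bit.
counts = {'00': 12, '01': 3, '10': 5, '11': 80}
# Keep only bit position 0: '00' and '10' merge, as do '01' and '11'.
print(marginal_counts(counts, indices=[0]))  # {'0': 17, '1': 83}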
43,860 |
def coefficients(f, n_inputs, degree, lowpass_filter=False, filter_threshold=None):
r"""Computes the first :math:`2d+1` Fourier coefficients of a :math:`2\pi`
periodic function, where :math:`d` is the highest desired frequency (the
degree) of the Fourier spectrum.
While this function can be used to compute Fourier coefficients in general,
the specific use case in PennyLane is to compute coefficients of the
functions that result from measuring expectation values of parametrized
quantum circuits, as described in `Schuld, Sweke and Meyer (2020)
<https://arxiv.org/abs/2008.08605>`__ and `Vidal and Theis, 2019
<https://arxiv.org/abs/1901.11434>`__.
**Details**
Consider a quantum circuit that depends on a
parameter vector :math:`x` with
length :math:`N`. The circuit involves application of some unitary
operations :math:`U(x)`, and then measurement of an observable
:math:`\langle \hat{O} \rangle`. Analytically, the expectation value is
.. math::
\langle \hat{O} \rangle = \langle 0 \vert U^\dagger (x) \hat{O} U(x) \vert 0\rangle = \langle
\psi(x) \vert \hat{O} \vert \psi (x)\rangle.
This output is simply a function :math:`f(x) = \langle \psi(x) \vert \hat{O} \vert \psi
(x)\rangle`. Notably, it is a periodic function of the parameters, and
it can thus be expressed as a multidimensional Fourier series:
.. math::
f(x) = \sum \limits_{n_1\in \Omega_1} \dots \sum \limits_{n_N \in \Omega_N}
c_{n_1,\dots, n_N} e^{-i x_1 n_1} \dots e^{-i x_N n_N},
where :math:`n_i` are integer-valued frequencies, :math:`\Omega_i` are the set
of available values for the integer frequencies, and the
:math:`c_{n_1,\ldots,n_N}` are Fourier coefficients.
Args:
f (callable): Function that takes a 1D tensor of ``n_inputs`` scalar inputs. The function can be a QNode, but
has to return a single real value (such as an expectation) only.
n_inputs (int): number of function inputs
degree (int): max frequency of Fourier coeffs to be computed. For degree :math:`d`,
the coefficients from frequencies :math:`-d, -d+1,...0,..., d-1, d` will be computed.
lowpass_filter (bool): If ``True``, a simple low-pass filter is applied prior to
computing the set of coefficients in order to filter out frequencies above the
given degree. See examples below.
filter_threshold (None or int): The integer frequency at which to filter. If
``lowpass_filter`` is set to ``True,`` but no value is specified, ``2 * degree`` is used.
Returns:
array[complex]: The Fourier coefficients of the function ``f`` up to the specified degree.
**Example**
Suppose we have the following quantum function and wish to compute its Fourier
coefficients with respect to the variable ``inpt``, which is an array with 2 values:
.. code-block:: python
dev = qml.device('default.qubit', wires=['a'])
@qml.qnode(dev)
def circuit(weights, inpt):
qml.RX(inpt[0], wires='a')
qml.Rot(*weights[0], wires='a')
qml.RY(inpt[1], wires='a')
qml.Rot(*weights[1], wires='a')
return qml.expval(qml.PauliZ(wires='a'))
Unless otherwise specified, the coefficients will be computed for all input
values. To compute coefficients with respect to only a subset of the input
values, it is necessary to use a wrapper function (e.g.,
``functools.partial``). We do this below, while fixing a value for
``weights``:
>>> from functools import partial
>>> weights = np.array([[0.1, 0.2, 0.3], [-4.1, 3.2, 1.3]])
>>> partial_circuit = partial(circuit, weights)
Now we must specify the number of inputs, and the maximum desired
degree. Based on the underlying theory, we expect the degree to be 1
(frequencies -1, 0, and 1).
>>> num_inputs = 2
>>> degree = 1
Then we can obtain the coefficients:
>>> coeffs = coefficients(partial_circuit, num_inputs, degree)
>>> print(coeffs)
[[ 0. +0.j -0. +0.j -0. +0.j ]
[-0.0014-0.022j -0.3431-0.0408j -0.1493+0.0374j]
[-0.0014+0.022j -0.1493-0.0374j -0.3431+0.0408j]]
If the specified degree is lower than the highest frequency of the function,
aliasing may occur, and the resultant coefficients will be incorrect as they
will include components of the series expansion from higher frequencies. In
order to mitigate aliasing, setting ``lowpass_filter=True`` will apply a
simple low-pass filter prior to computing the coefficients. Coefficients up
to a specified value are computed, and then frequencies higher than the
degree are simply removed. This ensures that the coefficients returned will
have the correct values, though they may not be the full set of
coefficients. If no threshold value is provided, the threshold will be set
to ``2 * degree``.
Consider the circuit below:
.. code-block:: python
@qml.qnode(dev)
def circuit(inpt):
qml.RX(inpt[0], wires=0)
qml.RY(inpt[0], wires=1)
qml.CNOT(wires=[1, 0])
return qml.expval(qml.PauliZ(0))
One can work out by hand that the Fourier coefficients are :math:`c_0 = 0.5, c_1 = c_{-1} = 0,`
and :math:`c_2 = c_{-2} = 0.25`. Suppose we would like only to obtain the coefficients
:math:`c_0` and :math:`c_1, c_{-1}`. If we simply ask for the coefficients of degree 1,
we will obtain incorrect values due to aliasing:
>>> coefficients(circuit, 1, 1)
array([0.5 +0.j, 0.25+0.j, 0.25+0.j])
However if we enable the low-pass filter, we can still obtain the correct coefficients:
>>> coefficients(circuit, 1, 1, lowpass_filter=True)
array([0.5+0.j, 0. +0.j, 0. +0.j])
Note that in this case, ``2 * degree`` gives us exactly the maximum coefficient;
in other situations it may be desirable to set the threshold value explicitly.
The `coefficients` function can handle qnodes from all PennyLane interfaces.
"""
if not lowpass_filter:
return _coefficients_no_filter(f, n_inputs, degree)
if filter_threshold is None:
filter_threshold = 2 * degree
# Compute the fft of the function at 2x the specified degree
unfiltered_coeffs = _coefficients_no_filter(f, n_inputs, filter_threshold)
# Shift the frequencies so that the 0s are at the centre
shifted_unfiltered_coeffs = np.fft.fftshift(unfiltered_coeffs)
# Next, slice up the array so that we get only the coefficients we care about,
# those between -degree and degree
range_slices = list(
range(
filter_threshold - degree,
shifted_unfiltered_coeffs.shape[0] - (filter_threshold - degree),
)
)
shifted_filtered_coeffs = shifted_unfiltered_coeffs.copy()
# Go axis by axis and take only the central components
for axis in range(n_inputs - 1, -1, -1):
shifted_filtered_coeffs = np.take(shifted_filtered_coeffs, range_slices, axis=axis)
# Shift everything back into "normal" fft ordering
filtered_coeffs = np.fft.ifftshift(shifted_filtered_coeffs)
# Compute the inverse FFT
f_discrete_filtered = np.fft.ifftn(filtered_coeffs)
# Now compute the FFT again on the filtered data
coeffs = np.fft.fftn(f_discrete_filtered)
return coeffs
|
def coefficients(f, n_inputs, degree, lowpass_filter=False, filter_threshold=None):
r"""Computes the first :math:`2d+1` Fourier coefficients of a :math:`2\pi`
periodic function, where :math:`d` is the highest desired frequency (the
degree) of the Fourier spectrum.
While this function can be used to compute Fourier coefficients in general,
the specific use case in PennyLane is to compute coefficients of the
functions that result from measuring expectation values of parametrized
quantum circuits, as described in `Schuld, Sweke and Meyer (2020)
<https://arxiv.org/abs/2008.08605>`__ and `Vidal and Theis, 2019
<https://arxiv.org/abs/1901.11434>`__.
**Details**
Consider a quantum circuit that depends on a
parameter vector :math:`x` with
length :math:`N`. The circuit involves application of some unitary
operations :math:`U(x)`, and then measurement of an observable
:math:`\langle \hat{O} \rangle`. Analytically, the expectation value is
.. math::
\langle \hat{O} \rangle = \langle 0 \vert U^\dagger (x) \hat{O} U(x) \vert 0\rangle = \langle
\psi(x) \vert \hat{O} \vert \psi (x)\rangle.
This output is simply a function :math:`f(x) = \langle \psi(x) \vert \hat{O} \vert \psi
(x)\rangle`. Notably, it is a periodic function of the parameters, and
it can thus be expressed as a multidimensional Fourier series:
.. math::
f(x) = \sum \limits_{n_1\in \Omega_1} \dots \sum \limits_{n_N \in \Omega_N}
c_{n_1,\dots, n_N} e^{-i x_1 n_1} \dots e^{-i x_N n_N},
where :math:`n_i` are integer-valued frequencies, :math:`\Omega_i` are the set
of available values for the integer frequencies, and the
:math:`c_{n_1,\ldots,n_N}` are Fourier coefficients.
Args:
f (callable): Function that takes a 1D tensor of ``n_inputs`` scalar inputs. The function can be a QNode, but
has to return a real scalar value (such as an expectation).
n_inputs (int): number of function inputs
degree (int): max frequency of Fourier coeffs to be computed. For degree :math:`d`,
the coefficients from frequencies :math:`-d, -d+1,...0,..., d-1, d` will be computed.
lowpass_filter (bool): If ``True``, a simple low-pass filter is applied prior to
computing the set of coefficients in order to filter out frequencies above the
given degree. See examples below.
filter_threshold (None or int): The integer frequency at which to filter. If
``lowpass_filter`` is set to ``True,`` but no value is specified, ``2 * degree`` is used.
Returns:
array[complex]: The Fourier coefficients of the function ``f`` up to the specified degree.
**Example**
Suppose we have the following quantum function and wish to compute its Fourier
coefficients with respect to the variable ``inpt``, which is an array with 2 values:
.. code-block:: python
dev = qml.device('default.qubit', wires=['a'])
@qml.qnode(dev)
def circuit(weights, inpt):
qml.RX(inpt[0], wires='a')
qml.Rot(*weights[0], wires='a')
qml.RY(inpt[1], wires='a')
qml.Rot(*weights[1], wires='a')
return qml.expval(qml.PauliZ(wires='a'))
Unless otherwise specified, the coefficients will be computed for all input
values. To compute coefficients with respect to only a subset of the input
values, it is necessary to use a wrapper function (e.g.,
``functools.partial``). We do this below, while fixing a value for
``weights``:
>>> from functools import partial
>>> weights = np.array([[0.1, 0.2, 0.3], [-4.1, 3.2, 1.3]])
>>> partial_circuit = partial(circuit, weights)
Now we must specify the number of inputs, and the maximum desired
degree. Based on the underlying theory, we expect the degree to be 1
(frequencies -1, 0, and 1).
>>> num_inputs = 2
>>> degree = 1
Then we can obtain the coefficients:
>>> coeffs = coefficients(partial_circuit, num_inputs, degree)
>>> print(coeffs)
[[ 0. +0.j -0. +0.j -0. +0.j ]
[-0.0014-0.022j -0.3431-0.0408j -0.1493+0.0374j]
[-0.0014+0.022j -0.1493-0.0374j -0.3431+0.0408j]]
If the specified degree is lower than the highest frequency of the function,
aliasing may occur, and the resultant coefficients will be incorrect as they
will include components of the series expansion from higher frequencies. In
order to mitigate aliasing, setting ``lowpass_filter=True`` will apply a
simple low-pass filter prior to computing the coefficients. Coefficients up
to a specified value are computed, and then frequencies higher than the
degree are simply removed. This ensures that the coefficients returned will
have the correct values, though they may not be the full set of
coefficients. If no threshold value is provided, the threshold will be set
to ``2 * degree``.
Consider the circuit below:
.. code-block:: python
@qml.qnode(dev)
def circuit(inpt):
qml.RX(inpt[0], wires=0)
qml.RY(inpt[0], wires=1)
qml.CNOT(wires=[1, 0])
return qml.expval(qml.PauliZ(0))
One can work out by hand that the Fourier coefficients are :math:`c_0 = 0.5, c_1 = c_{-1} = 0,`
and :math:`c_2 = c_{-2} = 0.25`. Suppose we would like only to obtain the coefficients
:math:`c_0` and :math:`c_1, c_{-1}`. If we simply ask for the coefficients of degree 1,
we will obtain incorrect values due to aliasing:
>>> coefficients(circuit, 1, 1)
array([0.5 +0.j, 0.25+0.j, 0.25+0.j])
However if we enable the low-pass filter, we can still obtain the correct coefficients:
>>> coefficients(circuit, 1, 1, lowpass_filter=True)
array([0.5+0.j, 0. +0.j, 0. +0.j])
Note that in this case, ``2 * degree`` gives us exactly the maximum coefficient;
in other situations it may be desirable to set the threshold value explicitly.
The `coefficients` function can handle qnodes from all PennyLane interfaces.
"""
if not lowpass_filter:
return _coefficients_no_filter(f, n_inputs, degree)
if filter_threshold is None:
filter_threshold = 2 * degree
# Compute the fft of the function at 2x the specified degree
unfiltered_coeffs = _coefficients_no_filter(f, n_inputs, filter_threshold)
# Shift the frequencies so that the 0s are at the centre
shifted_unfiltered_coeffs = np.fft.fftshift(unfiltered_coeffs)
# Next, slice up the array so that we get only the coefficients we care about,
# those between -degree and degree
range_slices = list(
range(
filter_threshold - degree,
shifted_unfiltered_coeffs.shape[0] - (filter_threshold - degree),
)
)
shifted_filtered_coeffs = shifted_unfiltered_coeffs.copy()
# Go axis by axis and take only the central components
for axis in range(n_inputs - 1, -1, -1):
shifted_filtered_coeffs = np.take(shifted_filtered_coeffs, range_slices, axis=axis)
# Shift everything back into "normal" fft ordering
filtered_coeffs = np.fft.ifftshift(shifted_filtered_coeffs)
# Compute the inverse FFT
f_discrete_filtered = np.fft.ifftn(filtered_coeffs)
# Now compute the FFT again on the filtered data
coeffs = np.fft.fftn(f_discrete_filtered)
return coeffs
|
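The entry above relies on ``_coefficients_no_filter``, which is not shown. As a rough illustration of the sampling idea it presumably implements, the 1-D sketch below evaluates a periodic function at 2d+1 equally spaced points and reads the coefficients off a discrete Fourier transform; the helper name and normalisation are assumptions, not PennyLane's actual code.
import numpy as np
def naive_coefficients_1d(f, degree):
    # Sample f on 2*degree + 1 equally spaced points in [0, 2*pi) and normalise the DFT.
    n_samples = 2 * degree + 1
    ts = np.arange(n_samples) * 2 * np.pi / n_samples
    evals = np.array([f(t) for t in ts])
    return np.fft.fft(evals) / n_samples  # order: c_0, c_1, ..., c_d, c_-d, ..., c_-1
# f(x) = 0.5 + 0.5*cos(2x) has c_0 = 0.5 and c_2 = c_{-2} = 0.25.
coeffs = naive_coefficients_1d(lambda x: 0.5 + 0.5 * np.cos(2 * x), degree=2)
# expected (up to rounding): [0.5, 0, 0.25, 0.25, 0]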
31,404 |
def get_violation_list_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
:type client: Client
:param client: Gamma client
:param args: all command arguments, usually passed from demisto.args()
args['name'] is used as input name
:return:
A CommandResults object that is then passed to return_results
:rtype: ``CommandResults``
"""
minimum_violation = args.get("minimum_violation", 1)
limit = args.get("limit", 10)
if not int(minimum_violation) >= 1:
raise ValueError("minimum_violation must be greater than 0")
if not int(limit) >= 1 or not int(limit) <= 100:
raise ValueError("limit must be between 1 and 100")
v_list = client.get_violation_list(minimum_violation, limit)
note = ''
if v_list['response'][0]['violation_id'] != int(minimum_violation):
note += f'Violation with the minimum_violation ID does not exist. Showing violations pulled from the next available ID: {v_list["response"][0]["violation_id"]} \r'
human_readable = note
for i in v_list['response']:
violation_id = i['violation_id']
human_readable += f'### Violation {i["violation_id"]} \r' \
f'|Violation ID|Status|Timestamp|Dashboard URL|User|App Name| \r' \
f'|---|---|---|---|---|---| \r' \
f'| {violation_id} | {i["violation_status"]} | {timestamp_to_datestring(i["violation_event_timestamp"]*1000)} | {i["dashboard_url"]} | {i["user"]} | {i["app_name"]} | \r'
return CommandResults(
readable_output=human_readable,
outputs_prefix="GammaViolation",
outputs_key_field="violation_id",
outputs=v_list,
raw_response=v_list
)
|
def get_violation_list_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
:type client: Client
:param client: Gamma client
:param args: all command arguments, usually passed from demisto.args()
args['name'] is used as input name
:return:
A CommandResults object that is then passed to return_results
:rtype: ``CommandResults``
"""
minimum_violation = args.get("minimum_violation", 1)
limit = args.get("limit", 10)
if not int(minimum_violation) >= 1:
raise ValueError("minimum_violation must be greater than 0")
if int(limit) < 1 or int(limit) > 100:
raise ValueError("limit must be between 1 and 100")
v_list = client.get_violation_list(minimum_violation, limit)
note = ''
if v_list['response'][0]['violation_id'] != int(minimum_violation):
note += f'Violation with the minimum_violation ID does not exist. Showing violations pulled from the next available ID: {v_list["response"][0]["violation_id"]} \r'
human_readable = note
for i in v_list['response']:
violation_id = i['violation_id']
human_readable += f'### Violation {i["violation_id"]} \r' \
f'|Violation ID|Status|Timestamp|Dashboard URL|User|App Name| \r' \
f'|---|---|---|---|---|---| \r' \
f'| {violation_id} | {i["violation_status"]} | {timestamp_to_datestring(i["violation_event_timestamp"]*1000)} | {i["dashboard_url"]} | {i["user"]} | {i["app_name"]} | \r'
return CommandResults(
readable_output=human_readable,
outputs_prefix="GammaViolation",
outputs_key_field="violation_id",
outputs=v_list,
raw_response=v_list
)
|
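The only behavioural point in this pair is the limit check. The two forms are logically equivalent; the modified one simply reads more directly, as the small check below illustrates.
for limit in ("0", "1", "50", "100", "101"):
    old_reject = not int(limit) >= 1 or not int(limit) <= 100
    new_reject = int(limit) < 1 or int(limit) > 100
    assert old_reject == new_reject  # both reject "0" and "101" and accept the rest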
32,564 |
def main() -> None: # pragma: no cover
args = demisto.args()
command = demisto.command()
demisto.debug(f'Command being called is {demisto.command()}')
try:
admin_api = create_api_call()
set_proxy(admin_api)
if command == 'test-module':
test_instance(admin_api)
elif command == 'duoadmin-get-users':
get_all_users(admin_api)
elif command == 'duoadmin-get-admins':
get_all_admins(admin_api)
elif command == 'duoadmin-get-bypass-codes':
get_all_bypass_codes(admin_api)
elif command == 'duoadmin-get-authentication-logs-by-user':
get_authentication_logs_by_user(admin_api, args)
elif command == 'duoadmin-get-devices':
get_all_devices(admin_api)
elif command == 'duoadmin-get-devices-by-user':
get_devices_by_user(admin_api, args)
elif command == 'duoadmin-associate-device-to-user':
associate_device_to_user(admin_api, args)
elif command == 'duoadmin-dissociate-device-from-user':
dissociate_device_by_user(admin_api, args)
elif command == 'duoadmin-get-u2f-tokens-by-user':
get_u2f_tokens_by_user(admin_api, args)
elif command == 'duoadmin-delete-u2f-token':
delete_u2f_token(admin_api, args)
elif command == 'duoadmin-modify-user':
return_results(modify_user(admin_api, **demisto.args()))
elif command == 'duoadmin-modify-admin':
return_results(modify_admin_user(admin_api, **demisto.args()))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(f'Failed to execute {command} command.\nError:\n{str(e)}')
|
def main() -> None: # pragma: no cover
args = demisto.args()
command = demisto.command()
demisto.debug(f'Command being called is {command}')
try:
admin_api = create_api_call()
set_proxy(admin_api)
if command == 'test-module':
test_instance(admin_api)
elif command == 'duoadmin-get-users':
get_all_users(admin_api)
elif command == 'duoadmin-get-admins':
get_all_admins(admin_api)
elif command == 'duoadmin-get-bypass-codes':
get_all_bypass_codes(admin_api)
elif command == 'duoadmin-get-authentication-logs-by-user':
get_authentication_logs_by_user(admin_api, args)
elif command == 'duoadmin-get-devices':
get_all_devices(admin_api)
elif command == 'duoadmin-get-devices-by-user':
get_devices_by_user(admin_api, args)
elif command == 'duoadmin-associate-device-to-user':
associate_device_to_user(admin_api, args)
elif command == 'duoadmin-dissociate-device-from-user':
dissociate_device_by_user(admin_api, args)
elif command == 'duoadmin-get-u2f-tokens-by-user':
get_u2f_tokens_by_user(admin_api, args)
elif command == 'duoadmin-delete-u2f-token':
delete_u2f_token(admin_api, args)
elif command == 'duoadmin-modify-user':
return_results(modify_user(admin_api, **demisto.args()))
elif command == 'duoadmin-modify-admin':
return_results(modify_admin_user(admin_api, **demisto.args()))
else:
raise NotImplementedError(f'{command} command is not implemented.')
except Exception as e:
return_error(f'Failed to execute {command} command.\nError:\n{str(e)}')
|
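A hedged side note on the dispatch pattern above: the if/elif chain could also be written as a name-to-handler mapping. The sketch below is illustrative only, is not part of the integration, and glosses over the fact that some handlers take ``args`` while others do not.
handlers = {
    'duoadmin-get-users': lambda api, args: get_all_users(api),
    'duoadmin-get-devices-by-user': lambda api, args: get_devices_by_user(api, args),
    # ... remaining commands registered the same way
}
handler = handlers.get(command)
if handler is None:
    raise NotImplementedError(f'{command} command is not implemented.')
handler(admin_api, args)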
30,626 |
def appendContext(key, data, dedup=False):
"""
Append data to the investigation context
:type key: ``str``
:param key: The context path (required)
:type data: ``any``
:param data: Data to be added to the context (required)
:type dedup: ``bool``
:param dedup: True if de-duplication is required. Default is False.
:return: No data returned
:rtype: ``None``
"""
if data is None:
return
existing = demisto.get(demisto.context(), key)
if existing:
if isinstance(existing, STRING_TYPES):
if isinstance(data, STRING_TYPES):
new_val = data + ',' + existing
else:
return_error("Cannot append data to the existing context - \n The data is of instance {} while the "
"context in the specified path is of instance {}.".format(type(data), type(existing)))
if isinstance(existing, dict):
if isinstance(data, dict):
existing.update(data)
new_val = existing
else:
return_error("Cannot append data to the existing context - \n The data is of instance {} while the "
"context in the specified path is of instance {}.".format(type(data), type(existing)))
if isinstance(existing, list):
if isinstance(data, list):
existing.extend(data)
else:
existing.append(data)
new_val = existing
if dedup:
new_val = list(set(new_val))
demisto.setContext(key, new_val)
else:
demisto.setContext(key, data)
|
def appendContext(key, data, dedup=False):
"""
Append data to the investigation context
:type key: ``str``
:param key: The context path (required)
:type data: ``any``
:param data: Data to be added to the context (required)
:type dedup: ``bool``
:param dedup: True if de-duplication is required. Default is False.
:return: No data returned
:rtype: ``None``
"""
if data is None:
return
existing = demisto.get(demisto.context(), key)
if existing:
if isinstance(existing, STRING_TYPES):
if isinstance(data, STRING_TYPES):
new_val = data + ',' + existing
else:
return_error("Cannot append data to the existing context - \n The data is of instance {} while the "
"context in the specified path is of instance {}.".format(type(data), type(existing)))
if isinstance(existing, dict):
if isinstance(data, dict):
existing.update(data)
new_val = existing
else:
return_error("Cannot append data to the existing context - \n The data is of type {} while the "
"context in the specified path is of instance {}.".format(type(data), type(existing)))
if isinstance(existing, list):
if isinstance(data, list):
existing.extend(data)
else:
existing.append(data)
new_val = existing
if dedup:
new_val = list(set(new_val))
demisto.setContext(key, new_val)
else:
demisto.setContext(key, data)
|
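A standalone illustration of the merge rules documented above, without the demisto dependency: strings are joined with a comma, dicts are merged with ``update``, and lists are extended, with an optional set-based de-duplication.
existing_list = ['a', 'b']
existing_list.extend(['b', 'c'])          # list + list -> extended list
deduped = list(set(existing_list))        # what dedup=True does (order is not preserved)
existing_dict = {'k1': 1}
existing_dict.update({'k2': 2})           # dict + dict -> merged dict
merged_str = 'new' + ',' + 'old'          # str + str -> comma-joined string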
6,229 |
def divideFullName(entityName, second=None):
""" Convert component full name to tuple
:param str entityName: component full name, e.g.: 'Framework/ProxyManager'
:param str second: component name
:return: tuple -- contain system and component name
"""
if entityName and '/' not in entityName and second:
return (entityName, second)
fields = [field.strip() for field in entityName.split("/") if field.strip()]
if len(fields) == 2:
return tuple(fields)
raise RuntimeError("Service (%s) name must be with the form system/service" % entityName)
|
def divideFullName(entityName, componentName=None):
""" Convert component full name to tuple
:param str entityName: component full name, e.g.: 'Framework/ProxyManager'
      :param str componentName: component name
      :return: tuple -- contain system and component name
  """
  if entityName and '/' not in entityName and componentName:
    return (entityName, componentName)
fields = [field.strip() for field in entityName.split("/") if field.strip()]
if len(fields) == 2:
return tuple(fields)
raise RuntimeError("Service (%s) name must be with the form system/service" % entityName)
|
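Expected behaviour of the helper above, shown here as an informal sketch based on its docstring:
divideFullName('Framework/ProxyManager')       # -> ('Framework', 'ProxyManager')
divideFullName('Framework', 'ProxyManager')    # -> ('Framework', 'ProxyManager')
divideFullName('Framework')                    # raises RuntimeError: name must be system/service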
58,367 |
def validate_root(root, validate):
# Validate root argument and make sure it contains mandatory info
try:
root = Path(root)
except TypeError:
raise TypeError("root argument must be a pathlib.Path (or a type that "
"supports casting to pathlib.Path, such as "
"string) specifying the directory "
"containing the BIDS dataset.")
root = root.absolute()
if not root.exists():
raise ValueError("BIDS root does not exist: %s" % root)
target = root / 'dataset_description.json'
if not target.exists():
if validate:
raise BIDSValidationError(
"'dataset_description.json' is missing from project root."
" Every valid BIDS dataset must have this file."
"\nExample contents of 'dataset_description.json': \n%s" %
json.dumps(EXAMPLE_BIDS_DESCRIPTION)
)
else:
description = None
else:
err = None
try:
with open(target, 'r', encoding='utf-8') as desc_fd:
description = json.load(desc_fd)
except (UnicodeDecodeError, json.JSONDecodeError) as err:
description = None
if validate:
if description is None:
raise BIDSValidationError(
"'dataset_description.json' is not a valid json file."
" There is likely a typo in your 'dataset_description.json'."
"\nExample contents of 'dataset_description.json': \n%s" %
json.dumps(EXAMPLE_BIDS_DESCRIPTION)
) from err
for k in MANDATORY_BIDS_FIELDS:
if k not in description:
raise BIDSValidationError(
"Mandatory %r field missing from "
"'dataset_description.json'."
"\nExample: %s" % (k, MANDATORY_BIDS_FIELDS[k])
)
return root, description
|
def validate_root(root, validate):
# Validate root argument and make sure it contains mandatory info
try:
root = Path(root)
except TypeError:
raise TypeError("root argument must be a pathlib.Path (or a type that "
"supports casting to pathlib.Path, such as "
"string) specifying the directory "
"containing the BIDS dataset.")
root = root.absolute()
if not root.exists():
raise ValueError("BIDS root does not exist: %s" % root)
target = root / 'dataset_description.json'
if not target.exists():
if validate:
raise BIDSValidationError(
"'dataset_description.json' is missing from project root."
" Every valid BIDS dataset must have this file."
"\nExample contents of 'dataset_description.json': \n%s" %
json.dumps(EXAMPLE_BIDS_DESCRIPTION)
)
else:
description = None
else:
err = None
try:
with open(target, 'r', encoding='utf-8') as desc_fd:
description = json.load(desc_fd)
except (UnicodeDecodeError, json.JSONDecodeError) as e:
description = None
err = e
if validate:
if description is None:
raise BIDSValidationError(
"'dataset_description.json' is not a valid json file."
" There is likely a typo in your 'dataset_description.json'."
"\nExample contents of 'dataset_description.json': \n%s" %
json.dumps(EXAMPLE_BIDS_DESCRIPTION)
) from err
for k in MANDATORY_BIDS_FIELDS:
if k not in description:
raise BIDSValidationError(
"Mandatory %r field missing from "
"'dataset_description.json'."
"\nExample: %s" % (k, MANDATORY_BIDS_FIELDS[k])
)
return root, description
|
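The change in this pair exists because Python unbinds the name introduced by ``except ... as err`` when the except block finishes, so the original's later ``raise ... from err`` would itself fail. A minimal demonstration of that scoping rule (standard Python behaviour, independent of pybids):
err = None
try:
    raise ValueError("boom")
except ValueError as err:
    pass
# At this point 'err' has been deleted by the interpreter; uncommenting the next line
# raises NameError at module level (UnboundLocalError inside a function).
# print(err)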
5,672 |
def spearmanr(a, b=None, axis=0, nan_policy='propagate', alternative='two-sided'):
"""
Calculate a Spearman correlation coefficient with associated p-value.
The Spearman rank-order correlation coefficient is a nonparametric measure
of the monotonicity of the relationship between two datasets. Unlike the
Pearson correlation, the Spearman correlation does not assume that both
datasets are normally distributed. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Correlations of -1 or +1 imply an exact monotonic relationship. Positive
correlations imply that as x increases, so does y. Negative correlations
imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
a, b : 1D or 2D array_like, b is optional
One or two 1-D or 2-D arrays containing multiple variables and
observations. When these are 1-D, each represents a vector of
observations of a single variable. For the behavior in the 2-D case,
see under ``axis``, below.
Both arrays need to have the same length in the ``axis`` dimension.
axis : int or None, optional
If axis=0 (default), then each column represents a variable, with
observations in the rows. If axis=1, the relationship is transposed:
each row represents a variable, while the columns contain observations.
If axis=None, then both arrays will be raveled.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided'
* 'less': one-sided
* 'greater': one-sided
.. versionadded:: 1.6.0
Returns
-------
correlation : float or ndarray (2-D square)
        Spearman correlation matrix or correlation coefficient (if only 2
        variables are given as parameters). Correlation matrix is square with
        length equal to total number of variables (columns or rows) in ``a``
        and ``b`` combined.
    pvalue : float
        The p-value for a hypothesis test whose alternative hypothesis is
        defined by the ``alternative`` parameter and who's null hypothesis
        is that two sets of data are uncorrelated, and has the same dimension as rho.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 14.7
Examples
--------
>>> from scipy import stats
>>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])
(0.82078268166812329, 0.088587005313543798)
>>> np.random.seed(1234321)
>>> x2n = np.random.randn(100, 2)
>>> y2n = np.random.randn(100, 2)
>>> stats.spearmanr(x2n)
(0.059969996999699973, 0.55338590803773591)
>>> stats.spearmanr(x2n[:,0], x2n[:,1])
(0.059969996999699973, 0.55338590803773591)
>>> rho, pval = stats.spearmanr(x2n, y2n)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> pval
array([[ 0. , 0.55338591, 0.06435364, 0.53617935],
[ 0.55338591, 0. , 0.27592895, 0.80234077],
[ 0.06435364, 0.27592895, 0. , 0.73039992],
[ 0.53617935, 0.80234077, 0.73039992, 0. ]])
>>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> stats.spearmanr(x2n, y2n, axis=None)
(0.10816770419260482, 0.1273562188027364)
>>> stats.spearmanr(x2n.ravel(), y2n.ravel())
(0.10816770419260482, 0.1273562188027364)
>>> xint = np.random.randint(10, size=(100, 2))
>>> stats.spearmanr(xint)
(0.052760927029710199, 0.60213045837062351)
"""
if axis is not None and axis > 1:
raise ValueError("spearmanr only handles 1-D or 2-D arrays, supplied axis argument {}, please use only values 0, 1 or None for axis".format(axis))
a, axisout = _chk_asarray(a, axis)
if a.ndim > 2:
raise ValueError("spearmanr only handles 1-D or 2-D arrays")
if b is None:
if a.ndim < 2:
raise ValueError("`spearmanr` needs at least 2 variables to compare")
else:
# Concatenate a and b, so that we now only have to handle the case
# of a 2-D `a`.
b, _ = _chk_asarray(b, axis)
if axisout == 0:
a = np.column_stack((a, b))
else:
a = np.row_stack((a, b))
n_vars = a.shape[1 - axisout]
n_obs = a.shape[axisout]
if n_obs <= 1:
# Handle empty arrays or single observations.
return SpearmanrResult(np.nan, np.nan)
if axisout == 0:
if (a[:, 0][0] == a[:, 0]).all() or (a[:, 1][0] == a[:, 1]).all():
# If an input is constant, the correlation coefficient is not defined.
warnings.warn(SpearmanRConstantInputWarning())
return SpearmanrResult(np.nan, np.nan)
else: # case when axisout == 1 b/c a is 2 dim only
if (a[0, :][0] == a[0, :]).all() or (a[1, :][0] == a[1, :]).all():
# If an input is constant, the correlation coefficient is not defined.
warnings.warn(SpearmanRConstantInputWarning())
return SpearmanrResult(np.nan, np.nan)
a_contains_nan, nan_policy = _contains_nan(a, nan_policy)
variable_has_nan = np.zeros(n_vars, dtype=bool)
if a_contains_nan:
if nan_policy == 'omit':
return mstats_basic.spearmanr(a, axis=axis, nan_policy=nan_policy)
elif nan_policy == 'propagate':
if a.ndim == 1 or n_vars <= 2:
return SpearmanrResult(np.nan, np.nan)
else:
# Keep track of variables with NaNs, set the outputs to NaN
# only for those variables
variable_has_nan = np.isnan(a).sum(axis=axisout)
a_ranked = np.apply_along_axis(rankdata, axisout, a)
rs = np.corrcoef(a_ranked, rowvar=axisout)
dof = n_obs - 2 # degrees of freedom
# rs can have elements equal to 1, so avoid zero division warnings
with np.errstate(divide='ignore'):
# clip the small negative values possibly caused by rounding
# errors before taking the square root
t = rs * np.sqrt((dof/((rs+1.0)*(1.0-rs))).clip(0))
if alternative == 'less':
prob = distributions.t.cdf(t, dof)
elif alternative == 'greater':
prob = distributions.t.sf(t, dof)
elif alternative == 'two-sided':
prob = 2 * distributions.t.sf(np.abs(t), dof)
else:
raise ValueError("alternative should be "
"'less', 'greater' or 'two-sided'")
# For backwards compatibility, return scalars when comparing 2 columns
if rs.shape == (2, 2):
return SpearmanrResult(rs[1, 0], prob[1, 0])
else:
rs[variable_has_nan, :] = np.nan
rs[:, variable_has_nan] = np.nan
return SpearmanrResult(rs, prob)
|
def spearmanr(a, b=None, axis=0, nan_policy='propagate', alternative='two-sided'):
"""
Calculate a Spearman correlation coefficient with associated p-value.
The Spearman rank-order correlation coefficient is a nonparametric measure
of the monotonicity of the relationship between two datasets. Unlike the
Pearson correlation, the Spearman correlation does not assume that both
datasets are normally distributed. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Correlations of -1 or +1 imply an exact monotonic relationship. Positive
correlations imply that as x increases, so does y. Negative correlations
imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
a, b : 1D or 2D array_like, b is optional
One or two 1-D or 2-D arrays containing multiple variables and
observations. When these are 1-D, each represents a vector of
observations of a single variable. For the behavior in the 2-D case,
see under ``axis``, below.
Both arrays need to have the same length in the ``axis`` dimension.
axis : int or None, optional
If axis=0 (default), then each column represents a variable, with
observations in the rows. If axis=1, the relationship is transposed:
each row represents a variable, while the columns contain observations.
If axis=None, then both arrays will be raveled.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided'
* 'less': one-sided
* 'greater': one-sided
.. versionadded:: 1.6.0
Returns
-------
correlation : float or ndarray (2-D square)
        Spearman correlation matrix or correlation coefficient (if only 2
        variables are given as parameters). Correlation matrix is square with
        length equal to total number of variables (columns or rows) in ``a``
        and ``b`` combined.
    pvalue : float
        The p-value for a hypothesis test whose alternative hypothesis is
        defined by the ``alternative`` parameter and whose null hypothesis
        is that two sets of data are uncorrelated, and has the same dimension as rho.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 14.7
Examples
--------
>>> from scipy import stats
>>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])
(0.82078268166812329, 0.088587005313543798)
>>> np.random.seed(1234321)
>>> x2n = np.random.randn(100, 2)
>>> y2n = np.random.randn(100, 2)
>>> stats.spearmanr(x2n)
(0.059969996999699973, 0.55338590803773591)
>>> stats.spearmanr(x2n[:,0], x2n[:,1])
(0.059969996999699973, 0.55338590803773591)
>>> rho, pval = stats.spearmanr(x2n, y2n)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> pval
array([[ 0. , 0.55338591, 0.06435364, 0.53617935],
[ 0.55338591, 0. , 0.27592895, 0.80234077],
[ 0.06435364, 0.27592895, 0. , 0.73039992],
[ 0.53617935, 0.80234077, 0.73039992, 0. ]])
>>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1)
>>> rho
array([[ 1. , 0.05997 , 0.18569457, 0.06258626],
[ 0.05997 , 1. , 0.110003 , 0.02534653],
[ 0.18569457, 0.110003 , 1. , 0.03488749],
[ 0.06258626, 0.02534653, 0.03488749, 1. ]])
>>> stats.spearmanr(x2n, y2n, axis=None)
(0.10816770419260482, 0.1273562188027364)
>>> stats.spearmanr(x2n.ravel(), y2n.ravel())
(0.10816770419260482, 0.1273562188027364)
>>> xint = np.random.randint(10, size=(100, 2))
>>> stats.spearmanr(xint)
(0.052760927029710199, 0.60213045837062351)
"""
if axis is not None and axis > 1:
raise ValueError("spearmanr only handles 1-D or 2-D arrays, supplied axis argument {}, please use only values 0, 1 or None for axis".format(axis))
a, axisout = _chk_asarray(a, axis)
if a.ndim > 2:
raise ValueError("spearmanr only handles 1-D or 2-D arrays")
if b is None:
if a.ndim < 2:
raise ValueError("`spearmanr` needs at least 2 variables to compare")
else:
# Concatenate a and b, so that we now only have to handle the case
# of a 2-D `a`.
b, _ = _chk_asarray(b, axis)
if axisout == 0:
a = np.column_stack((a, b))
else:
a = np.row_stack((a, b))
n_vars = a.shape[1 - axisout]
n_obs = a.shape[axisout]
if n_obs <= 1:
# Handle empty arrays or single observations.
return SpearmanrResult(np.nan, np.nan)
if axisout == 0:
if (a[:, 0][0] == a[:, 0]).all() or (a[:, 1][0] == a[:, 1]).all():
# If an input is constant, the correlation coefficient is not defined.
warnings.warn(SpearmanRConstantInputWarning())
return SpearmanrResult(np.nan, np.nan)
else: # case when axisout == 1 b/c a is 2 dim only
if (a[0, :][0] == a[0, :]).all() or (a[1, :][0] == a[1, :]).all():
# If an input is constant, the correlation coefficient is not defined.
warnings.warn(SpearmanRConstantInputWarning())
return SpearmanrResult(np.nan, np.nan)
a_contains_nan, nan_policy = _contains_nan(a, nan_policy)
variable_has_nan = np.zeros(n_vars, dtype=bool)
if a_contains_nan:
if nan_policy == 'omit':
return mstats_basic.spearmanr(a, axis=axis, nan_policy=nan_policy)
elif nan_policy == 'propagate':
if a.ndim == 1 or n_vars <= 2:
return SpearmanrResult(np.nan, np.nan)
else:
# Keep track of variables with NaNs, set the outputs to NaN
# only for those variables
variable_has_nan = np.isnan(a).sum(axis=axisout)
a_ranked = np.apply_along_axis(rankdata, axisout, a)
rs = np.corrcoef(a_ranked, rowvar=axisout)
dof = n_obs - 2 # degrees of freedom
# rs can have elements equal to 1, so avoid zero division warnings
with np.errstate(divide='ignore'):
# clip the small negative values possibly caused by rounding
# errors before taking the square root
t = rs * np.sqrt((dof/((rs+1.0)*(1.0-rs))).clip(0))
if alternative == 'less':
prob = distributions.t.cdf(t, dof)
elif alternative == 'greater':
prob = distributions.t.sf(t, dof)
elif alternative == 'two-sided':
prob = 2 * distributions.t.sf(np.abs(t), dof)
else:
raise ValueError("alternative should be "
"'less', 'greater' or 'two-sided'")
# For backwards compatibility, return scalars when comparing 2 columns
if rs.shape == (2, 2):
return SpearmanrResult(rs[1, 0], prob[1, 0])
else:
rs[variable_has_nan, :] = np.nan
rs[:, variable_has_nan] = np.nan
return SpearmanrResult(rs, prob)
|
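As a quick cross-check of the rank-then-correlate approach used in the body above (ranking with ``rankdata`` and then taking a Pearson-style correlation of the ranks):
import numpy as np
from scipy.stats import rankdata, pearsonr, spearmanr
x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
y = np.array([5.0, 6.0, 7.0, 8.0, 7.0])
rho_direct = spearmanr(x, y).correlation
rho_from_ranks = pearsonr(rankdata(x), rankdata(y))[0]
# Both are ~0.8208, matching the first docstring example: Spearman is Pearson on ranks.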
5,487 |
def test_get_nonexisting_user(client, wiki_user, wiki_user_github_account):
"""Test GET on the api.v1.get_user endpoint for a non-existing user."""
url = reverse('api.v1.get_user', args=('nonexistent',))
response = client.get(url)
assert response.status_code == 404
assert_no_cache_header(response)
|
def test_get_nonexisting_user(client):
"""Test GET on the api.v1.get_user endpoint for a non-existing user."""
url = reverse('api.v1.get_user', args=('nonexistent',))
response = client.get(url)
assert response.status_code == 404
assert_no_cache_header(response)
|
54,070 |
def load_command_table(self, _): # pylint: disable=too-many-locals, too-many-statements
def get_custom_sdk(custom_module, client_factory, resource_type=CUSTOM_DATA_STORAGE):
"""Returns a CliCommandType instance with specified operation template based on the given custom module name.
This is useful when the command is not defined in the default 'custom' module but instead in a module under
'operations' package."""
return CliCommandType(
operations_tmpl='azext_storage_preview.operations.{}#'.format(custom_module) + '{}',
client_factory=client_factory,
resource_type=resource_type
)
base_blob_sdk = CliCommandType(
operations_tmpl='azext_storage_preview.vendored_sdks.azure_storage.blob.baseblobservice#BaseBlobService.{}',
client_factory=blob_data_service_factory,
resource_type=CUSTOM_DATA_STORAGE)
with self.command_group('storage blob service-properties', command_type=base_blob_sdk) as g:
g.storage_command_oauth('show', 'get_blob_service_properties', exception_handler=show_exception_handler)
g.storage_command_oauth('update', generic_update=True, getter_name='get_blob_service_properties',
setter_type=get_custom_sdk('blob', cf_blob_data_gen_update),
setter_name='set_service_properties',
client_factory=cf_blob_data_gen_update)
block_blob_sdk = CliCommandType(
operations_tmpl='azure.multiapi.storage.blob.blockblobservice#BlockBlobService.{}',
client_factory=blob_data_service_factory,
resource_type=CUSTOM_DATA_STORAGE)
with self.command_group('storage azcopy blob', command_type=block_blob_sdk,
custom_command_type=get_custom_sdk('azcopy', blob_data_service_factory)) as g:
g.storage_custom_command_oauth('upload', 'storage_blob_upload')
g.storage_custom_command_oauth('download', 'storage_blob_download')
g.storage_custom_command_oauth('delete', 'storage_blob_remove')
g.storage_custom_command_oauth('sync', 'storage_blob_sync')
with self.command_group('storage azcopy', custom_command_type=get_custom_sdk('azcopy', None)) as g:
g.custom_command('run-command', 'storage_run_command', validator=lambda namespace: None)
# pylint: disable=line-too-long
adls_base_blob_sdk = CliCommandType(
operations_tmpl='azext_storage_preview.vendored_sdks.azure_adls_storage_preview.blob.baseblobservice#BaseBlobService.{}',
client_factory=adls_blob_data_service_factory,
resource_type=CUSTOM_DATA_STORAGE_ADLS)
def _adls_deprecate_message(self):
msg = "This {} has been deprecated and will be removed in future release.".format(self.object_type)
msg += " Use '{}' instead.".format(self.redirect)
msg += " For more information go to"
msg += " https://github.com/Azure/azure-cli/blob/dev/src/azure-cli/azure/cli/command_modules/storage/docs/ADLS%20Gen2.md"
return msg
# Change existing Blob Commands
with self.command_group('storage blob', command_type=adls_base_blob_sdk) as g:
from ._format import transform_blob_output
from ._transformers import transform_storage_list_output
g.storage_command_oauth('list', 'list_blobs', transform=transform_storage_list_output,
table_transformer=transform_blob_output,
deprecate_info=self.deprecate(redirect="az storage fs file list", hide=True,
message_func=_adls_deprecate_message))
# New Blob Commands
with self.command_group('storage blob', command_type=adls_base_blob_sdk,
custom_command_type=get_custom_sdk('blob', adls_blob_data_service_factory,
CUSTOM_DATA_STORAGE_ADLS),
resource_type=CUSTOM_DATA_STORAGE_ADLS) as g:
g.storage_command_oauth('move', 'rename_path', is_preview=True,
deprecate_info=self.deprecate(redirect="az storage fs file move", hide=True,
message_func=_adls_deprecate_message))
with self.command_group('storage blob access', command_type=adls_base_blob_sdk,
custom_command_type=get_custom_sdk('blob', adls_blob_data_service_factory,
CUSTOM_DATA_STORAGE_ADLS),
resource_type=CUSTOM_DATA_STORAGE_ADLS,
deprecate_info=self.deprecate(redirect="az storage fs access", hide=True,
message_func=_adls_deprecate_message)) as g:
g.storage_command_oauth('set', 'set_path_access_control')
g.storage_command_oauth('update', 'set_path_access_control')
g.storage_command_oauth('show', 'get_path_access_control')
# TODO: Remove them after deprecate for two sprints
# Blob directory Commands Group
with self.command_group('storage blob directory', command_type=adls_base_blob_sdk,
custom_command_type=get_custom_sdk('blob', adls_blob_data_service_factory,
CUSTOM_DATA_STORAGE_ADLS),
resource_type=CUSTOM_DATA_STORAGE_ADLS, is_preview=True) as g:
from ._format import transform_blob_output
from ._transformers import (transform_storage_list_output, create_boolean_result_output_transformer)
g.storage_command_oauth('create', 'create_directory')
g.storage_command_oauth('delete', 'delete_directory')
g.storage_custom_command_oauth('move', 'rename_directory')
g.storage_custom_command_oauth('show', 'show_directory', table_transformer=transform_blob_output,
exception_handler=show_exception_handler)
g.storage_custom_command_oauth('list', 'list_directory', transform=transform_storage_list_output,
table_transformer=transform_blob_output)
g.storage_command_oauth('exists', 'exists', transform=create_boolean_result_output_transformer('exists'))
g.storage_command_oauth(
'metadata show', 'get_blob_metadata', exception_handler=show_exception_handler)
g.storage_command_oauth('metadata update', 'set_blob_metadata')
with self.command_group('storage blob directory', is_preview=True,
custom_command_type=get_custom_sdk('azcopy', adls_blob_data_service_factory,
CUSTOM_DATA_STORAGE_ADLS))as g:
g.storage_custom_command_oauth('upload', 'storage_blob_upload')
g.storage_custom_command_oauth('download', 'storage_blob_download')
with self.command_group('storage blob directory access', command_type=adls_base_blob_sdk, is_preview=True,
custom_command_type=get_custom_sdk('blob', adls_blob_data_service_factory,
CUSTOM_DATA_STORAGE_ADLS),
resource_type=CUSTOM_DATA_STORAGE_ADLS) as g:
g.storage_command_oauth('set', 'set_path_access_control')
g.storage_command_oauth('update', 'set_path_access_control')
g.storage_command_oauth('show', 'get_path_access_control')
with self.command_group('storage blob directory',
deprecate_info=self.deprecate(redirect="az storage fs directory", hide=True,
message_func=_adls_deprecate_message)) as g:
pass
|
def load_command_table(self, _): # pylint: disable=too-many-locals, too-many-statements
def get_custom_sdk(custom_module, client_factory, resource_type=CUSTOM_DATA_STORAGE):
"""Returns a CliCommandType instance with specified operation template based on the given custom module name.
This is useful when the command is not defined in the default 'custom' module but instead in a module under
'operations' package."""
return CliCommandType(
operations_tmpl='azext_storage_preview.operations.{}#'.format(custom_module) + '{}',
client_factory=client_factory,
resource_type=resource_type
)
base_blob_sdk = CliCommandType(
operations_tmpl='azext_storage_preview.vendored_sdks.azure_storage.blob.baseblobservice#BaseBlobService.{}',
client_factory=blob_data_service_factory,
resource_type=CUSTOM_DATA_STORAGE)
with self.command_group('storage blob service-properties', command_type=base_blob_sdk) as g:
g.storage_command_oauth('show', 'get_blob_service_properties', exception_handler=show_exception_handler)
g.storage_command_oauth('update', generic_update=True, getter_name='get_blob_service_properties',
setter_type=get_custom_sdk('blob', cf_blob_data_gen_update),
setter_name='set_service_properties',
client_factory=cf_blob_data_gen_update)
block_blob_sdk = CliCommandType(
operations_tmpl='azure.multiapi.storage.blob.blockblobservice#BlockBlobService.{}',
client_factory=blob_data_service_factory,
resource_type=CUSTOM_DATA_STORAGE)
with self.command_group('storage azcopy blob', command_type=block_blob_sdk,
custom_command_type=get_custom_sdk('azcopy', blob_data_service_factory)) as g:
g.storage_custom_command_oauth('upload', 'storage_blob_upload')
g.storage_custom_command_oauth('download', 'storage_blob_download')
g.storage_custom_command_oauth('delete', 'storage_blob_remove')
g.storage_custom_command_oauth('sync', 'storage_blob_sync')
with self.command_group('storage azcopy', custom_command_type=get_custom_sdk('azcopy', None)) as g:
g.custom_command('run-command', 'storage_run_command', validator=lambda namespace: None)
# pylint: disable=line-too-long
adls_base_blob_sdk = CliCommandType(
operations_tmpl='azext_storage_preview.vendored_sdks.azure_adls_storage_preview.blob.baseblobservice#BaseBlobService.{}',
client_factory=adls_blob_data_service_factory,
resource_type=CUSTOM_DATA_STORAGE_ADLS)
def _adls_deprecate_message(self):
msg = "This {} has been deprecated and will be removed in future release.".format(self.object_type)
msg += " Use '{}' instead.".format(self.redirect)
msg += " For more information go to"
msg += " https://github.com/Azure/azure-cli/blob/dev/src/azure-cli/azure/cli/command_modules/storage/docs/ADLS%20Gen2.md"
return msg
# Change existing Blob Commands
with self.command_group('storage blob', command_type=adls_base_blob_sdk) as g:
from ._format import transform_blob_output
from ._transformers import transform_storage_list_output
g.storage_command_oauth('list', 'list_blobs', transform=transform_storage_list_output,
table_transformer=transform_blob_output,
deprecate_info=self.deprecate(redirect="az storage fs file list", hide=True,
message_func=_adls_deprecate_message))
# New Blob Commands
with self.command_group('storage blob', command_type=adls_base_blob_sdk,
custom_command_type=get_custom_sdk('blob', adls_blob_data_service_factory,
CUSTOM_DATA_STORAGE_ADLS),
resource_type=CUSTOM_DATA_STORAGE_ADLS) as g:
g.storage_command_oauth('move', 'rename_path',
deprecate_info=self.deprecate(redirect="az storage fs file move", hide=True,
message_func=_adls_deprecate_message))
with self.command_group('storage blob access', command_type=adls_base_blob_sdk,
custom_command_type=get_custom_sdk('blob', adls_blob_data_service_factory,
CUSTOM_DATA_STORAGE_ADLS),
resource_type=CUSTOM_DATA_STORAGE_ADLS,
deprecate_info=self.deprecate(redirect="az storage fs access", hide=True,
message_func=_adls_deprecate_message)) as g:
g.storage_command_oauth('set', 'set_path_access_control')
g.storage_command_oauth('update', 'set_path_access_control')
g.storage_command_oauth('show', 'get_path_access_control')
# TODO: Remove them after deprecate for two sprints
# Blob directory Commands Group
with self.command_group('storage blob directory', command_type=adls_base_blob_sdk,
custom_command_type=get_custom_sdk('blob', adls_blob_data_service_factory,
CUSTOM_DATA_STORAGE_ADLS),
resource_type=CUSTOM_DATA_STORAGE_ADLS, is_preview=True) as g:
from ._format import transform_blob_output
from ._transformers import (transform_storage_list_output, create_boolean_result_output_transformer)
g.storage_command_oauth('create', 'create_directory')
g.storage_command_oauth('delete', 'delete_directory')
g.storage_custom_command_oauth('move', 'rename_directory')
g.storage_custom_command_oauth('show', 'show_directory', table_transformer=transform_blob_output,
exception_handler=show_exception_handler)
g.storage_custom_command_oauth('list', 'list_directory', transform=transform_storage_list_output,
table_transformer=transform_blob_output)
g.storage_command_oauth('exists', 'exists', transform=create_boolean_result_output_transformer('exists'))
g.storage_command_oauth(
'metadata show', 'get_blob_metadata', exception_handler=show_exception_handler)
g.storage_command_oauth('metadata update', 'set_blob_metadata')
with self.command_group('storage blob directory', is_preview=True,
custom_command_type=get_custom_sdk('azcopy', adls_blob_data_service_factory,
CUSTOM_DATA_STORAGE_ADLS))as g:
g.storage_custom_command_oauth('upload', 'storage_blob_upload')
g.storage_custom_command_oauth('download', 'storage_blob_download')
with self.command_group('storage blob directory access', command_type=adls_base_blob_sdk, is_preview=True,
custom_command_type=get_custom_sdk('blob', adls_blob_data_service_factory,
CUSTOM_DATA_STORAGE_ADLS),
resource_type=CUSTOM_DATA_STORAGE_ADLS) as g:
g.storage_command_oauth('set', 'set_path_access_control')
g.storage_command_oauth('update', 'set_path_access_control')
g.storage_command_oauth('show', 'get_path_access_control')
with self.command_group('storage blob directory',
deprecate_info=self.deprecate(redirect="az storage fs directory", hide=True,
message_func=_adls_deprecate_message)) as g:
pass
|
35,583 |
def alexnet(pretrained: bool = False, progress: bool = True, **kwargs) -> AlexNet:
r"""AlexNet model architecture from the
`"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = AlexNet(**kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls['alexnet'],
progress=progress)
model.load_state_dict(state_dict)
return model
|
def alexnet(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> AlexNet:
r"""AlexNet model architecture from the
`"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
model = AlexNet(**kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls['alexnet'],
progress=progress)
model.load_state_dict(state_dict)
return model
|
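Usage sketch for the factory above; ``num_classes`` is a constructor argument of torchvision's ``AlexNet`` and is forwarded through ``**kwargs`` (pretrained weights are only compatible with the default 1000 classes, hence ``pretrained=False`` on the first line):
model = alexnet(pretrained=False, num_classes=10)   # kwargs are passed straight to AlexNet(...)
pretrained_model = alexnet(pretrained=True)         # fetches ImageNet weights via load_state_dict_from_url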
5,155 |
def get_glyphs_subset(fontfile, characters):
"""
Subset a TTF font
Reads the named fontfile and restricts the font to the characters.
Returns a serialization of the subset font as file-like object.
Parameters
----------
    fontfile : str
Path to the font file
characters : str
Continuous set of characters to include in subset
"""
options = subset.Options(glyph_names=True, recommended_glyphs=True)
# prevent subsetting FontForge Timestamp and other tables
options.drop_tables += ['FFTM', 'PfEd', 'BDF']
# if fontfile is a ttc, specify font number
if os.path.splitext(fontfile)[1] == ".ttc":
options.font_number = 0
with subset.load_font(fontfile, options) as font:
subsetter = subset.Subsetter(options=options)
subsetter.populate(text=characters)
subsetter.subset(font)
fh = BytesIO()
font.save(fh, reorderTables=False)
return fh
|
def get_glyphs_subset(fontfile, characters):
"""
Subset a TTF font
Reads the named fontfile and restricts the font to the characters.
Returns a serialization of the subset font as file-like object.
Parameters
----------
    fontfile : str
Path to the font file
characters : str
Continuous set of characters to include in subset
"""
options = subset.Options(glyph_names=True, recommended_glyphs=True)
# prevent subsetting FontForge Timestamp and other tables
options.drop_tables += ['FFTM', 'PfEd', 'BDF']
# if fontfile is a ttc, specify font number
if fontfile.endswith(".ttc"):
options.font_number = 0
with subset.load_font(fontfile, options) as font:
subsetter = subset.Subsetter(options=options)
subsetter.populate(text=characters)
subsetter.subset(font)
fh = BytesIO()
font.save(fh, reorderTables=False)
return fh
|
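A small usage sketch (assumes fontTools is importable as ``subset`` in this module, as above, and that Matplotlib's bundled DejaVu Sans font can be located via ``font_manager``):
from io import BytesIO
from matplotlib import font_manager
fontfile = font_manager.findfont("DejaVu Sans")     # path to a .ttf shipped with Matplotlib
buf = get_glyphs_subset(fontfile, "Hello, world")   # subset limited to these characters
assert isinstance(buf, BytesIO)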
5,090 |
def _get_anchored_bbox(loc, bbox, parentbbox, borderpad):
"""
Return the position of the *bbox* anchored at the *parentbbox* with the
*loc* code with the *borderpad*.
"""
c = _api.check_getitem({1: "NE", 2: "NW", 3: "SW", 4: "SE", 5: "E",
6: "W", 7: "E", 8: "S", 9: "N", 10: "C"}, loc=loc)
container = parentbbox.padded(-borderpad)
anchored_box = bbox.anchored(c, container=container)
return anchored_box.x0, anchored_box.y0
|
def _get_anchored_bbox(loc, bbox, parentbbox, borderpad):
"""
Return the position (x, y) of the *bbox* anchored at the *parentbbox* with the
*loc* code with the *borderpad*.
"""
c = _api.check_getitem({1: "NE", 2: "NW", 3: "SW", 4: "SE", 5: "E",
6: "W", 7: "E", 8: "S", 9: "N", 10: "C"}, loc=loc)
container = parentbbox.padded(-borderpad)
anchored_box = bbox.anchored(c, container=container)
return anchored_box.x0, anchored_box.y0
|
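A worked example of the anchoring, assuming the Matplotlib module context in which this private helper lives (``Bbox`` comes from ``matplotlib.transforms``):
from matplotlib.transforms import Bbox
box = Bbox.from_bounds(0, 0, 2, 1)        # 2 wide, 1 tall
parent = Bbox.from_bounds(0, 0, 10, 10)
x0, y0 = _get_anchored_bbox(loc=1, bbox=box, parentbbox=parent, borderpad=0.5)
# loc=1 is "NE": the box is pushed into the top-right of the parent shrunk by the
# border padding, so (x0, y0) comes out as roughly (7.5, 8.5).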
36,354 |
def _asdict_inner(obj, dict_factory):
if _is_dataclass_instance(obj):
result = []
for f in fields(obj):
value = _asdict_inner(getattr(obj, f.name), dict_factory)
result.append((f.name, value))
return dict_factory(result)
elif isinstance(obj, tuple) and hasattr(obj, '_fields'):
# obj is a namedtuple. Recurse into it, but the returned
# object is another namedtuple of the same type. This is
# similar to how other list- or tuple-derived classes are
# treated (see below), but we just need to create them
# differently because a namedtuple's __init__ needs to be
# called differently (see bpo-34363).
# I'm not using namedtuple's _asdict()
# method, because:
# - it does not recurse in to the namedtuple fields and
# convert them to dicts (using dict_factory).
        # - I don't actually want to return a dict here. The main
# use case here is json.dumps, and it handles converting
# namedtuples to lists. Admittedly we're losing some
# information here when we produce a json list instead of a
# dict. Note that if we returned dicts here instead of
# namedtuples, we could no longer call asdict() on a data
# structure where a namedtuple was used as a dict key.
return type(obj)(*[_asdict_inner(v, dict_factory) for v in obj])
elif isinstance(obj, (list, tuple)):
# Assume we can create an object of this type by passing in a
# generator (which is not true for namedtuples, handled
# above).
return type(obj)(_asdict_inner(v, dict_factory) for v in obj)
elif isinstance(obj, collections.defaultdict):
        # defaultdict does not have the same constructor as dict and must be
# hendled separately
return type(obj)(obj.default_factory, ((_asdict_inner(k, dict_factory),
_asdict_inner(v, dict_factory))
for k, v in obj.items()))
elif isinstance(obj, dict):
return type(obj)((_asdict_inner(k, dict_factory),
_asdict_inner(v, dict_factory))
for k, v in obj.items())
else:
return copy.deepcopy(obj)
|
def _asdict_inner(obj, dict_factory):
if _is_dataclass_instance(obj):
result = []
for f in fields(obj):
value = _asdict_inner(getattr(obj, f.name), dict_factory)
result.append((f.name, value))
return dict_factory(result)
elif isinstance(obj, tuple) and hasattr(obj, '_fields'):
# obj is a namedtuple. Recurse into it, but the returned
# object is another namedtuple of the same type. This is
# similar to how other list- or tuple-derived classes are
# treated (see below), but we just need to create them
# differently because a namedtuple's __init__ needs to be
# called differently (see bpo-34363).
# I'm not using namedtuple's _asdict()
# method, because:
        # - it does not recurse into the namedtuple fields and
        # convert them to dicts (using dict_factory).
        # - I don't actually want to return a dict here. The main
# use case here is json.dumps, and it handles converting
# namedtuples to lists. Admittedly we're losing some
# information here when we produce a json list instead of a
# dict. Note that if we returned dicts here instead of
# namedtuples, we could no longer call asdict() on a data
# structure where a namedtuple was used as a dict key.
return type(obj)(*[_asdict_inner(v, dict_factory) for v in obj])
elif isinstance(obj, (list, tuple)):
# Assume we can create an object of this type by passing in a
# generator (which is not true for namedtuples, handled
# above).
return type(obj)(_asdict_inner(v, dict_factory) for v in obj)
elif isinstance(obj, collections.defaultdict):
        # defaultdict does not have the same constructor as dict and must be
# handled separately
return type(obj)(obj.default_factory, ((_asdict_inner(k, dict_factory),
_asdict_inner(v, dict_factory))
for k, v in obj.items()))
elif isinstance(obj, dict):
return type(obj)((_asdict_inner(k, dict_factory),
_asdict_inner(v, dict_factory))
for k, v in obj.items())
else:
return copy.deepcopy(obj)
|
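A small illustrative sketch, assuming only plain CPython semantics (the mapping below is made up), of why the defaultdict branch above is needed: defaultdict takes its default_factory as the first constructor argument, so the dict-style rebuild used for ordinary mappings would not work for it.
import collections

dd = collections.defaultdict(list, {"a": [1, 2]})
# Rebuild the way the defaultdict branch does: factory first, then the key/value pairs.
rebuilt = type(dd)(dd.default_factory, ((k, list(v)) for k, v in dd.items()))
assert rebuilt == dd and rebuilt.default_factory is list
# A dict-style rebuild, type(dd)(dd.items()), raises TypeError instead, because
# defaultdict's first positional argument must be callable or None.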
58,373 |
def validate_derivative_path(path, **kwargs):
# Collect all paths that contain a dataset_description.json
dd = path / 'dataset_description.json'
with dd.open('r', encoding='utf-8') as ddfd:
description = json.load(ddfd)
pipeline_names = [pipeline["Name"]
for pipeline in description.get("GeneratedBy", [])
if "Name" in pipeline]
if pipeline_names:
pipeline_name = pipeline_names[0]
elif "PipelineDescription" in description:
warnings.warn("The PipelineDescription field was superseded "
"by GeneratedBy in BIDS 1.4.0. You can use "
"``pybids upgrade`` to update your derivative "
"dataset.")
pipeline_name = description["PipelineDescription"].get("Name")
else:
pipeline_name = None
if pipeline_name is None:
raise BIDSDerivativesValidationError(
"Every valid BIDS-derivatives dataset must "
"have a GeneratedBy.Name field set "
"inside 'dataset_description.json'. "
"\nExample: %s" %
MANDATORY_DERIVATIVES_FIELDS['GeneratedBy'])
return pipeline_name
|
def validate_derivative_path(path, **kwargs):
# Collect all paths that contain a dataset_description.json
dd = path / 'dataset_description.json'
with dd.open('r', encoding='utf-8') as ddfd:
description = json.load(ddfd)
pipeline_names = [pipeline["Name"]
for pipeline in description.get("GeneratedBy", [])
if "Name" in pipeline]
if pipeline_names:
pipeline_name = pipeline_names[0]
elif "PipelineDescription" in description:
warnings.warn("The PipelineDescription field was superseded "
"by GeneratedBy in BIDS 1.4.0. You can use "
"``pybids upgrade`` to update your derivative "
"dataset.")
pipeline_name = description["PipelineDescription"].get("Name")
else:
pipeline_name = None
if pipeline_name is None:
raise BIDSDerivativesValidationError(
"Every valid BIDS-derivatives dataset must have a "
"GeneratedBy.Name field set inside 'dataset_description.json'.\n"
"Example: %s" %
MANDATORY_DERIVATIVES_FIELDS['GeneratedBy'])
return pipeline_name
|
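A minimal sketch of the GeneratedBy lookup performed above, run against an in-memory description dict; the pipeline names are invented for illustration.
description = {"GeneratedBy": [{"Name": "pipeline-a"}, {"Name": "pipeline-b"}]}
pipeline_names = [pipeline["Name"]
                  for pipeline in description.get("GeneratedBy", [])
                  if "Name" in pipeline]
assert pipeline_names[0] == "pipeline-a"
# An empty or missing GeneratedBy list leaves pipeline_names empty, which is what
# triggers the PipelineDescription fallback and, failing that, the error above.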
48,221 |
def upgrade(module, pacman_path):
cmdupgrade = "%s -Suq --noconfirm" % (pacman_path)
cmdneedrefresh = "%s -Qu" % (pacman_path)
rc, stdout, stderr = module.run_command(cmdneedrefresh, check_rc=False)
data = stdout.split('\n')
data.remove('')
packages = []
diff = {
'before': '',
'after': '',
}
if rc == 0:
regex = re.compile(r'([\w+\-.@]+) ((?:\S+)-(?:\S+)) -> ((?:\S+)-(?:\S+))')
for p in data:
m = regex.search(p)
packages.append(m.group(1))
if module._diff:
diff['before'] += "%s-%s\n" % (m.group(1), m.group(2))
diff['after'] += "%s-%s\n" % (m.group(1), m.group(3))
if module.check_mode:
module.exit_json(changed=True, msg="%s package(s) would be upgraded" % (len(data)), packages=packages, diff=diff)
rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False)
if rc == 0:
module.exit_json(changed=True, msg='System upgraded', packages=packages, diff=diff)
else:
module.fail_json(msg="Could not upgrade")
else:
module.exit_json(changed=False, msg='Nothing to upgrade', packages=packages)
|
def upgrade(module, pacman_path):
cmdupgrade = "%s -Suq --noconfirm" % (pacman_path)
cmdneedrefresh = "%s -Qu" % (pacman_path)
rc, stdout, stderr = module.run_command(cmdneedrefresh, check_rc=False)
data = stdout.split('\n')
data.remove('')
packages = []
diff = {
'before': '',
'after': '',
}
if rc == 0:
# https://wiki.archlinux.org/index.php/Arch_package_guidelines#Package_naming
# Package names can contain only alphanumeric characters and any of @, ., _, +, -.
# All letters should be lowercase.
regex = re.compile(r'([\w+-.@]+) ((?:\S+)-(?:\S+)) -> ((?:\S+)-(?:\S+))')
for p in data:
m = regex.search(p)
packages.append(m.group(1))
if module._diff:
diff['before'] += "%s-%s\n" % (m.group(1), m.group(2))
diff['after'] += "%s-%s\n" % (m.group(1), m.group(3))
if module.check_mode:
module.exit_json(changed=True, msg="%s package(s) would be upgraded" % (len(data)), packages=packages, diff=diff)
rc, stdout, stderr = module.run_command(cmdupgrade, check_rc=False)
if rc == 0:
module.exit_json(changed=True, msg='System upgraded', packages=packages, diff=diff)
else:
module.fail_json(msg="Could not upgrade")
else:
module.exit_json(changed=False, msg='Nothing to upgrade', packages=packages)
|
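A hedged check of what the version-update pattern above captures; the sample pacman output line is made up for illustration.
import re

# Same pattern as the first version above (hyphen escaped); the character-class
# variant in the second version accepts this sample line identically.
regex = re.compile(r'([\w+\-.@]+) ((?:\S+)-(?:\S+)) -> ((?:\S+)-(?:\S+))')
m = regex.search("python-requests 2.27.1-1 -> 2.28.0-1")
assert m.group(1) == "python-requests"
assert m.group(2) == "2.27.1-1" and m.group(3) == "2.28.0-1"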
25,269 |
def single_attribute(attribute_path=None):
"""Creates a projection that extracts the value of
an attribute path.
Args:
attribute_path (str): Extracts values from this path, if given.
Returns:
Projection[any]: A projection that extracts the value of the given
attribute path.
"""
return _SingleAttributeProjection(attribute_path)
|
def single_attribute(attribute_path=None):
"""Creates a projection that extracts the value of
an attribute path.
Args:
attribute_path (str): Path to extract the attribute from.
Returns:
Projection[any]: A projection that extracts the value of the given
attribute path.
"""
return _SingleAttributeProjection(attribute_path)
|
20,772 |
def update_po_file(input_filename: str, output_filename: str, messages: List[Msg]):
# Takes a list of changed messages and writes a copy of input file with updated message strings
with open(input_filename, "r") as input_file, open(output_filename, "w") as output_file:
iterator = iter(input_file.readlines())
while True:
try:
line = next(iterator)
output_file.write(line)
if line[0: 7] == "msgctxt":
# Start of translation block
msgctxt = line
msgid = next(iterator)
output_file.write(msgid)
# Check for updated version of msgstr
message = list(filter(lambda m: m.msgctxt == msgctxt and m.msgid == msgid, messages))
if message and message[0]:
# Write update translation
output_file.write(message[0].msgstr)
# Skip lines until next translation. This should skip multiline msgstr
while True:
line = next(iterator)
if line == "\n":
output_file.write(line)
break
except StopIteration:
return
|
def updatePOFile(input_filename: str, output_filename: str, messages: List[Msg]) -> None:
# Takes a list of changed messages and writes a copy of input file with updated message strings
with open(input_filename, "r") as input_file, open(output_filename, "w") as output_file:
iterator = iter(input_file.readlines())
while True:
try:
line = next(iterator)
output_file.write(line)
if line[0: 7] == "msgctxt":
# Start of translation block
msgctxt = line
msgid = next(iterator)
output_file.write(msgid)
# Check for updated version of msgstr
message = list(filter(lambda m: m.msgctxt == msgctxt and m.msgid == msgid, messages))
if message and message[0]:
# Write update translation
output_file.write(message[0].msgstr)
# Skip lines until next translation. This should skip multiline msgstr
while True:
line = next(iterator)
if line == "\n":
output_file.write(line)
break
except StopIteration:
return
|
25,905 |
def autoscale_rule_list(cmd, client, autoscale_name, resource_group_name, profile_name=DEFAULT_PROFILE_NAME):
autoscale_settings = client.get(resource_group_name, autoscale_name)
profile_names = [x.name for x in autoscale_settings.profiles]
if not profile_name in profile_names:
from knack.util import CLIError
raise CLIError('Profile name is invalid. Please check the existence of the profile.')
profile = next(x for x in autoscale_settings.profiles if x.name == profile_name)
index = 0
# we artificially add indices to the rules so the user can target them with the remove command
for rule in profile.rules:
setattr(rule, 'index', index)
index += 1
return profile.rules
|
def autoscale_rule_list(cmd, client, autoscale_name, resource_group_name, profile_name=DEFAULT_PROFILE_NAME):
autoscale_settings = client.get(resource_group_name, autoscale_name)
profile_names = [x.name for x in autoscale_settings.profiles]
if profile_name not in profile_names:
from knack.util import CLIError
raise CLIError('Profile name is invalid. Please check the existence of the profile.')
profile = next(x for x in autoscale_settings.profiles if x.name == profile_name)
index = 0
# we artificially add indices to the rules so the user can target them with the remove command
for rule in profile.rules:
setattr(rule, 'index', index)
index += 1
return profile.rules
|
30,875 |
def get_file_from_endpoint_path(session_id: str, path: str) -> Tuple[Union[dict, list], dict]:
""" Get file from file from session (endpoint/sensor).
Args:
session_id: Actvie session id.
path: Path of file to be retrieved.
Returns:
dict/list: entry context.
dict: raw response.
Raises:
Exception: If file can't be retrieved.
"""
try:
# Get file from enpoint
output = demisto.executeCommand("cb-get-file-from-endpoint", {'session': session_id, 'path': path})
entry_context = dict_safe_get(output, [0, 'EntryContext'])
# Output file to waroom as soon as possible, But removing human readable so it will be signle summary in the end.
output[0]['HumanReadable'] = ""
demisto.results(output)
except Exception as e:
raise Exception(f"Session established but file can't retrieved from endpoint.\nError:{e}")
return entry_context
|
def get_file_from_endpoint_path(session_id: str, path: str) -> Tuple[Union[dict, list], dict]:
    """ Get file from session (endpoint/sensor).
    Args:
        session_id: Active session id.
path: Path of file to be retrieved.
Returns:
dict/list: entry context.
dict: raw response.
Raises:
Exception: If file can't be retrieved.
"""
try:
        # Get file from endpoint
output = demisto.executeCommand("cb-get-file-from-endpoint", {'session': session_id, 'path': path})
entry_context = dict_safe_get(output, [0, 'EntryContext'])
# Output file to war-room as soon as possible, But removing human-readable so it will be a single summary in the end.
output[0]['HumanReadable'] = ""
demisto.results(output)
except Exception as e:
        raise Exception(f"Session established but file can't be retrieved from endpoint.\nError:{e}")
return entry_context
|
56,855 |
def exercise():
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
# Choose a Boost Python class for which to deprecate a method. Here we
# use rational.int.
from boost_adaptbx.boost import rational
original_value = rational.int().numerator()
deprecate_method(rational.int, "numerator")
new_value = rational.int().numerator()
assert original_value == new_value
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "deprecated" in str(w[-1].message)
|
def exercise():
# Choose a Boost Python class for which to deprecate a method. Here we
# use rational.int.
from boost_adaptbx.boost import rational
original_value = rational.int().numerator()
deprecate_method(rational.int, "numerator")
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
new_value = rational.int().numerator()
assert original_value == new_value
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "deprecated" in str(w[-1].message)
|
28,299 |
def add_parameter(method):
"""
A decorator function that wraps a method of an Instrument subclass such that the
new method will be converted into the corresponding :code:`param_class`
in the :code:`_add_params_from_decorated_methods`.
Args:
method: The method to be wrapped and flagged to be converted to parameter.
"""
if DECORATED_METHOD_PREFIX not in method.__name__:
raise ValueError(
f"Only methods prefixed with '{DECORATED_METHOD_PREFIX}' can be decorated "
f"with this decorator."
)
@wraps(method) # preserves info like `__doc__` and signature
def kwargs_and_doc_container(self, *args, **kwargs):
raise RuntimeError(
f"Method not intended to be called.\n"
f"'{method.__name__}' is a special method used as information container "
f"for creating and assigning parameters to {self}."
)
# special attribute to flag method for conversion to parameter
setattr(kwargs_and_doc_container, ADD_PARAMETER_ATTR_NAME, True)
return kwargs_and_doc_container
|
def add_parameter(method):
"""
A decorator function that wraps a method of an Instrument subclass such that the
new method will be converted into the corresponding :code:`param_class`
in the :code:`_add_params_from_decorated_methods`.
Args:
method: The method to be wrapped and flagged to be converted to parameter.
"""
if not method.__name__.startswith(DECORATED_METHOD_PREFIX):
raise ValueError(
f"Only methods prefixed with '{DECORATED_METHOD_PREFIX}' can be decorated "
f"with this decorator."
)
@wraps(method) # preserves info like `__doc__` and signature
def kwargs_and_doc_container(self, *args, **kwargs):
raise RuntimeError(
f"Method not intended to be called.\n"
f"'{method.__name__}' is a special method used as information container "
f"for creating and assigning parameters to {self}."
)
# special attribute to flag method for conversion to parameter
setattr(kwargs_and_doc_container, ADD_PARAMETER_ATTR_NAME, True)
return kwargs_and_doc_container
|
3,355 |
def query_top_tags(
params: Mapping[str, str],
tag_key: str,
limit: int,
referrer: str,
orderby: Optional[str] = None,
filter_query: Optional[str] = None,
) -> Optional[List[Any]]:
"""
Fetch counts by tag value, finding the top tag values for a tag key by a limit.
:return: Returns the row with the value, the aggregate and the count if the query was successful
Returns None if query was not successful which causes the endpoint to return early
"""
with sentry_sdk.start_span(
op="discover.discover", description="facets.filter_transform"
) as span:
span.set_data("query", filter_query)
snuba_filter = get_filter(filter_query, params)
# Resolve the public aliases into the discover dataset names.
snuba_filter, translated_columns = discover.resolve_discover_aliases(snuba_filter)
with sentry_sdk.start_span(op="discover.discover", description="facets.top_tags"):
if not orderby:
orderby = "-count"
if len(orderby) >= 1:
orderby = orderby[0]
if "frequency" in orderby:
# Replacing frequency as it's the same underlying data dimension, this way we don't have to modify the existing histogram query.
orderby = orderby.replace("-frequency", "count")
orderby = orderby.replace("frequency", "-count")
# Get the average and count to use to filter the next request to facets
tag_data = discover.query(
selected_columns=[
"count()",
"array_join(tags.value) as tags_value",
],
query=filter_query,
params=params,
orderby=orderby,
conditions=[["tags_key", "IN", [tag_key]]],
functions_acl=["array_join"],
referrer=f"{referrer}.top_tags",
limit=limit,
)
if len(tag_data["data"]) <= 0:
return None
counts = [r["count"] for r in tag_data["data"]]
# Return early to avoid doing more queries with 0 count transactions or aggregates for columns that dont exist
if counts[0] == 0:
return None
if not tag_data["data"]:
return None
return tag_data["data"]
|
def query_top_tags(
params: Mapping[str, str],
tag_key: str,
limit: int,
referrer: str,
orderby: Optional[str] = None,
filter_query: Optional[str] = None,
) -> Optional[List[Any]]:
"""
Fetch counts by tag value, finding the top tag values for a tag key by a limit.
:return: Returns the row with the value, the aggregate and the count if the query was successful
Returns None if query was not successful which causes the endpoint to return early
"""
with sentry_sdk.start_span(
op="discover.discover", description="facets.filter_transform"
) as span:
span.set_data("query", filter_query)
snuba_filter = get_filter(filter_query, params)
# Resolve the public aliases into the discover dataset names.
snuba_filter, translated_columns = discover.resolve_discover_aliases(snuba_filter)
with sentry_sdk.start_span(op="discover.discover", description="facets.top_tags"):
if not orderby:
orderby = "-count"
        if isinstance(orderby, list):
orderby = orderby[0]
if "frequency" in orderby:
# Replacing frequency as it's the same underlying data dimension, this way we don't have to modify the existing histogram query.
orderby = orderby.replace("-frequency", "count")
orderby = orderby.replace("frequency", "-count")
# Get the average and count to use to filter the next request to facets
tag_data = discover.query(
selected_columns=[
"count()",
"array_join(tags.value) as tags_value",
],
query=filter_query,
params=params,
orderby=orderby,
conditions=[["tags_key", "IN", [tag_key]]],
functions_acl=["array_join"],
referrer=f"{referrer}.top_tags",
limit=limit,
)
if len(tag_data["data"]) <= 0:
return None
counts = [r["count"] for r in tag_data["data"]]
        # Return early to avoid doing more queries with 0 count transactions or aggregates for columns that don't exist
if counts[0] == 0:
return None
if not tag_data["data"]:
return None
return tag_data["data"]
|
43,661 |
def read_structure(filepath, outpath="."):
r"""Reads the structure of the polyatomic system from a file and creates
a list containing the symbol and Cartesian coordinates of the atomic species.
The `xyz <https://en.wikipedia.org/wiki/XYZ_file_format>`_ format is supported out of the box.
If `Open Babel <https://openbabel.org/>`_ is installed,
`any format recognized by Open Babel <https://openbabel.org/wiki/Category:Formats>`_
is also supported. Additionally, the new file ``structure.xyz``,
containing the inputted geometry, is created in a directory with path given by
``outpath``.
Open Babel can be installed using ``apt`` if on Ubuntu:
.. code-block:: bash
sudo apt install openbabel
or using Anaconda:
.. code-block:: bash
conda install -c conda-forge openbabel
See the Open Babel documentation for more details on installation.
Args:
filepath (str): name of the molecular structure file in the working directory
or the full path to the file if it is located in a different folder
outpath (str): path to the output directory
Returns:
list: for each atomic species, a list containing the symbol and the Cartesian coordinates
**Example**
>>> read_structure('h2_ref.xyz')
[['H', (0.0, 0.0, -0.35)], ['H', (0.0, 0.0, 0.35)]]
"""
obabel_error_message = (
"Open Babel converter not found:\n"
"If using Ubuntu or Debian, try: 'sudo apt install openbabel' \n"
"If using openSUSE, try: 'sudo zypper install openbabel' \n"
"If using CentOS or Fedora, try: 'sudo snap install openbabel' "
"Open Babel can also be downloaded from http://openbabel.org/wiki/Main_Page, \n"
"make sure you add it to the PATH environment variable. \n"
"If Anaconda is installed, try: 'conda install -c conda-forge openbabel'"
)
extension = filepath.split(".")[-1].strip().lower()
file_in = filepath.strip()
file_out = os.path.join(outpath, "structure.xyz")
if extension != "xyz":
if not _exec_exists("obabel"):
raise TypeError(obabel_error_message)
try:
subprocess.run(
["obabel", "-i" + extension, file_in, "-oxyz", "-O", file_out], check=True
)
except subprocess.CalledProcessError as e:
raise RuntimeError(
"Open Babel error. See the following Open Babel "
"output for details:\n\n {}\n{}".format(e.stdout, e.stderr)
)
else:
copyfile(file_in, file_out)
geometry = []
with open(file_out) as f:
for line in f.readlines()[2:]:
species, x, y, z = line.split()
geometry.append([species, (float(x), float(y), float(z))])
return geometry
|
def read_structure(filepath, outpath="."):
r"""Reads the structure of the polyatomic system from a file and creates
a list containing the symbol and Cartesian coordinates of the atomic species.
The `xyz <https://en.wikipedia.org/wiki/XYZ_file_format>`_ format is supported out of the box.
If `Open Babel <https://openbabel.org/>`_ is installed,
`any format recognized by Open Babel <https://openbabel.org/wiki/Category:Formats>`_
is also supported. Additionally, the new file ``structure.xyz``,
containing the input geometry, is created in a directory with path given by
``outpath``.
Open Babel can be installed using ``apt`` if on Ubuntu:
.. code-block:: bash
sudo apt install openbabel
or using Anaconda:
.. code-block:: bash
conda install -c conda-forge openbabel
See the Open Babel documentation for more details on installation.
Args:
filepath (str): name of the molecular structure file in the working directory
or the full path to the file if it is located in a different folder
outpath (str): path to the output directory
Returns:
list: for each atomic species, a list containing the symbol and the Cartesian coordinates
**Example**
>>> read_structure('h2_ref.xyz')
[['H', (0.0, 0.0, -0.35)], ['H', (0.0, 0.0, 0.35)]]
"""
obabel_error_message = (
"Open Babel converter not found:\n"
"If using Ubuntu or Debian, try: 'sudo apt install openbabel' \n"
"If using openSUSE, try: 'sudo zypper install openbabel' \n"
"If using CentOS or Fedora, try: 'sudo snap install openbabel' "
"Open Babel can also be downloaded from http://openbabel.org/wiki/Main_Page, \n"
"make sure you add it to the PATH environment variable. \n"
"If Anaconda is installed, try: 'conda install -c conda-forge openbabel'"
)
extension = filepath.split(".")[-1].strip().lower()
file_in = filepath.strip()
file_out = os.path.join(outpath, "structure.xyz")
if extension != "xyz":
if not _exec_exists("obabel"):
raise TypeError(obabel_error_message)
try:
subprocess.run(
["obabel", "-i" + extension, file_in, "-oxyz", "-O", file_out], check=True
)
except subprocess.CalledProcessError as e:
raise RuntimeError(
"Open Babel error. See the following Open Babel "
"output for details:\n\n {}\n{}".format(e.stdout, e.stderr)
)
else:
copyfile(file_in, file_out)
geometry = []
with open(file_out) as f:
for line in f.readlines()[2:]:
species, x, y, z = line.split()
geometry.append([species, (float(x), float(y), float(z))])
return geometry
|
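A tiny, self-contained illustration of the xyz-body parsing loop at the end of read_structure, using an in-memory two-atom example instead of a real file.
lines = [
    "2",                   # atom count (xyz header line 1)
    "hydrogen molecule",   # comment (xyz header line 2)
    "H 0.0 0.0 -0.35",
    "H 0.0 0.0 0.35",
]
geometry = []
for line in lines[2:]:     # skip the two header lines, as the function does
    species, x, y, z = line.split()
    geometry.append([species, (float(x), float(y), float(z))])
assert geometry == [["H", (0.0, 0.0, -0.35)], ["H", (0.0, 0.0, 0.35)]]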
43,139 |
def traverse_dir(
directory,
topdown=True,
ignore=None,
only=None,
recursive=True,
include_subdir=True,
):
"""
Recursively traverse all files and sub directories in a directory and
get a list of relative paths.
:param directory: Path to a directory that will be traversed.
:type directory: ``str``
:param topdown: Browse the directory in a top-down or bottom-up approach.
:type topdown: ``bool``
:param ignore: list of patterns to ignore by glob style filtering.
:type ignore: ``list``
:param only: list of patterns to exclusively consider by glob style
filtering.
:type only: ``list``
:param recursive: Traverse through all sub directories recursively.
:type recursive: ``bool``
:param include_subdir: Include all sub directories and files if True, or
exclude directories in the result.
:type include_subdir: ``bool``
:return: A list of relative file paths
:rtype: ``list`` of ``str``
"""
result = []
ignore = ignore or []
only = only or []
def should_ignore(filename):
"""Decide if a file should be ignored by its name."""
for pattern in ignore:
if fnmatch.fnmatch(filename, pattern):
return True
if only:
for pattern in only:
if fnmatch.fnmatch(filename, pattern):
return False
else:
return True
return False
for dirpath, dirnames, filenames in os.walk(
directory, topdown=topdown or recursive is False
):
if include_subdir:
for dname in dirnames:
if not should_ignore(dname):
dpath = os.path.join(dirpath, dname)
_, _, relpath = dpath.partition(directory)
while relpath.startswith(os.sep):
relpath = relpath[len(os.sep) :]
result.append(relpath)
for fname in filenames:
if not should_ignore(fname):
fpath = os.path.join(dirpath, fname)
_, _, relpath = fpath.partition(directory)
while relpath.startswith(os.sep):
relpath = relpath[len(os.sep) :]
result.append(relpath)
if recursive is False:
break
return result
|
def traverse_dir(
directory,
topdown=True,
ignore=None,
only=None,
recursive=True,
include_subdir=True,
):
"""
Recursively traverse all files and sub directories in a directory and
get a list of relative paths.
:param directory: Path to a directory that will be traversed.
:type directory: ``str``
:param topdown: Browse the directory in a top-down or bottom-up approach.
:type topdown: ``bool``
:param ignore: list of patterns to ignore by glob style filtering.
:type ignore: ``list``
:param only: list of patterns to include by glob style
filtering.
:type only: ``list``
:param recursive: Traverse through all sub directories recursively.
:type recursive: ``bool``
:param include_subdir: Include all sub directories and files if True, or
exclude directories in the result.
:type include_subdir: ``bool``
:return: A list of relative file paths
:rtype: ``list`` of ``str``
"""
result = []
ignore = ignore or []
only = only or []
def should_ignore(filename):
"""Decide if a file should be ignored by its name."""
for pattern in ignore:
if fnmatch.fnmatch(filename, pattern):
return True
if only:
for pattern in only:
if fnmatch.fnmatch(filename, pattern):
return False
else:
return True
return False
for dirpath, dirnames, filenames in os.walk(
directory, topdown=topdown or recursive is False
):
if include_subdir:
for dname in dirnames:
if not should_ignore(dname):
dpath = os.path.join(dirpath, dname)
_, _, relpath = dpath.partition(directory)
while relpath.startswith(os.sep):
relpath = relpath[len(os.sep) :]
result.append(relpath)
for fname in filenames:
if not should_ignore(fname):
fpath = os.path.join(dirpath, fname)
_, _, relpath = fpath.partition(directory)
while relpath.startswith(os.sep):
relpath = relpath[len(os.sep) :]
result.append(relpath)
if recursive is False:
break
return result
|
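A minimal sketch of the glob-style ignore/only filtering implemented by should_ignore above; the patterns and file names are invented for illustration.
import fnmatch

ignore = ["*.pyc"]
only = ["*.py", "*.txt"]

def _should_ignore(filename):
    # Same precedence as above: "ignore" wins, then "only" acts as an allow-list.
    if any(fnmatch.fnmatch(filename, pattern) for pattern in ignore):
        return True
    if only:
        return not any(fnmatch.fnmatch(filename, pattern) for pattern in only)
    return False

assert _should_ignore("cache.cpython-39.pyc") is True   # matches an ignore pattern
assert _should_ignore("report.txt") is False            # allowed by an "only" pattern
assert _should_ignore("image.png") is True              # matches neither, so excluded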
5,644 |
def _conv_ops(x_shape, h_shape, mode):
x_size, h_size = _prod(x_shape), _prod(h_shape)
if mode == "full":
out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)]
elif mode == "valid":
out_shape = [max(n, k) - min(n, k) + 1 for n, k in zip(x_shape, h_shape)],
elif mode == "same":
out_shape = x_shape
else:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full', not mode={}".format(mode))
s1, s2 = x_shape, h_shape
if len(x_shape) == 1:
s1, s2 = s1[0], s2[0]
if mode == "full":
direct_ops = s1 * s2
elif mode == "valid":
direct_ops = (s2 - s1 + 1) * s1 if s2 >= s1 else (s1 - s2 + 1) * s2
elif mode == "same":
direct_mul = s1 * s2 if s1 <= s2 else s1 * s2 - (s2 // 2) * ((s2 + 1) // 2)
else:
if mode == "full":
direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape)
elif mode == "valid":
direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape)
elif mode == "same":
direct_ops = _prod(s1) * _prod(s2)
fft_ops = sum(n * np.log(n) for n in (x_shape + h_shape + tuple(out_shape)))
return fft_ops, direct_ops
|
def _conv_ops(x_shape, h_shape, mode):
x_size, h_size = _prod(x_shape), _prod(h_shape)
if mode == "full":
out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)]
elif mode == "valid":
        out_shape = [abs(n - k) + 1 for n, k in zip(x_shape, h_shape)]
elif mode == "same":
out_shape = x_shape
else:
raise ValueError("Acceptable mode flags are 'valid',"
" 'same', or 'full', not mode={}".format(mode))
s1, s2 = x_shape, h_shape
if len(x_shape) == 1:
s1, s2 = s1[0], s2[0]
if mode == "full":
direct_ops = s1 * s2
elif mode == "valid":
direct_ops = (s2 - s1 + 1) * s1 if s2 >= s1 else (s1 - s2 + 1) * s2
elif mode == "same":
direct_mul = s1 * s2 if s1 <= s2 else s1 * s2 - (s2 // 2) * ((s2 + 1) // 2)
else:
if mode == "full":
direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape)
elif mode == "valid":
direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape)
elif mode == "same":
direct_ops = _prod(s1) * _prod(s2)
fft_ops = sum(n * np.log(n) for n in (x_shape + h_shape + tuple(out_shape)))
return fft_ops, direct_ops
|
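A worked example of the output-shape arithmetic the branches above rely on, using made-up 1-D shapes: a length-10 signal and a length-4 kernel.
x_shape, h_shape = (10,), (4,)
full_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)]        # 10 + 4 - 1 = 13
valid_shape = [abs(n - k) + 1 for n, k in zip(x_shape, h_shape)]  # |10 - 4| + 1 = 7
same_shape = list(x_shape)                                        # 'same' keeps the input length
assert full_shape == [13] and valid_shape == [7] and same_shape == [10]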
58,770 |
def parse_disabled_pass(input_string):
"""Parse an input string for disabled passes
Parameters
----------
input_string: str
Possibly comma-separated string with the names of disabled passes
Returns
-------
list: a list of disabled passes.
"""
if input_string is not None:
pass_list = input_string.split(",")
nf = [_ for _ in pass_list if tvm.get_global_func("relay._transform." + _, True) is None]
if len(nf) > 0:
raise argparse.ArgumentTypeError(
"Following passes are not registered within tvm: " + str(nf)
)
return pass_list
return None
|
def parse_disabled_pass(input_string):
"""Parse an input string for disabled passes
Parameters
----------
input_string: str
Possibly comma-separated string with the names of disabled passes
Returns
-------
list: a list of disabled passes.
"""
if input_string is not None:
pass_list = input_string.split(",")
invalid_passes = [p for p in pass_list if tvm.get_global_func("relay._transform.%s" % p, True) is None]
if len(invalid_passes) > 0:
            raise argparse.ArgumentTypeError(
                "Following passes are not registered within tvm: " + str(invalid_passes)
)
return pass_list
return None
|
5,714 |
def _kpoints(data, k, rng):
"""Pick k points at random in data (one row = one observation).
Parameters
----------
data : ndarray
Expect a rank 1 or 2 array. Rank 1 are assumed to describe one
dimensional data, rank 2 multidimensional data, in which case one
row is one observation.
k : int
Number of samples to generate.
rng : `numpy.random.Generator` or `numpy.random.RandomState`
Random number generator
Returns
-------
x : ndarray
A 'k' by 'N' containing the initial centroids
"""
idx = rng.choice(data.shape[0], size=k, replace=False)
return data[idx]
|
def _kpoints(data, k, rng):
"""Pick k points at random in data (one row = one observation).
Parameters
----------
data : ndarray
Expect a rank 1 or 2 array. Rank 1 are assumed to describe one
dimensional data, rank 2 multidimensional data, in which case one
row is one observation.
k : int
Number of samples to generate.
rng : `numpy.random.Generator` or `numpy.random.RandomState`
Random number generator.
Returns
-------
x : ndarray
A 'k' by 'N' containing the initial centroids
"""
idx = rng.choice(data.shape[0], size=k, replace=False)
return data[idx]
|
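A short usage sketch for the initializer above; the data, feature count, and seed are made up for illustration.
import numpy as np

rng = np.random.default_rng(12345)
data = rng.standard_normal((100, 3))   # 100 observations with 3 features each
centroids = _kpoints(data, 5, rng)     # pick 5 distinct rows as initial centroids
assert centroids.shape == (5, 3)
# Every centroid is an actual observation drawn (without replacement) from data.
assert all(any(np.array_equal(c, row) for row in data) for c in centroids)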
33,688 |
def hash_runtime_conf(file_mounts, extra_objs, use_cached_contents_hash=True):
config_hasher = hashlib.sha1()
contents_hasher = hashlib.sha1()
def add_content_hashes(path):
def add_hash_of_file(fpath):
with open(fpath, "rb") as f:
for chunk in iter(lambda: f.read(2**20), b""):
contents_hasher.update(chunk)
path = os.path.expanduser(path)
if os.path.isdir(path):
dirs = []
for dirpath, _, filenames in os.walk(path):
dirs.append((dirpath, sorted(filenames)))
for dirpath, filenames in sorted(dirs):
contents_hasher.update(dirpath.encode("utf-8"))
for name in filenames:
contents_hasher.update(name.encode("utf-8"))
fpath = os.path.join(dirpath, name)
add_hash_of_file(fpath)
else:
add_hash_of_file(path)
conf_str = (json.dumps(file_mounts, sort_keys=True).encode("utf-8") +
json.dumps(extra_objs, sort_keys=True).encode("utf-8"))
# Only hash the files once unless use_chaced_contents_hash is false.
if not use_cached_contents_hash or conf_str not in _hash_cache:
config_hasher.update(conf_str)
for local_path in sorted(file_mounts.values()):
add_content_hashes(local_path)
_hash_cache[conf_str] = (config_hasher.hexdigest(),
contents_hasher.hexdigest())
return _hash_cache[conf_str]
|
def hash_runtime_conf(file_mounts, extra_objs, use_cached_contents_hash=True):
config_hasher = hashlib.sha1()
contents_hasher = hashlib.sha1()
def add_content_hashes(path):
def add_hash_of_file(fpath):
with open(fpath, "rb") as f:
for chunk in iter(lambda: f.read(2**20), b""):
contents_hasher.update(chunk)
path = os.path.expanduser(path)
if os.path.isdir(path):
dirs = []
for dirpath, _, filenames in os.walk(path):
dirs.append((dirpath, sorted(filenames)))
for dirpath, filenames in sorted(dirs):
contents_hasher.update(dirpath.encode("utf-8"))
for name in filenames:
contents_hasher.update(name.encode("utf-8"))
fpath = os.path.join(dirpath, name)
add_hash_of_file(fpath)
else:
add_hash_of_file(path)
conf_str = (json.dumps(file_mounts, sort_keys=True).encode("utf-8") +
json.dumps(extra_objs, sort_keys=True).encode("utf-8"))
# Only hash the files once, unless use_cached_contents_hash is false.
if not use_cached_contents_hash or conf_str not in _hash_cache:
config_hasher.update(conf_str)
for local_path in sorted(file_mounts.values()):
add_content_hashes(local_path)
_hash_cache[conf_str] = (config_hasher.hexdigest(),
contents_hasher.hexdigest())
return _hash_cache[conf_str]
|
10,636 |
def main(args=None):
""" Called to initiate the connect to the remote device
"""
parser = opt_help.argparse.ArgumentParser(prog='ansible-connection', add_help=False)
opt_help.add_verbosity_options(parser)
parser.add_argument('playbook_pid')
parser.add_argument('task_uuid')
args = parser.parse_args(args[1:] if args is not None else args)
# initialize verbosity
display.verbosity = args.verbosity
rc = 0
result = {}
messages = list()
socket_path = None
# Need stdin as a byte stream
stdin = sys.stdin.buffer
# Note: update the below log capture code after Display.display() is refactored.
saved_stdout = sys.stdout
sys.stdout = io.StringIO()
try:
# read the play context data via stdin, which means depickling it
vars_data = read_stream(stdin)
init_data = read_stream(stdin)
pc_data = pickle.loads(init_data, encoding='bytes')
variables = pickle.loads(vars_data, encoding='bytes')
play_context = PlayContext()
play_context.deserialize(pc_data)
except Exception as e:
rc = 1
result.update({
'error': to_text(e),
'exception': traceback.format_exc()
})
if rc == 0:
ssh = connection_loader.get('ssh', class_only=True)
ansible_playbook_pid = args.playbook_pid
task_uuid = args.task_uuid
cp = ssh._create_control_path(play_context.remote_addr, play_context.port, play_context.remote_user, play_context.connection, ansible_playbook_pid)
# create the persistent connection dir if need be and create the paths
# which we will be using later
tmp_path = unfrackpath(C.PERSISTENT_CONTROL_PATH_DIR)
makedirs_safe(tmp_path)
socket_path = unfrackpath(cp % dict(directory=tmp_path))
lock_path = unfrackpath("%s/.ansible_pc_lock_%s" % os.path.split(socket_path))
with file_lock(lock_path):
if not os.path.exists(socket_path):
messages.append(('vvvv', 'local domain socket does not exist, starting it'))
original_path = os.getcwd()
r, w = os.pipe()
pid = fork_process()
if pid == 0:
try:
os.close(r)
wfd = os.fdopen(w, 'w')
process = ConnectionProcess(wfd, play_context, socket_path, original_path, task_uuid, ansible_playbook_pid)
process.start(variables)
except Exception:
messages.append(('error', traceback.format_exc()))
rc = 1
if rc == 0:
process.run()
else:
process.shutdown()
sys.exit(rc)
else:
os.close(w)
rfd = os.fdopen(r, 'r')
data = json.loads(rfd.read(), cls=AnsibleJSONDecoder)
messages.extend(data.pop('messages'))
result.update(data)
else:
messages.append(('vvvv', 'found existing local domain socket, using it!'))
conn = Connection(socket_path)
try:
conn.set_options(var_options=variables)
except ConnectionError as exc:
messages.append(('debug', to_text(exc)))
raise ConnectionError('Unable to decode JSON from response set_options. See the debug log for more information.')
pc_data = to_text(init_data)
try:
conn.update_play_context(pc_data)
conn.set_check_prompt(task_uuid)
except Exception as exc:
# Only network_cli has update_play context and set_check_prompt, so missing this is
# not fatal e.g. netconf
if isinstance(exc, ConnectionError) and getattr(exc, 'code', None) == -32601:
pass
else:
result.update({
'error': to_text(exc),
'exception': traceback.format_exc()
})
if os.path.exists(socket_path):
messages.extend(Connection(socket_path).pop_messages())
messages.append(('vvvv', sys.stdout.getvalue()))
result.update({
'messages': messages,
'socket_path': socket_path
})
sys.stdout = saved_stdout
if 'exception' in result:
rc = 1
sys.stderr.write(json.dumps(result, cls=AnsibleJSONEncoder))
else:
rc = 0
sys.stdout.write(json.dumps(result, cls=AnsibleJSONEncoder))
sys.exit(rc)
|
def main(args=None):
""" Called to initiate the connect to the remote device
"""
parser = opt_help.create_base_parser(prog='ansible-connection')
parser.add_argument('playbook_pid')
parser.add_argument('task_uuid')
args = parser.parse_args(args[1:] if args is not None else args)
# initialize verbosity
display.verbosity = args.verbosity
rc = 0
result = {}
messages = list()
socket_path = None
# Need stdin as a byte stream
stdin = sys.stdin.buffer
# Note: update the below log capture code after Display.display() is refactored.
saved_stdout = sys.stdout
sys.stdout = io.StringIO()
try:
# read the play context data via stdin, which means depickling it
vars_data = read_stream(stdin)
init_data = read_stream(stdin)
pc_data = pickle.loads(init_data, encoding='bytes')
variables = pickle.loads(vars_data, encoding='bytes')
play_context = PlayContext()
play_context.deserialize(pc_data)
except Exception as e:
rc = 1
result.update({
'error': to_text(e),
'exception': traceback.format_exc()
})
if rc == 0:
ssh = connection_loader.get('ssh', class_only=True)
ansible_playbook_pid = args.playbook_pid
task_uuid = args.task_uuid
cp = ssh._create_control_path(play_context.remote_addr, play_context.port, play_context.remote_user, play_context.connection, ansible_playbook_pid)
# create the persistent connection dir if need be and create the paths
# which we will be using later
tmp_path = unfrackpath(C.PERSISTENT_CONTROL_PATH_DIR)
makedirs_safe(tmp_path)
socket_path = unfrackpath(cp % dict(directory=tmp_path))
lock_path = unfrackpath("%s/.ansible_pc_lock_%s" % os.path.split(socket_path))
with file_lock(lock_path):
if not os.path.exists(socket_path):
messages.append(('vvvv', 'local domain socket does not exist, starting it'))
original_path = os.getcwd()
r, w = os.pipe()
pid = fork_process()
if pid == 0:
try:
os.close(r)
wfd = os.fdopen(w, 'w')
process = ConnectionProcess(wfd, play_context, socket_path, original_path, task_uuid, ansible_playbook_pid)
process.start(variables)
except Exception:
messages.append(('error', traceback.format_exc()))
rc = 1
if rc == 0:
process.run()
else:
process.shutdown()
sys.exit(rc)
else:
os.close(w)
rfd = os.fdopen(r, 'r')
data = json.loads(rfd.read(), cls=AnsibleJSONDecoder)
messages.extend(data.pop('messages'))
result.update(data)
else:
messages.append(('vvvv', 'found existing local domain socket, using it!'))
conn = Connection(socket_path)
try:
conn.set_options(var_options=variables)
except ConnectionError as exc:
messages.append(('debug', to_text(exc)))
raise ConnectionError('Unable to decode JSON from response set_options. See the debug log for more information.')
pc_data = to_text(init_data)
try:
conn.update_play_context(pc_data)
conn.set_check_prompt(task_uuid)
except Exception as exc:
# Only network_cli has update_play context and set_check_prompt, so missing this is
# not fatal e.g. netconf
if isinstance(exc, ConnectionError) and getattr(exc, 'code', None) == -32601:
pass
else:
result.update({
'error': to_text(exc),
'exception': traceback.format_exc()
})
if os.path.exists(socket_path):
messages.extend(Connection(socket_path).pop_messages())
messages.append(('vvvv', sys.stdout.getvalue()))
result.update({
'messages': messages,
'socket_path': socket_path
})
sys.stdout = saved_stdout
if 'exception' in result:
rc = 1
sys.stderr.write(json.dumps(result, cls=AnsibleJSONEncoder))
else:
rc = 0
sys.stdout.write(json.dumps(result, cls=AnsibleJSONEncoder))
sys.exit(rc)
|
42,942 |
def perpp(R: np.ndarray, sigma: float, n_mean: int, ns: int) -> np.ndarray:
"""This function generates permanental point process (PerPP) samples with thermal states.
**Example usage:**
>>> R = np.array([[0, 1], [1, 0], [0, 0],[1, 1]])
>>> perpp(R, 1.0, 1, 10)
array([[0, 0, 0, 2],
[0, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 1]])
Args:
R (array): Coordinate matrix. Rows of this array are the coordinates of the points.
sigma (float): kernel parameter
n_mean (int): average number of points in each sample
ns (int): number of samples to be generated
Returns:
samples (array): samples generated by PerPP
"""
K = kernel(R, sigma)
ls, O = rescale_adjacency_matrix_thermal(K, n_mean)
return np.array(generate_thermal_samples(ls, O, num_samples=ns))
|
def sample(R: np.ndarray, sigma: float, n_mean: int, n_samples: int) -> np.ndarray:
"""This function generates permanental point process (PerPP) samples with thermal states.
**Example usage:**
>>> R = np.array([[0, 1], [1, 0], [0, 0],[1, 1]])
    >>> sample(R, 1.0, 1, 10)
array([[0, 0, 0, 2],
[0, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[1, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, 1]])
Args:
R (array): Coordinate matrix. Rows of this array are the coordinates of the points.
sigma (float): kernel parameter
n_mean (int): average number of points in each sample
        n_samples (int): number of samples to be generated
Returns:
samples (array): samples generated by PerPP
"""
K = kernel(R, sigma)
ls, O = rescale_adjacency_matrix_thermal(K, n_mean)
    return np.array(generate_thermal_samples(ls, O, num_samples=n_samples))
|
57,886 |
def get_built_in_query_results_polling_command(client: Client, args: dict) -> Union[CommandResults, list]:
"""Retrieve results of a built in XQL query, execute as a scheduled command.
:type client: ``Client``
:param client: The XDR Client.
:type args: ``dict``
:param args: The arguments to pass to the API call.
:return: The command results.
:rtype: ``Union[CommandResults, dict]``
"""
# build query, if no endpoint_id was given, the query will search in every endpoint_id (*).
endpoint_id_list = format_arg(args.get('endpoint_id', '*'))
available_commands = init_built_in_commands()
query = available_commands.get(demisto.command(), {}).get('func')(endpoint_id_list, args)
# add extra fields to query
extra_fields_list = ", ".join(str(e) for e in argToList(args.get('extra_fields', [])))
extra_fields_list = f', {extra_fields_list}' if extra_fields_list else '' # add comma to the beginning of fields
query = f'{query}{extra_fields_list}'
# add limit to query
if 'limit' in args:
query = f"{query} | limit {args.get('limit')}"
query_args = {
'query': query,
'tenants': argToList(args.get('tenants', [])),
'time_frame': args.get('time_frame', '')
}
return start_xql_query_polling_command(client, query_args)
|
def get_built_in_query_results_polling_command(client: Client, args: dict) -> Union[CommandResults, list]:
"""Retrieve results of a built in XQL query, execute as a scheduled command.
:type client: ``Client``
:param client: The XDR Client.
:type args: ``dict``
:param args: The arguments to pass to the API call.
:return: The command results.
:rtype: ``Union[CommandResults, dict]``
"""
# build query, if no endpoint_id was given, the query will search in every endpoint_id (*).
endpoint_id_list = format_arg(args.get('endpoint_id', '*'))
available_commands = init_built_in_commands()
query = available_commands.get(demisto.command(), {}).get('func')(endpoint_id_list, args)
# add extra fields to query
extra_fields = argToList(args.get('extra_fields', []))
if extra_fields:
extra_fields_list = ", ".join(str(e) for e in extra_fields)
query = f'{query}, {extra_fields_list}'
# add limit to query
if 'limit' in args:
query = f"{query} | limit {args.get('limit')}"
query_args = {
'query': query,
'tenants': argToList(args.get('tenants', [])),
'time_frame': args.get('time_frame', '')
}
return start_xql_query_polling_command(client, query_args)
|
53,382 |
def print_good():
print("String {}, {} or {}".format(*PARAM_LIST))
print("String {}, {}, {} or {}".format(*PARAM_LIST_SINGLE, *PARAM_LIST))
print("String {Param}, {}, {} or {}".format(Param=PARAM_1, *PARAM_LIST))
print("String {Param} {Param}".format(Param=PARAM_1))
print("{Param_1} {Param_2}".format(**PARAM_DICT))
print("{Param_1} {Param_2} {Param_3}".format(**PARAM_DICT_SINGLE, **PARAM_DICT))
print("{Param_1} {Param_2} {Param_3}".format(Param_1=PARAM_1, **PARAM_DICT))
print("{Param_1} {Param_2}".format(**PARAM_DICT))
print("{Param_1} {Param_2}".format(**return_dict()))
print("%(Param_1)s %(Param_2)s" % PARAM_LIST)
print("%(Param_1)s %(Param_2)s" % PARAM_DICT)
print("%(Param_1)s %(Param_2)s" % return_dict())
print("{a[Param_1]}{a[Param_2]}".format(a=PARAM_DICT))
print("{}".format("\n"))
print("{}".format("\n".join(i for i in "string")))
print("%s" % "\n")
print("%s" % "\n".join(i for i in "string"))
|
def print_good():
print("String {}, {} or {}".format(*PARAM_LIST))
print("String {}, {}, {} or {}".format(*PARAM_LIST_SINGLE, *PARAM_LIST))
print("String {Param}, {}, {} or {}".format(Param=PARAM_1, *PARAM_LIST))
print("String {Param} {Param}".format(Param=PARAM_1))
print("{Param_1} {Param_2}".format(**PARAM_DICT))
print("{Param_1} {Param_2} {Param_3}".format(**PARAM_DICT_SINGLE, **PARAM_DICT))
print("{Param_1} {Param_2} {Param_3}".format(Param_1=PARAM_1, **PARAM_DICT))
print("{Param_1} {Param_2}".format(**PARAM_DICT))
print("{Param_1} {Param_2}".format(**return_dict()))
print("%(Param_1)s %(Param_2)s" % PARAM_LIST)
print("%(Param_1)s %(Param_2)s" % PARAM_DICT)
print("%(Param_1)s %(Param_2)s" % return_dict())
print("{a[Param_1]}{a[Param_2]}".format(a=PARAM_DICT))
print("{}".format("\n")) # [consider-using-f-string]
print("{}".format("\n".join(i for i in "string")))
print("%s" % "\n") # [consider-using-f-string]
print("%s" % "\n".join(i for i in "string"))
|
6,982 |
def get_labels(fields, doctype):
"""get column labels based on column names"""
labels = []
for key in fields:
aggregate_function = ""
for sep in (" as ", " AS "):
if sep in key:
key = key.split(sep)[0]
if key.startswith(("count(", "sum(", "avg(")):
if key.strip().endswith(")"):
aggregate_function = key.split("(")[0].lower()
key = key.split("(", 1)[1][:-1]
else:
continue
if "." in key:
parenttype, fieldname = key.split(".")[0][4:-1], key.split(".")[1].strip("`")
else:
parenttype = doctype
fieldname = fieldname.strip("`")
if parenttype == doctype and fieldname == "name":
label = _("ID", context="Label of name column in report")
else:
df = frappe.get_meta(parenttype).get_field(fieldname)
label = _(df.label if df else fieldname.title())
if parenttype != doctype:
# If the column is from a child table, append the child doctype.
# For example, "Item Code (Sales Invoice Item)".
label += f" ({ _(parenttype) })"
if aggregate_function:
label = _("{0} of {1}").format(aggregate_function.capitalize(), label)
labels.append(label)
return labels
|
def get_labels(fields, doctype):
"""get column labels based on column names"""
labels = []
for key in fields:
aggregate_function = ""
for sep in (" as ", " AS "):
if sep in key:
key = key.split(sep)[0]
if key.startswith(("count(", "sum(", "avg(")):
if key.strip().endswith(")"):
aggregate_function = key.split("(", 1)[0].lower()
key = key.split("(", 1)[1][:-1]
else:
continue
if "." in key:
parenttype, fieldname = key.split(".")[0][4:-1], key.split(".")[1].strip("`")
else:
parenttype = doctype
fieldname = fieldname.strip("`")
if parenttype == doctype and fieldname == "name":
label = _("ID", context="Label of name column in report")
else:
df = frappe.get_meta(parenttype).get_field(fieldname)
label = _(df.label if df else fieldname.title())
if parenttype != doctype:
# If the column is from a child table, append the child doctype.
# For example, "Item Code (Sales Invoice Item)".
label += f" ({ _(parenttype) })"
if aggregate_function:
label = _("{0} of {1}").format(aggregate_function.capitalize(), label)
labels.append(label)
return labels
|
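A small sketch of the aggregate-key parsing used above, on a made-up Frappe-style field; maxsplit=1 keeps any parentheses inside the column expression intact.
key = "sum(`tabSales Invoice Item`.qty)"
aggregate_function = key.split("(", 1)[0].lower()   # 'sum'
inner = key.split("(", 1)[1][:-1]                   # strip the trailing ')'
assert aggregate_function == "sum"
assert inner == "`tabSales Invoice Item`.qty"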
6,613 |
def execute():
reposts = frappe.get_all("Repost Item Valuation",
{"status": "Failed", "modified": [">", "2021-10-6"] },
["name", "modified", "error_log"])
for repost in reposts:
if "check_freezing_date" in cstr(repost.error_log):
frappe.db.set_value("Repost Item Valuation", repost.name, "status", "Queued")
frappe.db.commit()
|
def execute():
reposts = frappe.get_all("Repost Item Valuation",
{"status": "Failed", "modified": [">", "2021-10-05"] },
["name", "modified", "error_log"])
for repost in reposts:
if "check_freezing_date" in cstr(repost.error_log):
frappe.db.set_value("Repost Item Valuation", repost.name, "status", "Queued")
frappe.db.commit()
|
32,354 |
def malware_query_filter(needs_attention: str, malware_type: str, malware_status: str, time_stamp: str, limit_range: int) -> dict:
query = []
if bool(needs_attention) is True:
query.append({"fieldName": "needsAttention", "operator": "Is", "values": [bool(needs_attention)]})
if bool(malware_type) is True:
types = malware_type.split(",")
query.append({"fieldName": "type", "operator": "Equals", "values": types})
if bool(malware_status) is True:
is_status = malware_status.split(",")
query.append({"fieldName": "status", "operator": "Equals", "values": is_status})
if bool(time_stamp) is True:
query.append({"fieldName": "timestamp", "operator": "GreaterThan", "values": [int(time_stamp)]})
response = malware_query(query, limit_range)
return response
|
def malware_query_filter(needs_attention: str, malware_type: str, malware_status: str, time_stamp: str, limit_range: int) -> dict:
query = []
if bool(needs_attention) is True:
query.append({"fieldName": "needsAttention", "operator": "Is", "values": [bool(needs_attention)]})
if bool(malware_type) is True:
types = malware_type.split(",")
query.append({"fieldName": "type", "operator": "Equals", "values": types})
if malware_status:
is_status = malware_status.split(",")
query.append({"fieldName": "status", "operator": "Equals", "values": is_status})
if bool(time_stamp) is True:
query.append({"fieldName": "timestamp", "operator": "GreaterThan", "values": [int(time_stamp)]})
response = malware_query(query, limit_range)
return response
|
44,834 |
def test_cache_return_value_per_process():
path1 = _gen_random_str1(True)
path2 = _gen_random_str1(True)
assert path1 == path2
path3 = _gen_random_str1(False)
assert path3 != path2
no_arg_path1 = _gen_random_no_arg()
no_arg_path2 = _gen_random_no_arg()
assert no_arg_path1 == no_arg_path2
with pytest.raises(
ValueError,
match="The function decorated by `cache_return_value_per_process` is not allowed to be"
"called with key-word style arguments.",
):
_gen_random_str1(v=True)
f2_path1 = _gen_random_str2(True)
f2_path2 = _gen_random_str2(False)
assert len({path1, path3, f2_path1, f2_path2}) == 4
if os.name != "nt":
# Test child process invalidates the cache.
# We don't create child process by `multiprocessing.Process` because
# `multiprocessing.Process` creates child process by pickling the target function
# and start a new process to run the pickled function. But the global variable
# `_per_process_value_cache_map` dict content is not pickled, this make child process
# automatically clear the `_per_process_value_cache_map` dict content.
pid = os.fork()
if pid > 0:
# in parent process
child_pid = pid
# check child process exit with return value 0.
assert os.waitpid(child_pid, 0)[1] == 0
else:
# in forked out child process
child_path1 = _gen_random_str1(True)
child_path2 = _gen_random_str1(False)
test_pass = len({path1, path3, child_path1, child_path2}) == 4
# exit forked out child process with exit code representing testing pass or fail.
os._exit(0 if test_pass else 1)
|
def test_cache_return_value_per_process():
path1 = _gen_random_str1(True)
path2 = _gen_random_str1(True)
assert path1 == path2
path3 = _gen_random_str1(False)
assert path3 != path2
no_arg_path1 = _gen_random_no_arg()
no_arg_path2 = _gen_random_no_arg()
assert no_arg_path1 == no_arg_path2
with pytest.raises(
ValueError,
match="The function decorated by `cache_return_value_per_process` is not allowed to be"
"called with key-word style arguments.",
):
_gen_random_str1(v=True)
f2_path1 = _gen_random_str2(True)
f2_path2 = _gen_random_str2(False)
assert len({path1, path3, f2_path1, f2_path2}) == 4
if os.name != "nt":
# Test child process invalidates the cache.
# We don't create child process by `multiprocessing.Process` because
# `multiprocessing.Process` creates child process by pickling the target function
# and start a new process to run the pickled function. But the global variable
# `_per_process_value_cache_map` dict content is not pickled, this makes child process
# automatically clear the `_per_process_value_cache_map` dict content.
pid = os.fork()
if pid > 0:
# in parent process
child_pid = pid
# check child process exit with return value 0.
assert os.waitpid(child_pid, 0)[1] == 0
else:
# in forked out child process
child_path1 = _gen_random_str1(True)
child_path2 = _gen_random_str1(False)
test_pass = len({path1, path3, child_path1, child_path2}) == 4
# exit forked out child process with exit code representing testing pass or fail.
os._exit(0 if test_pass else 1)
|
5,715 |
def _krandinit(data, k, rng):
"""Returns k samples of a random variable whose parameters depend on data.
More precisely, it returns k observations sampled from a Gaussian random
variable whose mean and covariances are the ones estimated from the data.
Parameters
----------
data : ndarray
Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D
data, rank 2 multidimensional data, in which case one
row is one observation.
k : int
Number of samples to generate.
rng : `numpy.random.Generator` or `numpy.random.RandomState`
Random number generator
Returns
-------
x : ndarray
A 'k' by 'N' containing the initial centroids
"""
mu = data.mean(axis=0)
if data.ndim == 1:
cov = np.cov(data)
x = rng.standard_normal(size=k)
x *= np.sqrt(cov)
elif data.shape[1] > data.shape[0]:
# initialize when the covariance matrix is rank deficient
_, s, vh = np.linalg.svd(data - mu, full_matrices=False)
x = rng.standard_normal(size=(k, s.size))
sVh = s[:, None] * vh / np.sqrt(data.shape[0] - 1)
x = x.dot(sVh)
else:
cov = np.atleast_2d(np.cov(data, rowvar=False))
# k rows, d cols (one row = one obs)
# Generate k sample of a random variable ~ Gaussian(mu, cov)
x = rng.standard_normal(size=(k, mu.size))
x = x.dot(np.linalg.cholesky(cov).T)
x += mu
return x
|
def _krandinit(data, k, rng):
"""Returns k samples of a random variable whose parameters depend on data.
More precisely, it returns k observations sampled from a Gaussian random
variable whose mean and covariances are the ones estimated from the data.
Parameters
----------
data : ndarray
Expect a rank 1 or 2 array. Rank 1 is assumed to describe 1-D
data, rank 2 multidimensional data, in which case one
row is one observation.
k : int
Number of samples to generate.
rng : `numpy.random.Generator` or `numpy.random.RandomState`
Random number generator.
Returns
-------
x : ndarray
A 'k' by 'N' containing the initial centroids
"""
mu = data.mean(axis=0)
if data.ndim == 1:
cov = np.cov(data)
x = rng.standard_normal(size=k)
x *= np.sqrt(cov)
elif data.shape[1] > data.shape[0]:
# initialize when the covariance matrix is rank deficient
_, s, vh = np.linalg.svd(data - mu, full_matrices=False)
x = rng.standard_normal(size=(k, s.size))
sVh = s[:, None] * vh / np.sqrt(data.shape[0] - 1)
x = x.dot(sVh)
else:
cov = np.atleast_2d(np.cov(data, rowvar=False))
# k rows, d cols (one row = one obs)
# Generate k sample of a random variable ~ Gaussian(mu, cov)
x = rng.standard_normal(size=(k, mu.size))
x = x.dot(np.linalg.cholesky(cov).T)
x += mu
return x
|
22,105 |
def run_module():
'''Run this module'''
module_args = dict(
path=dict(aliases=['dest', 'name'], required=True, type='path'),
registry=dict(type='str', required=True),
username=dict(type='str', required=True),
password=dict(type='str', required=True, no_log=True),
test_login=dict(type='bool', required=False, default=True),
proxy_vars=dict(type='str', required=False, default=''),
test_timeout=dict(type='str', required=False, default='20'),
test_image=dict(type='str', required=True),
tls_verify=dict(type='bool', required=False, default=True)
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=False
)
# First, create our dest dir if necessary
dest = module.params['path']
registry = module.params['registry']
username = module.params['username']
password = module.params['password']
test_login = module.params['test_login']
proxy_vars = module.params['proxy_vars']
test_timeout = module.params['test_timeout']
test_image = module.params['test_image']
tls_verify = module.params['tls_verify']
if not check_dest_dir_exists(module, dest):
create_dest_dir(module, dest)
docker_config = {}
else:
# We want to scrape the contents of dest/config.json
# in case there are other registries/settings already present.
docker_config = load_config_file(module, dest)
# Test the credentials
if test_login:
skopeo_command = gen_skopeo_cmd(registry, username, password,
proxy_vars, test_timeout, test_image, tls_verify)
validate_registry_login(module, skopeo_command)
# base64 encode our username:password string
encoded_auth = base64.b64encode('{}:{}'.format(username, password).encode())
# Put the registry auth info into the config dict.
changed = update_config(docker_config, registry, encoded_auth)
if changed:
write_config(module, docker_config, dest)
result = {'changed': changed, 'rc': 0}
module.exit_json(**result)
|
def run_module():
'''Run this module'''
module_args = dict(
path=dict(aliases=['dest', 'name'], required=True, type='path'),
registry=dict(type='str', required=True),
username=dict(type='str', required=True),
password=dict(type='str', required=True, no_log=True),
test_login=dict(type='bool', required=False, default=True),
proxy_vars=dict(type='str', required=False, default=''),
test_timeout=dict(type='int', required=False, default=20),
test_image=dict(type='str', required=True),
tls_verify=dict(type='bool', required=False, default=True)
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=False
)
# First, create our dest dir if necessary
dest = module.params['path']
registry = module.params['registry']
username = module.params['username']
password = module.params['password']
test_login = module.params['test_login']
proxy_vars = module.params['proxy_vars']
test_timeout = module.params['test_timeout']
test_image = module.params['test_image']
tls_verify = module.params['tls_verify']
if not check_dest_dir_exists(module, dest):
create_dest_dir(module, dest)
docker_config = {}
else:
# We want to scrape the contents of dest/config.json
# in case there are other registries/settings already present.
docker_config = load_config_file(module, dest)
# Test the credentials
if test_login:
skopeo_command = gen_skopeo_cmd(registry, username, password,
proxy_vars, test_timeout, test_image, tls_verify)
validate_registry_login(module, skopeo_command)
# base64 encode our username:password string
encoded_auth = base64.b64encode('{}:{}'.format(username, password).encode())
# Put the registry auth info into the config dict.
changed = update_config(docker_config, registry, encoded_auth)
if changed:
write_config(module, docker_config, dest)
result = {'changed': changed, 'rc': 0}
module.exit_json(**result)
|
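For readers unfamiliar with the auth handling in this pair: the registry credentials end up as a base64-encoded "user:password" string inside a Docker-style config.json. A hedged sketch of that encoding, with made-up credentials and registry name rather than anything from the module above, follows.

import base64
import json

# Illustrative values only; not taken from the module above.
username, password, registry = "alice", "s3cret", "registry.example.com"
encoded_auth = base64.b64encode(f"{username}:{password}".encode()).decode()
docker_config = {"auths": {registry: {"auth": encoded_auth}}}
print(json.dumps(docker_config, indent=2))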
40,491 |
def normalize_state(body, state):
'''
Normalizes one or more states using a running mean and standard deviation
Details of the normalization from Deep RL Bootcamp, L6
https://www.youtube.com/watch?v=8EcdaCk9KaQ&feature=youtu.be
'''
same_shape = False if type(state) == list else state.shape == body.state_mean.shape
has_preprocess = getattr(body.memory, 'preprocess_state', False)
if ("Atari" in body.memory.__class__.__name__):
# never normalize atari, it has its own normalization step
logger.debug('skipping normalizing for Atari, already handled by preprocess')
return state
elif ("Replay" in body.memory.__class__.__name__) and has_preprocess:
# normalization handled by preprocess_state function in the memory
logger.debug('skipping normalizing, already handled by preprocess')
return state
elif same_shape:
# if not atari, always normalize the state the first time we see it during act
# if the shape is not transformed in some way
if np.sum(body.state_std_dev) == 0:
return np.clip(state - body.state_mean, -10, 10)
else:
return np.clip((state - body.state_mean) / body.state_std_dev, -10, 10)
else:
# broadcastable sample from an un-normalized memory so we should normalize
logger.debug('normalizing sample from memory')
if np.sum(body.state_std_dev) == 0:
return np.clip(state - body.state_mean, -10, 10)
else:
return np.clip((state - body.state_mean) / body.state_std_dev, -10, 10)
|
def normalize_state(body, state):
'''
Normalizes one or more states using a running mean and standard deviation
Details of the normalization from Deep RL Bootcamp, L6
https://www.youtube.com/watch?v=8EcdaCk9KaQ&feature=youtu.be
'''
same_shape = False if type(state) == list else state.shape == body.state_mean.shape
has_preprocess = getattr(body.memory, 'preprocess_state', False)
if ("Atari" in body.memory.__class__.__name__):
# never normalize atari, it has its own normalization step
logger.debug('skipping normalizing for Atari, already handled by preprocess')
return state
elif ('Replay' in body.memory.__class__.__name__) and has_preprocess:
# normalization handled by preprocess_state function in the memory
logger.debug('skipping normalizing, already handled by preprocess')
return state
elif same_shape:
# if not atari, always normalize the state the first time we see it during act
# if the shape is not transformed in some way
if np.sum(body.state_std_dev) == 0:
return np.clip(state - body.state_mean, -10, 10)
else:
return np.clip((state - body.state_mean) / body.state_std_dev, -10, 10)
else:
# broadcastable sample from an un-normalized memory so we should normalize
logger.debug('normalizing sample from memory')
if np.sum(body.state_std_dev) == 0:
return np.clip(state - body.state_mean, -10, 10)
else:
return np.clip((state - body.state_mean) / body.state_std_dev, -10, 10)
|
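The core operation in both versions of normalize_state is a clipped z-score against running statistics. A minimal numpy sketch of just that step, with hypothetical running stats standing in for the attributes on `body`, is shown below.

import numpy as np

# Hypothetical running statistics; in the code above they live on `body`.
state = np.array([1.0, 250.0, -3.0])
state_mean = np.array([0.5, 100.0, 0.0])
state_std_dev = np.array([0.5, 50.0, 1.0])

if np.sum(state_std_dev) == 0:
    normalized = np.clip(state - state_mean, -10, 10)
else:
    normalized = np.clip((state - state_mean) / state_std_dev, -10, 10)
print(normalized)   # [ 1.  3. -3.]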
46,351 |
def get_branch(s):
if len(s) == 0:
return 'NO INFO'
for i in s:
if 'uses original Scikit-learn solver,' in i:
return 'was in IDP, but go in Scikit'
for i in s:
if 'uses Intel(R) oneAPI Data Analytics Library solver' in i:
return 'IDP'
return 'Scikit'
|
def get_branch(s):
if len(s) == 0:
return 'NO INFO'
for i in s:
if 'uses original Scikit-learn solver,' in i:
return 'was in IDP, but go in Scikit'
for i in s:
if 'uses Intel(R) oneAPI Data Analytics Library solver' in i:
return 'OPT'
return 'Scikit'
|
43,087 |
def load(f, ir="blackbird"):
"""Load a quantum program from a Blackbird .xbb file.
**Example:**
The following Blackbird file, ``program1.xbb``,
.. code-block:: python3
name test_program
version 1.0
Sgate(0.543, 0.0) | 1
BSgate(0.6, 0.1) | [2, 0]
MeasureFock() | [0, 1, 2]
    can be imported into Strawberry Fields using the ``load``
    function:
    >>> prog = sf.load("program1.xbb")
>>> prog.name
'test_program'
>>> prog.num_subsystems
3
>>> prog.print()
Sgate(0.543, 0) | (q[1])
BSgate(0.6, 0.1) | (q[2], q[0])
MeasureFock | (q[0], q[1], q[2])
Args:
f (Union[file, str, pathlib.Path]): File or filename from which
the data is loaded. If file is a string or Path, a value with the
.xbb extension is expected.
ir (str): Intermediate representation language to use. Can be either "blackbird" or "xir".
Returns:
prog (Program): Strawberry Fields program
"""
own_file = False
try:
if hasattr(f, "read"):
# argument file is a file-object
fid = f
else:
# argument file is a Path or string
filename = os.fspath(f)
fid = open(filename, "r")
own_file = True
except TypeError as e:
raise ValueError("file must be a string, pathlib.Path, or file-like object") from e
try:
prog_str = fid.read()
finally:
if own_file:
# safely close the file
fid.close()
# load blackbird program
return loads(prog_str, ir=ir)
|
def load(f, ir="blackbird"):
"""Load a quantum program from a Blackbird .xbb file.
**Example:**
The following Blackbird file, ``program1.xbb``,
.. code-block:: python3
name test_program
version 1.0
Sgate(0.543, 0.0) | 1
BSgate(0.6, 0.1) | [2, 0]
MeasureFock() | [0, 1, 2]
    can be imported into Strawberry Fields using the ``load``
    function:
    >>> prog = sf.load("program1.xbb")
>>> prog.name
'test_program'
>>> prog.num_subsystems
3
>>> prog.print()
Sgate(0.543, 0) | (q[1])
BSgate(0.6, 0.1) | (q[2], q[0])
MeasureFock | (q[0], q[1], q[2])
Args:
f (Union[file, str, pathlib.Path]): File or filename from which
the data is loaded. If file is a string or Path, a value with the
.xbb extension is expected.
ir (str): Intermediate representation language to use. Can be either "blackbird" or "xir".
Returns:
prog (Program): Strawberry Fields program
"""
own_file = False
try:
if hasattr(f, "read"):
# argument file is a file-object
fid = f
else:
# argument file is a Path or string
filename = os.fspath(f)
fid = open(filename, "r")
own_file = True
except TypeError as e:
raise ValueError("file must be a string, pathlib.Path, or file-like object") from e
try:
prog_str = fid.read()
finally:
if own_file:
# safely close the file
fid.close()
# load Blackbird or XIR program
return loads(prog_str, ir=ir)
|
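The try/finally handling in load() implements a common "accept a path or an open file" pattern. A condensed sketch of the same pattern, independent of Strawberry Fields, is given below.

import os

def read_text(f):
    # Accept either a file-like object or a str/pathlib.Path.
    if hasattr(f, "read"):
        return f.read()                      # caller keeps ownership of the file
    with open(os.fspath(f), "r") as fid:     # we opened it, so we close it
        return fid.read()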
22,021 |
def example(download=True):
"""Returns an example DataFrame which comes with vaex for testing/learning purposes.
:rtype: DataFrame
"""
return vaex.datasets.helmi_de_zeeuw_10percent.fetch()
|
def example():
"""Returns an example DataFrame which comes with vaex for testing/learning purposes.
:rtype: DataFrame
"""
return vaex.datasets.helmi_de_zeeuw_10percent.fetch()
|
13,493 |
def pid_by_name(name):
"""pid_by_name(name) -> int list
Arguments:
name (str): Name of program.
Returns:
List of PIDs matching `name` sorted by lifetime, youngest to oldest.
Example:
>>> os.getpid() in pid_by_name(name(os.getpid()))
True
"""
def match(p):
if p.status() == 'zombie':
return False
if p.name() == name:
return True
try:
if p.exe() == name:
return True
except Exception:
pass
return False
processes = (p for p in psutil.process_iter() if match(p))
processes = sorted(processes, key=lambda p: p.create_time())
return list(reversed([p.pid for p in processes]))
|
def pid_by_name(name):
"""pid_by_name(name) -> int list
Arguments:
name (str): Name of program.
Returns:
List of PIDs matching `name` sorted by lifetime, youngest to oldest.
Example:
>>> os.getpid() in pid_by_name(name(os.getpid()))
True
"""
def match(p):
if p.status() == 'zombie':
return False
if p.name() == name:
return True
try:
if p.exe() == name:
return True
except Exception:
pass
return False
processes = (p for p in psutil.process_iter() if match(p))
processes = sorted(processes, key=lambda p: p.create_time(), reverse=True)
return list(reversed([p.pid for p in processes]))
|
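The behavioural question in this pair is how the PID list ends up ordered. Assuming psutil is available, the sketch below shows one unambiguous way to get youngest-first ordering directly, without the double reversal.

import psutil

def pids_newest_first(name):
    # Youngest (most recently created) processes first.
    procs = [p for p in psutil.process_iter(['name', 'create_time'])
             if p.info['name'] == name]
    procs.sort(key=lambda p: p.info['create_time'], reverse=True)
    return [p.pid for p in procs]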
57,298 |
def has_cuda_context():
"""Check whether the current process already has a CUDA context created.
Returns
-------
``False`` if current process has no CUDA context created, otherwise returns the
index of the device for which there's a CUDA context.
"""
init_once()
for index in range(device_get_count()):
handle = pynvml.nvmlDeviceGetHandleByIndex(index)
running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
for proc in running_processes:
if os.getpid() == proc.pid:
return index
return False
|
def has_cuda_context():
"""Check whether the current process already has a CUDA context created.
Returns
-------
``False`` if current process has no CUDA context created, otherwise returns the
index of the device for which there's a CUDA context.
"""
init_once()
for index in range(device_get_count()):
handle = pynvml.nvmlDeviceGetHandleByIndex(index)
if hasattr(pynvml, "nvmlDeviceGetComputeRunningProcesses_v2"):
running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses_v2(handle)
else:
running_processes = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
for proc in running_processes:
if os.getpid() == proc.pid:
return index
return False
|
2,877 |
def test_assert_ovr_roc_auc_chance_level():
    # Build equal probability predictions for a multiclass problem
y_true = np.array([3, 1, 2, 0])
y_pred = 0.25 * np.ones((4, 4))
macro_roc_auc = roc_auc_score(y_true, y_pred, multi_class="ovr", average="macro")
assert_allclose(macro_roc_auc, 0.5)
micro_roc_auc = roc_auc_score(y_true, y_pred, multi_class="ovr", average="micro")
assert_allclose(micro_roc_auc, 0.5)
|
def test_assert_ovr_roc_auc_chance_level():
    # Build equal probability predictions for a multiclass problem
y_true = np.array([3, 1, 2, 0])
y_pred = 0.25 * np.ones((4, 4))
macro_roc_auc = roc_auc_score(y_true, y_pred, multi_class="ovr", average="macro")
assert macro_roc_auc == pytest.approx(0.5)
micro_roc_auc = roc_auc_score(y_true, y_pred, multi_class="ovr", average="micro")
assert_allclose(micro_roc_auc, 0.5)
|
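To see why the chance-level assertion holds, note that identical scores for every class give an AUC of exactly 0.5 under either averaging scheme. A standalone check along the same lines:

import numpy as np
from sklearn.metrics import roc_auc_score

y_true = np.array([3, 1, 2, 0])
y_pred = np.full((4, 4), 0.25)   # every class equally likely for every sample
print(roc_auc_score(y_true, y_pred, multi_class="ovr", average="macro"))  # 0.5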
49,013 |
def createResolver(servers=None, resolvconf=None, hosts=None):
if hosts is None:
hosts = (b'/etc/hosts' if platform.getType() == 'posix'
else r'c:\windows\hosts')
theResolver = Resolver(resolvconf, servers)
hostResolver = hostsModule.Resolver(hosts)
L = [hostResolver, cache.CacheResolver(), theResolver]
return resolve.ResolverChain(L)
|
def createResolver(servers=None, resolvconf=None, hosts=None):
if hosts is None:
hosts = (b'/etc/hosts' if platform.getType() == 'posix'
else r'c:\windows\hosts')
theResolver = Resolver(resolvconf, servers)
hostResolver = hostsModule.Resolver(hosts)
chain = [hostResolver, cache.CacheResolver(), theResolver]
return resolve.ResolverChain(chain)
|
42,093 |
def _get_pareto_front_3d(info: _ParetoFrontInfo) -> "Axes":
# Set up the graph style.
plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly.
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
ax.set_title("Pareto-front Plot")
cmap = plt.get_cmap("tab10") # Use tab10 colormap for similar outputs to plotly.
ax.set_xlabel(info.target_names[info.axis_order[0]])
ax.set_ylabel(info.target_names[info.axis_order[1]])
ax.set_zlabel(info.target_names[info.axis_order[2]])
if info.infeasible_trials_with_values is not None and len(info.infeasible_trials_with_values) > 0:
ax.scatter(
xs=[values[info.axis_order[0]] for _, values in info.infeasible_trials_with_values],
ys=[values[info.axis_order[1]] for _, values in info.infeasible_trials_with_values],
zs=[values[info.axis_order[2]] for _, values in info.infeasible_trials_with_values],
color=cmap(1),
label="Infeasible",
)
if info.non_best_trials_with_values is not None and len(info.non_best_trials_with_values) > 0:
ax.scatter(
xs=[values[info.axis_order[0]] for _, values in info.non_best_trials_with_values],
ys=[values[info.axis_order[1]] for _, values in info.non_best_trials_with_values],
zs=[values[info.axis_order[2]] for _, values in info.non_best_trials_with_values],
color=cmap(0),
label="Trial",
)
if info.best_trials_with_values is not None and len(info.best_trials_with_values):
ax.scatter(
xs=[values[info.axis_order[0]] for _, values in info.best_trials_with_values],
ys=[values[info.axis_order[1]] for _, values in info.best_trials_with_values],
zs=[values[info.axis_order[2]] for _, values in info.best_trials_with_values],
color=cmap(3),
label="Best Trial",
)
if info.non_best_trials_with_values is not None and ax.has_data():
ax.legend()
return ax
|
def _get_pareto_front_3d(info: _ParetoFrontInfo) -> "Axes":
# Set up the graph style.
plt.style.use("ggplot") # Use ggplot style sheet for similar outputs to plotly.
fig = plt.figure()
ax = fig.add_subplot(projection="3d")
ax.set_title("Pareto-front Plot")
cmap = plt.get_cmap("tab10") # Use tab10 colormap for similar outputs to plotly.
ax.set_xlabel(info.target_names[info.axis_order[0]])
ax.set_ylabel(info.target_names[info.axis_order[1]])
ax.set_zlabel(info.target_names[info.axis_order[2]])
if info.infeasible_trials_with_values is not None and len(info.infeasible_trials_with_values) > 0:
ax.scatter(
xs=[values[info.axis_order[0]] for _, values in info.infeasible_trials_with_values],
ys=[values[info.axis_order[1]] for _, values in info.infeasible_trials_with_values],
zs=[values[info.axis_order[2]] for _, values in info.infeasible_trials_with_values],
color=cmap(1),
label="Infeasible Trial",
)
if info.non_best_trials_with_values is not None and len(info.non_best_trials_with_values) > 0:
ax.scatter(
xs=[values[info.axis_order[0]] for _, values in info.non_best_trials_with_values],
ys=[values[info.axis_order[1]] for _, values in info.non_best_trials_with_values],
zs=[values[info.axis_order[2]] for _, values in info.non_best_trials_with_values],
color=cmap(0),
label="Trial",
)
if info.best_trials_with_values is not None and len(info.best_trials_with_values):
ax.scatter(
xs=[values[info.axis_order[0]] for _, values in info.best_trials_with_values],
ys=[values[info.axis_order[1]] for _, values in info.best_trials_with_values],
zs=[values[info.axis_order[2]] for _, values in info.best_trials_with_values],
color=cmap(3),
label="Best Trial",
)
if info.non_best_trials_with_values is not None and ax.has_data():
ax.legend()
return ax
|
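For context on the matplotlib calls used in both versions, the fragment below is a minimal 3-D scatter with a legend, using throwaway data rather than Optuna trial values (it assumes a matplotlib version that accepts the projection keyword on add_subplot, as the row above does).

import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.add_subplot(projection="3d")
ax.scatter(xs=[1, 2, 3], ys=[4, 5, 6], zs=[7, 8, 9], label="Trial")
ax.set_xlabel("objective 0")
ax.set_ylabel("objective 1")
ax.set_zlabel("objective 2")
ax.legend()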
30,915 |
def build_human_readable(entry_context: dict) -> str:
human_readable = ""
entry_context = entry_context.get("TroubleShout", {})
# Engine docker container
engine: dict = dict_safe_get(entry_context, ['Engine', 'SSL/TLS'], {}, dict)
human_readable += "## Docker container engine - custom certificate\n"
readable_engine_issuer = [item.get('Decode').get('Issuer') for item in engine.get('CustomCertificateAuthorities', {})]
readable_engine_subject = [item.get('Decode').get('Subject') for item in engine.get('CustomCertificateAuthorities', {})]
readable_engine_vars = engine.get('ShellVariables')
human_readable += tableToMarkdown(name="Enviorment variables", t=readable_engine_vars)
human_readable += tableToMarkdown(name="Issuer", t=readable_engine_issuer, removeNull=True)
human_readable += tableToMarkdown(name="Subject", t=readable_engine_subject, removeNull=True)
# Endpoint
endpoint: dict = entry_context.get('Endpoint', {}).get('SSL/TLS', {})
readable_endpoint_issuer = [item.get('Decode').get('Issuer') for item in endpoint.get('Certificates', {})]
readable_endpoint_subject = [item.get('Decode').get('Subject') for item in endpoint.get('Certificates', {})]
human_readable += f"\n\n## Endpoint certificate - {endpoint.get('Identifier')}\n"
human_readable += tableToMarkdown(name="Issuer", t=readable_endpoint_issuer, removeNull=True)
human_readable += tableToMarkdown(name="Subject", t=readable_endpoint_subject, removeNull=True)
human_readable += "\n"
return human_readable
|
def build_human_readable(entry_context: dict) -> str:
human_readable = ""
entry_context = entry_context.get("TroubleShout", {})
# Engine docker container
engine: dict = dict_safe_get(entry_context, ['Engine', 'SSL/TLS'], {}, dict)
human_readable += "## Docker container engine - custom certificate\n"
readable_engine_issuer = [item.get('Decode').get('Issuer') for item in engine.get('CustomCertificateAuthorities', {})]
readable_engine_subject = [dict_safe_get(item, ('Decode', 'Subject')) for item in engine.get('CustomCertificateAuthorities', {})]
readable_engine_vars = engine.get('ShellVariables')
human_readable += tableToMarkdown(name="Enviorment variables", t=readable_engine_vars)
human_readable += tableToMarkdown(name="Issuer", t=readable_engine_issuer, removeNull=True)
human_readable += tableToMarkdown(name="Subject", t=readable_engine_subject, removeNull=True)
# Endpoint
endpoint: dict = entry_context.get('Endpoint', {}).get('SSL/TLS', {})
readable_endpoint_issuer = [item.get('Decode').get('Issuer') for item in endpoint.get('Certificates', {})]
readable_endpoint_subject = [item.get('Decode').get('Subject') for item in endpoint.get('Certificates', {})]
human_readable += f"\n\n## Endpoint certificate - {endpoint.get('Identifier')}\n"
human_readable += tableToMarkdown(name="Issuer", t=readable_endpoint_issuer, removeNull=True)
human_readable += tableToMarkdown(name="Subject", t=readable_endpoint_subject, removeNull=True)
human_readable += "\n"
return human_readable
|
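The modified line relies on dict_safe_get, an XSOAR helper for nested lookups. As a rough illustration only (this local helper is not the actual XSOAR implementation), an equivalent could look like:

def safe_get(container, keys, default=None):
    # Walk nested dicts, returning `default` on the first missing key.
    current = container
    for key in keys:
        if not isinstance(current, dict) or key not in current:
            return default
        current = current[key]
    return current

cert = {"Decode": {"Subject": "CN=example"}}
print(safe_get(cert, ("Decode", "Subject")))   # CN=example
print(safe_get(cert, ("Decode", "Missing")))   # None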
45,353 |
def get_benchmarks_shapes(bench_id: str, default: list):
"""
Get custom benchmarks shapes from a json file stored in MODIN_ASV_DATASIZE_CONFIG.
If `bench_id` benchmark is not found in the file, then the default value will
be used.
Parameters
----------
bench_id : str
Unique benchmark identifier that is used to get shapes.
default : list
Default shapes.
Returns
-------
list
Benchmark shapes.
"""
try:
from modin.config import AsvDatasizeConfig
filename = AsvDatasizeConfig.get()
except ImportError:
filename = os.environ.get("MODIN_ASV_DATASIZE_CONFIG", None)
if filename:
global CONFIG_FROM_FILE
if not CONFIG_FROM_FILE:
# should be json
with open(filename) as _f:
CONFIG_FROM_FILE = json.load(_f)
if bench_id in CONFIG_FROM_FILE:
# convert strings to tuples;
# example: "omnisci.TimeReadCsvNames": ["(5555, 55)", "(3333, 33)"]
shapes = [eval(shape) for shape in CONFIG_FROM_FILE[bench_id]]
return shapes
return default
|
def get_benchmark_shapes(bench_id: str, default: list):
"""
Get custom benchmarks shapes from a json file stored in MODIN_ASV_DATASIZE_CONFIG.
If `bench_id` benchmark is not found in the file, then the default value will
be used.
Parameters
----------
bench_id : str
Unique benchmark identifier that is used to get shapes.
default : list
Default shapes.
Returns
-------
list
Benchmark shapes.
"""
try:
from modin.config import AsvDatasizeConfig
filename = AsvDatasizeConfig.get()
except ImportError:
filename = os.environ.get("MODIN_ASV_DATASIZE_CONFIG", None)
if filename:
global CONFIG_FROM_FILE
if not CONFIG_FROM_FILE:
# should be json
with open(filename) as _f:
CONFIG_FROM_FILE = json.load(_f)
if bench_id in CONFIG_FROM_FILE:
# convert strings to tuples;
# example: "omnisci.TimeReadCsvNames": ["(5555, 55)", "(3333, 33)"]
shapes = [eval(shape) for shape in CONFIG_FROM_FILE[bench_id]]
return shapes
return default
|
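Both versions turn strings like "(5555, 55)" into tuples with eval(). As a sketch under the assumption that the config only ever contains literal tuples, ast.literal_eval performs the same conversion without executing arbitrary code:

import json
from ast import literal_eval

config_text = '{"omnisci.TimeReadCsvNames": ["(5555, 55)", "(3333, 33)"]}'
config = json.loads(config_text)
shapes = [literal_eval(s) for s in config["omnisci.TimeReadCsvNames"]]
print(shapes)   # [(5555, 55), (3333, 33)]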
29,800 |
def setup_kube_deployments(
kube_client: KubeClient,
service_instances: Sequence[str],
cluster: str,
rate_limit: int = 0,
soa_dir: str = DEFAULT_SOA_DIR,
) -> bool:
if service_instances:
existing_kube_deployments = set(list_all_deployments(kube_client))
existing_apps = {
(deployment.service, deployment.instance)
for deployment in existing_kube_deployments
}
service_instances_with_valid_names = [
decompose_job_id(service_instance)
for service_instance in service_instances
if validate_job_name(service_instance)
]
applications = [
create_application_object(
kube_client=kube_client,
service=service_instance[0],
instance=service_instance[1],
cluster=cluster,
soa_dir=soa_dir,
)
for service_instance in service_instances_with_valid_names
]
api_updates = 0
for _, app in applications:
if app:
try:
if (
app.kube_deployment.service,
app.kube_deployment.instance,
) not in existing_apps:
log.info(f"Creating {app} because it does not exist yet.")
app.create(kube_client)
api_updates += 1
elif app.kube_deployment not in existing_kube_deployments:
log.info(f"Updating {app} because configs have changed.")
app.update(kube_client)
api_updates += 1
else:
log.info(f"{app} is up-to-date!")
log.info(f"Ensuring related API objects for {app} are in sync")
app.update_related_api_objects(kube_client)
except Exception:
log.error(f"Error while processing: {app}")
log.error(traceback.format_exc())
if rate_limit > 0 and api_updates >= rate_limit:
log.info(
f"Not doing any further updates as we reached the limit ({api_updates})"
)
break
return (False, None) not in applications and len(
service_instances_with_valid_names
) == len(service_instances)
|
def setup_kube_deployments(
kube_client: KubeClient,
service_instances: Sequence[str],
cluster: str,
rate_limit: int = 0,
soa_dir: str = DEFAULT_SOA_DIR,
) -> bool:
if service_instances:
existing_kube_deployments = set(list_all_deployments(kube_client))
existing_apps = {
(deployment.service, deployment.instance)
for deployment in existing_kube_deployments
}
service_instances_with_valid_names = [
decompose_job_id(service_instance)
for service_instance in service_instances
if validate_job_name(service_instance)
]
applications = [
create_application_object(
kube_client=kube_client,
service=service_instance[0],
instance=service_instance[1],
cluster=cluster,
soa_dir=soa_dir,
)
for service_instance in service_instances_with_valid_names
]
api_updates = 0
for _, app in applications:
if app:
try:
if (
app.kube_deployment.service,
app.kube_deployment.instance,
) not in existing_apps:
log.info(f"Creating {app} because it does not exist yet.")
app.create(kube_client)
api_updates += 1
elif app.kube_deployment not in existing_kube_deployments:
log.info(f"Updating {app} because configs have changed.")
app.update(kube_client)
api_updates += 1
else:
log.info(f"{app} is up-to-date!")
log.info(f"Ensuring related API objects for {app} are in sync")
app.update_related_api_objects(kube_client)
except Exception:
log.exception(f"Error while processing: {app}")
if rate_limit > 0 and api_updates >= rate_limit:
log.info(
f"Not doing any further updates as we reached the limit ({api_updates})"
)
break
return (False, None) not in applications and len(
service_instances_with_valid_names
) == len(service_instances)
|
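The only functional change in this pair is swapping log.error plus traceback.format_exc() for log.exception, which records the active traceback on its own. A tiny demonstration:

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("deploy")

try:
    1 / 0
except Exception:
    # Equivalent to log.error(...) followed by logging the formatted traceback.
    log.exception("Error while processing: example-app")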
45,998 |
def bihome_loss(
patch_1: torch.Tensor,
patch_2: torch.Tensor,
delta_hat_12: torch.Tensor,
delta_hat_21: torch.Tensor,
triplet_mu: float,
loss_network: nn.Module,
) -> torch.Tensor:
r"""biHomE loss implementation.
Based on: :cite:`koguciuk2021perceptual` and https://github.com/NeurAI-Lab/biHomE.
Args:
patch_1: image tensor with shape :math:`(B, C, H, W)` where B = batch size,
C = number of classes
patch_2: image tensor with shape :math:`(B, C, H, W)` where B = batch size,
C = number of classes
delta_hat_12: predicted corner differences from image 1 to image 2 with shape
:math:`(B, 4, 2)`, where B = batch size.
delta_hat_21: predicted corner differences from image 2 to image 1 with shape
:math:`(B, 4, 2)`, where B = batch size.
triplet_mu: Homography matrix regularization weight.
loss_network: loss network used.
Return:
the computed loss.
"""
if not isinstance(patch_1, torch.Tensor):
raise TypeError(f"patch_1 type is not a torch.Tensor. Got {type(patch_1)}")
if not len(patch_1.shape) == 4:
raise ValueError(f"Invalid input shape of patch_1, we expect BxCxHxW. Got: {patch_1.shape}")
if not isinstance(patch_2, torch.Tensor):
raise TypeError(f"patch_2 type is not a torch.Tensor. Got {type(patch_2)}")
if not len(patch_2.shape) == 4:
raise ValueError(f"Invalid input shape of patch_2, we expect BxCxHxW. Got: {patch_2.shape}")
if patch_1.shape != patch_2.shape:
raise ValueError(f'Expected patch_1 shape ({patch_1.shape}) to match patch_2 shape ({patch_2.shape}).')
if not isinstance(delta_hat_12, torch.Tensor):
raise TypeError(f"delta_hat_12 type is not a torch.Tensor. Got {type(delta_hat_12)}")
if not len(delta_hat_12.shape) == 3 or not delta_hat_12.shape[1] == 4 or not delta_hat_12.shape[2] == 2:
raise ValueError(f"Invalid input shape of delta_hat_12, we expect Bx4x2. Got: {delta_hat_12.shape}")
if not delta_hat_12.size(0) == patch_1.size(0):
raise ValueError(f'Expected delta_hat_12 batch_size ({delta_hat_12.size(0)}) to match patch_1 batch size '
f'({patch_1.size(0)}).')
if not isinstance(delta_hat_21, torch.Tensor):
raise TypeError(f"delta_hat_21 type is not a torch.Tensor. Got {type(delta_hat_21)}")
if not len(delta_hat_21.shape) == 3 or not delta_hat_21.shape[1] == 4 or not delta_hat_21.shape[2] == 2:
raise ValueError(f"Invalid input shape of delta_hat_21, we expect Bx4x2. Got: {delta_hat_21.shape}")
if not delta_hat_21.size(0) == patch_1.size(0):
raise ValueError(f'Expected delta_hat_21 batch_size ({delta_hat_21.size(0)}) to match patch_1 batch size '
f'({patch_1.size(0)}).')
if not isinstance(loss_network, nn.Module):
raise TypeError(f"loss_network type is not a str. Got {type(loss_network)}")
# Compute features of both patches
patch_1_f = loss_network(patch_1)
patch_2_f = loss_network(patch_2)
# Warp patch 1 with delta hat_12
patch_1_prime, h1 = _warp(patch_1, delta_hat=delta_hat_12)
patch_1_prime_f = loss_network(patch_1_prime)
# Warp patch 2 with delta hat_21
patch_2_prime, h2 = _warp(patch_2, delta_hat=delta_hat_21)
patch_2_prime_f = loss_network(patch_2_prime)
# Create and warp masks
patch_1_m = torch.ones_like(patch_1)
patch_2_m = torch.ones_like(patch_2)
patch_1_m_prime, _ = _warp(patch_1_m, delta_hat=delta_hat_12)
patch_2_m_prime, _ = _warp(patch_2_m, delta_hat=delta_hat_21)
# Mask size mismatch downsampling
_, _, f_h, _ = patch_1_prime_f.shape
downsample_factor = patch_1_m.shape[-1] // f_h
downsample_layer = torch.nn.AvgPool2d(kernel_size=downsample_factor, stride=downsample_factor, padding=0)
patch_1_m = torch.squeeze(downsample_layer(patch_1_m), dim=1)
patch_2_m = torch.squeeze(downsample_layer(patch_2_m), dim=1)
patch_1_m_prime = torch.squeeze(downsample_layer(patch_1_m_prime), dim=1)
patch_2_m_prime = torch.squeeze(downsample_layer(patch_2_m_prime), dim=1)
# Triplet Margin Loss
l1 = torch.sum(torch.abs(patch_1_prime_f - patch_2_f), dim=1)
l2 = torch.sum(torch.abs(patch_1_f - patch_2_prime_f), dim=1)
l3 = torch.sum(torch.abs(patch_1_f - patch_2_f), dim=1)
ln1_nom = torch.sum(torch.sum(patch_1_m_prime * patch_2_m * (l1 - l3), dim=-1), dim=-1)
ln1_den = torch.sum(torch.sum(patch_1_m_prime * patch_2_m, dim=-1), dim=-1)
ln1_den = torch.max(ln1_den, torch.ones_like(ln1_den))
ln2_nom = torch.sum(torch.sum(patch_1_m * patch_2_m_prime * (l2 - l3), dim=-1), dim=-1)
ln2_den = torch.sum(torch.sum(patch_1_m * patch_2_m_prime, dim=-1), dim=-1)
ln2_den = torch.max(ln2_den, torch.ones_like(ln2_den))
ln1 = torch.sum(ln1_nom / ln1_den)
ln2 = torch.sum(ln2_nom / ln2_den)
# Regularization
batch_size = patch_1.size(0)
eye = torch.eye(3, dtype=h1.dtype, device=h1.device).unsqueeze(dim=0).repeat(batch_size, 1, 1)
ln3 = torch.sum((torch.matmul(h1, h2) - eye) ** 2) * triplet_mu
loss = ln1 + ln2 + ln3
return loss
|
def bihome_loss(
patch_1: Tensor,
patch_2: torch.Tensor,
delta_hat_12: torch.Tensor,
delta_hat_21: torch.Tensor,
triplet_mu: float,
loss_network: nn.Module,
) -> torch.Tensor:
r"""biHomE loss implementation.
Based on: :cite:`koguciuk2021perceptual` and https://github.com/NeurAI-Lab/biHomE.
Args:
patch_1: image tensor with shape :math:`(B, C, H, W)` where B = batch size,
C = number of classes
patch_2: image tensor with shape :math:`(B, C, H, W)` where B = batch size,
C = number of classes
delta_hat_12: predicted corner differences from image 1 to image 2 with shape
:math:`(B, 4, 2)`, where B = batch size.
delta_hat_21: predicted corner differences from image 2 to image 1 with shape
:math:`(B, 4, 2)`, where B = batch size.
triplet_mu: Homography matrix regularization weight.
loss_network: loss network used.
Return:
the computed loss.
"""
if not isinstance(patch_1, torch.Tensor):
raise TypeError(f"patch_1 type is not a torch.Tensor. Got {type(patch_1)}")
if not len(patch_1.shape) == 4:
raise ValueError(f"Invalid input shape of patch_1, we expect BxCxHxW. Got: {patch_1.shape}")
if not isinstance(patch_2, torch.Tensor):
raise TypeError(f"patch_2 type is not a torch.Tensor. Got {type(patch_2)}")
if not len(patch_2.shape) == 4:
raise ValueError(f"Invalid input shape of patch_2, we expect BxCxHxW. Got: {patch_2.shape}")
if patch_1.shape != patch_2.shape:
raise ValueError(f'Expected patch_1 shape ({patch_1.shape}) to match patch_2 shape ({patch_2.shape}).')
if not isinstance(delta_hat_12, torch.Tensor):
raise TypeError(f"delta_hat_12 type is not a torch.Tensor. Got {type(delta_hat_12)}")
if not len(delta_hat_12.shape) == 3 or not delta_hat_12.shape[1] == 4 or not delta_hat_12.shape[2] == 2:
raise ValueError(f"Invalid input shape of delta_hat_12, we expect Bx4x2. Got: {delta_hat_12.shape}")
if not delta_hat_12.size(0) == patch_1.size(0):
raise ValueError(f'Expected delta_hat_12 batch_size ({delta_hat_12.size(0)}) to match patch_1 batch size '
f'({patch_1.size(0)}).')
if not isinstance(delta_hat_21, torch.Tensor):
raise TypeError(f"delta_hat_21 type is not a torch.Tensor. Got {type(delta_hat_21)}")
if not len(delta_hat_21.shape) == 3 or not delta_hat_21.shape[1] == 4 or not delta_hat_21.shape[2] == 2:
raise ValueError(f"Invalid input shape of delta_hat_21, we expect Bx4x2. Got: {delta_hat_21.shape}")
if not delta_hat_21.size(0) == patch_1.size(0):
raise ValueError(f'Expected delta_hat_21 batch_size ({delta_hat_21.size(0)}) to match patch_1 batch size '
f'({patch_1.size(0)}).')
if not isinstance(loss_network, nn.Module):
raise TypeError(f"loss_network type is not a str. Got {type(loss_network)}")
# Compute features of both patches
patch_1_f = loss_network(patch_1)
patch_2_f = loss_network(patch_2)
# Warp patch 1 with delta hat_12
patch_1_prime, h1 = _warp(patch_1, delta_hat=delta_hat_12)
patch_1_prime_f = loss_network(patch_1_prime)
# Warp patch 2 with delta hat_21
patch_2_prime, h2 = _warp(patch_2, delta_hat=delta_hat_21)
patch_2_prime_f = loss_network(patch_2_prime)
# Create and warp masks
patch_1_m = torch.ones_like(patch_1)
patch_2_m = torch.ones_like(patch_2)
patch_1_m_prime, _ = _warp(patch_1_m, delta_hat=delta_hat_12)
patch_2_m_prime, _ = _warp(patch_2_m, delta_hat=delta_hat_21)
# Mask size mismatch downsampling
_, _, f_h, _ = patch_1_prime_f.shape
downsample_factor = patch_1_m.shape[-1] // f_h
downsample_layer = torch.nn.AvgPool2d(kernel_size=downsample_factor, stride=downsample_factor, padding=0)
patch_1_m = torch.squeeze(downsample_layer(patch_1_m), dim=1)
patch_2_m = torch.squeeze(downsample_layer(patch_2_m), dim=1)
patch_1_m_prime = torch.squeeze(downsample_layer(patch_1_m_prime), dim=1)
patch_2_m_prime = torch.squeeze(downsample_layer(patch_2_m_prime), dim=1)
# Triplet Margin Loss
l1 = torch.sum(torch.abs(patch_1_prime_f - patch_2_f), dim=1)
l2 = torch.sum(torch.abs(patch_1_f - patch_2_prime_f), dim=1)
l3 = torch.sum(torch.abs(patch_1_f - patch_2_f), dim=1)
ln1_nom = torch.sum(torch.sum(patch_1_m_prime * patch_2_m * (l1 - l3), dim=-1), dim=-1)
ln1_den = torch.sum(torch.sum(patch_1_m_prime * patch_2_m, dim=-1), dim=-1)
ln1_den = torch.max(ln1_den, torch.ones_like(ln1_den))
ln2_nom = torch.sum(torch.sum(patch_1_m * patch_2_m_prime * (l2 - l3), dim=-1), dim=-1)
ln2_den = torch.sum(torch.sum(patch_1_m * patch_2_m_prime, dim=-1), dim=-1)
ln2_den = torch.max(ln2_den, torch.ones_like(ln2_den))
ln1 = torch.sum(ln1_nom / ln1_den)
ln2 = torch.sum(ln2_nom / ln2_den)
# Regularization
batch_size = patch_1.size(0)
eye = torch.eye(3, dtype=h1.dtype, device=h1.device).unsqueeze(dim=0).repeat(batch_size, 1, 1)
ln3 = torch.sum((torch.matmul(h1, h2) - eye) ** 2) * triplet_mu
loss = ln1 + ln2 + ln3
return loss
|
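The regularization term near the end of bihome_loss penalises how far H1·H2 is from the identity, i.e. how far the two predicted homographies are from being inverses of each other. A toy check of that term with identity homographies:

import torch

batch_size = 2
h1 = torch.eye(3).unsqueeze(0).repeat(batch_size, 1, 1)
h2 = torch.eye(3).unsqueeze(0).repeat(batch_size, 1, 1)
eye = torch.eye(3).unsqueeze(0).repeat(batch_size, 1, 1)
triplet_mu = 0.01
ln3 = torch.sum((torch.matmul(h1, h2) - eye) ** 2) * triplet_mu
print(ln3)   # tensor(0.) -- a perfect inverse pair incurs no penalty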
11,993 |
def barbs(u_cube, v_cube, *args, **kwargs):
"""
Draws a barb plot from two vector component cubes.
Args:
* u_cube, v_cube : (:class:`~iris.cube.Cube`)
u and v vector components. Must have same shape and units of knot.
If the cubes have geographic coordinates, the values are treated as
true distance differentials, e.g. windspeeds, and *not* map coordinate
vectors. The components are aligned with the North and East of the
cube coordinate system.
.. Note:
At present, if u_cube and v_cube have geographic coordinates, then they
must be in a lat-lon coordinate system, though it may be a rotated one.
To transform wind values between coordinate systems, use
:func:`iris.analysis.cartography.rotate_vectors`.
To transform coordinate grid points, you will need to create
2-dimensional arrays of x and y values. These can be transformed with
:meth:`cartopy.crs.CRS.transform_points`.
Kwargs:
* coords: (list of :class:`~iris.coords.Coord` or string)
Coordinates or coordinate names. Use the given coordinates as the axes
for the plot. The order of the given coordinates indicates which axis
to use for each, where the first element is the horizontal
axis of the plot and the second element is the vertical axis
of the plot.
* axes: the :class:`matplotlib.axes.Axes` to use for drawing.
Defaults to the current axes if none provided.
See :func:`matplotlib.pyplot.barbs` for details of other valid
keyword arguments.
"""
#
# TODO: check u + v cubes for compatibility.
#
kwargs["_v_data"] = v_cube.data
return _draw_2d_from_points(
"barbs", _vector_component_args, u_cube, *args, **kwargs
)
|
def barbs(u_cube, v_cube, *args, **kwargs):
"""
Draws a barb plot from two vector component cubes.
Args:
* u_cube, v_cube : (:class:`~iris.cube.Cube`)
u and v vector components. Must have same shape and units of knot.
If the cubes have geographic coordinates, the values are treated as
true distance differentials, e.g. windspeeds, and *not* map coordinate
vectors. The components are aligned with the North and East of the
cube coordinate system.
.. Note:
At present, if u_cube and v_cube have geographic coordinates, then they
must be in a lat-lon coordinate system, though it may be a rotated one.
To transform wind values between coordinate systems, use
:func:`iris.analysis.cartography.rotate_vectors`.
To transform coordinate grid points, you will need to create
2-dimensional arrays of x and y values. These can be transformed with
:meth:`cartopy.crs.CRS.transform_points`.
Kwargs:
    * coords: (list of :class:`~iris.coords.Coord` or string)
Coordinates or coordinate names. Use the given coordinates as the axes
for the plot. The order of the given coordinates indicates which axis
to use for each, where the first element is the horizontal
axis of the plot and the second element is the vertical axis
of the plot.
* axes: the :class:`matplotlib.axes.Axes` to use for drawing.
Defaults to the current axes if none provided.
See :func:`matplotlib.pyplot.barbs` for details of other valid
keyword arguments.
"""
#
# TODO: check u + v cubes for compatibility.
#
kwargs["_v_data"] = v_cube.data
return _draw_2d_from_points(
"barbs", _vector_component_args, u_cube, *args, **kwargs
)
|
45,454 |
def test_staticfiles_with_package(test_client_factory):
app = StaticFiles(packages=["tests"])
client = test_client_factory(app)
response = client.get("/example.txt")
assert response.status_code == 200
assert "123" in response.text
assert response.text.endswith("\n")
|
def test_staticfiles_with_package(test_client_factory):
app = StaticFiles(packages=["tests"])
client = test_client_factory(app)
response = client.get("/example.txt")
assert response.status_code == 200
assert response.text == "123\n"
|
46,194 |
def view_multichannel(
data,
*,
axis=-1,
colormap=None,
contrast_limits=None,
interpolation='nearest',
rendering='mip',
name=None,
metadata=None,
scale=None,
translate=None,
opacity=1,
blending='additive',
visible=True,
title='napari',
ndisplay=2,
order=None,
):
"""Create a viewer and add images layers expanding along one axis.
Parameters
----------
data : array
Image data. Can be N dimensional.
axis : int
Axis to expand colors along.
colormap : list, str, vispy.Color.Colormap, tuple, dict
Colormaps to use for luminance images. If a string must be the name
of a supported colormap from vispy or matplotlib. If a tuple the
first value must be a string to assign as a name to a colormap and
the second item must be a Colormap. If a dict the key must be a
string to assign as a name to a colormap and the value must be a
Colormap. If a list then must be same length as the axis that is
being expanded and then each colormap is applied to each image.
contrast_limits : list (2,)
Color limits to be used for determining the colormap bounds for
luminance images. If not passed is calculated as the min and max of
the image. If list of lists then must be same length as the axis
that is being expanded and then each colormap is applied to each
image.
interpolation : str
Interpolation mode used by vispy. Must be one of our supported
modes.
name : str
Name of the layer.
metadata : dict
Layer metadata.
scale : tuple of float
Scale factors for the layer.
translate : tuple of float
Translation values for the layer.
opacity : float
Opacity of the layer visual, between 0.0 and 1.0.
blending : str
One of a list of preset blending modes that determines how RGB and
alpha values of the layer visual get mixed. Allowed values are
{'opaque', 'translucent', and 'additive'}.
visible : bool
Whether the layer visual is currently being displayed.
title : string
The title of the viewer window.
ndisplay : int
Number of displayed dimensions.
    order : tuple of int
Order in which dimensions are displayed where the last two or last
three dimensions correspond to row x column or plane x row x column if
ndisplay is 2 or 3.
Returns
-------
viewer : :class:`napari.Viewer`
The newly-created viewer.
"""
viewer = Viewer(title=title, ndisplay=ndisplay, order=order)
viewer.add_multichannel(
        data,
        axis=axis,
        colormap=colormap,
        contrast_limits=contrast_limits,
interpolation=interpolation,
rendering=rendering,
name=name,
metadata=metadata,
scale=scale,
translate=translate,
opacity=opacity,
blending=blending,
visible=visible,
)
return viewer
|
def view_multichannel(
data,
*,
axis=-1,
colormap=None,
contrast_limits=None,
interpolation='nearest',
rendering='mip',
name=None,
metadata=None,
scale=None,
translate=None,
opacity=1,
blending='additive',
visible=True,
title='napari',
ndisplay=2,
order=None,
):
"""Create a viewer and add images layers expanding along one axis.
Parameters
----------
data : array
Image data. Can be N dimensional.
axis : int
Axis to expand colors along.
colormap : list, str, vispy.Color.Colormap, tuple, dict
Colormaps to use for luminance images. If a string must be the name
of a supported colormap from vispy or matplotlib. If a tuple the
first value must be a string to assign as a name to a colormap and
the second item must be a Colormap. If a dict the key must be a
string to assign as a name to a colormap and the value must be a
Colormap. If a list then must be same length as the axis that is
being expanded and then each colormap is applied to each image.
contrast_limits : list (2,)
Color limits to be used for determining the colormap bounds for
luminance images. If not passed is calculated as the min and max of
the image. If list of lists then must be same length as the axis
that is being expanded and then each colormap is applied to each
image.
interpolation : str
Interpolation mode used by vispy. Must be one of our supported
modes.
name : str
Name of the layer.
metadata : dict
Layer metadata.
scale : tuple of float
Scale factors for the layer.
translate : tuple of float
Translation values for the layer.
opacity : float
Opacity of the layer visual, between 0.0 and 1.0.
blending : str
One of a list of preset blending modes that determines how RGB and
alpha values of the layer visual get mixed. Allowed values are
{'opaque', 'translucent', and 'additive'}.
visible : bool
Whether the layer visual is currently being displayed.
title : string
The title of the viewer window.
ndisplay : {2, 3}
Number of displayed dimensions.
    order : tuple of int
Order in which dimensions are displayed where the last two or last
three dimensions correspond to row x column or plane x row x column if
ndisplay is 2 or 3.
Returns
-------
viewer : :class:`napari.Viewer`
The newly-created viewer.
"""
viewer = Viewer(title=title, ndisplay=ndisplay, order=order)
viewer.add_multichannel(
        data,
        axis=axis,
        colormap=colormap,
        contrast_limits=contrast_limits,
interpolation=interpolation,
rendering=rendering,
name=name,
metadata=metadata,
scale=scale,
translate=translate,
opacity=opacity,
blending=blending,
visible=visible,
)
return viewer
|
40,024 |
def binary_encoding(v: Variable, upper_bound: int) -> BinaryQuadraticModel:
"""Return a binary quadratic model encoding an integer.
Args:
v: The integer variable label.
upper_bound: The upper bound on the integer value (inclusive).
Returns:
A binary quadratic model. The variables in the BQM will be labelled
with tuples of length two or three. The first value of the tuple will
        be the variable label ``v`` provided. The second value will be the
coefficient in the integer encoding. One of the variables will
have a third value in the tuple, ``'msb'``. This is the variable
occupying the position of the most significant bit. Though it may
actually be a smaller number in order to enforce the ``upper_bound``.
Example:
>>> bqm = dimod.generators.binary_encoding('i', 6)
>>> bqm
BinaryQuadraticModel({('i', 1): 1.0, ('i', 2): 2.0, ('i', 3, 'msb'): 3.0}, {}, 0.0, 'BINARY')
We can use a sample to restore the original integer value.
>>> sample = {('i', 1): 1, ('i', 2): 0, ('i', 3, 'msb'): 1}
>>> bqm.energy(sample)
4.0
>>> sum(v[1]*val for v, val in sample.items()) + bqm.offset
4.0
If you wish to encode integers with a lower bound, you can use the
binary quadratic model's :attr:`~BinaryQuadraticModel.offset` attribute.
>>> i = dimod.generators.binary_encoding('i', 10) + 5 # integer in [5, 15]
References:
[1]: Sahar Karimi, Pooya Ronagh (2017), Practical Integer-to-Binary
Mapping for Quantum Annealers. arxiv.org:1706.01945.
"""
# note: the paper above also gives a nice way to handle bounded coefficients
# if we want to do that in the future.
if upper_bound <= 1:
raise ValueError("upper_bound must be a greater than or equal to 1, "
f"received {upper_bound}")
upper_bound = math.floor(upper_bound)
bqm = BinaryQuadraticModel(Vartype.BINARY)
max_pow = math.floor(math.log2(upper_bound))
for exp in range(max_pow):
val = 1 << exp
bqm.set_linear((v, val), val)
else:
val = upper_bound - ((1 << max_pow) - 1)
bqm.set_linear((v, val, 'msb'), val)
return bqm
|
def binary_encoding(v: Variable, upper_bound: int) -> BinaryQuadraticModel:
"""Return a binary quadratic model encoding an integer.
Args:
v: The integer variable label.
upper_bound: The upper bound on the integer value (inclusive).
Returns:
A binary quadratic model. The variables in the BQM will be labelled
with tuples of length two or three. The first value of the tuple will
        be the variable label ``v`` provided. The second value will be the
coefficient in the integer encoding. One of the variables will
have a third value in the tuple, ``'msb'``. This is the variable
occupying the position of the most significant bit. Though it may
actually be a smaller number in order to enforce the ``upper_bound``.
Example:
>>> bqm = dimod.generators.binary_encoding('i', 6)
>>> bqm
BinaryQuadraticModel({('i', 1): 1.0, ('i', 2): 2.0, ('i', 3, 'msb'): 3.0}, {}, 0.0, 'BINARY')
We can use a sample to restore the original integer value.
>>> sample = {('i', 1): 1, ('i', 2): 0, ('i', 3, 'msb'): 1}
>>> bqm.energy(sample)
4.0
>>> sum(v[1]*val for v, val in sample.items()) + bqm.offset
4.0
If you wish to encode integers with a lower bound, you can use the
binary quadratic model's :attr:`~BinaryQuadraticModel.offset` attribute.
>>> i = dimod.generators.binary_encoding('i', 10) + 5 # integer in [5, 15]
References:
[1]: Sahar Karimi, Pooya Ronagh (2017), Practical Integer-to-Binary
Mapping for Quantum Annealers. arxiv.org:1706.01945.
"""
# note: the paper above also gives a nice way to handle bounded coefficients
# if we want to do that in the future.
if upper_bound <= 1:
raise ValueError("upper_bound must be greater than or equal to 1, "
f"received {upper_bound}")
upper_bound = math.floor(upper_bound)
bqm = BinaryQuadraticModel(Vartype.BINARY)
max_pow = math.floor(math.log2(upper_bound))
for exp in range(max_pow):
val = 1 << exp
bqm.set_linear((v, val), val)
else:
val = upper_bound - ((1 << max_pow) - 1)
bqm.set_linear((v, val, 'msb'), val)
return bqm
|
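The docstring's decoding trick deserves a plain illustration: each variable label carries its own coefficient, so the encoded integer is recovered by a weighted sum of the sampled bits.

# Labels follow the (v, coefficient[, 'msb']) convention from the docstring above.
sample = {('i', 1): 1, ('i', 2): 0, ('i', 3, 'msb'): 1}
value = sum(label[1] * bit for label, bit in sample.items())
print(value)   # 4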
21,980 |
def validateSessionWithToken(sydent, sid, clientSecret, token, next_link=None):
"""
Attempt to validate a session, identified by the sid, using
the token from out-of-band. The client secret is given to
prevent attempts to guess the token for a sid.
    If the session was successfully validated, return a dict
with 'success': True that can be sent to the client,
otherwise return False.
:param sid: The session ID
:type sid: str
:param clientSecret: The client_secret originally set when requesting the session
:type clientSecret: str
:param token: The validation token
:type token: str
:param next_link: The link to redirect the client to after validation, if provided
:type next_link: str|None
:return: The JSON to return to the client on success, or False on fail
:rtype: Dict|bool
:raises IncorrectClientSecretException if the client secret does not match the sid
    :raises SessionExpiredException if the provided session has expired
:raises NextLinkValidationException if the next_link provided is different
from one provided in a previous, successful validation attempt
"""
valSessionStore = ThreePidValSessionStore(sydent)
s = valSessionStore.getTokenSessionById(sid)
if not s:
logger.info("Session ID %s not found", (sid,))
return False
if not clientSecret == s.clientSecret:
logger.info("Incorrect client secret", (sid,))
raise IncorrectClientSecretException()
if s.mtime + ValidationSession.THREEPID_SESSION_VALIDATION_TIMEOUT_MS < time_msec():
logger.info("Session expired")
raise SessionExpiredException()
# Check whether this session has already been validated with a next_link provided
# If so, and the next_link this time around is different than previously, then the
# user may be getting phished. Reject the validation attempt.
if next_link and valSessionStore.next_link_differs(sid, token, next_link):
logger.info(
"Validation attempt rejected as provided next_link is different "
"from that in a previous, successful validation attempt with this "
"session id"
)
raise NextLinkValidationException()
# TODO once we can validate the token oob
#if tokenObj.validated and clientSecret == tokenObj.clientSecret:
# return True
if s.token == token:
logger.info("Setting session %s as validated", s.id)
valSessionStore.setValidated(s.id, True)
if next_link:
valSessionStore.set_next_link_for_token(s.id, s.token, next_link)
return {'success': True}
else:
logger.info("Incorrect token submitted")
return False
|
def validateSessionWithToken(sydent, sid, clientSecret, token, next_link=None):
"""
Attempt to validate a session, identified by the sid, using
the token from out-of-band. The client secret is given to
prevent attempts to guess the token for a sid.
    If the session was successfully validated, return a dict
with 'success': True that can be sent to the client,
otherwise return False.
:param sid: The session ID
:type sid: str
:param clientSecret: The client_secret originally set when requesting the session
:type clientSecret: str
:param token: The validation token
:type token: str
:param next_link: The link to redirect the client to after validation, if provided
:type next_link: str|None
:return: The JSON to return to the client on success, or False on fail
:rtype: Dict|bool
:raises IncorrectClientSecretException if the client secret does not match the sid
    :raises SessionExpiredException if the provided session has expired
:raises NextLinkValidationException if the next_link provided is different
from one provided in a previous, successful validation attempt
"""
valSessionStore = ThreePidValSessionStore(sydent)
s = valSessionStore.getTokenSessionById(sid)
if not s:
logger.info("Session ID %s not found", (sid,))
return False
if not clientSecret == s.clientSecret:
logger.info("Incorrect client secret", (sid,))
raise IncorrectClientSecretException()
if s.mtime + ValidationSession.THREEPID_SESSION_VALIDATION_TIMEOUT_MS < time_msec():
logger.info("Session expired")
raise SessionExpiredException()
# Check whether this session has already been validated with a next_link provided
# If so, and the next_link this time around is different than previously, then the
# user may be getting phished. Reject the validation attempt.
if next_link and valSessionStore.next_link_differs(sid, token, next_link):
logger.info(
"Validation attempt rejected as provided 'next_link' is different "
"from that in a previous, successful validation attempt with this "
"session id"
)
raise NextLinkValidationException()
# TODO once we can validate the token oob
#if tokenObj.validated and clientSecret == tokenObj.clientSecret:
# return True
if s.token == token:
logger.info("Setting session %s as validated", s.id)
valSessionStore.setValidated(s.id, True)
if next_link:
valSessionStore.set_next_link_for_token(s.id, s.token, next_link)
return {'success': True}
else:
logger.info("Incorrect token submitted")
return False
|
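The expiry check compares the session's modification time plus a validation window against the current time in milliseconds. A self-contained sketch, with a hypothetical 24-hour window standing in for the real constant:

import time

THREEPID_SESSION_VALIDATION_TIMEOUT_MS = 24 * 60 * 60 * 1000  # assumed window

def is_expired(session_mtime_ms):
    now_ms = int(time.time() * 1000)
    return session_mtime_ms + THREEPID_SESSION_VALIDATION_TIMEOUT_MS < now_ms

print(is_expired(int(time.time() * 1000)))   # False: brand-new session
print(is_expired(0))                         # True: epoch-old session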
17,032 |
def state_changed_event_is_same_state(event: Event) -> bool:
"""Check if a state changed event is the same state."""
event_data = event.data
old_state: State | None = event_data.get("old_state")
new_state: State | None = event_data.get("new_state")
return bool(new_state and old_state and new_state.state == old_state.state)
|
def state_changed_event_is_same_state(event: Event) -> bool:
"""Check if a state changed event is the same state."""
event_data = event.data
old_state: State | None = event_data.get("old_state")
new_state: State | None = event_data.get("new_state")
return bool(new_state and old_state and new_state.last_changed != old_state.last_updated)
|
30,343 |
def get_errors_string_from_bad_request(bad_request_results, status_code):
if status_code == 404:
return 'Object does not exist.\n'
# Errors could be retrieved in two forms:
# 1. A dictionary of fields and errors list related to the fields, all under 'data' key in the response json object
# 2. A list, directly within the response object
errors_string = 'Errors from service:\n\n'
# First form
errors_dict = bad_request_results.json().get('data', {}).get('errors', {})
if errors_dict:
for error_num, (key, lst) in enumerate(errors_dict.items(), 1):
curr_error_string = '\n'.join(lst) + '\n\n'
errors_string += '{0}. In \'{1}\':\n{2}'.format(error_num, key, curr_error_string)
return errors_string
# Second form
errors_list = bad_request_results.json().get('errors', [])
if errors_list:
for error_num, error in enumerate(errors_list, 1):
if isinstance(error, str):
errors_string += 'Error #{0}: {1}\n'.format(error_num, error)
else: # error is a list
for i in range(len(error)):
errors_string += 'Error #{0}.{1}: {2}\n'.format(error_num, i, error[i])
return errors_string
return str() # Service did not provide any errors.
|
def get_errors_string_from_bad_request(bad_request_results, status_code):
if status_code == 404:
return 'Object does not exist.\n'
# Errors could be retrieved in two forms:
# 1. A dictionary of fields and errors list related to the fields, all under 'data' key in the response json object
# 2. A list, directly within the response object
errors_string = 'Errors from service:\n\n'
# First form
errors_dict = bad_request_results.json().get('data', {}).get('errors', {})
if errors_dict:
for error_num, (key, lst) in enumerate(errors_dict.items(), 1):
curr_error_string = '\n'.join(lst) + '\n\n'
errors_string += 'Error #{0}. In \'{1}\':\n{2}'.format(error_num, key, curr_error_string)
return errors_string
# Second form
errors_list = bad_request_results.json().get('errors', [])
if errors_list:
for error_num, error in enumerate(errors_list, 1):
if isinstance(error, str):
errors_string += 'Error #{0}: {1}\n'.format(error_num, error)
else: # error is a list
for i in range(len(error)):
errors_string += 'Error #{0}.{1}: {2}\n'.format(error_num, i, error[i])
return errors_string
return str() # Service did not provide any errors.
|
57,575 |
def _build_ssl_context(
disable_ssl_certificate_validation,
ca_certs,
cert_file=None,
key_file=None,
maximum_version=None,
minimum_version=None,
key_password=None,
):
if not hasattr(ssl, "SSLContext"):
raise RuntimeError("httplib2 requires Python 3.2+ for ssl.SSLContext")
context = ssl.SSLContext(DEFAULT_TLS_VERSION)
context.verify_mode = ssl.CERT_NONE if disable_ssl_certificate_validation else ssl.CERT_REQUIRED
# SSLContext.maximum_version and SSLContext.minimum_version are python 3.7+.
# source: https://docs.python.org/3/library/ssl.html#ssl.SSLContext.maximum_version
if maximum_version is not None:
if hasattr(context, "maximum_version"):
if isinstance(maximum_version, str):
context.maximum_version = getattr(ssl.TLSVersion, maximum_version)
else:
context.maximum_version = maximum_version
else:
raise RuntimeError("setting tls_maximum_version requires Python 3.7 and OpenSSL 1.1 or newer")
if minimum_version is not None:
if hasattr(context, "minimum_version"):
if isinstance(minimum_version, str):
context.minimum_version = getattr(ssl.TLSVersion, minimum_version)
else:
                context.minimum_version = minimum_version
else:
raise RuntimeError("setting tls_minimum_version requires Python 3.7 and OpenSSL 1.1 or newer")
# check_hostname requires python 3.4+
# we will perform the equivalent in HTTPSConnectionWithTimeout.connect() by calling ssl.match_hostname
# if check_hostname is not supported.
if hasattr(context, "check_hostname"):
context.check_hostname = not disable_ssl_certificate_validation
context.load_verify_locations(ca_certs)
if cert_file:
context.load_cert_chain(cert_file, key_file, key_password)
return context
|
def _build_ssl_context(
disable_ssl_certificate_validation,
ca_certs,
cert_file=None,
key_file=None,
maximum_version=None,
minimum_version=None,
key_password=None,
):
if not hasattr(ssl, "SSLContext"):
raise RuntimeError("httplib2 requires Python 3.2+ for ssl.SSLContext")
context = ssl.SSLContext(DEFAULT_TLS_VERSION)
context.verify_mode = ssl.CERT_NONE if disable_ssl_certificate_validation else ssl.CERT_REQUIRED
# SSLContext.maximum_version and SSLContext.minimum_version are python 3.7+.
# source: https://docs.python.org/3/library/ssl.html#ssl.SSLContext.maximum_version
if maximum_version is not None:
if hasattr(context, "maximum_version"):
if not isinstance(maximum_version, ssl.TLSVersion):
maximum_version = getattr(ssl.TLSVersion, maximum_version)
context.maximum_version = maximum_version
else:
raise RuntimeError("setting tls_maximum_version requires Python 3.7 and OpenSSL 1.1 or newer")
if minimum_version is not None:
if hasattr(context, "minimum_version"):
if isinstance(minimum_version, str):
context.minimum_version = getattr(ssl.TLSVersion, minimum_version)
else:
                context.minimum_version = minimum_version
else:
raise RuntimeError("setting tls_minimum_version requires Python 3.7 and OpenSSL 1.1 or newer")
# check_hostname requires python 3.4+
# we will perform the equivalent in HTTPSConnectionWithTimeout.connect() by calling ssl.match_hostname
# if check_hostname is not supported.
if hasattr(context, "check_hostname"):
context.check_hostname = not disable_ssl_certificate_validation
context.load_verify_locations(ca_certs)
if cert_file:
context.load_cert_chain(cert_file, key_file, key_password)
return context
|
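A usage sketch for the context builder above, assuming Python 3.7+, an installed certifi package for the CA bundle, and a module-level DEFAULT_TLS_VERSION assumed to mirror httplib2's default.

import ssl
import certifi  # assumption: certifi supplies the CA bundle path

DEFAULT_TLS_VERSION = getattr(ssl, "PROTOCOL_TLS", None) or ssl.PROTOCOL_SSLv23  # assumed to match httplib2's module default

# Build a verifying context pinned to TLS 1.2+; the string name is resolved via ssl.TLSVersion.
context = _build_ssl_context(
    disable_ssl_certificate_validation=False,
    ca_certs=certifi.where(),
    minimum_version="TLSv1_2",
)
print(context.verify_mode == ssl.CERT_REQUIRED)  # True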
32,311 |
def main(): # pragma: no cover
try:
demisto_params = demisto.params() | demisto.args()
last_run = demisto.getLastRun()
last_object_ids = last_run.get('ids')
if 'after' not in last_run:
after = dateparser.parse(demisto_params['after'].strip())
last_run = after.timestamp()
last_run = {LogType[LogType.AUTHENTICATION]: last_run,
LogType[LogType.ADMINISTRATION]: last_run,
LogType[LogType.TELEPHONY]: last_run}
else:
last_run = last_run['after']
request_order = last_run.get('request_order',
[LogType.AUTHENTICATION, LogType.ADMINISTRATION, LogType.TELEPHONY])
demisto_params['params'] = Params(**demisto_params, mintime=last_run)
client = Client(demisto_params)
get_events = GetEvents(client, request_order)
command = demisto.command()
if command == 'test-module':
get_events.aggregated_results()
demisto.results('ok')
elif command == 'duo-get-events' or command == 'fetch-events':
events = get_events.aggregated_results(last_object_ids=last_object_ids)
demisto.setLastRun(get_events.get_last_run(events))
send_events_to_xsiam(events, 'duo', 'duo')
if command == 'duo-get-events':
command_results = CommandResults(
readable_output=tableToMarkdown('Duo Logs', events, headerTransform=pascalToSpace),
raw_response=events,
)
return_results(command_results)
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
def main(): # pragma: no cover
try:
demisto_params = demisto.params() | demisto.args()
last_run = demisto.getLastRun()
last_object_ids = last_run.get('ids')
if 'after' not in last_run:
after = dateparser.parse(demisto_params['after'].strip())
last_run = after.timestamp()
last_run = {LogType[LogType.AUTHENTICATION]: last_run,
LogType[LogType.ADMINISTRATION]: last_run,
LogType[LogType.TELEPHONY]: last_run}
else:
last_run = last_run['after']
request_order = last_run.get('request_order',
[LogType.AUTHENTICATION, LogType.ADMINISTRATION, LogType.TELEPHONY])
demisto_params['params'] = Params(**demisto_params, mintime=last_run)
client = Client(demisto_params)
get_events = GetEvents(client, request_order)
command = demisto.command()
if command == 'test-module':
get_events.aggregated_results()
return_results('ok')
elif command == 'duo-get-events' or command == 'fetch-events':
events = get_events.aggregated_results(last_object_ids=last_object_ids)
demisto.setLastRun(get_events.get_last_run(events))
send_events_to_xsiam(events, 'duo', 'duo')
if command == 'duo-get-events':
command_results = CommandResults(
readable_output=tableToMarkdown('Duo Logs', events, headerTransform=pascalToSpace),
raw_response=events,
)
return_results(command_results)
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
12,138 |
def file_obj_to_premis(file_obj):
"""
    Converts a File model object to a PREMIS object via metsrw.
Returns:
metsrw.plugins.premisrw.premis.PREMISObject
"""
premis_digest_algorithm = convert_to_premis_hash_function(file_obj.checksumtype)
premis_data = (
"object",
metsrw.plugins.premisrw.PREMIS_2_2_META,
(
"object_identifier",
("object_identifier_type", "UUID"),
("object_identifier_value", file_obj.uuid),
),
(
"object_characteristics",
("composition_level", "0"),
(
"fixity",
("message_digest_algorithm", premis_digest_algorithm),
("message_digest", file_obj.checksum),
),
("size", str(file_obj.size)),
get_premis_format_data(file_obj.fileid_set.all()),
(
"creating_application",
(
"date_created_by_application",
file_obj.modificationtime.strftime("%Y-%m-%d"),
),
),
),
("original_name", escape(file_obj.originallocation)),
) + get_premis_relationship_data(
file_obj.related_is_source_of, file_obj.related_has_source
)
return metsrw.plugins.premisrw.data_to_premis(premis_data)
|
def file_obj_to_premis(file_obj):
"""
    Converts a File model object to a PREMIS object via metsrw.
Returns:
metsrw.plugins.premisrw.premis.PREMISObject
"""
premis_digest_algorithm = convert_to_premis_hash_function(file_obj.checksumtype)
premis_data = (
"object",
metsrw.plugins.premisrw.PREMIS_3_0_META,
(
"object_identifier",
("object_identifier_type", "UUID"),
("object_identifier_value", file_obj.uuid),
),
(
"object_characteristics",
("composition_level", "0"),
(
"fixity",
("message_digest_algorithm", premis_digest_algorithm),
("message_digest", file_obj.checksum),
),
("size", str(file_obj.size)),
get_premis_format_data(file_obj.fileid_set.all()),
(
"creating_application",
(
"date_created_by_application",
file_obj.modificationtime.strftime("%Y-%m-%d"),
),
),
),
("original_name", escape(file_obj.originallocation)),
) + get_premis_relationship_data(
file_obj.related_is_source_of, file_obj.related_has_source
)
return metsrw.plugins.premisrw.data_to_premis(premis_data)
|
29,030 |
def extract_ssh_keys(credentials: Sequence[Mapping]) -> Sequence[Mapping]:
ssh_keys = []
for credential in credentials:
if credential["_type"] == CredentialComponentType.SSH_KEYPAIR.name:
ssh_keys.append(credential)
return ssh_keys
|
def extract_ssh_keys(credentials: Sequence[Mapping]) -> Sequence[Mapping]:
return [c for c in credentials if c["_type"] == CredentialComponentType.SSH_KEYPAIR.name]
|
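A toy sketch of the comprehension above; the CredentialComponentType enum here is a stand-in for the project's own, defined only so the snippet runs on its own.

from enum import Enum

class CredentialComponentType(Enum):  # stand-in enum, an assumption for illustration
    SSH_KEYPAIR = 1
    PASSWORD = 2

credentials = [
    {"_type": "SSH_KEYPAIR", "public_key": "ssh-rsa AAAA..."},
    {"_type": "PASSWORD", "password": "hunter2"},
]
# Same filter as the refactored function: keep only SSH keypair components.
ssh_keys = [c for c in credentials if c["_type"] == CredentialComponentType.SSH_KEYPAIR.name]
print(ssh_keys)  # only the SSH_KEYPAIR entry remains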
17,610 |
def is_version_esync(path):
"""Determines if a Wine build is Esync capable
Params:
path: the path to the Wine version
Returns:
        bool: True if the build is Esync capable
"""
try:
version = path.split("/")[-3].lower()
except IndexError:
logger.error("Invalid path '%s'", path)
return False
_version_number, version_prefix, version_suffix = parse_version(version)
esync_compatible_versions = ["esync", "lutris", "tkg", "ge", "proton", "staging"]
for esync_version in esync_compatible_versions:
if esync_version in version_prefix or esync_version in version_suffix:
return True
wine_version = get_wine_version(path)
if wine_version is not None:
wine_version = wine_version.lower()
return "esync" in wine_version or "staging" in wine_version
return False
|
def is_version_esync(path):
"""Determines if a Wine build is Esync capable
Params:
path: the path to the Wine version
Returns:
        bool: True if the build is Esync capable
"""
try:
version = path.split("/")[-3].lower()
except IndexError:
logger.error("Invalid path '%s'", path)
return False
_version_number, version_prefix, version_suffix = parse_version(version)
esync_compatible_versions = ["esync", "lutris", "tkg", "ge", "proton", "staging"]
for esync_version in esync_compatible_versions:
if esync_version in version_prefix or esync_version in version_suffix:
return True
wine_version = get_wine_version(path)
if wine_version:
wine_version = wine_version.lower()
return "esync" in wine_version or "staging" in wine_version
return False
|
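To see why split("/")[-3] picks out the version folder, a stand-alone sketch with a typical (assumed) Lutris runner path:

# Illustrative path only; real installs vary.
path = "/home/user/.local/share/lutris/runners/wine/lutris-ge-7.2-x86_64/bin/wine"
version = path.split("/")[-3].lower()
print(version)  # "lutris-ge-7.2-x86_64" -> contains "lutris"/"ge", so it is treated as Esync capable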
31,752 |
def main():
args = demisto.args()
client = Client(
server_url=args['url'],
use_ssl=not args.get('insecure', False),
proxy=args.get('proxy', False),
)
try:
rss_raw_data = client.get_feed_data()
parsed_feed_data = parse_feed_data(rss_raw_data)
entries_data = collect_entries_data_from_response(parsed_feed_data)
content = create_widget_content(entries_data)
except Exception as e:
demisto.error(traceback.format_exc())
return_error(str(e))
demisto.results({
'Type': EntryType.NOTE,
'ContentsFormat': EntryFormat.MARKDOWN,
'Contents': content,
})
|
def main():
args = demisto.args()
client = Client(
server_url=args['url'],
use_ssl=not args.get('insecure', False),
proxy=args.get('proxy', False),
)
try:
rss_raw_data = client.get_feed_data()
parsed_feed_data = parse_feed_data(rss_raw_data)
entries_data = collect_entries_data_from_response(parsed_feed_data)
content = create_widget_content(entries_data)
except Exception as e:
demisto.error(traceback.format_exc())
return_error(str(e))
return_results({
'Type': EntryType.NOTE,
'ContentsFormat': EntryFormat.MARKDOWN,
'Contents': content,
})
|
35,707 |
def _resnet_fpn_extractor(
backbone: resnet.ResNet,
trainable_layers: int,
returned_layers: Optional[List[int]] = None,
extra_blocks: Optional[ExtraFPNBlock] = None,
) -> BackboneWithFPN:
    # select layers that won't be frozen
if trainable_layers not in range(0, 6):
raise ValueError(f" trainable_layers expected to be in [0,5], got {trainable_layers}")
layers_to_train = ["layer4", "layer3", "layer2", "layer1", "conv1"][:trainable_layers]
if trainable_layers == 5:
layers_to_train.append("bn1")
for name, parameter in backbone.named_parameters():
if all([not name.startswith(layer) for layer in layers_to_train]):
parameter.requires_grad_(False)
if extra_blocks is None:
extra_blocks = LastLevelMaxPool()
if returned_layers is None:
returned_layers = [1, 2, 3, 4]
if min(returned_layers) <= 0 or max(returned_layers) >= 5:
raise ValueError(f" `returned_layers` object should contain integers between [1,4], got {returned_layers} ")
return_layers = {f"layer{k}": str(v) for v, k in enumerate(returned_layers)}
in_channels_stage2 = backbone.inplanes // 8
in_channels_list = [in_channels_stage2 * 2 ** (i - 1) for i in returned_layers]
out_channels = 256
return BackboneWithFPN(backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks)
|
def _resnet_fpn_extractor(
backbone: resnet.ResNet,
trainable_layers: int,
returned_layers: Optional[List[int]] = None,
extra_blocks: Optional[ExtraFPNBlock] = None,
) -> BackboneWithFPN:
    # select layers that won't be frozen
if trainable_layers < 0 or trainable_layers > 5:
raise ValueError(f" trainable_layers expected to be in [0,5], got {trainable_layers}")
layers_to_train = ["layer4", "layer3", "layer2", "layer1", "conv1"][:trainable_layers]
if trainable_layers == 5:
layers_to_train.append("bn1")
for name, parameter in backbone.named_parameters():
if all([not name.startswith(layer) for layer in layers_to_train]):
parameter.requires_grad_(False)
if extra_blocks is None:
extra_blocks = LastLevelMaxPool()
if returned_layers is None:
returned_layers = [1, 2, 3, 4]
if min(returned_layers) <= 0 or max(returned_layers) >= 5:
raise ValueError(f" `returned_layers` object should contain integers between [1,4], got {returned_layers} ")
return_layers = {f"layer{k}": str(v) for v, k in enumerate(returned_layers)}
in_channels_stage2 = backbone.inplanes // 8
in_channels_list = [in_channels_stage2 * 2 ** (i - 1) for i in returned_layers]
out_channels = 256
return BackboneWithFPN(backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks)
|
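A minimal usage sketch, assuming a recent torchvision (where resnet50 accepts weights=None) and that the surrounding helpers (BackboneWithFPN, LastLevelMaxPool) are in scope as in torchvision.models.detection.backbone_utils.

import torch
from torchvision.models import resnet50

# Wrap a ResNet-50 in an FPN, leaving the last three stages trainable.
backbone = _resnet_fpn_extractor(resnet50(weights=None), trainable_layers=3)
features = backbone(torch.rand(1, 3, 224, 224))
print({name: tuple(f.shape) for name, f in features.items()})  # feature maps "0".."3" plus "pool", 256 channels each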
31,321 |
def get_report(task_id):
"""Make API call to ANYRUN to get task report
Parameters
----------
task_id : str
The unique task ID of the analysis whose report to fetch.
Returns
-------
dict
Response JSON from ANYRUN API call.
"""
try:
# according to the any-run documentation, this request should work:
# https://any.run/api-documentation/#api-Analysis-GetReport
url_suffix = 'analysis/' + task_id
response = http_request('GET', url_suffix=url_suffix)
except DemistoException as exc:
if exc.res and exc.res.status_code != 403:
raise
# in case of 403, try a work-around suggested by customer
url_suffix = 'analysis/'
params = {
'task': task_id,
}
response = http_request('GET', url_suffix=url_suffix, params=params)
return response
|
def get_report(task_id):
"""Make API call to ANYRUN to get task report
Parameters
----------
task_id : str
The unique task ID of the analysis whose report to fetch.
Returns
-------
dict
Response JSON from ANYRUN API call.
"""
try:
# according to the any-run documentation, this request should work:
# https://any.run/api-documentation/#api-Analysis-GetReport
url_suffix = f'analysis/{task_id}'
response = http_request('GET', url_suffix=url_suffix)
except DemistoException as exc:
if exc.res and exc.res.status_code != 403:
raise
# in case of 403, try a work-around suggested by customer
url_suffix = 'analysis/'
params = {
'task': task_id,
}
response = http_request('GET', url_suffix=url_suffix, params=params)
return response
|
32,366 |
def main():
SCOs: dict[str, str] = {
"file md5": "[file:hashes.md5 ='{}']",
"file sha1": "[file:hashes.sha1 = '{}']",
"file sha256": "[file:hashes.sha256 = '{}']",
"ssdeep": "[file:hashes.ssdeep = '']",
"ip": "[ipv4-addr:value = '{}']",
"cidr": "[ipv4-addr:value = '{}']",
"ipv6": "[ipv6-addr:value = '{}']",
"ipv6cidr": "[ipv6-addr:value = '{}']",
"url": "[url:value = '{}']",
"email": "[email-message:sender_ref.value = '{}']",
"username": "[user-account:account_login = '{}']",
"domain": "[domain-name:value = '{}']",
"hostname": "[domain-name:value = '{}']",
"registry key": "[windows-registry-key:key = '{}']"
}
SDOs: dict[str, Callable] = {
"malware": Malware,
"attack pattern": AttackPattern,
"campaign": Campaign,
"infrastructure": Infrastructure,
"tool": Tool,
"intrusion set": IntrusionSet,
"report": Report,
"threat actor": ThreatActor,
"cve": Vulnerability,
"course of action": CourseOfAction
}
user_args = demisto.args().get('indicators', 'Unknown')
doubleBackslash = demisto.args().get('doubleBackslash', True)
all_args = {}
if isinstance(user_args, dict):
all_args = json.loads(json.dumps(user_args))
else:
try:
all_args = json.loads(demisto.args().get('indicators', 'Unknown'))
except: # noqa: E722
return_error('indicators argument is invalid json object')
indicators = []
for indicator_fields in all_args:
kwargs: dict[str, Any] = {"allow_custom": True}
demisto_indicator_type = all_args[indicator_fields].get('indicator_type', 'Unknown')
if doubleBackslash:
value = all_args[indicator_fields].get('value', '').replace('\\', r'\\')
else:
value = all_args[indicator_fields].get('value', '')
demisto_score = all_args[indicator_fields].get('score', '').lower()
if demisto_score in ["bad", "malicious"]:
kwargs["score"] = "High"
elif demisto_score == "suspicious":
kwargs["score"] = "Medium"
elif demisto_score in ["good", "benign"]:
kwargs["score"] = "None"
else:
kwargs["score"] = "Not Specified"
kwargs["created"] = dateparser.parse(all_args[indicator_fields].get('timestamp', ''))
kwargs["modified"] = dateparser.parse(all_args[indicator_fields].get('lastSeen', f'{kwargs["created"]}'))
kwargs["id"] = all_args[indicator_fields].get('stixid', '')
kwargs["labels"] = [demisto_indicator_type.lower()]
kwargs["description"] = all_args[indicator_fields].get('description', '')
kwargs = {k: v for k, v in kwargs.items() if v} # Removing keys with empty strings
try:
indicator_type = demisto_indicator_type.lower().replace("-", "")
indicator = Indicator(pattern=SCOs[indicator_type].format(value),
pattern_type='stix',
**kwargs)
indicators.append(indicator)
except KeyError:
try:
indicator_type = demisto_indicator_type.lower()
if indicator_type == 'cve':
kwargs["external_references"] = [ExternalReference(source_name="cve", external_id=value)]
elif indicator_type == "attack pattern":
try:
mitreid = all_args[indicator_fields].get('mitreid', '')
if mitreid:
kwargs["external_references"] = [ExternalReference(source_name="mitre", external_id=mitreid)]
except KeyError:
pass
indicator = SDOs[indicator_type](
name=value,
**kwargs
)
indicators.append(indicator)
except (KeyError, TypeError) as e:
demisto.info(
"Indicator type: {}, with the value: {} is not STIX compatible".format(demisto_indicator_type, value))
demisto.info("Export failure excpetion: {}".format(e))
continue
if len(indicators) > 1:
bundle = Bundle(indicators)
context = {
'StixExportedIndicators(val.pattern && val.pattern == obj.pattern)': json.loads(str(bundle))
}
res = (CommandResults(readable_output="",
outputs=context,
raw_response=str(bundle)))
elif len(indicators) == 1:
context = {
'StixExportedIndicators(val.pattern && val.pattern == obj.pattern)': json.loads(str(indicators[0]))
}
res = (CommandResults(readable_output="",
outputs=context,
raw_response=str(indicators[0])))
else:
context = {
'StixExportedIndicators': {}
}
res = CommandResults(readable_output="",
outputs=context,
raw_response={})
return_results(res)
|
def main():
SCOs: dict[str, str] = {
"file md5": "[file:hashes.md5 ='{}']",
"file sha1": "[file:hashes.sha1 = '{}']",
"file sha256": "[file:hashes.sha256 = '{}']",
"ssdeep": "[file:hashes.ssdeep = '']",
"ip": "[ipv4-addr:value = '{}']",
"cidr": "[ipv4-addr:value = '{}']",
"ipv6": "[ipv6-addr:value = '{}']",
"ipv6cidr": "[ipv6-addr:value = '{}']",
"url": "[url:value = '{}']",
"email": "[email-message:sender_ref.value = '{}']",
"username": "[user-account:account_login = '{}']",
"domain": "[domain-name:value = '{}']",
"hostname": "[domain-name:value = '{}']",
"registry key": "[windows-registry-key:key = '{}']"
}
SDOs: dict[str, Callable] = {
"malware": Malware,
"attack pattern": AttackPattern,
"campaign": Campaign,
"infrastructure": Infrastructure,
"tool": Tool,
"intrusion set": IntrusionSet,
"report": Report,
"threat actor": ThreatActor,
"cve": Vulnerability,
"course of action": CourseOfAction
}
user_args = demisto.args().get('indicators', 'Unknown')
doubleBackslash = demisto.args().get('doubleBackslash', True)
all_args = {}
if isinstance(user_args, dict):
all_args = json.loads(json.dumps(user_args))
else:
try:
all_args = json.loads(demisto.args().get('indicators', 'Unknown'))
except: # noqa: E722
return_error('indicators argument is invalid json object')
indicators = []
for indicator_fields in all_args:
kwargs: dict[str, Any] = {"allow_custom": True}
demisto_indicator_type = all_args[indicator_fields].get('indicator_type', 'Unknown')
if doubleBackslash:
value = all_args[indicator_fields].get('value', '').replace('\\', r'\\')
else:
value = all_args[indicator_fields].get('value', '')
demisto_score = all_args[indicator_fields].get('score', '').lower()
if demisto_score in ["bad", "malicious"]:
kwargs["score"] = "High"
elif demisto_score == "suspicious":
kwargs["score"] = "Medium"
elif demisto_score in ["good", "benign"]:
kwargs["score"] = "None"
else:
kwargs["score"] = "Not Specified"
kwargs["created"] = dateparser.parse(all_args[indicator_fields].get('timestamp', ''))
kwargs["modified"] = dateparser.parse(all_args[indicator_fields].get('lastSeen', f'{kwargs["created"]}'))
kwargs["id"] = all_args[indicator_fields].get('stixid', '')
kwargs["labels"] = [demisto_indicator_type.lower()]
kwargs["description"] = all_args[indicator_fields].get('description', '')
kwargs = {k: v for k, v in kwargs.items() if v} # Removing keys with empty strings
try:
indicator_type = demisto_indicator_type.lower().replace("-", "")
indicator = Indicator(pattern=f"[{SCOs[indicator_type]} = '{value}']",
pattern_type='stix',
**kwargs)
indicators.append(indicator)
except KeyError:
try:
indicator_type = demisto_indicator_type.lower()
if indicator_type == 'cve':
kwargs["external_references"] = [ExternalReference(source_name="cve", external_id=value)]
elif indicator_type == "attack pattern":
try:
mitreid = all_args[indicator_fields].get('mitreid', '')
if mitreid:
kwargs["external_references"] = [ExternalReference(source_name="mitre", external_id=mitreid)]
except KeyError:
pass
indicator = SDOs[indicator_type](
name=value,
**kwargs
)
indicators.append(indicator)
except (KeyError, TypeError) as e:
demisto.info(
"Indicator type: {}, with the value: {} is not STIX compatible".format(demisto_indicator_type, value))
demisto.info("Export failure excpetion: {}".format(e))
continue
if len(indicators) > 1:
bundle = Bundle(indicators)
context = {
'StixExportedIndicators(val.pattern && val.pattern == obj.pattern)': json.loads(str(bundle))
}
res = (CommandResults(readable_output="",
outputs=context,
raw_response=str(bundle)))
elif len(indicators) == 1:
context = {
'StixExportedIndicators(val.pattern && val.pattern == obj.pattern)': json.loads(str(indicators[0]))
}
res = (CommandResults(readable_output="",
outputs=context,
raw_response=str(indicators[0])))
else:
context = {
'StixExportedIndicators': {}
}
res = CommandResults(readable_output="",
outputs=context,
raw_response={})
return_results(res)
|
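For reference, a small stand-alone sketch of what the SCO branch produces, using the stix2 library the script relies on; the IP value is illustrative.

from stix2 import Indicator

# Format one SCO pattern from the mapping above for a sample IPv4 value.
pattern = "[ipv4-addr:value = '{}']".format("198.51.100.7")
indicator = Indicator(pattern=pattern, pattern_type="stix", labels=["ip"])
print(indicator.serialize(pretty=True))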
27,742 |
def apply_warning_filters(
config_filters: Iterable[str], cmdline_filters: Iterable[str]
) -> None:
"""Applies pytest-configured filters to the warnings module"""
# Filters should have this precedence: mark, cmdline options, ini.
# Filters should be applied in the inverse order of precedence.
for arg in config_filters:
warnings.filterwarnings(*parse_warning_filter(arg, escape=False))
for arg in cmdline_filters:
warnings.filterwarnings(*parse_warning_filter(arg, escape=True))
|
def apply_warning_filters(
config_filters: Iterable[str], cmdline_filters: Iterable[str]
) -> None:
"""Applies pytest-configured filters to the warnings module"""
# Filters should have this precedence: mark, cmdline options, config.
# Filters should be applied in the inverse order of precedence.
for arg in config_filters:
warnings.filterwarnings(*parse_warning_filter(arg, escape=False))
for arg in cmdline_filters:
warnings.filterwarnings(*parse_warning_filter(arg, escape=True))
|
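The precedence comment above works because warnings.filterwarnings prepends each new filter, so filters applied later match first; a small stand-alone sketch of that behaviour (the categories are just examples):

import warnings

with warnings.catch_warnings():
    # Applied first, like config/ini filters (lowest precedence)...
    warnings.filterwarnings("error", category=UserWarning)
    # ...then cmdline-style filters, which end up at the front of the list and win.
    warnings.filterwarnings("ignore", category=UserWarning)
    warnings.warn("silenced", UserWarning)  # ignored rather than raised
    print("the later (cmdline-style) filter took precedence")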
5,187 |
def _unpack_to_numpy(x):
"""Internal helper to extract data from e.g. pandas and xarray objects."""
if isinstance(x, np.ndarray):
# If numpy, return directly
return x
if hasattr(x, '__array__'):
xtmp = np.array(x)
if isinstance(xtmp, np.ndarray) and xtmp.ndim > 0:
            # if ndim==0 then it's a singleton, and should
            # stay as such (despite having an __array__)
return xtmp
if hasattr(x, 'to_numpy'):
        # Assume that any function to_numpy() does actually return a numpy array
return x.to_numpy()
if hasattr(x, 'values'):
xtmp = x.values
# For example a dict has a 'values' attribute, but it is not a property
# so in this case we do not want to return a function
if isinstance(xtmp, np.ndarray):
return xtmp
return x
|
def _unpack_to_numpy(x):
"""Internal helper to extract data from e.g. pandas and xarray objects."""
if isinstance(x, np.ndarray):
# If numpy, return directly
return x
if hasattr(x, '__array__'):
xtmp = np.asarray(x)
if isinstance(xtmp, np.ndarray) and xtmp.ndim > 0:
            # if ndim==0 then it's a singleton, and should
            # stay as such (despite having an __array__)
return xtmp
if hasattr(x, 'to_numpy'):
        # Assume that any function to_numpy() does actually return a numpy array
return x.to_numpy()
if hasattr(x, 'values'):
xtmp = x.values
# For example a dict has a 'values' attribute, but it is not a property
# so in this case we do not want to return a function
if isinstance(xtmp, np.ndarray):
return xtmp
return x
|
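A few calls that exercise each branch above; pandas is assumed to be installed for the __array__/to_numpy() case.

import numpy as np
import pandas as pd  # assumption: pandas is available

series = pd.Series([1.0, 2.0, 3.0])
print(type(_unpack_to_numpy(series)))      # <class 'numpy.ndarray'>

scalar = np.float64(3.14)                  # 0-d __array__: deliberately left as a scalar
print(_unpack_to_numpy(scalar) is scalar)  # True

d = {"a": 1}
print(_unpack_to_numpy(d) is d)            # True: dict.values is a method, not data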
46,752 |
def make_filename_template(schema, **kwargs):
"""Create codeblocks containing example filename patterns for a given
datatype.
Parameters
----------
schema : dict
The schema object, which is a dictionary with nested dictionaries and
lists stored within it.
kwargs : dict
Keyword arguments used to filter the schema.
Example kwargs that may be used include: "suffixes", "datatypes",
"extensions".
Returns
-------
codeblock : str
A multiline string containing the filename templates for file types
in the schema, after filtering.
"""
schema = filter_schema(schema, **kwargs)
entities = list(schema["entities"].keys())
paragraph = ""
# Parent folders
paragraph += "{}-<{}>/\n\t[{}-<{}>/]\n".format(
schema["entities"]["subject"]["entity"],
schema["entities"]["subject"]["format"],
schema["entities"]["session"]["entity"],
schema["entities"]["session"]["format"],
)
for datatype in schema["datatypes"].keys():
paragraph += "\t\t{}/\n".format(datatype)
# Unique filename patterns
for group in schema["datatypes"][datatype]:
string = "\t\t\t"
for ent in entities:
ent_format = "{}-<{}>".format(
schema["entities"][ent]["entity"],
schema["entities"][ent].get("format", "label")
)
if ent in group["entities"]:
if group["entities"][ent] == "required":
if len(string.strip()):
string += "_" + ent_format
else:
# Only the first entity doesn't need an underscore
string += ent_format
else:
if len(string.strip()):
string += "[_" + ent_format + "]"
else:
# Only the first entity doesn't need an underscore
string += "[" + ent_format + "]"
# In cases of large numbers of suffixes,
# we use the "suffix" variable and expect a table later in the spec
if len(group["suffixes"]) > 5:
suffix = "_<suffix>"
string += suffix
strings = [string]
else:
strings = [
string + "_" + suffix for suffix in group["suffixes"]
]
# Add extensions
full_strings = []
extensions = group["extensions"]
extensions = [
ext if ext != "*" else ".<extension>" for ext in extensions
]
extensions = utils.combine_extensions(extensions)
if len(extensions) > 5:
# Combine exts when there are many, but keep JSON separate
if ".json" in extensions:
extensions = [".<extension>", ".json"]
else:
extensions = [".<extension>"]
for extension in extensions:
for string in strings:
new_string = string + extension
full_strings.append(new_string)
full_strings = sorted(full_strings)
if full_strings:
paragraph += "\n".join(full_strings) + "\n"
paragraph = paragraph.rstrip()
codeblock = "Template:\n```Text\n" + paragraph + "\n```"
codeblock = codeblock.expandtabs(4)
return codeblock
|
def make_filename_template(schema, **kwargs):
"""Create codeblocks containing example filename patterns for a given
datatype.
Parameters
----------
schema : dict
The schema object, which is a dictionary with nested dictionaries and
lists stored within it.
kwargs : dict
Keyword arguments used to filter the schema.
Example kwargs that may be used include: "suffixes", "datatypes",
"extensions".
Returns
-------
codeblock : str
A multiline string containing the filename templates for file types
in the schema, after filtering.
"""
schema = filter_schema(schema, **kwargs)
entities = list(schema["entities"].keys())
paragraph = ""
# Parent folders
paragraph += "{}-<{}>/\n\t[{}-<{}>/]\n".format(
schema["entities"]["subject"]["entity"],
schema["entities"]["subject"]["format"],
schema["entities"]["session"]["entity"],
schema["entities"]["session"]["format"],
)
for datatype in schema["datatypes"].keys():
paragraph += "\t\t{}/\n".format(datatype)
# Unique filename patterns
for group in schema["datatypes"][datatype]:
string = "\t\t\t"
for ent in entities:
ent_format = "{}-<{}>".format(
schema["entities"][ent]["entity"],
schema["entities"][ent].get("format", "label")
)
if ent in group["entities"]:
if group["entities"][ent] == "required":
if len(string.strip()):
string += "_" + ent_format
else:
# Only the first entity doesn't need an underscore
string += ent_format
else:
if len(string.strip()):
string += "[_" + ent_format + "]"
else:
# Only the first entity doesn't need an underscore
string += "[" + ent_format + "]"
# In cases of large numbers of suffixes,
# we use the "suffix" variable and expect a table later in the spec
if len(group["suffixes"]) > 5:
suffix = "_<suffix>"
string += suffix
strings = [string]
else:
strings = [
string + "_" + suffix for suffix in group["suffixes"]
]
# Add extensions
full_strings = []
extensions = group["extensions"]
extensions = [
ext if ext != "*" else ".<extension>" for ext in extensions
]
extensions = utils.combine_extensions(extensions)
if len(extensions) > 5:
# Combine exts when there are many, but keep JSON separate
if ".json" in extensions:
extensions = [".<extension>", ".json"]
else:
extensions = [".<extension>"]
for extension in extensions:
for string in strings:
new_string = string + extension
full_strings.append(new_string)
full_strings = sorted(full_strings)
if full_strings:
paragraph += "\n".join(full_strings) + "\n"
paragraph = paragraph.rstrip()
codeblock = "Template:\n```Text\n" + paragraph + "\n```"
codeblock = codeblock.expandtabs(4)
return codeblock
|
40,182 |
def test_extra_covariates_transfer():
adata = synthetic_iid()
adata.obs["cont1"] = np.random.normal(size=(adata.shape[0],))
adata.obs["cont2"] = np.random.normal(size=(adata.shape[0],))
adata.obs["cat1"] = np.random.randint(0, 5, size=(adata.shape[0],))
adata.obs["cat2"] = np.random.randint(0, 5, size=(adata.shape[0],))
setup_anndata(
adata,
batch_key="batch",
labels_key="labels",
protein_expression_obsm_key="protein_expression",
protein_names_uns_key="protein_names",
continuous_covariate_keys=["cont1", "cont2"],
categorical_covariate_keys=["cat1", "cat2"],
)
bdata = synthetic_iid()
bdata.obs["cont1"] = np.random.normal(size=(bdata.shape[0],))
bdata.obs["cont2"] = np.random.normal(size=(bdata.shape[0],))
bdata.obs["cat1"] = 0
bdata.obs["cat2"] = 1
transfer_anndata_setup(adata_source=adata, adata_target=bdata)
# give it a new category
del bdata.uns["_scvi"]
bdata.obs["cat1"] = 6
transfer_anndata_setup(
adata_source=adata, adata_target=bdata, extend_categories=True
)
assert bdata.uns["_scvi"]["extra_categoricals"]["mappings"][-1] == 6
|
def test_extra_covariates_transfer():
adata = synthetic_iid()
adata.obs["cont1"] = np.random.normal(size=(adata.shape[0],))
adata.obs["cont2"] = np.random.normal(size=(adata.shape[0],))
adata.obs["cat1"] = np.random.randint(0, 5, size=(adata.shape[0],))
adata.obs["cat2"] = np.random.randint(0, 5, size=(adata.shape[0],))
setup_anndata(
adata,
batch_key="batch",
labels_key="labels",
protein_expression_obsm_key="protein_expression",
protein_names_uns_key="protein_names",
continuous_covariate_keys=["cont1", "cont2"],
categorical_covariate_keys=["cat1", "cat2"],
)
bdata = synthetic_iid()
bdata.obs["cont1"] = np.random.normal(size=(bdata.shape[0],))
bdata.obs["cont2"] = np.random.normal(size=(bdata.shape[0],))
bdata.obs["cat1"] = 0
bdata.obs["cat2"] = 1
transfer_anndata_setup(adata_source=adata, adata_target=bdata)
# give it a new category
del bdata.uns["_scvi"]
bdata.obs["cat1"] = 6
transfer_anndata_setup(
adata_source=adata, adata_target=bdata, extend_categories=True
)
assert bdata.uns["_scvi"]["extra_categoricals"]["mappings"]["cat1"][-1] == 6
|
32,014 |
def search_ip_command(ip, reliability, create_relationships):
indicator_type = 'IP'
ip_list = argToList(ip)
command_results = []
relationships = []
for ip_address in ip_list:
ip_type = 'ipv6_address' if is_ipv6_valid(ip_address) else 'ipv4_address'
raw_res = search_indicator(ip_type, ip_address)
indicator = raw_res.get('indicator')
if indicator:
raw_tags = raw_res.get('tags')
score = calculate_dbot_score(indicator, indicator_type)
dbot_score = Common.DBotScore(
indicator=ip_address,
indicator_type=DBotScoreType.IP,
integration_name=VENDOR_NAME,
score=score,
reliability=reliability
)
if create_relationships:
relationships = create_relationships_list(entity_a=ip_address, entity_a_type=indicator_type, tags=raw_tags,
reliability=reliability)
ip = Common.IP(
ip=ip_address,
dbot_score=dbot_score,
malware_family=get_tags_for_tags_and_malware_family_fields(raw_tags, True),
tags=get_tags_for_tags_and_malware_family_fields(raw_tags),
relationships=relationships
)
autofocus_ip_output = parse_indicator_response(indicator, raw_tags, indicator_type)
# create human readable markdown for ip
tags = autofocus_ip_output.get('Tags')
table_name = f'{VENDOR_NAME} {indicator_type} reputation for: {ip_address}'
if tags:
indicators_data = autofocus_ip_output.copy()
del indicators_data['Tags']
md = tableToMarkdown(table_name, indicators_data)
md += tableToMarkdown('Indicator Tags:', tags)
else:
md = tableToMarkdown(table_name, autofocus_ip_output)
else:
dbot_score = Common.DBotScore(
indicator=ip_address,
indicator_type=DBotScoreType.IP,
integration_name=VENDOR_NAME,
score=0,
reliability=reliability
)
ip = Common.IP(
ip=ip_address,
dbot_score=dbot_score
)
md = f'### The IP indicator: {ip_address} was not found in AutoFocus'
autofocus_ip_output = {'IndicatorValue': ip_address}
command_results.append(CommandResults(
outputs_prefix='AutoFocus.IP',
outputs_key_field='IndicatorValue',
outputs=autofocus_ip_output,
readable_output=md,
raw_response=raw_res,
indicator=ip,
relationships=relationships
))
return command_results
|
def search_ip_command(ip, reliability, create_relationships):
indicator_type = 'IP'
ip_list = argToList(ip)
command_results = []
relationships = []
for ip_address in ip_list:
ip_type = 'ipv6_address' if is_ipv6_valid(ip_address) else 'ipv4_address'
raw_res = search_indicator(ip_type, ip_address)
indicator = raw_res.get('indicator')
if indicator:
raw_tags = raw_res.get('tags')
score = calculate_dbot_score(indicator, indicator_type)
dbot_score = Common.DBotScore(
indicator=ip_address,
indicator_type=DBotScoreType.IP,
integration_name=VENDOR_NAME,
score=score,
reliability=reliability
)
if create_relationships:
relationships = create_relationships_list(entity_a=ip_address, entity_a_type=indicator_type, tags=raw_tags,
reliability=reliability)
ip = Common.IP(
ip=ip_address,
dbot_score=dbot_score,
malware_family=get_tags_for_tags_and_malware_family_fields(raw_tags, True),
tags=get_tags_for_tags_and_malware_family_fields(raw_tags),
relationships=relationships
)
autofocus_ip_output = parse_indicator_response(indicator, raw_tags, indicator_type)
# create human readable markdown for ip
tags = autofocus_ip_output.get('Tags')
table_name = f'{VENDOR_NAME} {indicator_type} reputation for: {ip_address}'
if tags:
indicators_data = autofocus_ip_output.copy()
del indicators_data['Tags']
md = tableToMarkdown(table_name, indicators_data)
md += tableToMarkdown('Indicator Tags:', tags)
else:
md = tableToMarkdown(table_name, autofocus_ip_output)
else:
dbot_score = Common.DBotScore(
indicator=ip_address,
indicator_type=DBotScoreType.IP,
integration_name=VENDOR_NAME,
score=0,
reliability=reliability,
)
ip = Common.IP(
ip=ip_address,
dbot_score=dbot_score
)
md = f'### The IP indicator: {ip_address} was not found in AutoFocus'
autofocus_ip_output = {'IndicatorValue': ip_address}
command_results.append(CommandResults(
outputs_prefix='AutoFocus.IP',
outputs_key_field='IndicatorValue',
outputs=autofocus_ip_output,
readable_output=md,
raw_response=raw_res,
indicator=ip,
relationships=relationships
))
return command_results
|
45,786 |
def rgb_to_grayscale(image: torch.Tensor,
rgb_weights: torch.Tensor = torch.tensor([0.299, 0.587, 0.114])) -> torch.Tensor:
r"""Convert a RGB image to grayscale version of image.
The image data is assumed to be in the range of (0, 1).
Args:
image (torch.Tensor): RGB image to be converted to grayscale with shape :math:`(*,3,H,W)`.
rgb_weights (torch.Tensor): Weights that will be applied on each channel (RGB).
The sum of the weights must add up to one.
Returns:
torch.Tensor: grayscale version of the image with shape :math:`(*,1,H,W)`.
Example:
>>> input = torch.rand(2, 3, 4, 5)
>>> gray = rgb_to_grayscale(input) # 2x1x4x5
"""
if not isinstance(image, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(image)))
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError("Input size must have a shape of (*, 3, H, W). Got {}"
.format(image.shape))
if not isinstance(rgb_weights, torch.Tensor):
raise TypeError("rgb_weights is not a torch.Tensor. Got {}".format(
type(rgb_weights)))
if len(rgb_weights.shape) != 1 or rgb_weights.shape[0] != 3:
raise ValueError("rgb_weights must have a shape of (3). Got {}"
.format(rgb_weights.shape))
if not torch.isclose(torch.sum(rgb_weights), torch.tensor(1.0)):
raise ValueError("The sum of rgb_weights must be 1. Got {}"
.format(torch.sum(rgb_weights)))
r: torch.Tensor = image[..., 0:1, :, :]
g: torch.Tensor = image[..., 1:2, :, :]
b: torch.Tensor = image[..., 2:3, :, :]
gray: torch.Tensor = rgb_weights[0] * r + rgb_weights[1] * g + rgb_weights[2] * b
return gray
|
def rgb_to_grayscale(image: torch.Tensor,
rgb_weights: torch.Tensor = torch.tensor([0.299, 0.587, 0.114])) -> torch.Tensor:
r"""Convert a RGB image to grayscale version of image.
The image data is assumed to be in the range of (0, 1).
Args:
image (torch.Tensor): RGB image to be converted to grayscale with shape :math:`(*,3,H,W)`.
rgb_weights (torch.Tensor): Weights that will be applied on each channel (RGB).
The sum of the weights must add up to one.
Returns:
torch.Tensor: grayscale version of the image with shape :math:`(*,1,H,W)`.
Example:
>>> input = torch.rand(2, 3, 4, 5)
>>> gray = rgb_to_grayscale(input) # 2x1x4x5
"""
if not isinstance(image, torch.Tensor):
raise TypeError("Input type is not a torch.Tensor. Got {}".format(
type(image)))
if len(image.shape) < 3 or image.shape[-3] != 3:
raise ValueError("Input size must have a shape of (*, 3, H, W). Got {}"
.format(image.shape))
if not isinstance(rgb_weights, torch.Tensor):
raise TypeError("rgb_weights is not a torch.Tensor. Got {}".format(
type(rgb_weights)))
if len(rgb_weights.shape) != 1 or rgb_weights.shape[-3] != 3:
raise ValueError("rgb_weights must have a shape of (3). Got {}"
.format(rgb_weights.shape))
if not torch.isclose(torch.sum(rgb_weights), torch.tensor(1.0)):
raise ValueError("The sum of rgb_weights must be 1. Got {}"
.format(torch.sum(rgb_weights)))
r: torch.Tensor = image[..., 0:1, :, :]
g: torch.Tensor = image[..., 1:2, :, :]
b: torch.Tensor = image[..., 2:3, :, :]
gray: torch.Tensor = rgb_weights[0] * r + rgb_weights[1] * g + rgb_weights[2] * b
return gray
|
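The conversion itself is just a weighted sum over the channel dimension; a stand-alone sketch of that arithmetic with the same BT.601 weights (independent of either copy above):

import torch

image = torch.rand(2, 3, 4, 5)
weights = torch.tensor([0.299, 0.587, 0.114])
# w_r*R + w_g*G + w_b*B, keeping a singleton channel dimension.
gray = (image * weights.view(1, 3, 1, 1)).sum(dim=-3, keepdim=True)
print(gray.shape)  # torch.Size([2, 1, 4, 5])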
28,336 |
def dond(
*params: Union[AbstractSweep, Union[ParamMeasT, Sequence[ParamMeasT]]],
write_period: Optional[float] = None,
measurement_name: str = "",
exp: Optional[Experiment] = None,
enter_actions: ActionsT = (),
exit_actions: ActionsT = (),
do_plot: Optional[bool] = None,
show_progress: Optional[bool] = None,
use_threads: Optional[bool] = None,
additional_setpoints: Sequence[ParameterBase] = tuple(),
log_info: Optional[str] = None,
break_condition: Optional[BreakConditionT] = None,
) -> Union[AxesTupleListWithDataSet, MultiAxesTupleListWithDataSet]:
"""
    Perform an n-dimensional scan from the slowest (first) to the fastest (last), to
measure m measurement parameters. The dimensions should be specified
as sweep objects, and after them the parameters to measure should be passed.
Args:
params: Instances of n sweep classes and m measurement parameters,
e.g. if linear sweep is considered:
.. code-block::
LinSweep(param_set_1, start_1, stop_1, num_points_1, delay_1), ...,
LinSweep(param_set_n, start_n, stop_n, num_points_n, delay_n),
param_meas_1, param_meas_2, ..., param_meas_m
If multiple DataSets creation is needed, measurement parameters should
be grouped, so one dataset will be created for each group. e.g.:
.. code-block::
LinSweep(param_set_1, start_1, stop_1, num_points_1, delay_1), ...,
LinSweep(param_set_n, start_n, stop_n, num_points_n, delay_n),
[param_meas_1, param_meas_2], ..., [param_meas_m]
write_period: The time after which the data is actually written to the
database.
measurement_name: Name of the measurement. This will be passed down to
the dataset produced by the measurement. If not given, a default
value of 'results' is used for the dataset.
exp: The experiment to use for this measurement.
enter_actions: A list of functions taking no arguments that will be
called before the measurements start.
exit_actions: A list of functions taking no arguments that will be
called after the measurements ends.
do_plot: should png and pdf versions of the images be saved and plots
            be shown after the run. If None the setting will be read from
``qcodesrc.json``
show_progress: should a progress bar be displayed during the
measurement. If None the setting will be read from ``qcodesrc.json``
use_threads: If True, measurements from each instrument will be done on
separate threads. If you are measuring from several instruments
this may give a significant speedup.
additional_setpoints: A list of setpoint parameters to be registered in
the measurement but not scanned/swept-over.
log_info: Message that is logged during the measurement. If None a default
message is used.
break_condition: Callable that takes no arguments. If returned True,
measurement is interrupted.
Returns:
A tuple of QCoDeS DataSet, Matplotlib axis, Matplotlib colorbar. If
more than one group of measurement parameters is supplied, the output
will be a tuple of tuple(QCoDeS DataSet), tuple(Matplotlib axis),
tuple(Matplotlib colorbar), in which each element of each sub-tuple
belongs to one group, and the order of elements is the order of
the supplied groups.
"""
if do_plot is None:
do_plot = config.dataset.dond_plot
if show_progress is None:
show_progress = config.dataset.dond_show_progress
sweep_instances, params_meas = _parse_dond_arguments(*params)
nested_setpoints = _make_nested_setpoints(sweep_instances)
all_setpoint_params = tuple(sweep.param for sweep in sweep_instances) + tuple(
s for s in additional_setpoints
)
(
measured_all,
grouped_parameters,
measured_parameters,
) = _extract_paramters_by_type_and_group(measurement_name, params_meas)
LOG.info(
"Starting a doNd with scan with\n setpoints: %s,\n measuring: %s",
all_setpoint_params,
measured_all,
)
LOG.debug(
"Measured parameters have been grouped into:\n " "%s",
{name: group["params"] for name, group in grouped_parameters.items()},
)
try:
loop_shape = tuple(sweep.num_points for sweep in sweep_instances) + tuple(
1 for _ in additional_setpoints
)
shapes: Shapes = detect_shape_of_measurement(measured_parameters, loop_shape)
LOG.debug("Detected shapes to be %s", shapes)
except TypeError:
LOG.exception(
f"Could not detect shape of {measured_parameters} "
f"falling back to unknown shape."
)
shapes = None
meas_list = _create_measurements(
all_setpoint_params,
enter_actions,
exit_actions,
exp,
grouped_parameters,
shapes,
write_period,
log_info,
)
post_delays: List[float] = []
params_set: List[ParameterBase] = []
post_actions: List[ActionsT] = []
for sweep in sweep_instances:
post_delays.append(sweep.delay)
params_set.append(sweep.param)
post_actions.append(sweep.post_actions)
datasets = []
plots_axes = []
plots_colorbar = []
if use_threads is None:
use_threads = config.dataset.use_threads
params_meas_caller = (
ThreadPoolParamsCaller(*measured_all)
if use_threads
else SequentialParamsCaller(*measured_all)
)
try:
with _catch_interrupts() as interrupted, ExitStack() as stack, params_meas_caller as call_params_meas:
datasavers = [stack.enter_context(measure.run()) for measure in meas_list]
additional_setpoints_data = process_params_meas(additional_setpoints)
previous_setpoints = np.empty(len(sweep_instances))
for setpoints in tqdm(nested_setpoints, disable=not show_progress):
active_actions, delays = _select_active_actions_delays(
post_actions,
post_delays,
setpoints,
previous_setpoints,
)
previous_setpoints = setpoints
param_set_list = []
param_value_action_delay = zip(
params_set,
setpoints,
active_actions,
delays,
)
for setpoint_param, setpoint, action, delay in param_value_action_delay:
_conditional_parameter_set(setpoint_param, setpoint)
param_set_list.append((setpoint_param, setpoint))
for act in action:
act()
time.sleep(delay)
meas_value_pair = call_params_meas()
for group in grouped_parameters.values():
group["measured_params"] = []
for measured in meas_value_pair:
if measured[0] in group["params"]:
group["measured_params"].append(measured)
for ind, datasaver in enumerate(datasavers):
datasaver.add_result(
*param_set_list,
*grouped_parameters[f"group_{ind}"]["measured_params"],
*additional_setpoints_data,
)
if callable(break_condition):
if break_condition():
raise BreakConditionInterrupt("Break condition was met.")
finally:
for datasaver in datasavers:
ds, plot_axis, plot_color = _handle_plotting(
datasaver.dataset, do_plot, interrupted()
)
datasets.append(ds)
plots_axes.append(plot_axis)
plots_colorbar.append(plot_color)
if len(grouped_parameters) == 1:
return datasets[0], plots_axes[0], plots_colorbar[0]
else:
return tuple(datasets), tuple(plots_axes), tuple(plots_colorbar)
|
def dond(
*params: Union[AbstractSweep, Union[ParamMeasT, Sequence[ParamMeasT]]],
write_period: Optional[float] = None,
measurement_name: str = "",
exp: Optional[Experiment] = None,
enter_actions: ActionsT = (),
exit_actions: ActionsT = (),
do_plot: Optional[bool] = None,
show_progress: Optional[bool] = None,
use_threads: Optional[bool] = None,
additional_setpoints: Sequence[ParameterBase] = tuple(),
log_info: Optional[str] = None,
break_condition: Optional[BreakConditionT] = None,
) -> Union[AxesTupleListWithDataSet, MultiAxesTupleListWithDataSet]:
"""
    Perform an n-dimensional scan from the slowest (first) to the fastest (last), to
measure m measurement parameters. The dimensions should be specified
as sweep objects, and after them the parameters to measure should be passed.
Args:
params: Instances of n sweep classes and m measurement parameters,
e.g. if linear sweep is considered:
.. code-block::
LinSweep(param_set_1, start_1, stop_1, num_points_1, delay_1), ...,
LinSweep(param_set_n, start_n, stop_n, num_points_n, delay_n),
param_meas_1, param_meas_2, ..., param_meas_m
If multiple DataSets creation is needed, measurement parameters should
be grouped, so one dataset will be created for each group. e.g.:
.. code-block::
LinSweep(param_set_1, start_1, stop_1, num_points_1, delay_1), ...,
LinSweep(param_set_n, start_n, stop_n, num_points_n, delay_n),
[param_meas_1, param_meas_2], ..., [param_meas_m]
write_period: The time after which the data is actually written to the
database.
measurement_name: Name of the measurement. This will be passed down to
the dataset produced by the measurement. If not given, a default
value of 'results' is used for the dataset.
exp: The experiment to use for this measurement.
enter_actions: A list of functions taking no arguments that will be
called before the measurements start.
exit_actions: A list of functions taking no arguments that will be
called after the measurements ends.
do_plot: should png and pdf versions of the images be saved and plots
            be shown after the run. If None the setting will be read from
``qcodesrc.json``
show_progress: should a progress bar be displayed during the
measurement. If None the setting will be read from ``qcodesrc.json``
use_threads: If True, measurements from each instrument will be done on
separate threads. If you are measuring from several instruments
this may give a significant speedup.
additional_setpoints: A list of setpoint parameters to be registered in
the measurement but not scanned/swept-over.
log_info: Message that is logged during the measurement. If None a default
message is used.
break_condition: Callable that takes no arguments. If returned True,
measurement is interrupted.
Returns:
A tuple of QCoDeS DataSet, Matplotlib axis, Matplotlib colorbar. If
more than one group of measurement parameters is supplied, the output
will be a tuple of tuple(QCoDeS DataSet), tuple(Matplotlib axis),
tuple(Matplotlib colorbar), in which each element of each sub-tuple
belongs to one group, and the order of elements is the order of
the supplied groups.
"""
if do_plot is None:
do_plot = config.dataset.dond_plot
if show_progress is None:
show_progress = config.dataset.dond_show_progress
sweep_instances, params_meas = _parse_dond_arguments(*params)
nested_setpoints = _make_nested_setpoints(sweep_instances)
all_setpoint_params = tuple(sweep.param for sweep in sweep_instances) + tuple(
s for s in additional_setpoints
)
(
measured_all,
grouped_parameters,
measured_parameters,
) = _extract_paramters_by_type_and_group(measurement_name, params_meas)
LOG.info(
"Starting a doNd with scan with\n setpoints: %s,\n measuring: %s",
all_setpoint_params,
measured_all,
)
LOG.debug(
"Measured parameters have been grouped into:\n " "%s",
"Measured parameters have been grouped into:\n%s",
)
try:
loop_shape = tuple(sweep.num_points for sweep in sweep_instances) + tuple(
1 for _ in additional_setpoints
)
shapes: Shapes = detect_shape_of_measurement(measured_parameters, loop_shape)
LOG.debug("Detected shapes to be %s", shapes)
except TypeError:
LOG.exception(
f"Could not detect shape of {measured_parameters} "
f"falling back to unknown shape."
)
shapes = None
meas_list = _create_measurements(
all_setpoint_params,
enter_actions,
exit_actions,
exp,
grouped_parameters,
shapes,
write_period,
log_info,
)
post_delays: List[float] = []
params_set: List[ParameterBase] = []
post_actions: List[ActionsT] = []
for sweep in sweep_instances:
post_delays.append(sweep.delay)
params_set.append(sweep.param)
post_actions.append(sweep.post_actions)
datasets = []
plots_axes = []
plots_colorbar = []
if use_threads is None:
use_threads = config.dataset.use_threads
params_meas_caller = (
ThreadPoolParamsCaller(*measured_all)
if use_threads
else SequentialParamsCaller(*measured_all)
)
try:
with _catch_interrupts() as interrupted, ExitStack() as stack, params_meas_caller as call_params_meas:
datasavers = [stack.enter_context(measure.run()) for measure in meas_list]
additional_setpoints_data = process_params_meas(additional_setpoints)
previous_setpoints = np.empty(len(sweep_instances))
for setpoints in tqdm(nested_setpoints, disable=not show_progress):
active_actions, delays = _select_active_actions_delays(
post_actions,
post_delays,
setpoints,
previous_setpoints,
)
previous_setpoints = setpoints
param_set_list = []
param_value_action_delay = zip(
params_set,
setpoints,
active_actions,
delays,
)
for setpoint_param, setpoint, action, delay in param_value_action_delay:
_conditional_parameter_set(setpoint_param, setpoint)
param_set_list.append((setpoint_param, setpoint))
for act in action:
act()
time.sleep(delay)
meas_value_pair = call_params_meas()
for group in grouped_parameters.values():
group["measured_params"] = []
for measured in meas_value_pair:
if measured[0] in group["params"]:
group["measured_params"].append(measured)
for ind, datasaver in enumerate(datasavers):
datasaver.add_result(
*param_set_list,
*grouped_parameters[f"group_{ind}"]["measured_params"],
*additional_setpoints_data,
)
if callable(break_condition):
if break_condition():
raise BreakConditionInterrupt("Break condition was met.")
finally:
for datasaver in datasavers:
ds, plot_axis, plot_color = _handle_plotting(
datasaver.dataset, do_plot, interrupted()
)
datasets.append(ds)
plots_axes.append(plot_axis)
plots_colorbar.append(plot_color)
if len(grouped_parameters) == 1:
return datasets[0], plots_axes[0], plots_colorbar[0]
else:
return tuple(datasets), tuple(plots_axes), tuple(plots_colorbar)
|
36,911 |
def _filter_missing(repo, paths):
repo_tree = RepoTree(repo, stream=True)
for path in paths:
metadata = repo_tree.metadata(path)
if metadata.is_output or metadata.part_of_output:
out = metadata.outs[0]
if out.status()[str(out)] == "not in cache":
yield path
|
def _filter_missing(repo, paths):
repo_tree = RepoTree(repo, stream=True)
for path in paths:
metadata = repo_tree.metadata(path)
if metadata.is_dvc:
out = metadata.outs[0]
if out.status()[str(out)] == "not in cache":
yield path
|
20,528 |
def init_sct(log_level=1, update=False):
"""
Initialize the sct for typical terminal usage
:param log_level: int: 0: warning, 1: info, 2: debug.
:param update: Bool: If True, only update logging log level. Otherwise, set logging + Sentry.
:return:
"""
dict_log_levels = {0: 'WARNING', 1: 'INFO', 2: 'DEBUG'}
def _format_wrap(old_format):
def _format(record):
res = old_format(record)
if record.levelno >= logging.ERROR:
res = "\x1B[31;1m{}\x1B[0m".format(res)
elif record.levelno >= logging.WARNING:
res = "\x1B[33m{}\x1B[0m".format(res)
else:
pass
return res
return _format
# Set logging level for logger and increase level for global config (to avoid logging when calling child functions)
logger.setLevel(getattr(logging, dict_log_levels[log_level]))
logging.root.setLevel(getattr(logging, dict_log_levels[log_level]))
if not update:
# Initialize logging
hdlr = logging.StreamHandler(sys.stdout)
fmt = logging.Formatter()
fmt.format = _format_wrap(fmt.format)
hdlr.setFormatter(fmt)
logging.root.addHandler(hdlr)
# Sentry config
init_error_client()
if os.environ.get("SCT_TIMER", None) is not None:
add_elapsed_time_counter()
# Display SCT version
logger.info('\n--\nSpinal Cord Toolbox ({})\n'.format(__version__))
# Display command (Only if called from CLI: check for .py in first arg)
# Use next(iter()) to not fail on empty list (vs. sys.argv[0])
if '.py' in next(iter(sys.argv), None):
script = os.path.basename(sys.argv[0]).strip(".py")
arguments = ' '.join(sys.argv[1:])
logger.info(f"Command run: {script} {arguments}\n"
f"--\n")
|
def init_sct(log_level=1, update=False):
"""
Initialize the sct for typical terminal usage
:param log_level: int: 0: warning, 1: info, 2: debug.
:param update: Bool: If True, only update logging log level. Otherwise, set logging + Sentry.
:return:
"""
dict_log_levels = {0: 'WARNING', 1: 'INFO', 2: 'DEBUG'}
def _format_wrap(old_format):
def _format(record):
res = old_format(record)
if record.levelno >= logging.ERROR:
res = "\x1B[31;1m{}\x1B[0m".format(res)
elif record.levelno >= logging.WARNING:
res = "\x1B[33m{}\x1B[0m".format(res)
else:
pass
return res
return _format
# Set logging level for logger and increase level for global config (to avoid logging when calling child functions)
logger.setLevel(getattr(logging, dict_log_levels[log_level]))
logging.root.setLevel(getattr(logging, dict_log_levels[log_level]))
if not update:
# Initialize logging
hdlr = logging.StreamHandler(sys.stdout)
fmt = logging.Formatter()
fmt.format = _format_wrap(fmt.format)
hdlr.setFormatter(fmt)
logging.root.addHandler(hdlr)
# Sentry config
init_error_client()
if os.environ.get("SCT_TIMER", None) is not None:
add_elapsed_time_counter()
# Display SCT version
logger.info('\n--\nSpinal Cord Toolbox ({})\n'.format(__version__))
# Display command (Only if called from CLI: check for .py in first arg)
# Use next(iter()) to not fail on empty list (vs. sys.argv[0])
if '.py' in next(iter(sys.argv), None):
script = os.path.basename(sys.argv[0]).strip(".py")
arguments = ' '.join(sys.argv[1:])
logger.info(f"{script} {arguments}\n"
f"--\n")
|
43,188 |
def make_models(num_layers, size, dtype=torch.float32, overlap_communication=True):
# Construct models with same parameters
ref_model = SimpleModel(num_layers, size).to(dtype=dtype, device='cuda')
dist_model = SimpleModel(num_layers, size).to(dtype=dtype, device='cuda')
with torch.no_grad():
for ref_param, dist_param in zip(dist_model.parameters(),
ref_model.parameters()):
dist_param.data.copy_(ref_param.data)
# Initialize reference model with data-parallelism
rank = torch.distributed.get_rank()
ref_model = torch.nn.parallel.DistributedDataParallel(
ref_model,
device_ids=[rank],
output_device=rank,
)
# Construct optimizers with same hyperparameters
optim_args = { 'lr': 1, 'betas': (0.5,0.75), 'eps': 0.1, 'weight_decay': 0.1 }
ref_optim = torch.optim.AdamW(
[
{'params': list(ref_model.parameters())[1::2], 'lr': 0.5},
{'params': list(ref_model.parameters())[0::2]},
],
**optim_args,
)
dist_optim = DistributedFusedAdam(
[
{'params': list(dist_model.parameters())[1::2], 'lr': 0.5},
{'params': list(dist_model.parameters())[0::2]},
],
overlap_grad_sync=overlap_communication,
bucket_cap_mb=71/(4*1024*1024),
**optim_args,
)
return ref_model, ref_optim, dist_model, dist_optim
|
def make_models(num_layers, size, dtype=torch.float32, overlap_communication=True):
# Construct models with same parameters
ref_model = SimpleModel(num_layers, size).to(dtype=dtype, device='cuda')
dist_model = SimpleModel(num_layers, size).to(dtype=dtype, device='cuda')
with torch.no_grad():
for ref_param, dist_param in zip(dist_model.parameters(),
ref_model.parameters()):
dist_param.copy_(ref_param)
# Initialize reference model with data-parallelism
rank = torch.distributed.get_rank()
ref_model = torch.nn.parallel.DistributedDataParallel(
ref_model,
device_ids=[rank],
output_device=rank,
)
# Construct optimizers with same hyperparameters
optim_args = { 'lr': 1, 'betas': (0.5,0.75), 'eps': 0.1, 'weight_decay': 0.1 }
ref_optim = torch.optim.AdamW(
[
{'params': list(ref_model.parameters())[1::2], 'lr': 0.5},
{'params': list(ref_model.parameters())[0::2]},
],
**optim_args,
)
dist_optim = DistributedFusedAdam(
[
{'params': list(dist_model.parameters())[1::2], 'lr': 0.5},
{'params': list(dist_model.parameters())[0::2]},
],
overlap_grad_sync=overlap_communication,
bucket_cap_mb=71/(4*1024*1024),
**optim_args,
)
return ref_model, ref_optim, dist_model, dist_optim
|
9,828 |
def remove_initiator(module, array, ini):
changed = False
if module.check_mode:
module.exit_json(changed=changed)
try:
ini_id = ini['id']
ok = array.remove_initiator(
ini_id)
if ok:
module.log(msg='Initiator {0} removed.'.format(ini_id))
changed = True
else:
module.fail_json(msg='Initiator {0} remove failed.'.format(ini_id))
except Exception:
pass
module.exit_json(changed=changed)
|
def remove_initiator(module, array, ini):
changed = False
if module.check_mode:
module.exit_json(changed=changed)
try:
ini_id = ini['id']
ok = array.remove_initiator(
ini_id)
if ok:
module.log(msg='Initiator {0} removed.'.format(ini_id))
changed = True
else:
module.fail_json(msg='Initiator {0} remove failed.'.format(ini_id))
except Exception:
module.fail_json(msg='Initiator {0} remove failed.'.format(ini_id))
module.exit_json(changed=changed)
|
27,721 |
def test_caplog_captures_for_all_stages(caplog, logging_during_setup_and_teardown):
assert not caplog.records
assert not caplog.get_records("call")
logger.info("a_call_log")
assert [x.message for x in caplog.get_records("call")] == ["a_call_log"]
assert [x.message for x in caplog.get_records("setup")] == ["a_setup_log"]
# This reaches into private API, don't use this type of thing in real tests!
assert set(caplog._item._store[catch_log_records_key].keys()) == {"setup", "call"}
|
def test_caplog_captures_for_all_stages(caplog, logging_during_setup_and_teardown):
assert not caplog.records
assert not caplog.get_records("call")
logger.info("a_call_log")
assert [x.message for x in caplog.get_records("call")] == ["a_call_log"]
assert [x.message for x in caplog.get_records("setup")] == ["a_setup_log"]
# This reaches into private API, don't use this type of thing in real tests!
assert set(caplog._item._store[catch_log_records_key]) == {"setup", "call"}
|
47,902 |
def main():
args = build_argparser()
logging.basicConfig(format="[ %(levelname)s ] %(message)s", level=logging.INFO, stream=sys.stdout)
log = logging.getLogger()
log.info("Creating Inference Engine")
ie = IECore()
if args.device == "CPU" and args.cpu_extension:
ie.add_extension(args.cpu_extension, 'CPU')
log.info("Loading model {}".format(args.model))
model_path = args.model[:-4]
net = ie.read_network(model_path + ".xml", model_path + ".bin")
if args.device == "CPU":
supported_layers = ie.query_network(net, args.device)
not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
if len(not_supported_layers) > 0:
raise RuntimeError("Following layers are not supported by the {} plugin:\n {}"
.format(args.device, ', '.join(not_supported_layers)))
if len(net.inputs) != 1:
log.error("Demo supports only models with 1 input layer")
sys.exit(1)
input_blob = next(iter(net.inputs))
input_shape = net.inputs[input_blob].shape
if len(net.outputs) != 1:
log.error("Demo supports only models with 1 output layer")
sys.exit(1)
output_blob = next(iter(net.outputs))
log.info("Loading model to the plugin")
exec_net = ie.load_network(network=net, device_name=args.device)
log.info("Preparing input")
labels = []
if args.labels:
with open(args.labels, "r") as file:
labels = [l.rstrip() for l in file.readlines()]
batch_size, channels, _, length = input_shape
audio = AudioSource(args.input, channels=channels, framerate=args.framerate)
audio.load()
hop = length - args.overlap if isinstance(args.overlap, int) else int(length * (1.0 - args.overlap))
if hop < 0:
log.error("Wrong value for '-ol/--overlap' argument - overlapping more than clip length")
sys.exit(1)
log.info("Starting inference")
outputs = []
clips = 0
infer_time = []
for idx, chunk in enumerate(audio.chunks(length, hop, num_chunks=batch_size)):
if len(chunk.shape) != len(input_shape):
chunk = np.reshape(chunk, newshape=input_shape)
infer_start_time = datetime.now()
output = exec_net.infer(inputs={input_blob: chunk})
infer_time.append(datetime.now() - infer_start_time)
clips += batch_size
output = output[output_blob]
for batch, data in enumerate(output):
start_time = (idx*batch_size + batch)*hop / audio.framerate
end_time = ((idx*batch_size + batch)*hop + length) / audio.framerate
outputs.append(data)
label = np.argmax(data)
log.info("[{:.2f}:{:.2f}] - {:s}: {:.2f}%".format(start_time, end_time,
labels[label] if labels else "Class {}".format(label),
data[label] * 100))
if clips == 0:
log.error("Audio too short for inference by that model")
sys.exit(1)
total = np.mean(outputs, axis=0)
label = np.argmax(total)
log.info("Total over audio - {:s}: {:.2f}%".format(labels[label] if labels else "Class {}".format(label),
total[label]*100))
logging.info("Average infer time - {:.3f}s per clip".format((np.array(infer_time).sum() / clips).total_seconds()))
|
def main():
args = build_argparser()
logging.basicConfig(format="[ %(levelname)s ] %(message)s", level=logging.INFO, stream=sys.stdout)
log = logging.getLogger()
log.info("Creating Inference Engine")
ie = IECore()
if args.device == "CPU" and args.cpu_extension:
ie.add_extension(args.cpu_extension, 'CPU')
log.info("Loading model {}".format(args.model))
model_path = args.model[:-4]
net = ie.read_network(args.model, model_path + ".bin")
if args.device == "CPU":
supported_layers = ie.query_network(net, args.device)
not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
if len(not_supported_layers) > 0:
raise RuntimeError("Following layers are not supported by the {} plugin:\n {}"
.format(args.device, ', '.join(not_supported_layers)))
if len(net.inputs) != 1:
log.error("Demo supports only models with 1 input layer")
sys.exit(1)
input_blob = next(iter(net.inputs))
input_shape = net.inputs[input_blob].shape
if len(net.outputs) != 1:
log.error("Demo supports only models with 1 output layer")
sys.exit(1)
output_blob = next(iter(net.outputs))
log.info("Loading model to the plugin")
exec_net = ie.load_network(network=net, device_name=args.device)
log.info("Preparing input")
labels = []
if args.labels:
with open(args.labels, "r") as file:
labels = [l.rstrip() for l in file.readlines()]
batch_size, channels, _, length = input_shape
audio = AudioSource(args.input, channels=channels, framerate=args.framerate)
audio.load()
hop = length - args.overlap if isinstance(args.overlap, int) else int(length * (1.0 - args.overlap))
if hop < 0:
log.error("Wrong value for '-ol/--overlap' argument - overlapping more than clip length")
sys.exit(1)
log.info("Starting inference")
outputs = []
clips = 0
infer_time = []
for idx, chunk in enumerate(audio.chunks(length, hop, num_chunks=batch_size)):
if len(chunk.shape) != len(input_shape):
chunk = np.reshape(chunk, newshape=input_shape)
infer_start_time = datetime.now()
output = exec_net.infer(inputs={input_blob: chunk})
infer_time.append(datetime.now() - infer_start_time)
clips += batch_size
output = output[output_blob]
for batch, data in enumerate(output):
start_time = (idx*batch_size + batch)*hop / audio.framerate
end_time = ((idx*batch_size + batch)*hop + length) / audio.framerate
outputs.append(data)
label = np.argmax(data)
log.info("[{:.2f}:{:.2f}] - {:s}: {:.2f}%".format(start_time, end_time,
labels[label] if labels else "Class {}".format(label),
data[label] * 100))
if clips == 0:
log.error("Audio too short for inference by that model")
sys.exit(1)
total = np.mean(outputs, axis=0)
label = np.argmax(total)
log.info("Total over audio - {:s}: {:.2f}%".format(labels[label] if labels else "Class {}".format(label),
total[label]*100))
logging.info("Average infer time - {:.3f}s per clip".format((np.array(infer_time).sum() / clips).total_seconds()))
|
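A small, self-contained sketch of the overlap-to-hop arithmetic used in the demo above: an integer --overlap is treated as an absolute number of samples shared between consecutive clips, while a float is treated as a fraction of the clip length. The sample values are made up for illustration.

def compute_hop(length, overlap):
    # Integer overlap: absolute number of samples shared between consecutive clips.
    # Float overlap: fraction of the clip length shared between consecutive clips.
    return length - overlap if isinstance(overlap, int) else int(length * (1.0 - overlap))

print(compute_hop(16000, 4000))  # 12000: the next clip starts 12000 samples later
print(compute_hop(16000, 0.25))  # 12000 as well, since 25% of 16000 is 4000 samples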
4,525 |
def test_ignore(tmpdir, capsys):
"""Test ignoring of files and directories.
Test the following file hierarchy:
tmpdir
    ├── good.txt
    ├── bad.txt
    ├── ignoredir
    │   └── subdir
    │       └── bad.txt
    └── bad.js
"""
d = str(tmpdir)
goodtxt = op.join(d, 'good.txt')
with open(goodtxt, 'w') as f:
f.write('this file is okay')
assert cs.main(d) == 0
badtxt = op.join(d, 'bad.txt')
with open(badtxt, 'w') as f:
f.write('abandonned')
assert cs.main(d) == 1
assert cs.main('--skip=bad*', d) == 0
assert cs.main('--skip=bad.txt', d) == 0
subdir = op.join(d, 'ignoredir')
os.mkdir(subdir)
with open(op.join(subdir, 'bad.txt'), 'w') as f:
f.write('abandonned')
assert cs.main(d) == 2
assert cs.main('--skip=bad*', d) == 0
assert cs.main('--skip=*ad*', d) == 0
assert cs.main('--skip=*ignoredir*', d) == 1
assert cs.main('--skip=ignoredir*', d) == 1
assert cs.main('--skip=*gnoredir*', d) == 1
assert cs.main('--skip=ignoredir', d) == 1
assert cs.main('--skip=*ignoredir/bad*', d) == 1 # FIXME
assert cs.main('--skip=ignoredir/', d) == 2 # FIXME
assert cs.main('--skip=*ignoredir/subdir*', d) == 2 # FIXME
assert cs.main('--skip=*gnoredir/subdi*', d) == 2 # FIXME
assert cs.main('--skip=ignoredir/subdir', d) == 2 # FIXME
assert cs.main('--skip=ignoredir/subdir/', d) == 2 # FIXME
assert cs.main('--skip=*ignoredir/subdir/bad*', d) == 2 # FIXME
assert cs.main('--skip=ignoredir/subdir/bad.txt', d) == 2 # FIXME
assert cs.main('--skip=*subdir*', d) == 2 # FIXME
assert cs.main('--skip=*ubdi*', d) == 2 # FIXME
assert cs.main('--skip=subdir', d) == 2 # FIXME
assert cs.main('--skip=*subdir/bad*', d) == 2 # FIXME
assert cs.main('--skip=subdir/bad*', d) == 2 # FIXME
# test ./... from outside "tmpdir"
assert cs.main('--skip=.', d) == 2
assert cs.main('--skip=./bad.txt', d) == 2
assert cs.main('--skip=./ignoredir', d) == 2
assert cs.main('--skip=./ignoredir/subdir/', d) == 2
assert cs.main('--skip=./ignoredir/subdir/bad.txt', d) == 2
assert cs.main(f'--skip={d}', d) == 0
assert cs.main(f'--skip={d}/ignoredir', d) == 1
assert cs.main(f'--skip={d}/ignoredir/subdir', d) == 2 # FIXME
assert cs.main(f'--skip={d}/ignoredir/subdir/', d) == 2 # FIXME
assert cs.main(f'--skip={d}/ignoredir/subdir/bad.txt', d) == 2 # FIXME
# test ./... from inside "tmpdir"
cwd = os.getcwd()
os.chdir(tmpdir)
assert cs.main('--skip=.') == 0
assert cs.main('--skip=./bad.txt') == 1
assert cs.main('--skip=./bad.txt', '.') == 1
assert cs.main('--skip=./ignoredir') == 1
assert cs.main('--skip=./ignoredir/subdir/') == 2 # FIXME
assert cs.main('--skip=./ignoredir/subdir/bad.txt') == 2 # FIXME
assert cs.main(f'--skip={d}') == 2 # FIXME
assert cs.main(f'--skip={d}/ignoredir') == 2 # FIXME
assert cs.main(f'--skip={d}/ignoredir/subdir') == 2 # FIXME
assert cs.main(f'--skip={d}/ignoredir/subdir/') == 2 # FIXME
assert cs.main(f'--skip={d}/ignoredir/subdir/bad.txt') == 2 # FIXME
os.chdir(cwd)
# test different file extension
badjs = op.join(d, 'bad.js')
copyfile(badtxt, badjs)
assert cs.main('--skip=*.js', goodtxt, badtxt, badjs) == 1
|
def test_ignore(tmpdir, capsys):
"""Test ignoring of files and directories.
Test the following file hierarchy:
tmpdir
    ├── good.txt
    ├── bad.txt
    ├── ignoredir
    │   └── bad.txt
    └── bad.js
"""
d = str(tmpdir)
goodtxt = op.join(d, 'good.txt')
with open(goodtxt, 'w') as f:
f.write('this file is okay')
assert cs.main(d) == 0
badtxt = op.join(d, 'bad.txt')
with open(badtxt, 'w') as f:
f.write('abandonned')
assert cs.main(d) == 1
assert cs.main('--skip=bad*', d) == 0
assert cs.main('--skip=bad.txt', d) == 0
subdir = op.join(d, 'ignoredir')
os.mkdir(subdir)
with open(op.join(subdir, 'bad.txt'), 'w') as f:
f.write('abandonned')
assert cs.main(d) == 2
assert cs.main('--skip=bad*', d) == 0
assert cs.main('--skip=*ad*', d) == 0
assert cs.main('--skip=*ignoredir*', d) == 1
assert cs.main('--skip=ignoredir*', d) == 1
assert cs.main('--skip=*gnoredir*', d) == 1
assert cs.main('--skip=ignoredir', d) == 1
assert cs.main('--skip=*ignoredir/bad*', d) == 1 # FIXME
assert cs.main('--skip=ignoredir/', d) == 2 # FIXME
assert cs.main('--skip=*ignoredir/subdir*', d) == 2 # FIXME
assert cs.main('--skip=*gnoredir/subdi*', d) == 2 # FIXME
assert cs.main('--skip=ignoredir/subdir', d) == 2 # FIXME
assert cs.main('--skip=ignoredir/subdir/', d) == 2 # FIXME
assert cs.main('--skip=*ignoredir/subdir/bad*', d) == 2 # FIXME
assert cs.main('--skip=ignoredir/subdir/bad.txt', d) == 2 # FIXME
assert cs.main('--skip=*subdir*', d) == 2 # FIXME
assert cs.main('--skip=*ubdi*', d) == 2 # FIXME
assert cs.main('--skip=subdir', d) == 2 # FIXME
assert cs.main('--skip=*subdir/bad*', d) == 2 # FIXME
assert cs.main('--skip=subdir/bad*', d) == 2 # FIXME
# test ./... from outside "tmpdir"
assert cs.main('--skip=.', d) == 2
assert cs.main('--skip=./bad.txt', d) == 2
assert cs.main('--skip=./ignoredir', d) == 2
assert cs.main('--skip=./ignoredir/subdir/', d) == 2
assert cs.main('--skip=./ignoredir/subdir/bad.txt', d) == 2
assert cs.main(f'--skip={d}', d) == 0
assert cs.main(f'--skip={d}/ignoredir', d) == 1
assert cs.main(f'--skip={d}/ignoredir/subdir', d) == 2 # FIXME
assert cs.main(f'--skip={d}/ignoredir/subdir/', d) == 2 # FIXME
assert cs.main(f'--skip={d}/ignoredir/subdir/bad.txt', d) == 2 # FIXME
# test ./... from inside "tmpdir"
cwd = os.getcwd()
os.chdir(tmpdir)
assert cs.main('--skip=.') == 0
assert cs.main('--skip=./bad.txt') == 1
assert cs.main('--skip=./bad.txt', '.') == 1
assert cs.main('--skip=./ignoredir') == 1
assert cs.main('--skip=./ignoredir/subdir/') == 2 # FIXME
assert cs.main('--skip=./ignoredir/subdir/bad.txt') == 2 # FIXME
assert cs.main(f'--skip={d}') == 2 # FIXME
assert cs.main(f'--skip={d}/ignoredir') == 2 # FIXME
assert cs.main(f'--skip={d}/ignoredir/subdir') == 2 # FIXME
assert cs.main(f'--skip={d}/ignoredir/subdir/') == 2 # FIXME
assert cs.main(f'--skip={d}/ignoredir/subdir/bad.txt') == 2 # FIXME
os.chdir(cwd)
# test different file extension
badjs = op.join(d, 'bad.js')
copyfile(badtxt, badjs)
assert cs.main('--skip=*.js', goodtxt, badtxt, badjs) == 1
|
35,097 |
def verify_meta_schedule_with_tensorrt(
mod, params, data_shape, use_meta_sched: bool = True, use_trt: bool = True, mode: str = "vm"
):
if use_meta_sched:
# With meta_schedule
dev = "nvidia/geforce-rtx-2080"
# Build
if use_trt:
def relay_build_with_tensorrt(
mod: Module,
target: Target,
params: dict,
) -> List[BuilderResult]:
from tvm.relay.op.contrib.tensorrt import partition_for_tensorrt
mod, config = partition_for_tensorrt(mod, params)
with tvm.transform.PassContext(
opt_level=3, config={"relay.ext.tensorrt.options": config}
):
return tvm.relay.build_module._build_module_no_factory(
mod, "cuda", "llvm", params
)
builder = LocalBuilder(f_build=relay_build_with_tensorrt)
else:
def relay_build_without_tensorrt(
mod: Module,
target: Target,
params: dict,
) -> List[BuilderResult]:
# @Sung: Weird. Cannot pass keyword arg
return tvm.relay.build_module._build_module_no_factory(mod, "cuda", "llvm", params)
builder = LocalBuilder(f_build=relay_build_without_tensorrt)
builder_input = BuilderInput(mod, Target(dev, host="llvm"), params)
(builder_result,) = builder.build([builder_input])
assert builder_result.error_msg is None
assert builder_result.artifact_path is not None
# Run
evaluator_config = EvaluatorConfig(
number=5,
repeat=2,
min_repeat_ms=0,
enable_cpu_cache_flush=False,
)
runner_input = RunnerInput(
builder_result.artifact_path, "cuda", [TensorInfo("float32", data_shape)]
)
def eval_func(rt_mod, device, evaluator_config, repeated_args):
rt_mod = tvm.contrib.graph_executor.GraphModule(rt_mod["default"](device))
eval = rt_mod.module.time_evaluator(
func_name="run",
dev=device,
number=evaluator_config.number,
repeat=evaluator_config.repeat,
min_repeat_ms=evaluator_config.min_repeat_ms,
f_preproc="cache_flush_cpu_non_first_arg"
if evaluator_config.enable_cpu_cache_flush
else "",
)
repeated_costs: List[List[float]] = []
for args in repeated_args:
profile_result = eval(*args)
repeated_costs.append(profile_result.results)
costs = [float(cost) for cost in itertools.chain.from_iterable(repeated_costs)]
return costs
runner = LocalRunner(
evaluator_config=evaluator_config,
f_run_evaluator=eval_func,
)
# Run the module
(runner_future,) = runner.run([runner_input])
runner_result = runner_future.result()
assert runner_result is not None
assert runner_result.run_secs is not None
assert runner_result.error_msg is None
for result in runner_result.run_secs:
if isinstance(result, FloatImm):
result = result.value
assert isinstance(result, float)
assert result >= 0.0
else:
# Without meta_schedule
if use_trt:
mod, config = tensorrt.partition_for_tensorrt(mod)
with tvm.transform.PassContext(
opt_level=3, config={"relay.ext.tensorrt.options": config}
):
func = relay.create_executor(
mode, mod=mod, device=tvm.cuda(0), target="cuda"
).evaluate()
else:
with tvm.transform.PassContext(opt_level=3):
func = relay.create_executor(
mode, mod=mod, device=tvm.cuda(0), target="cuda", params=params
).evaluate()
|
def verify_meta_schedule_with_tensorrt(
mod, params, data_shape, use_meta_sched: bool = True, use_trt: bool = True, mode: str = "vm"
):
if use_meta_sched:
# With meta_schedule
dev = "cuda"
# Build
if use_trt:
def relay_build_with_tensorrt(
mod: Module,
target: Target,
params: dict,
) -> List[BuilderResult]:
from tvm.relay.op.contrib.tensorrt import partition_for_tensorrt
mod, config = partition_for_tensorrt(mod, params)
with tvm.transform.PassContext(
opt_level=3, config={"relay.ext.tensorrt.options": config}
):
return tvm.relay.build_module._build_module_no_factory(
mod, "cuda", "llvm", params
)
builder = LocalBuilder(f_build=relay_build_with_tensorrt)
else:
def relay_build_without_tensorrt(
mod: Module,
target: Target,
params: dict,
) -> List[BuilderResult]:
# @Sung: Weird. Cannot pass keyword arg
return tvm.relay.build_module._build_module_no_factory(mod, "cuda", "llvm", params)
builder = LocalBuilder(f_build=relay_build_without_tensorrt)
builder_input = BuilderInput(mod, Target(dev, host="llvm"), params)
(builder_result,) = builder.build([builder_input])
assert builder_result.error_msg is None
assert builder_result.artifact_path is not None
# Run
evaluator_config = EvaluatorConfig(
number=5,
repeat=2,
min_repeat_ms=0,
enable_cpu_cache_flush=False,
)
runner_input = RunnerInput(
builder_result.artifact_path, "cuda", [TensorInfo("float32", data_shape)]
)
def eval_func(rt_mod, device, evaluator_config, repeated_args):
rt_mod = tvm.contrib.graph_executor.GraphModule(rt_mod["default"](device))
eval = rt_mod.module.time_evaluator(
func_name="run",
dev=device,
number=evaluator_config.number,
repeat=evaluator_config.repeat,
min_repeat_ms=evaluator_config.min_repeat_ms,
f_preproc="cache_flush_cpu_non_first_arg"
if evaluator_config.enable_cpu_cache_flush
else "",
)
repeated_costs: List[List[float]] = []
for args in repeated_args:
profile_result = eval(*args)
repeated_costs.append(profile_result.results)
costs = [float(cost) for cost in itertools.chain.from_iterable(repeated_costs)]
return costs
runner = LocalRunner(
evaluator_config=evaluator_config,
f_run_evaluator=eval_func,
)
# Run the module
(runner_future,) = runner.run([runner_input])
runner_result = runner_future.result()
assert runner_result is not None
assert runner_result.run_secs is not None
assert runner_result.error_msg is None
for result in runner_result.run_secs:
if isinstance(result, FloatImm):
result = result.value
assert isinstance(result, float)
assert result >= 0.0
else:
# Without meta_schedule
if use_trt:
mod, config = tensorrt.partition_for_tensorrt(mod)
with tvm.transform.PassContext(
opt_level=3, config={"relay.ext.tensorrt.options": config}
):
func = relay.create_executor(
mode, mod=mod, device=tvm.cuda(0), target="cuda"
).evaluate()
else:
with tvm.transform.PassContext(opt_level=3):
func = relay.create_executor(
mode, mod=mod, device=tvm.cuda(0), target="cuda", params=params
).evaluate()
|
17,349 |
def map_blocks(
func: Callable[..., T_DSorDA],
obj: Union[DataArray, Dataset],
args: Sequence[Any] = (),
kwargs: Mapping[str, Any] = None,
) -> T_DSorDA:
"""Apply a function to each chunk of a DataArray or Dataset. This function is
experimental and its signature may change.
Parameters
----------
func: callable
User-provided function that accepts a DataArray or Dataset as its first
parameter. The function will receive a subset of 'obj' (see below),
corresponding to one chunk along each chunked dimension. ``func`` will be
executed as ``func(obj_subset, *args, **kwargs)``.
The function will be first run on mocked-up data, that looks like 'obj' but
has sizes 0, to determine properties of the returned object such as dtype,
variable names, new dimensions and new indexes (if any).
This function must return either a single DataArray or a single Dataset.
This function cannot change size of existing dimensions, or add new chunked
dimensions.
obj: DataArray, Dataset
Passed to the function as its first argument, one dask chunk at a time.
args: Sequence
Passed verbatim to func after unpacking, after the sliced obj. xarray objects,
if any, will not be split by chunks. Passing dask collections is not allowed.
kwargs: Mapping
Passed verbatim to func after unpacking. xarray objects, if any, will not be
split by chunks. Passing dask collections is not allowed.
Returns
-------
A single DataArray or Dataset with dask backend, reassembled from the outputs of the
function.
Notes
-----
This function is designed for when one needs to manipulate a whole xarray object
within each chunk. In the more common case where one can work on numpy arrays, it is
recommended to use apply_ufunc.
If none of the variables in obj is backed by dask, calling this function is
equivalent to calling ``func(obj, *args, **kwargs)``.
See Also
--------
dask.array.map_blocks, xarray.apply_ufunc, xarray.Dataset.map_blocks,
xarray.DataArray.map_blocks
"""
def _wrapper(func, obj, to_array, args, kwargs):
if to_array:
obj = dataset_to_dataarray(obj)
result = func(obj, *args, **kwargs)
for name, index in result.indexes.items():
if name in obj.indexes:
if len(index) != len(obj.indexes[name]):
raise ValueError(
"Length of the %r dimension has changed. This is not allowed."
% name
)
return make_dict(result)
if not isinstance(args, Sequence):
raise TypeError("args must be a sequence (for example, a list or tuple).")
if kwargs is None:
kwargs = {}
elif not isinstance(kwargs, Mapping):
raise TypeError("kwargs must be a mapping (for example, a dict)")
for value in list(args) + list(kwargs.values()):
if dask.is_dask_collection(value):
raise TypeError(
"Cannot pass dask collections in args or kwargs yet. Please compute or "
"load values before passing to map_blocks."
)
if not dask.is_dask_collection(obj):
return func(obj, *args, **kwargs)
if isinstance(obj, DataArray):
# only using _to_temp_dataset would break
# func = lambda x: x.to_dataset()
# since that relies on preserving name.
if obj.name is None:
dataset = obj._to_temp_dataset()
else:
dataset = obj.to_dataset()
input_is_array = True
else:
dataset = obj
input_is_array = False
input_chunks = dataset.chunks
template: Union[DataArray, Dataset] = infer_template(func, obj, *args, **kwargs)
if isinstance(template, DataArray):
result_is_array = True
template_name = template.name
template = template._to_temp_dataset()
elif isinstance(template, Dataset):
result_is_array = False
else:
raise TypeError(
f"func output must be DataArray or Dataset; got {type(template)}"
)
template_indexes = set(template.indexes)
dataset_indexes = set(dataset.indexes)
preserved_indexes = template_indexes & dataset_indexes
new_indexes = template_indexes - dataset_indexes
indexes = {dim: dataset.indexes[dim] for dim in preserved_indexes}
indexes.update({k: template.indexes[k] for k in new_indexes})
graph: Dict[Any, Any] = {}
gname = "{}-{}".format(
dask.utils.funcname(func), dask.base.tokenize(dataset, args, kwargs)
)
# map dims to list of chunk indexes
ichunk = {dim: range(len(chunks_v)) for dim, chunks_v in input_chunks.items()}
# mapping from chunk index to slice bounds
chunk_index_bounds = {
dim: np.cumsum((0,) + chunks_v) for dim, chunks_v in input_chunks.items()
}
# iterate over all possible chunk combinations
for v in itertools.product(*ichunk.values()):
chunk_index_dict = dict(zip(dataset.dims, v))
# this will become [[name1, variable1],
# [name2, variable2],
# ...]
# which is passed to dict and then to Dataset
data_vars = []
coords = []
for name, variable in dataset.variables.items():
# make a task that creates tuple of (dims, chunk)
if dask.is_dask_collection(variable.data):
# recursively index into dask_keys nested list to get chunk
chunk = variable.__dask_keys__()
for dim in variable.dims:
chunk = chunk[chunk_index_dict[dim]]
chunk_variable_task = ("{}-{}".format(gname, chunk[0]),) + v
graph[chunk_variable_task] = (
tuple,
[variable.dims, chunk, variable.attrs],
)
else:
# non-dask array with possibly chunked dimensions
# index into variable appropriately
subsetter = {}
for dim in variable.dims:
if dim in chunk_index_dict:
which_chunk = chunk_index_dict[dim]
subsetter[dim] = slice(
chunk_index_bounds[dim][which_chunk],
chunk_index_bounds[dim][which_chunk + 1],
)
subset = variable.isel(subsetter)
chunk_variable_task = (
"{}-{}".format(gname, dask.base.tokenize(subset)),
) + v
graph[chunk_variable_task] = (
tuple,
[subset.dims, subset, subset.attrs],
)
# this task creates dict mapping variable name to above tuple
if name in dataset._coord_names:
coords.append([name, chunk_variable_task])
else:
data_vars.append([name, chunk_variable_task])
from_wrapper = (gname,) + v
graph[from_wrapper] = (
_wrapper,
func,
(Dataset, (dict, data_vars), (dict, coords), dataset.attrs),
input_is_array,
args,
kwargs,
)
# mapping from variable name to dask graph key
var_key_map: Dict[Hashable, str] = {}
for name, variable in template.variables.items():
if name in indexes:
continue
gname_l = f"{gname}-{name}"
var_key_map[name] = gname_l
key: Tuple[Any, ...] = (gname_l,)
for dim in variable.dims:
if dim in chunk_index_dict:
key += (chunk_index_dict[dim],)
else:
# unchunked dimensions in the input have one chunk in the result
key += (0,)
graph[key] = (operator.getitem, from_wrapper, name)
graph = HighLevelGraph.from_collections(gname, graph, dependencies=[dataset])
result = Dataset(coords=indexes, attrs=template.attrs)
for name, gname_l in var_key_map.items():
dims = template[name].dims
var_chunks = []
for dim in dims:
if dim in input_chunks:
var_chunks.append(input_chunks[dim])
elif dim in indexes:
var_chunks.append((len(indexes[dim]),))
data = dask.array.Array(
graph, name=gname_l, chunks=var_chunks, dtype=template[name].dtype
)
result[name] = (dims, data, template[name].attrs)
result = result.set_coords(template._coord_names)
if result_is_array:
da = dataset_to_dataarray(result)
da.name = template_name
return da # type: ignore
return result # type: ignore
|
def map_blocks(
func: Callable[..., T_DSorDA],
obj: Union[DataArray, Dataset],
args: Sequence[Any] = (),
kwargs: Mapping[str, Any] = None,
) -> T_DSorDA:
"""Apply a function to each chunk of a DataArray or Dataset. This function is
experimental and its signature may change.
Parameters
----------
func: callable
User-provided function that accepts a DataArray or Dataset as its first
parameter. The function will receive a subset of 'obj' (see below),
corresponding to one chunk along each chunked dimension. ``func`` will be
executed as ``func(obj_subset, *args, **kwargs)``.
The function will be first run on mocked-up data, that looks like 'obj' but
has sizes 0, to determine properties of the returned object such as dtype,
variable names, new dimensions and new indexes (if any).
This function must return either a single DataArray or a single Dataset.
This function cannot change size of existing dimensions, or add new chunked
dimensions.
obj: DataArray, Dataset
Passed to the function as its first argument, one dask chunk at a time.
args: Sequence
Passed verbatim to func after unpacking, after the sliced obj. xarray objects,
if any, will not be split by chunks. Passing dask collections is not allowed.
kwargs: Mapping
Passed verbatim to func after unpacking. xarray objects, if any, will not be
split by chunks. Passing dask collections is not allowed.
Returns
-------
A single DataArray or Dataset with dask backend, reassembled from the outputs of the
function.
Notes
-----
This function is designed for when one needs to manipulate a whole xarray object
within each chunk. In the more common case where one can work on numpy arrays, it is
recommended to use apply_ufunc.
If none of the variables in obj is backed by dask, calling this function is
equivalent to calling ``func(obj, *args, **kwargs)``.
See Also
--------
dask.array.map_blocks, xarray.apply_ufunc, xarray.Dataset.map_blocks,
xarray.DataArray.map_blocks
"""
def _wrapper(func, obj, to_array, args, kwargs):
if to_array:
obj = dataset_to_dataarray(obj)
result = func(obj, *args, **kwargs)
for name, index in result.indexes.items():
if name in obj.indexes:
if len(index) != len(obj.indexes[name]):
raise ValueError(
"Length of the %r dimension has changed. This is not allowed."
% name
)
return make_dict(result)
if not isinstance(args, Sequence):
raise TypeError("args must be a sequence (for example, a list or tuple).")
if kwargs is None:
kwargs = {}
elif not isinstance(kwargs, Mapping):
raise TypeError("kwargs must be a mapping (for example, a dict)")
for value in list(args) + list(kwargs.values()):
if dask.is_dask_collection(value):
raise TypeError(
"Cannot pass dask collections in args or kwargs yet. Please compute or "
"load values before passing to map_blocks."
)
if not dask.is_dask_collection(obj):
return func(obj, *args, **kwargs)
if isinstance(obj, DataArray):
# only using _to_temp_dataset would break
# func = lambda x: x.to_dataset()
# since that relies on preserving name.
if obj.name is None:
dataset = obj._to_temp_dataset()
else:
dataset = obj.to_dataset()
input_is_array = True
else:
dataset = obj
input_is_array = False
input_chunks = dataset.chunks
template: Union[DataArray, Dataset] = infer_template(func, obj, *args, **kwargs)
if isinstance(template, DataArray):
result_is_array = True
template_name = template.name
template = template._to_temp_dataset()
elif isinstance(template, Dataset):
result_is_array = False
else:
raise TypeError(
f"func output must be DataArray or Dataset; got {type(template)}"
)
template_indexes = set(template.indexes)
dataset_indexes = set(dataset.indexes)
preserved_indexes = template_indexes & dataset_indexes
new_indexes = template_indexes - dataset_indexes
indexes = {dim: dataset.indexes[dim] for dim in preserved_indexes}
indexes.update({k: template.indexes[k] for k in new_indexes})
graph: Dict[Any, Any] = {}
gname = "{}-{}".format(
dask.utils.funcname(func), dask.base.tokenize(dataset, args, kwargs)
)
# map dims to list of chunk indexes
ichunk = {dim: range(len(chunks_v)) for dim, chunks_v in input_chunks.items()}
# mapping from chunk index to slice bounds
chunk_index_bounds = {
dim: np.cumsum((0,) + chunks_v) for dim, chunks_v in input_chunks.items()
}
# iterate over all possible chunk combinations
for v in itertools.product(*ichunk.values()):
chunk_index_dict = dict(zip(dataset.dims, v))
# this will become [[name1, variable1],
# [name2, variable2],
# ...]
# which is passed to dict and then to Dataset
data_vars = []
coords = []
for name, variable in dataset.variables.items():
# make a task that creates tuple of (dims, chunk)
if dask.is_dask_collection(variable.data):
# recursively index into dask_keys nested list to get chunk
chunk = variable.__dask_keys__()
for dim in variable.dims:
chunk = chunk[chunk_index_dict[dim]]
chunk_variable_task = (f"{gname}-{chunk[0]}",) + v
graph[chunk_variable_task] = (
tuple,
[variable.dims, chunk, variable.attrs],
)
else:
# non-dask array with possibly chunked dimensions
# index into variable appropriately
subsetter = {}
for dim in variable.dims:
if dim in chunk_index_dict:
which_chunk = chunk_index_dict[dim]
subsetter[dim] = slice(
chunk_index_bounds[dim][which_chunk],
chunk_index_bounds[dim][which_chunk + 1],
)
subset = variable.isel(subsetter)
chunk_variable_task = (
"{}-{}".format(gname, dask.base.tokenize(subset)),
) + v
graph[chunk_variable_task] = (
tuple,
[subset.dims, subset, subset.attrs],
)
# this task creates dict mapping variable name to above tuple
if name in dataset._coord_names:
coords.append([name, chunk_variable_task])
else:
data_vars.append([name, chunk_variable_task])
from_wrapper = (gname,) + v
graph[from_wrapper] = (
_wrapper,
func,
(Dataset, (dict, data_vars), (dict, coords), dataset.attrs),
input_is_array,
args,
kwargs,
)
# mapping from variable name to dask graph key
var_key_map: Dict[Hashable, str] = {}
for name, variable in template.variables.items():
if name in indexes:
continue
gname_l = f"{gname}-{name}"
var_key_map[name] = gname_l
key: Tuple[Any, ...] = (gname_l,)
for dim in variable.dims:
if dim in chunk_index_dict:
key += (chunk_index_dict[dim],)
else:
# unchunked dimensions in the input have one chunk in the result
key += (0,)
graph[key] = (operator.getitem, from_wrapper, name)
graph = HighLevelGraph.from_collections(gname, graph, dependencies=[dataset])
result = Dataset(coords=indexes, attrs=template.attrs)
for name, gname_l in var_key_map.items():
dims = template[name].dims
var_chunks = []
for dim in dims:
if dim in input_chunks:
var_chunks.append(input_chunks[dim])
elif dim in indexes:
var_chunks.append((len(indexes[dim]),))
data = dask.array.Array(
graph, name=gname_l, chunks=var_chunks, dtype=template[name].dtype
)
result[name] = (dims, data, template[name].attrs)
result = result.set_coords(template._coord_names)
if result_is_array:
da = dataset_to_dataarray(result)
da.name = template_name
return da # type: ignore
return result # type: ignore
|
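A short usage sketch for map_blocks, matching the contract in the docstring above: the user function receives one chunk at a time and must not change dimension sizes. The array contents and chunk sizes are arbitrary examples.

import numpy as np
import xarray as xr

# A chunked DataArray: four chunks of three elements each along "x".
da = xr.DataArray(np.arange(12.0), dims="x").chunk({"x": 3})

def demean(block):
    # Runs once per chunk; subtracts that chunk's own mean.
    return block - block.mean()

result = xr.map_blocks(demean, da)
print(result.compute())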
37,069 |
def assemble_schedules(schedules, qobj_id, qobj_header, run_config):
"""Assembles a list of schedules into a qobj which can be run on the backend.
Args:
schedules (list[Schedule]): schedules to assemble
qobj_id (int): identifier for the generated qobj
qobj_header (QobjHeader): header to pass to the results
run_config (RunConfig): configuration of the runtime environment
Returns:
PulseQobj: the Qobj to be run on the backends
Raises:
QiskitError: when invalid schedules or configs are provided
"""
if hasattr(run_config, 'instruction_converter'):
instruction_converter = run_config.instruction_converter
else:
instruction_converter = InstructionToQobjConverter
qobj_config = run_config.to_dict()
qubit_lo_range = qobj_config.pop('qubit_lo_range')
meas_lo_range = qobj_config.pop('meas_lo_range')
meas_map = qobj_config.pop('meas_map', None)
memory_slots = qobj_config.pop('memory_slots', None)
max_memory_slot = 0
instruction_converter = instruction_converter(PulseQobjInstruction, **qobj_config)
lo_converter = LoConfigConverter(PulseQobjExperimentConfig, qubit_lo_range=qubit_lo_range,
meas_lo_range=meas_lo_range, **qobj_config)
# Pack everything into the Qobj
qobj_schedules = []
user_pulselib = {}
for idx, schedule in enumerate(schedules):
# instructions
qobj_instructions = []
# Instructions are returned as tuple of shifted time and instruction
for shift, instruction in schedule.instructions:
# TODO: support conditional gate
if isinstance(instruction, PulseInstruction):
name = instruction.command.name
if name in user_pulselib and instruction.command != user_pulselib[name]:
name = "{0}-{1:x}".format(name, hash(instruction.command.samples.tostring()))
instruction = PulseInstruction(
command=SamplePulse(name=name, samples=instruction.command.samples),
name=instruction.name,
channel=instruction.timeslots.channels[0])
# add samples to pulse library
user_pulselib[name] = instruction.command
if isinstance(instruction, AcquireInstruction):
if meas_map:
# verify all acquires satisfy meas_map
_validate_meas_map(instruction, meas_map)
max_memory_slot = max(max_memory_slot,
*[slot.index for slot in instruction.mem_slots])
qobj_instructions.append(instruction_converter(shift, instruction))
# experiment header
qobj_experiment_header = QobjExperimentHeader(
name=schedule.name or 'Experiment-%d' % idx
)
qobj_schedules.append({
'header': qobj_experiment_header,
'instructions': qobj_instructions
})
# set number of memoryslots
qobj_config['memory_slots'] = memory_slots or max_memory_slot
# setup pulse_library
qobj_config['pulse_library'] = [PulseLibraryItem(name=pulse.name, samples=pulse.samples)
for pulse in user_pulselib.values()]
# create qobj experiment field
experiments = []
schedule_los = qobj_config.pop('schedule_los', [])
if len(schedule_los) == 1:
lo_dict = schedule_los[0]
# update global config
q_los = lo_converter.get_qubit_los(lo_dict)
if q_los:
qobj_config['qubit_lo_freq'] = q_los
m_los = lo_converter.get_meas_los(lo_dict)
if m_los:
qobj_config['meas_lo_freq'] = m_los
if schedule_los:
# multiple frequency setups
if len(qobj_schedules) == 1:
# frequency sweep
for lo_dict in schedule_los:
experiments.append(PulseQobjExperiment(
instructions=qobj_schedules[0]['instructions'],
header=qobj_schedules[0]['header'],
config=lo_converter(lo_dict)
))
elif len(qobj_schedules) == len(schedule_los):
# n:n setup
for lo_dict, schedule in zip(schedule_los, qobj_schedules):
experiments.append(PulseQobjExperiment(
instructions=schedule['instructions'],
header=schedule['header'],
config=lo_converter(lo_dict)
))
else:
raise QiskitError('Invalid LO setting is specified. '
'The LO should be configured for each schedule, or '
'single setup for all schedules (unique), or '
'multiple setups for a single schedule (frequency sweep),'
'or no LO configured at all.')
else:
# unique frequency setup
for schedule in qobj_schedules:
experiments.append(PulseQobjExperiment(
instructions=schedule['instructions'],
header=schedule['header'],
))
qobj_config = PulseQobjConfig(**qobj_config)
return PulseQobj(qobj_id=qobj_id,
config=qobj_config,
experiments=experiments,
header=qobj_header)
|
def assemble_schedules(schedules, qobj_id, qobj_header, run_config):
"""Assembles a list of schedules into a qobj which can be run on the backend.
Args:
schedules (list[Schedule]): schedules to assemble
qobj_id (int): identifier for the generated qobj
qobj_header (QobjHeader): header to pass to the results
run_config (RunConfig): configuration of the runtime environment
Returns:
PulseQobj: the Qobj to be run on the backends
Raises:
QiskitError: when invalid schedules or configs are provided
"""
if hasattr(run_config, 'instruction_converter'):
instruction_converter = run_config.instruction_converter
else:
instruction_converter = InstructionToQobjConverter
qobj_config = run_config.to_dict()
qubit_lo_range = qobj_config.pop('qubit_lo_range')
meas_lo_range = qobj_config.pop('meas_lo_range')
meas_map = qobj_config.pop('meas_map', None)
memory_slots = qobj_config.pop('memory_slots', None)
max_memory_slot = 0
instruction_converter = instruction_converter(PulseQobjInstruction, **qobj_config)
lo_converter = LoConfigConverter(PulseQobjExperimentConfig, qubit_lo_range=qubit_lo_range,
meas_lo_range=meas_lo_range, **qobj_config)
# Pack everything into the Qobj
qobj_schedules = []
user_pulselib = {}
for idx, schedule in enumerate(schedules):
# instructions
qobj_instructions = []
# Instructions are returned as tuple of shifted time and instruction
for shift, instruction in schedule.instructions:
# TODO: support conditional gate
if isinstance(instruction, PulseInstruction):
name = instruction.command.name
if name in user_pulselib and instruction.command != user_pulselib[name]:
name = "{0}-{1:x}".format(name, hash(instruction.command.samples.tostring()))
instruction = PulseInstruction(
command=SamplePulse(name=name, samples=instruction.command.samples),
name=instruction.name,
channel=instruction.timeslots.channels[0])
# add samples to pulse library
user_pulselib[name] = instruction.command
if isinstance(instruction, AcquireInstruction):
if meas_map:
# verify all acquires satisfy meas_map
_validate_meas_map(instruction, meas_map)
max_memory_slot = max(max_memory_slot,
*[slot.index for slot in instruction.mem_slots])
qobj_instructions.append(instruction_converter(shift, instruction))
# experiment header
qobj_experiment_header = QobjExperimentHeader(
name=schedule.name or 'Experiment-%d' % idx
)
qobj_schedules.append({
'header': qobj_experiment_header,
'instructions': qobj_instructions
})
# set number of memoryslots
qobj_config['memory_slots'] = qobj_config.pop('memory_slots', None) or max_memory_slot
# setup pulse_library
qobj_config['pulse_library'] = [PulseLibraryItem(name=pulse.name, samples=pulse.samples)
for pulse in user_pulselib.values()]
# create qobj experiment field
experiments = []
schedule_los = qobj_config.pop('schedule_los', [])
if len(schedule_los) == 1:
lo_dict = schedule_los[0]
# update global config
q_los = lo_converter.get_qubit_los(lo_dict)
if q_los:
qobj_config['qubit_lo_freq'] = q_los
m_los = lo_converter.get_meas_los(lo_dict)
if m_los:
qobj_config['meas_lo_freq'] = m_los
if schedule_los:
# multiple frequency setups
if len(qobj_schedules) == 1:
# frequency sweep
for lo_dict in schedule_los:
experiments.append(PulseQobjExperiment(
instructions=qobj_schedules[0]['instructions'],
header=qobj_schedules[0]['header'],
config=lo_converter(lo_dict)
))
elif len(qobj_schedules) == len(schedule_los):
# n:n setup
for lo_dict, schedule in zip(schedule_los, qobj_schedules):
experiments.append(PulseQobjExperiment(
instructions=schedule['instructions'],
header=schedule['header'],
config=lo_converter(lo_dict)
))
else:
raise QiskitError('Invalid LO setting is specified. '
'The LO should be configured for each schedule, or '
'single setup for all schedules (unique), or '
'multiple setups for a single schedule (frequency sweep),'
'or no LO configured at all.')
else:
# unique frequency setup
for schedule in qobj_schedules:
experiments.append(PulseQobjExperiment(
instructions=schedule['instructions'],
header=schedule['header'],
))
qobj_config = PulseQobjConfig(**qobj_config)
return PulseQobj(qobj_id=qobj_id,
config=qobj_config,
experiments=experiments,
header=qobj_header)
|
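For context, a hedged sketch of how a pulse schedule typically reached this assembler in the SamplePulse-era Terra versions this code targets: users called the top-level qiskit assemble(), which built the RunConfig and delegated to assemble_schedules. The pulse shape is a placeholder, and backend is assumed to be a pulse-enabled backend obtained elsewhere.

from qiskit import assemble, pulse

# Placeholder 16-sample square pulse played on drive channel 0.
sched = pulse.Schedule(name='example')
sched += pulse.SamplePulse([0.1] * 16, name='square16')(pulse.DriveChannel(0))

# `backend` is assumed to come from a provider (e.g. IBMQ) and support pulse schedules.
qobj = assemble(sched, backend=backend, shots=1024, meas_level=2)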
101 |
def import_job():
# Make HEAD request to get last-modified time
last_modified = find_last_updated()
if not last_modified:
print(f'HEAD request to {FEED_URL} failed. Not attempting GET request.')
return
print(f'Last-Modified date: {last_modified}')
updated_on = get_last_updated_time()
if last_modified == updated_on:
print(f'No new updates since {updated_on}. Processing completed.')
return
print(f'Last import job: {updated_on}')
# Get feed:
d = get_feed()
# Create datetime using updated_on:
modified_since = convert_date_string(updated_on)
# Map feed entries to list of import objects:
print(f'Importing all entries that have been updated since {modified_since}.')
mapped_entries = map_entries(d.entries, modified_since)
print(f'{len(mapped_entries)} import objects created.')
# Import all data:
create_batch(mapped_entries)
print(f'{len(mapped_entries)} entries added to the batch import job.')
# Store timestamp for header
with open(LAST_UPDATED_TIME, 'w') as f:
f.write(last_modified)
print(f'Last updated timestamp written to: {LAST_UPDATED_TIME}')
|
def import_job() -> None:
# Make HEAD request to get last-modified time
last_modified = find_last_updated()
if not last_modified:
print(f'HEAD request to {FEED_URL} failed. Not attempting GET request.')
return
print(f'Last-Modified date: {last_modified}')
updated_on = get_last_updated_time()
if last_modified == updated_on:
print(f'No new updates since {updated_on}. Processing completed.')
return
print(f'Last import job: {updated_on}')
# Get feed:
d = get_feed()
# Create datetime using updated_on:
modified_since = convert_date_string(updated_on)
# Map feed entries to list of import objects:
print(f'Importing all entries that have been updated since {modified_since}.')
mapped_entries = map_entries(d.entries, modified_since)
print(f'{len(mapped_entries)} import objects created.')
# Import all data:
create_batch(mapped_entries)
print(f'{len(mapped_entries)} entries added to the batch import job.')
# Store timestamp for header
with open(LAST_UPDATED_TIME, 'w') as f:
f.write(last_modified)
print(f'Last updated timestamp written to: {LAST_UPDATED_TIME}')
|
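A hedged sketch of what the find_last_updated helper used above could look like: a HEAD request that returns the feed's Last-Modified header, or None on failure. FEED_URL and the use of requests are assumptions; the project's actual helper may differ.

import requests

def find_last_updated(url=FEED_URL, timeout=10):
    # HEAD request only: we just want the Last-Modified header, not the feed body.
    try:
        response = requests.head(url, timeout=timeout)
        response.raise_for_status()
    except requests.RequestException:
        return None
    return response.headers.get('Last-Modified')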
49,657 |
def setup_resource_paths(app: Sphinx, pagename: str, templatename: str,
context: Dict, doctree: Node) -> None:
"""Set up relative resource paths."""
pathto = context.get('pathto')
# favicon_url
favicon = context.get('favicon_url')
if favicon and not isurl(favicon):
context['favicon_url'] = pathto('_static/' + favicon, resource=True)
# logo_url
logo = context.get('logo_url')
if logo and not isurl(logo):
context['logo_url'] = pathto('_static/' + logo, resource=True)
|
def setup_resource_paths(app: Sphinx, pagename: str, templatename: str,
context: Dict, doctree: Node) -> None:
"""Set up relative resource paths."""
pathto = context.get('pathto')
# favicon_url
favicon_url = context.get('favicon_url')
if favicon_url and not isurl(favicon_url):
context['favicon_url'] = pathto('_static/' + favicon_url, resource=True)
# logo_url
logo = context.get('logo_url')
if logo and not isurl(logo):
context['logo_url'] = pathto('_static/' + logo, resource=True)
|