id (int64, 11–59.9k) | original (string, lengths 33–150k) | modified (string, lengths 37–150k) |
---|---|---|
3,236 | def selector_func(method, callargs, switchover_timestamp):
spec = method_specifications.get(method)
if spec is None:
return "redis" # default backend (possibly invoke base directly instead?)
if switchover_timestamp and time.time() < switchover_timestamp:
return "redis" # snuba does not yet have all data
operation_type, model_extractor = spec
backends = {model_backends[model][operation_type] for model in model_extractor(callargs)}
assert len(backends) == 1, "request was not directed to a single backend"
return backends.pop()
| def selector_func(method, callargs, switchover_timestamp):
spec = method_specifications.get(method)
if spec is None:
return "redis" # default backend (possibly invoke base directly instead?)
if switchover_timestamp is not None and time.time() < switchover_timestamp:
return "redis" # snuba does not yet have all data
operation_type, model_extractor = spec
backends = {model_backends[model][operation_type] for model in model_extractor(callargs)}
assert len(backends) == 1, "request was not directed to a single backend"
return backends.pop()
|
44,503 | def build_importer_task_spec(
dependent_task: pipeline_spec_pb2.PipelineTaskSpec,
input_name: str,
input_type_schema: str,
) -> pipeline_spec_pb2.PipelineTaskSpec:
"""Build an importer task spec.
Args:
dependent_task: The task requires importer node.
input_name: The name of the input artifact needs to be imported.
input_type_schema: The type of the input artifact.
Returns:
An importer node task spec.
"""
dependent_task_name = dependent_task.task_info.name
task_spec = pipeline_spec_pb2.PipelineTaskSpec()
task_spec.task_info.name = '{}_{}_importer'.format(dependent_task_name,
input_name)
task_spec.outputs.artifacts[OUTPUT_KEY].artifact_type.instance_schema = (
input_type_schema)
task_spec.executor_label = task_spec.task_info.name
return task_spec
| def build_importer_task_spec(
dependent_task: pipeline_spec_pb2.PipelineTaskSpec,
input_name: str,
input_type_schema: str,
) -> pipeline_spec_pb2.PipelineTaskSpec:
"""Builds an importer task spec.
Args:
dependent_task: The task requires importer node.
input_name: The name of the input artifact needs to be imported.
input_type_schema: The type of the input artifact.
Returns:
An importer node task spec.
"""
dependent_task_name = dependent_task.task_info.name
task_spec = pipeline_spec_pb2.PipelineTaskSpec()
task_spec.task_info.name = '{}_{}_importer'.format(dependent_task_name,
input_name)
task_spec.outputs.artifacts[OUTPUT_KEY].artifact_type.instance_schema = (
input_type_schema)
task_spec.executor_label = task_spec.task_info.name
return task_spec
|
3,906 | def is_threshold_sequence(degree_sequence):
"""Returns True if the sequence is a threshold degree seqeunce.
Uses the property that a threshold graph must be constructed by
adding either dominating or isolated nodes. Thus, it can be
deconstructed iteratively by removing a node of degree zero or a
node that connects to the remaining nodes. If this deconstruction
failes then the sequence is not a threshold sequence.
Parameters
----------
degree_sequence
The degree sequence
Returns
-------
False if not a threshold degree sequence, True otherwise
"""
ds = degree_sequence[:] # get a copy so we don't destroy original
ds.sort()
while ds:
if ds[0] == 0: # if isolated node
ds.pop(0) # remove it
continue
if ds[-1] != len(ds) - 1: # is the largest degree node dominating?
return False # no, not a threshold degree sequence
ds.pop() # yes, largest is the dominating node
ds = [d - 1 for d in ds] # remove it and decrement all degrees
return True
| def is_threshold_sequence(degree_sequence):
"""Returns True if the sequence is a threshold degree sequence.
Uses the property that a threshold graph must be constructed by
adding either dominating or isolated nodes. Thus, it can be
deconstructed iteratively by removing a node of degree zero or a
node that connects to the remaining nodes. If this deconstruction
failes then the sequence is not a threshold sequence.
Parameters
----------
degree_sequence
The degree sequence
Returns
-------
False if not a threshold degree sequence, True otherwise
"""
ds = degree_sequence[:] # get a copy so we don't destroy original
ds.sort()
while ds:
if ds[0] == 0: # if isolated node
ds.pop(0) # remove it
continue
if ds[-1] != len(ds) - 1: # is the largest degree node dominating?
return False # no, not a threshold degree sequence
ds.pop() # yes, largest is the dominating node
ds = [d - 1 for d in ds] # remove it and decrement all degrees
return True
|
1,618 | def clone(estimator, safe=True):
"""Constructs a new estimator with the same parameters.
Clone does a deep copy of the model in an estimator
without actually copying attached data. It yields a new estimator
with the same parameters that has not been fit on any data.
Parameters
----------
estimator : estimator object, or list, tuple or set of objects
The estimator or group of estimators to be cloned
safe : boolean, optional
If safe is false, clone will fall back to a deep copy on objects
that are not estimators.
"""
estimator_type = type(estimator)
# XXX: not handling dictionaries
if estimator_type in (list, tuple, set, frozenset):
return estimator_type([clone(e, safe=safe) for e in estimator])
elif not hasattr(estimator, 'get_params'):
if not safe:
return copy.deepcopy(estimator)
else:
raise TypeError("Cannot clone object '%s' (type %s): "
"it does not seem to be a scikit-learn estimator "
"as it does not implement a 'get_params' methods."
% (repr(estimator), type(estimator)))
elif isinstance(estimator, type):
if not safe:
return copy.deepcopy(estimator)
else:
raise TypeError("Cannot clone object '%s' "
"it is a class rather than instance of a class "
% (repr(estimator)))
klass = estimator.__class__
new_object_params = estimator.get_params(deep=False)
for name, param in new_object_params.items():
new_object_params[name] = clone(param, safe=False)
new_object = klass(**new_object_params)
params_set = new_object.get_params(deep=False)
# quick sanity check of the parameters of the clone
for name in new_object_params:
param1 = new_object_params[name]
param2 = params_set[name]
if param1 is not param2:
raise RuntimeError('Cannot clone object %s, as the constructor '
'either does not set or modifies parameter %s' %
(estimator, name))
return new_object
| def clone(estimator, safe=True):
"""Constructs a new estimator with the same parameters.
Clone does a deep copy of the model in an estimator
without actually copying attached data. It yields a new estimator
with the same parameters that has not been fit on any data.
Parameters
----------
estimator : estimator object, or list, tuple or set of objects
The estimator or group of estimators to be cloned
safe : boolean, optional
If safe is false, clone will fall back to a deep copy on objects
that are not estimators.
"""
estimator_type = type(estimator)
# XXX: not handling dictionaries
if estimator_type in (list, tuple, set, frozenset):
return estimator_type([clone(e, safe=safe) for e in estimator])
elif not hasattr(estimator, 'get_params'):
if not safe:
return copy.deepcopy(estimator)
else:
raise TypeError("Cannot clone object '%s' (type %s): "
"it does not seem to be a scikit-learn estimator "
"as it does not implement a 'get_params' methods."
% (repr(estimator), type(estimator)))
elif isinstance(estimator, type):
if not safe:
return copy.deepcopy(estimator)
else:
raise TypeError("Cannot clone object '%s' "
"it is a class rather than an instance."
% (repr(estimator)))
klass = estimator.__class__
new_object_params = estimator.get_params(deep=False)
for name, param in new_object_params.items():
new_object_params[name] = clone(param, safe=False)
new_object = klass(**new_object_params)
params_set = new_object.get_params(deep=False)
# quick sanity check of the parameters of the clone
for name in new_object_params:
param1 = new_object_params[name]
param2 = params_set[name]
if param1 is not param2:
raise RuntimeError('Cannot clone object %s, as the constructor '
'either does not set or modifies parameter %s' %
(estimator, name))
return new_object
|
32,383 | def main():
try:
targetList = demisto.args().get("list")
engagement_list = demisto.executeCommand("getList", {"listName": targetList})[0]["Contents"].split(",")
random.shuffle(engagement_list)
buffer_list = ""
for token in engagement_list:
buffer_list = buffer_list + token.replace('"', '') + ","
buffer_list = buffer_list[:-1]
demisto.executeCommand("setList", {"listName": targetList, "listData": buffer_list})
return_results("List " + targetList + " successfully shuffled!")
except Exception as ex:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute BaseScript. Error: {str(ex)}')
| def main():
try:
targetList = demisto.args().get("list")
engagement_list = demisto.executeCommand("getList", {"listName": targetList})[0]["Contents"].split(",")
random.shuffle(engagement_list)
buffer_list = ','.join(engagement_list)
buffer_list = buffer_list[:-1]
demisto.executeCommand("setList", {"listName": targetList, "listData": buffer_list})
return_results("List " + targetList + " successfully shuffled!")
except Exception as ex:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute BaseScript. Error: {str(ex)}')
|
11,816 | def _getexif(self):
# Extract EXIF information. This method is highly experimental,
# and is likely to be replaced with something better in a future
# version.
# The EXIF record consists of a TIFF file embedded in a JPEG
# application marker (!).
try:
data = self.info["exif"]
except KeyError:
return None
file = io.BytesIO(data[6:])
head = file.read(8)
# process dictionary
info = TiffImagePlugin.ImageFileDirectory_v1(head)
info.load(file)
exif = dict(_fixup_dict(info))
# get exif extension
try:
# exif field 0x8769 is an offset pointer to the location
# of the nested embedded exif ifd.
# It should be a long, but may be corrupted.
file.seek(exif[0x8769])
except (KeyError, TypeError):
pass
else:
info = TiffImagePlugin.ImageFileDirectory_v1(head)
info.load(file)
exif.update(_fixup_dict(info))
# get gpsinfo extension
try:
# exif field 0x8825 is an offset pointer to the location
# of the nested embedded gps exif ifd.
# It should be a long, but may be corrupted.
file.seek(exif[0x8825])
except (KeyError, TypeError):
pass
else:
info = TiffImagePlugin.ImageFileDirectory_v1(head)
info.load(file)
exif[0x8825] = _fixup_dict(info)
# get interop
try:
# exif field 0xa005 is an offset pointer to the location
# of the nested embedded interop exif ifd.
# It should be a long, but may be corrupted.
file.seek(exif[0xa005])
except (KeyError, TypeError):
pass
else:
info = TiffImagePlugin.ImageFileDirectory_v1(head)
info.load(file)
exif["interop"] = _fixup_dict(info)
return exif
| def _getexif(self):
# Extract EXIF information. This method is highly experimental,
# and is likely to be replaced with something better in a future
# version.
# The EXIF record consists of a TIFF file embedded in a JPEG
# application marker (!).
try:
data = self.info["exif"]
except KeyError:
return None
file = io.BytesIO(data[6:])
head = file.read(8)
# process dictionary
info = TiffImagePlugin.ImageFileDirectory_v1(head)
info.load(file)
exif = dict(_fixup_dict(info))
# get exif extension
try:
# exif field 0x8769 is an offset pointer to the location
# of the nested embedded exif ifd.
# It should be a long, but may be corrupted.
file.seek(exif[0x8769])
except (KeyError, TypeError):
pass
else:
info = TiffImagePlugin.ImageFileDirectory_v1(head)
info.load(file)
exif.update(_fixup_dict(info))
# get gpsinfo extension
try:
# exif field 0x8825 is an offset pointer to the location
# of the nested embedded gps exif ifd.
# It should be a long, but may be corrupted.
file.seek(exif[0x8825])
except (KeyError, TypeError):
pass
else:
info = TiffImagePlugin.ImageFileDirectory_v1(head)
info.load(file)
exif[0x8825] = _fixup_dict(info)
# get interop
try:
# exif field 0xa005 is an offset pointer to the location
# of the nested embedded interop Exif IFD.
# It should be a long, but may be corrupted.
file.seek(exif[0xa005])
except (KeyError, TypeError):
pass
else:
info = TiffImagePlugin.ImageFileDirectory_v1(head)
info.load(file)
exif["interop"] = _fixup_dict(info)
return exif
|
54,223 | def test_text_diagrams():
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
circuit = cirq.Circuit(
cirq.X(a),
cirq.Y(a),
cirq.Z(a),
cirq.Z(a) ** sympy.Symbol('x'),
cirq.rx(sympy.Symbol('x')).on(a),
cirq.CZ(a, b),
cirq.CNOT(a, b),
cirq.CNOT(b, a),
(cirq.CNOT ** 0.5)(a, b),
(cirq.CNOT ** 0.5)(b, a),
cirq.H(a) ** 0.5,
cirq.I(a),
cirq.IdentityGate(2)(a, b),
cirq.cphase(sympy.pi * sympy.Symbol('t')).on(a, b),
)
cirq.testing.assert_has_diagram(
circuit,
"""
a: ───X───Y───Z───Z^x───Rx(x)───@───@───X───@───────X^0.5───H^0.5───I───I───@─────
                                │   │   │   │       │                   │   │
b: ─────────────────────────────@───X───@───X^0.5───@───────────────────I───@^t───
""",
)
cirq.testing.assert_has_diagram(
circuit,
"""
a: ---X---Y---Z---Z^x---Rx(x)---@---@---X---@-------X^0.5---H^0.5---I---I---@-----
                                |   |   |   |       |                   |   |
b: -----------------------------@---X---@---X^0.5---@-------------------I---@^t---
""",
use_unicode_characters=False,
)
| def test_text_diagrams():
a = cirq.NamedQubit('a')
b = cirq.NamedQubit('b')
circuit = cirq.Circuit(
cirq.X(a),
cirq.Y(a),
cirq.Z(a),
cirq.Z(a) ** sympy.Symbol('x'),
cirq.rx(sympy.Symbol('x')).on(a),
cirq.CZ(a, b),
cirq.CNOT(a, b),
cirq.CNOT(b, a),
cirq.CNOT(a, b) ** 0.5,
cirq.CNOT(a, b) ** 0.5,
cirq.H(a) ** 0.5,
cirq.I(a),
cirq.IdentityGate(2)(a, b),
cirq.cphase(sympy.pi * sympy.Symbol('t')).on(a, b),
)
cirq.testing.assert_has_diagram(
circuit,
"""
a: ───X───Y───Z───Z^x───Rx(x)───@───@───X───@───────X^0.5───H^0.5───I───I───@─────
                                │   │   │   │       │                   │   │
b: ─────────────────────────────@───X───@───X^0.5───@───────────────────I───@^t───
""",
)
cirq.testing.assert_has_diagram(
circuit,
"""
a: ---X---Y---Z---Z^x---Rx(x)---@---@---X---@-------X^0.5---H^0.5---I---I---@-----
                                |   |   |   |       |                   |   |
b: -----------------------------@---X---@---X^0.5---@-------------------I---@^t---
""",
use_unicode_characters=False,
)
|
30,791 | def args():
"""Retrieves a command / script arguments object
Returns:
dict: Arguments object
"""
return {}
| def args():
"""Retrieves a command / script arguments object
Returns:
dict. Arguments object
"""
return {}
|
38,972 | def normalize_model_name(name: str) -> str:
"""Normalizes the given model name."""
return re.sub(r'[^a-zA-Z0-9.\-_]', '_', name)
| def normalize_name(name: str) -> str:
"""Normalizes the given model name."""
return re.sub(r'[^a-zA-Z0-9.\-_]', '_', name)
|
40,084 | def make_response_error(errmsg, in_httpcode=None, details=None):
if details is None:
details = {}
if not in_httpcode:
httpcode = 500
else:
httpcode = in_httpcode
msg = str(errmsg)
ret = {
'message': msg,
'httpcode': int(httpcode),
'detail': details
}
if 'error_codes' not in ret['detail']:
ret['detail']['error_codes'] = []
if isinstance(errmsg, Exception):
if 'anchore_error_json' in errmsg.__dict__:
# Try to load it as json
try:
anchore_error_json = errmsg.__dict__.get('anchore_error_json', None)
if type(anchore_error_json) == dict:
err_json = anchore_error_json
else:
err_json = json.loads(anchore_error_json)
except (TypeError, ValueError):
# Then it may just be a string, we cannot do anything with it
logger.debug('Failed to parse anchore_error_json as json')
return ret
if {'message', 'httpcode', 'detail'}.issubset(set(err_json)):
ret.update(err_json)
try:
if {'error_code'}.issubset(set(err_json)) and err_json.get('error_code', None):
if 'error_codes' not in ret['detail']:
ret['detail']['error_codes'] = []
ret['detail']['error_codes'].append(err_json.get('error_code'))
except KeyError:
logger.warn("unable to marshal error details: source error {}".format(errmsg.__dict__))
return ret
| def make_response_error(errmsg, in_httpcode=None, details=None):
if details is None:
details = {}
if not in_httpcode:
httpcode = 500
else:
httpcode = in_httpcode
msg = str(errmsg)
ret = {
'message': msg,
'httpcode': int(httpcode),
'detail': details
}
if 'error_codes' not in ret['detail']:
ret['detail']['error_codes'] = []
if isinstance(errmsg, Exception):
if 'anchore_error_json' in errmsg.__dict__:
# Try to load it as json
try:
anchore_error_json = getattr(errmsg, "anchore_error_json", None)
if type(anchore_error_json) == dict:
err_json = anchore_error_json
else:
err_json = json.loads(anchore_error_json)
except (TypeError, ValueError):
# Then it may just be a string, we cannot do anything with it
logger.debug('Failed to parse anchore_error_json as json')
return ret
if {'message', 'httpcode', 'detail'}.issubset(set(err_json)):
ret.update(err_json)
try:
if {'error_code'}.issubset(set(err_json)) and err_json.get('error_code', None):
if 'error_codes' not in ret['detail']:
ret['detail']['error_codes'] = []
ret['detail']['error_codes'].append(err_json.get('error_code'))
except KeyError:
logger.warn("unable to marshal error details: source error {}".format(errmsg.__dict__))
return ret
|
38,988 | def constr_lower(v: 'StrBytes', field: 'ModelField', config: 'BaseConfig') -> 'StrBytes':
lower = field.type_.lower_str or config.anystr_lower
if lower:
v = v.strip()
return v
| def constr_lower(v: 'StrBytes', field: 'ModelField', config: 'BaseConfig') -> 'StrBytes':
lower = field.type_.lower_str or config.anystr_lower
if lower:
v = v.lower()
return v
|
32,910 | def _default_span_processors_factory(
trace_filters, # type: List[TraceFilter]
trace_writer, # type: TraceWriter
partial_flush_enabled, # type: bool
partial_flush_min_spans, # type: int
appsec_enabled, # type: bool
):
# type: (...) -> List[SpanProcessor]
"""Construct the default list of span processors to use."""
trace_processors = [] # type: List[TraceProcessor]
trace_processors += [TraceTagsProcessor()]
trace_processors += [TraceSamplingProcessor()]
trace_processors += [TraceTopLevelSpanProcessor()]
trace_processors += trace_filters
span_processors = [] # type: List[SpanProcessor]
if appsec_enabled:
try:
from .appsec.processor import AppSecSpanProcessor
appsec_span_processor = AppSecSpanProcessor()
span_processors.append(appsec_span_processor)
except Exception as e:
# DDAS-001-01
log.error(
"[DDAS-001-01] "
"AppSec could not start because of an unexpected error. No security activities will be collected. "
"Please contact support at https://docs.datadoghq.com/help/ for help. Error details: \n%s",
repr(e),
)
if config._raise:
raise
span_processors.append(
SpanAggregator(
partial_flush_enabled=partial_flush_enabled,
partial_flush_min_spans=partial_flush_min_spans,
trace_processors=trace_processors,
writer=trace_writer,
)
)
return span_processors
| def _default_span_processors_factory(
trace_filters, # type: List[TraceFilter]
trace_writer, # type: TraceWriter
partial_flush_enabled, # type: bool
partial_flush_min_spans, # type: int
appsec_enabled, # type: bool
):
# type: (...) -> List[SpanProcessor]
"""Construct the default list of span processors to use."""
trace_processors = [TraceTagsProcessor(), TraceSamplingProcessor(), TraceTopLevelSpanProcessor()] + trace_filters # type: List[TraceProcessor]
span_processors = [] # type: List[SpanProcessor]
if appsec_enabled:
try:
from .appsec.processor import AppSecSpanProcessor
appsec_span_processor = AppSecSpanProcessor()
span_processors.append(appsec_span_processor)
except Exception as e:
# DDAS-001-01
log.error(
"[DDAS-001-01] "
"AppSec could not start because of an unexpected error. No security activities will be collected. "
"Please contact support at https://docs.datadoghq.com/help/ for help. Error details: \n%s",
repr(e),
)
if config._raise:
raise
span_processors.append(
SpanAggregator(
partial_flush_enabled=partial_flush_enabled,
partial_flush_min_spans=partial_flush_min_spans,
trace_processors=trace_processors,
writer=trace_writer,
)
)
return span_processors
|
2,491 | def test_gradient_boosting_early_stopping():
X, y = make_classification(n_samples=1000, random_state=0)
gbc = GradientBoostingClassifier(
n_estimators=100,
n_iter_no_change=10,
learning_rate=0.1,
max_depth=3,
random_state=42,
)
gbr = GradientBoostingRegressor(
n_estimators=100,
n_iter_no_change=10,
learning_rate=0.1,
max_depth=3,
random_state=42,
)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# Check if early_stopping works as expected
for est, tol, early_stop_n_estimators in (
(gbc, 1e-1, 28),
(gbr, 1e-1, 13),
(gbc, 1e-3, 70),
(gbr, 1e-3, 28),
):
est.set_params(tol=tol)
est.fit(X_train, y_train)
assert est.n_estimators_ == early_stop_n_estimators
assert est.score(X_test, y_test) > 0.7
# Without early stopping
gbc = GradientBoostingClassifier(
n_estimators=5, learning_rate=0.1, max_depth=3, random_state=42
)
gbc.fit(X, y)
gbr = GradientBoostingRegressor(
n_estimators=10, learning_rate=0.1, max_depth=3, random_state=42
)
gbr.fit(X, y)
assert gbc.n_estimators_ == 5
assert gbr.n_estimators_ == 10
| def test_gradient_boosting_early_stopping():
X, y = make_classification(n_samples=1000, random_state=0)
gbc = GradientBoostingClassifier(
n_estimators=1000,
n_iter_no_change=10,
learning_rate=0.1,
max_depth=3,
random_state=42,
)
gbr = GradientBoostingRegressor(
n_estimators=100,
n_iter_no_change=10,
learning_rate=0.1,
max_depth=3,
random_state=42,
)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# Check if early_stopping works as expected
for est, tol, early_stop_n_estimators in (
(gbc, 1e-1, 28),
(gbr, 1e-1, 13),
(gbc, 1e-3, 70),
(gbr, 1e-3, 28),
):
est.set_params(tol=tol)
est.fit(X_train, y_train)
assert est.n_estimators_ == early_stop_n_estimators
assert est.score(X_test, y_test) > 0.7
# Without early stopping
gbc = GradientBoostingClassifier(
n_estimators=5, learning_rate=0.1, max_depth=3, random_state=42
)
gbc.fit(X, y)
gbr = GradientBoostingRegressor(
n_estimators=10, learning_rate=0.1, max_depth=3, random_state=42
)
gbr.fit(X, y)
assert gbc.n_estimators_ == 5
assert gbr.n_estimators_ == 10
|
30,969 | def iam_command_success(user_profile, okta_user):
""" Handles successful responses from Okta API by preparing the User Profile object with the user data.
Args:
user_profile (οΏΏIAMUserProfile): The User Profile object.
okta_user (dict): The data retrieved from Okta.
"""
if demisto.command() == 'disable-user':
active = False
elif demisto.command() == 'enable-user':
active = True
else:
active = False if okta_user.get('status') == DEPROVISIONED_STATUS else True
user_profile.set_result(
success=True,
active=active,
iden=okta_user.get('id'),
email=okta_user.get('profile', {}).get('email'),
username=okta_user.get('profile', {}).get('login'),
details=okta_user
)
| def iam_command_success(user_profile, okta_user):
""" Handles successful responses from Okta API by preparing the User Profile object with the user data.
Args:
user_profile (IAMUserProfile): The User Profile object.
okta_user (dict): The data retrieved from Okta.
"""
if demisto.command() == 'disable-user':
active = False
elif demisto.command() == 'enable-user':
active = True
else:
active = False if okta_user.get('status') == DEPROVISIONED_STATUS else True
user_profile.set_result(
success=True,
active=active,
iden=okta_user.get('id'),
email=okta_user.get('profile', {}).get('email'),
username=okta_user.get('profile', {}).get('login'),
details=okta_user
)
|
40,528 | def upload_cmd_tree():
blob_file_name = 'extensionCommandTree.json'
downloaded_file_name = 'extCmdTreeDownloaded.json'
file_path = os.path.expanduser(os.path.join('~', '.azure', file_name))
client = BlockBlobService(account_name=STORAGE_ACCOUNT, account_key=STORAGE_ACCOUNT_KEY)
client.create_blob_from_path(container_name=STORAGE_CONTAINER, blob_name=blob_file_name,
file_path=file_path)
url = client.make_blob_url(container_name=STORAGE_CONTAINER, blob_name=blob_file_name)
download_file_path = os.path.expanduser(os.path.join('~', '.azure', downloaded_file_name))
download_file(url, download_file_path)
if filecmp.cmp(file_path, download_file_path):
print("extensionCommandTree.json uploaded successfully. URL: {}".format(url))
else:
raise Exception("Failed to update extensionCommandTree.json in storage account")
| def upload_cmd_tree():
blob_file_name = 'extensionCommandTree.json'
downloaded_file_name = 'extCmdTreeDownloaded.json'
file_path = os.path.expanduser(os.path.join('~', '.azure', file_name))
client = BlockBlobService(account_name=STORAGE_ACCOUNT, account_key=STORAGE_ACCOUNT_KEY)
client.create_blob_from_path(container_name=STORAGE_CONTAINER, blob_name=blob_file_name,
file_path=file_path)
url = client.make_blob_url(container_name=STORAGE_CONTAINER, blob_name=blob_file_name)
download_file_path = os.path.expanduser(os.path.join('~', '.azure', downloaded_file_name))
download_file(url, download_file_path)
if filecmp.cmp(file_path, download_file_path):
print("extensionCommandTree.json uploaded successfully. URL: {}".format(url))
else:
raise Exception("Failed to update extensionCommandTree.json in the storage account")
|
25,776 | def busmap_by_hac(network, n_clusters, buses_i=None, branch_components=["Line", "Link"], feature=None):
"""
Create a busmap accroding to Hierarchical Agglomerative Clustering.
Parameters
----------
network : pypsa.Network
n_clusters : int
Final number of clusters desired.
buses_i: None|pandas.Index
Subset of buses to cluster. If None, all buses are considered.
branch_components: List
Subset of all branch_components in the network. Defaults to ["Line", "Link"].
feature: None | pandas.DataFrame
Feature to be considered for the clustering.
The DataFrame must be indexed with buses_i.
If None, all buses have the same similarity.
Returns
-------
busmap : pandas.Series
Mapping of network.buses to k-means clusters (indexed by
non-negative integers).
"""
if find_spec('sklearn') is None:
raise ModuleNotFoundError("Optional dependency 'sklearn' not found."
"Install via 'conda install -c conda-forge scikit-learn' "
"or 'pip install scikit-learn'")
from sklearn.cluster import AgglomerativeClustering as HAC
if buses_i is None:
buses_i = network.buses.index
if feature is None:
logger.warning(
"No feature is specified for Hierarchical Clustering. "
"Falling back to default, where all buses have equal similarity. "
"You can specify a feature as pandas.DataFrame indexed with buses_i."
)
feature = pd.DataFrame(index=buses_i, columns=[""], data=0)
buses_x = network.buses.index.get_indexer(buses_i)
A = network.adjacency_matrix(branch_components=branch_components).todense()
A = sp.sparse.coo_matrix(A[buses_x].T[buses_x].T)
labels = HAC(n_clusters=n_clusters,
connectivity=A,
affinity='euclidean',
linkage='ward').fit_predict(feature)
busmap = pd.Series(data=labels, index=buses_i, dtype='str')
return busmap
| def busmap_by_hac(network, n_clusters, buses_i=None, branch_components=["Line", "Link"], feature=None):
"""
Create a busmap according to Hierarchical Agglomerative Clustering.
Parameters
----------
network : pypsa.Network
n_clusters : int
Final number of clusters desired.
buses_i: None|pandas.Index
Subset of buses to cluster. If None, all buses are considered.
branch_components: List
Subset of all branch_components in the network. Defaults to ["Line", "Link"].
feature: None | pandas.DataFrame
Feature to be considered for the clustering.
The DataFrame must be indexed with buses_i.
If None, all buses have the same similarity.
Returns
-------
busmap : pandas.Series
Mapping of network.buses to k-means clusters (indexed by
non-negative integers).
"""
if find_spec('sklearn') is None:
raise ModuleNotFoundError("Optional dependency 'sklearn' not found."
"Install via 'conda install -c conda-forge scikit-learn' "
"or 'pip install scikit-learn'")
from sklearn.cluster import AgglomerativeClustering as HAC
if buses_i is None:
buses_i = network.buses.index
if feature is None:
logger.warning(
"No feature is specified for Hierarchical Clustering. "
"Falling back to default, where all buses have equal similarity. "
"You can specify a feature as pandas.DataFrame indexed with buses_i."
)
feature = pd.DataFrame(index=buses_i, columns=[""], data=0)
buses_x = network.buses.index.get_indexer(buses_i)
A = network.adjacency_matrix(branch_components=branch_components).todense()
A = sp.sparse.coo_matrix(A[buses_x].T[buses_x].T)
labels = HAC(n_clusters=n_clusters,
connectivity=A,
affinity='euclidean',
linkage='ward').fit_predict(feature)
busmap = pd.Series(data=labels, index=buses_i, dtype='str')
return busmap
|
38,250 | def log_request(handler):
"""log a bit more information about each request than tornado's default
- move static file get success to debug-level (reduces noise)
- get proxied IP instead of proxy IP
- log referer for redirect and failed requests
- log user-agent for failed requests
- record per-request metrics in prometheus
"""
status = handler.get_status()
request = handler.request
request_time = 1000.0 * handler.request.request_time() # milliseconds
if status == 304 or (
status < 300
and (
isinstance(handler, StaticFileHandler)
or getattr(handler, "log_success_debug", False)
)
):
# static-file success and 304 Found are debug-level
log_method = access_log.debug
elif status < 400:
log_method = access_log.info
elif status < 500:
log_method = access_log.warning
else:
log_method = access_log.error
if request_time >= 1000 and log_method is access_log.debug:
# slow responses are always logged at least INFO-level
log_method = access_log.info
uri = _scrub_uri(request.uri)
headers = _scrub_headers(request.headers)
try:
user = handler.current_user
except (HTTPError, RuntimeError):
username = ""
else:
if user is None:
username = ""
elif isinstance(user, str):
username = user
elif isinstance(user, dict):
username = user.get("name", "unknown")
else:
username = "unknown"
ns = dict(
status=status,
method=request.method,
ip=request.remote_ip,
uri=uri,
request_time=request_time,
user=username,
location="",
)
msg = "{status} {method} {uri}{location} ({user}@{ip}) {request_time:.2f}ms"
if status >= 500 and status not in {502, 503}:
log_method(json.dumps(headers, indent=2))
elif status in {301, 302}:
# log redirect targets
# FIXME: _headers is private, but there doesn't appear to be a public way
# to get headers from tornado
location = handler._headers.get("Location")
if location:
ns["location"] = " -> {}".format(_scrub_uri(location))
log_method(msg.format(**ns))
| def log_request(handler):
"""log a bit more information about each request than tornado's default
- move static file get success to debug-level (reduces noise)
- get proxied IP instead of proxy IP
- log referer for redirect and failed requests
- log user-agent for failed requests
- record per-request metrics in prometheus
"""
status = handler.get_status()
request = handler.request
request_time = 1000.0 * handler.request.request_time() # micro- to milli-seconds
if status == 304 or (
status < 300
and (
isinstance(handler, StaticFileHandler)
or getattr(handler, "log_success_debug", False)
)
):
# static-file success and 304 Found are debug-level
log_method = access_log.debug
elif status < 400:
log_method = access_log.info
elif status < 500:
log_method = access_log.warning
else:
log_method = access_log.error
if request_time >= 1000 and log_method is access_log.debug:
# slow responses are always logged at least INFO-level
log_method = access_log.info
uri = _scrub_uri(request.uri)
headers = _scrub_headers(request.headers)
try:
user = handler.current_user
except (HTTPError, RuntimeError):
username = ""
else:
if user is None:
username = ""
elif isinstance(user, str):
username = user
elif isinstance(user, dict):
username = user.get("name", "unknown")
else:
username = "unknown"
ns = dict(
status=status,
method=request.method,
ip=request.remote_ip,
uri=uri,
request_time=request_time,
user=username,
location="",
)
msg = "{status} {method} {uri}{location} ({user}@{ip}) {request_time:.2f}ms"
if status >= 500 and status not in {502, 503}:
log_method(json.dumps(headers, indent=2))
elif status in {301, 302}:
# log redirect targets
# FIXME: _headers is private, but there doesn't appear to be a public way
# to get headers from tornado
location = handler._headers.get("Location")
if location:
ns["location"] = " -> {}".format(_scrub_uri(location))
log_method(msg.format(**ns))
|
54,025 | def prepare_environment(env=None):
"""Prepares an environment context to run Python on.
If Python is being used from a conda environment, this is roughly equivalent
to activating the conda environment by setting up the correct environment
variables.
Parameters
----------
env : dict, optional
Dictionary of environment variables to modify. If ``None`` is passed, then
this will create a copy of the current ``os.environ``.
Returns
-------
dict
Updated environment variable dictionary.
"""
if env is None:
env = os.environ.copy()
if PYTHON_DIRECTORY:
if is_windows():
lib_bin = os.path.join(PYTHON_DIRECTORY, 'Library', 'bin')
if os.path.exists(lib_bin) and lib_bin not in env['PATH']:
env['PATH'] = lib_bin + os.pathsep + env['PATH']
else:
lib_bin = os.path.join(PYTHON_DIRECTORY, 'bin')
if os.path.exists(lib_bin) and lib_bin not in env['PATH']:
env['PATH'] = lib_bin + os.pathsep + env['PATH']
if CONDA_EXE:
env['CONDA_EXE'] = CONDA_EXE
return env
| def prepare_environment(env=None):
"""Prepares an environment context to run Python on.
If Python is being used from a conda environment, this is roughly equivalent
to activating the conda environment by setting up the correct environment
variables.
Parameters
----------
env : dict, optional
Dictionary of environment variables to modify. If ``None`` is passed, then
this will create a copy of the current ``os.environ``.
Returns
-------
dict
Updated environment variable dictionary.
"""
if env is None:
env = os.environ.copy()
if PYTHON_DIRECTORY:
if is_windows():
lib_bin = os.path.join(PYTHON_DIRECTORY, 'Library', 'bin')
else:
lib_bin = os.path.join(PYTHON_DIRECTORY, 'bin')
if os.path.exists(lib_bin) and lib_bin not in env['PATH']:
env['PATH'] = lib_bin + os.pathsep + env['PATH']
if CONDA_EXE:
env['CONDA_EXE'] = CONDA_EXE
return env
|
47,345 | def load_flax_weights_in_pytorch_model(pt_model, flax_state):
"""Load flax checkpoints in a PyTorch model"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see "
"https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation instructions."
)
raise
# check if we have bf16 weights
is_type_bf16 = flatten_dict(jax.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).keys()
if any(is_type_bf16):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model."
)
flax_state = jax.tree_map(
lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
)
flax_state_dict = flatten_dict(flax_state)
pt_model_dict = pt_model.state_dict()
load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in set([k.split(".")[0] for k in pt_model_dict.keys()])
)
load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in set([k.split(".")[0] for k in pt_model_dict.keys()])
)
# keep track of unexpected & missing keys
unexpected_keys = []
missing_keys = set(pt_model_dict.keys())
for flax_key_tuple, flax_tensor in flax_state_dict.items():
has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
flax_key_tuple = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
# conv layer
flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
# linear layer
flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
flax_tensor = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
flax_key = ".".join(flax_key_tuple)
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
)
else:
# add weight to pytorch dict
flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
# remove from missing keys
missing_keys.remove(flax_key)
else:
# weight is not expected by PyTorch model
unexpected_keys.append(flax_key)
pt_model.load_state_dict(pt_model_dict)
# re-transform missing_keys to list
missing_keys = list(missing_keys)
if len(unexpected_keys) > 0:
logger.warning(
"Some weights of the Flax model were not used when "
f"initializing the PyTorch model {pt_model.__class__.__name__}: {unexpected_keys}\n"
f"- This IS expected if you are initializing {pt_model.__class__.__name__} from a Flax model trained on another task "
"or with another architecture (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n"
f"- This IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect "
"to be exactly identical (e.g. initializing a BertForSequenceClassification model from a FlaxBertForSequenceClassification model)."
)
else:
logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")
if len(missing_keys) > 0:
logger.warning(
f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model "
f"and are newly initialized: {missing_keys}\n"
"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
else:
logger.warning(
f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
"If your task is similar to the task the model of the checkpoint was trained on, "
f"you can already use {pt_model.__class__.__name__} for predictions without further training."
)
return pt_model
| def load_flax_weights_in_pytorch_model(pt_model, flax_state):
"""Load flax checkpoints in a PyTorch model"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see "
"https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation instructions."
)
raise
# check if we have bf16 weights
is_type_bf16 = flatten_dict(jax.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).keys()
if jnp.any(is_type_bf16):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model."
)
flax_state = jax.tree_map(
lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
)
flax_state_dict = flatten_dict(flax_state)
pt_model_dict = pt_model.state_dict()
load_model_with_head_into_base_model = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in set([k.split(".")[0] for k in pt_model_dict.keys()])
)
load_base_model_into_model_with_head = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in set([k.split(".")[0] for k in pt_model_dict.keys()])
)
# keep track of unexpected & missing keys
unexpected_keys = []
missing_keys = set(pt_model_dict.keys())
for flax_key_tuple, flax_tensor in flax_state_dict.items():
has_base_model_prefix = flax_key_tuple[0] == pt_model.base_model_prefix
require_base_model_prefix = ".".join((pt_model.base_model_prefix,) + flax_key_tuple) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
flax_key_tuple = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
flax_key_tuple = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(flax_key_tuple) not in pt_model_dict:
# conv layer
flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple) not in pt_model_dict:
# linear layer
flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
flax_tensor = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
flax_key = ".".join(flax_key_tuple)
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
)
else:
# add weight to pytorch dict
flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
# remove from missing keys
missing_keys.remove(flax_key)
else:
# weight is not expected by PyTorch model
unexpected_keys.append(flax_key)
pt_model.load_state_dict(pt_model_dict)
# re-transform missing_keys to list
missing_keys = list(missing_keys)
if len(unexpected_keys) > 0:
logger.warning(
"Some weights of the Flax model were not used when "
f"initializing the PyTorch model {pt_model.__class__.__name__}: {unexpected_keys}\n"
f"- This IS expected if you are initializing {pt_model.__class__.__name__} from a Flax model trained on another task "
"or with another architecture (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n"
f"- This IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect "
"to be exactly identical (e.g. initializing a BertForSequenceClassification model from a FlaxBertForSequenceClassification model)."
)
else:
logger.warning(f"All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n")
if len(missing_keys) > 0:
logger.warning(
f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model "
f"and are newly initialized: {missing_keys}\n"
"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
else:
logger.warning(
f"All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n"
"If your task is similar to the task the model of the checkpoint was trained on, "
f"you can already use {pt_model.__class__.__name__} for predictions without further training."
)
return pt_model
|
42,105 | def upgrade():
bind = op.get_bind()
sa.Enum(IntermediateValueModel.TrialIntermediateValueType).create(bind, checkfirst=True)
# MySQL and PostgreSQL supports DEFAULT clause like 'ALTER TABLE <tbl_name>
# ADD COLUMN <col_name> ... DEFAULT "FINITE_OR_NAN"', but seemingly Alembic
# does not support such a SQL statement. So first add a column with schema-level
# default value setting, then remove it by `batch_op.alter_column()`.
with op.batch_alter_table("trial_intermediate_values") as batch_op:
batch_op.add_column(
sa.Column(
"intermediate_value_type",
sa.Enum("FINITE", "INF_POS", "INF_NEG", "NAN", name="floattypeenum"),
nullable=False,
server_default="FINITE",
),
)
with op.batch_alter_table("trial_intermediate_values") as batch_op:
batch_op.alter_column("intermediate_value_type", server_default=None)
session = orm.Session(bind=bind)
try:
records = session.query(IntermediateValueModel).all()
mapping = []
for r in records:
value: float
if np.isclose(r.intermediate_value, RDB_MAX_FLOAT) or np.isposinf(
r.intermediate_value
):
value = np.inf
elif np.isclose(r.intermediate_value, RDB_MIN_FLOAT) or np.isneginf(
r.intermediate_value
):
value = -np.inf
elif np.isnan(r.intermediate_value):
value = np.nan
else:
value = r.intermediate_value
(
sanitized_value,
float_type,
) = IntermediateValueModel._intermediate_value_to_stored_repr(value)
mapping.append(
{
"trial_intermediate_value_id": r.trial_intermediate_value_id,
"intermediate_value_type": float_type,
"intermediate_value": sanitized_value,
}
)
session.bulk_update_mappings(IntermediateValueModel, mapping)
session.commit()
except SQLAlchemyError as e:
session.rollback()
raise e
finally:
session.close()
| def upgrade():
bind = op.get_bind()
sa.Enum(IntermediateValueModel.TrialIntermediateValueType).create(bind, checkfirst=True)
# MySQL and PostgreSQL supports DEFAULT clause like 'ALTER TABLE <tbl_name>
# ADD COLUMN <col_name> ... DEFAULT "FINITE_OR_NAN"', but seemingly Alembic
# does not support such a SQL statement. So first add a column with schema-level
# default value setting, then remove it by `batch_op.alter_column()`.
with op.batch_alter_table("trial_intermediate_values") as batch_op:
batch_op.add_column(
sa.Column(
"intermediate_value_type",
sa.Enum("FINITE", "INF_POS", "INF_NEG", "NAN", name="floattypeenum"),
nullable=False,
server_default="FINITE",
),
)
with op.batch_alter_table("trial_intermediate_values") as batch_op:
batch_op.alter_column("intermediate_value_type", server_default=None)
session = orm.Session(bind=bind)
try:
records = session.query(IntermediateValueModel).all()
mapping = []
for r in records:
value: float
if np.isclose(r.intermediate_value, RDB_MAX_FLOAT) or np.isposinf(
r.intermediate_value
):
value = float("inf")
elif np.isclose(r.intermediate_value, RDB_MIN_FLOAT) or np.isneginf(
r.intermediate_value
):
value = -np.inf
elif np.isnan(r.intermediate_value):
value = np.nan
else:
value = r.intermediate_value
(
sanitized_value,
float_type,
) = IntermediateValueModel._intermediate_value_to_stored_repr(value)
mapping.append(
{
"trial_intermediate_value_id": r.trial_intermediate_value_id,
"intermediate_value_type": float_type,
"intermediate_value": sanitized_value,
}
)
session.bulk_update_mappings(IntermediateValueModel, mapping)
session.commit()
except SQLAlchemyError as e:
session.rollback()
raise e
finally:
session.close()
|
35,553 | def read_sensor_events(duration_sec):
sensor_events = messaging.sub_sock("sensorEvents", timeout=0.1)
start_time_sec = time.time()
events = []
while time.time() - start_time_sec < duration_sec:
events += messaging.drain_sock(sensor_events)
time.sleep(0.01)
return events
| def read_sensor_events(duration_sec):
sensor_events = messaging.sub_sock("sensorEvents", timeout=0.1)
start_time_sec = time.time()
events = []
while time.monotonic() - start_time_sec < duration_sec:
events += messaging.drain_sock(sensor_events)
time.sleep(0.01)
return events
|
3,676 | def skip(skipcols):
r"""
Helper function for the `usecols` parameter of `loadtxt` and `genfromtxt`.
The parameter `usecols` of `loadtxt` and `genfromtxt` allows the user to
select specific columns from the text file. This `skip` function provides
a simple way for the user to specify the columns that should be *skipped*
instead of the columns that should be used.
Parameters
----------
skipcols : int or sequence of ints
The indices of the columns to be skipped.
Returns
-------
f : function
Returns a function with the signature::
def f(n: int) -> List[int]
that may be passed to the functions `numpy.loadtxt` and
`numpy.genfromtxt`. The function is a closure that "rememebers"
the list of columns that were passed to `skip`. The function
returns ``list(range(n))`` with the values from `skipcols` removed.
Examples
--------
>>> from numpy.lib.npyio import skip
>>> s = " 0 1 2 3 4 5\n10 11 12 13 14 15\n20 21 22 23 24 25"
>>> print(s)
0 1 2 3 4 5
10 11 12 13 14 15
20 21 22 23 24 25
Use `skip` to skip the second column (column index 1) and the last
column (column index -1).
>>> np.loadtxt(s.splitlines(), dtype=int, usecols=skip([1, -1]))
array([[ 0, 2, 3, 4],
[10, 12, 13, 14],
[20, 22, 23, 24]])
To see what `skip` actually does, call the returned function with
``n = 6``:
>>> skip([1, -1])(6)
[0, 2, 3, 4]
"""
try:
skipcols = list(skipcols)
except TypeError:
# Presumably skipcols is an int, so wrap it in a list.
skipcols = [skipcols]
# Check that the values in skipcols are, in fact, ints.
# (We can check the types, but we can't check the actual values until
# we know how many columns are in the file.)
for c in skipcols:
try:
opindex(c)
except TypeError as e:
e.args = (f'skipcols must be an int or a sequence of ints but '
f'it contains at least one element of type '
f'{type(c)}',)
raise
def skipper(n):
normed_skipcols = []
for c in skipcols:
try:
newc = normalize_axis_index(c, n)
except np.AxisError:
raise IndexError(f'skip column index {c} out of range for '
f'file with {n} columns') from None
normed_skipcols.append(newc)
usecols = [c for c in range(n) if c not in normed_skipcols]
return usecols
return skipper
| def skip(skipcols):
r"""
Helper function for the `usecols` parameter of `loadtxt` and `genfromtxt`.
The parameter `usecols` of `loadtxt` and `genfromtxt` allows the user to
select specific columns from the text file. This `skip` function provides
a simple way for the user to specify the columns that should be *skipped*
instead of the columns that should be used.
Parameters
----------
skipcols : int or sequence of ints
The indices of the columns to be skipped.
Returns
-------
f : function
Returns a function with the signature::
def f(n: int) -> List[int]
that may be passed to the functions `numpy.loadtxt` and
`numpy.genfromtxt`. The function is a closure that "rememebers"
the list of columns that were passed to `skip`. The function
returns ``list(range(n))`` with the values from `skipcols` removed.
Examples
--------
>>> from numpy.lib.npyio import skip
>>> s = " 0 1 2 3 4 5\n10 11 12 13 14 15\n20 21 22 23 24 25"
>>> print(s)
0 1 2 3 4 5
10 11 12 13 14 15
20 21 22 23 24 25
Use `skip` to skip the second column (column index 1) and the last
column (column index -1).
>>> np.loadtxt(s.splitlines(), dtype=int, usecols=skip([1, -1]))
array([[ 0, 2, 3, 4],
[10, 12, 13, 14],
[20, 22, 23, 24]])
To see what `skip` actually does, call the returned function with
``n = 6``:
>>> skip([1, -1])(6)
[0, 2, 3, 4]
"""
skipcols = _normalize_usecols_to_list(skipcols)
# Check that the values in skipcols are, in fact, ints.
# (We can check the types, but we can't check the actual values until
# we know how many columns are in the file.)
for c in skipcols:
try:
opindex(c)
except TypeError as e:
e.args = (f'skipcols must be an int or a sequence of ints but '
f'it contains at least one element of type '
f'{type(c)}',)
raise
def skipper(n):
normed_skipcols = []
for c in skipcols:
try:
newc = normalize_axis_index(c, n)
except np.AxisError:
raise IndexError(f'skip column index {c} out of range for '
f'file with {n} columns') from None
normed_skipcols.append(newc)
usecols = [c for c in range(n) if c not in normed_skipcols]
return usecols
return skipper
|
8,426 | def test_create_with_spectral_coord():
spectral_coord = SpectralCoord(np.arange(5100, 5150)*u.AA, radial_velocity = u.Quantity(1000.0, "km/s"))
flux = np.random.randn(50)*u.Jy
spec = Spectrum1D(spectral_axis = spectral_coord, flux = flux)
assert spec.radial_velocity == u.Quantity(1000.0, "km/s")
assert isinstance(spec.spectral_axis, SpectralCoord)
assert spec.spectral_axis.size == 50
| def test_create_with_spectral_coord():
spectral_coord = SpectralCoord(np.arange(5100, 5150)*u.AA, radial_velocity=u.Quantity(1000.0, "km/s"))
flux = np.random.randn(50)*u.Jy
spec = Spectrum1D(spectral_axis = spectral_coord, flux = flux)
assert spec.radial_velocity == u.Quantity(1000.0, "km/s")
assert isinstance(spec.spectral_axis, SpectralCoord)
assert spec.spectral_axis.size == 50
|
34,112 | def get_fs_and_path(
uri: str,
) -> Tuple[Optional["pyarrow.fs.FileSystem"], Optional[str]]:
if not pyarrow:
return None, None
parsed = urllib.parse.urlparse(uri)
path = parsed.netloc + parsed.path
cache_key = (parsed.scheme, parsed.netloc)
if cache_key in _cached_fs:
fs = _cached_fs[cache_key]
return fs, path
try:
fs, path = pyarrow.fs.FileSystem.from_uri(uri)
_cached_fs[cache_key] = fs
return fs, path
except (pyarrow.lib.ArrowInvalid, pyarrow.lib.ArrowNotImplementedError):
# Raised when URI not recognized
if not fsspec:
# Only return if fsspec is not installed
return None, None
# Else, try to resolve protocol via fsspec
try:
fsspec_fs = fsspec.filesystem(parsed.scheme)
except ValueError:
# Raised when protocol not known
return None, None
fsspec_handler = pyarrow.fs.FSSpecHandler
if parsed.scheme in ["gs", "gcs"]:
# TODO(amogkam): Remove after https://github.com/fsspec/gcsfs/issues/498 is
# resolved.
try:
import gcsfs
if version.parse(gcsfs.__version__) > "2022.7.1":
raise RuntimeError(
"`gcsfs` versions greater than '2022.7.1' are not "
f"compatible with Pyarrow. You have version "
f"{gcsfs.__version__}. Please downgrade your gcsfs "
f"version. See more details in "
f"https://github.com/fsspec/gcsfs/issues/498."
)
except ImportError:
pass
# GS doesn't support `create_parents` arg in `create_dir()`
fsspec_handler = _CustomGCSHandler
fs = pyarrow.fs.PyFileSystem(fsspec_handler(fsspec_fs))
_cached_fs[cache_key] = fs
return fs, path
| def get_fs_and_path(
uri: str,
) -> Tuple[Optional["pyarrow.fs.FileSystem"], Optional[str]]:
if not pyarrow:
return None, None
parsed = urllib.parse.urlparse(uri)
path = parsed.netloc + parsed.path
cache_key = (parsed.scheme, parsed.netloc)
if cache_key in _cached_fs:
fs = _cached_fs[cache_key]
return fs, path
try:
fs, path = pyarrow.fs.FileSystem.from_uri(uri)
_cached_fs[cache_key] = fs
return fs, path
except (pyarrow.lib.ArrowInvalid, pyarrow.lib.ArrowNotImplementedError):
# Raised when URI not recognized
if not fsspec:
# Only return if fsspec is not installed
return None, None
# Else, try to resolve protocol via fsspec
try:
fsspec_fs = fsspec.filesystem(parsed.scheme)
except ValueError:
# Raised when protocol not known
return None, None
fsspec_handler = pyarrow.fs.FSSpecHandler
if parsed.scheme in ["gs", "gcs"]:
# TODO(amogkam): Remove after https://github.com/fsspec/gcsfs/issues/498 is
# resolved.
try:
import gcsfs
if version.parse(gcsfs.__version__) > "2022.7.1":
raise RuntimeError(
"`gcsfs` versions greater than '2022.7.1' are not "
f"compatible with pyarrow. You have gcsfs version "
f"{gcsfs.__version__}. Please downgrade your gcsfs "
f"version. See more details in "
f"https://github.com/fsspec/gcsfs/issues/498."
)
except ImportError:
pass
# GS doesn't support `create_parents` arg in `create_dir()`
fsspec_handler = _CustomGCSHandler
fs = pyarrow.fs.PyFileSystem(fsspec_handler(fsspec_fs))
_cached_fs[cache_key] = fs
return fs, path
|
35,272 | def test_validate_parafac2_tensor():
rng = check_random_state(12345)
true_shape = [(4, 5)]*3
true_rank = 2
weights, factors, projections = random_parafac2(true_shape, rank=true_rank)
# Check shape and rank returned
shape, rank = _validate_parafac2_tensor((weights, factors, projections))
assert_equal(shape, true_shape,
err_msg='Returned incorrect shape (got {}, expected {})'.format(
shape, true_shape))
assert_equal(rank, true_rank,
err_msg='Returned incorrect rank (got {}, expected {})'.format(
rank, true_rank))
# One of the factors has the wrong rank
for mode in range(3):
false_shape = (len(factors[mode]), true_rank+1)
factors[mode], copy = tl.tensor(rng.random_sample(false_shape)), factors[mode]
with assert_raises(ValueError):
_validate_parafac2_tensor((weights, factors, projections))
factors[mode] = copy
# Not three factor matrices
with assert_raises(ValueError):
_validate_parafac2_tensor((weights, factors[1:], projections))
# Not enough projections
with assert_raises(ValueError):
_validate_parafac2_tensor((weights, factors, projections[1:]))
# Wrong number of weights
with assert_raises(ValueError):
_validate_parafac2_tensor((weights[1:], factors, projections))
# The projections aren't orthogonal
false_projections = [rng.random_sample(tl.shape(P)) for P in projections]
with assert_raises(ValueError):
_validate_parafac2_tensor((weights, factors, false_projections))
| def test_validate_parafac2_tensor():
rng = check_random_state(12345)
true_shape = [(4, 5)]*3
true_rank = 2
weights, factors, projections = random_parafac2(true_shape, rank=true_rank)
# Check shape and rank returned
shape, rank = _validate_parafac2_tensor((weights, factors, projections))
assert_equal(shape, true_shape,
err_msg='Returned incorrect shape (got {}, expected {})'.format(
shape, true_shape))
assert_equal(rank, true_rank,
err_msg='Returned incorrect rank (got {}, expected {})'.format(
rank, true_rank))
# One of the factors has the wrong rank
for mode in range(3):
false_shape = (len(factors[mode]), true_rank+1)
factors[mode], copy = tl.tensor(rng.random_sample(false_shape)), factors[mode]
with assert_raises(ValueError):
_validate_parafac2_tensor((weights, factors, projections))
factors[mode] = copy
# Not three factor matrices
with assert_raises(ValueError):
_validate_parafac2_tensor((weights, factors[1:], projections))
# Not enough projections
with assert_raises(ValueError):
_validate_parafac2_tensor((weights, factors, projections[1:]))
# Wrong number of weights
with assert_raises(ValueError):
_validate_parafac2_tensor((weights[1:], factors, projections))
# The projections aren't orthogonal
false_projections = [tl.tensor(rng.random_sample(tl.shape(P))) for P in projections]
with assert_raises(ValueError):
_validate_parafac2_tensor((weights, factors, false_projections))
|
28,563 | def plot_loo_pit(
ax,
figsize,
ecdf,
loo_pit,
loo_pit_ecdf,
unif_ecdf,
p975,
p025,
fill_kwargs,
ecdf_fill,
use_hdi,
x_vals,
hdi_kwargs,
hdi_odds,
n_unif,
unif,
plot_unif_kwargs,
loo_pit_kde,
legend,
y_hat,
y,
color,
textsize,
credible_interval,
plot_kwargs,
backend_kwargs,
show,
):
"""Matplotlib loo pit plot."""
if backend_kwargs is None:
backend_kwargs = {}
backend_kwargs = {
**backend_kwarg_defaults(),
**backend_kwargs,
}
(figsize, _, _, xt_labelsize, linewidth, _) = _scale_fig_size(figsize, textsize, 1, 1)
plot_kwargs = matplotlib_kwarg_dealiaser(plot_kwargs, "plot")
plot_kwargs["color"] = to_hex(color)
plot_kwargs.setdefault("linewidth", linewidth * 1.4)
if isinstance(y, str):
label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y)
elif isinstance(y, DataArray):
label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y.name)
elif isinstance(y_hat, str):
label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y_hat)
elif isinstance(y_hat, DataArray):
label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y_hat.name)
else:
label = "LOO-PIT ECDF" if ecdf else "LOO-PIT"
plot_kwargs.setdefault("label", label)
plot_kwargs.setdefault("zorder", 5)
plot_unif_kwargs = matplotlib_kwarg_dealiaser(plot_unif_kwargs, "plot")
light_color = rgb_to_hsv(to_rgb(plot_kwargs.get("color")))
light_color[1] /= 2 # pylint: disable=unsupported-assignment-operation
light_color[2] += (1 - light_color[2]) / 2 # pylint: disable=unsupported-assignment-operation
plot_unif_kwargs.setdefault("color", to_hex(hsv_to_rgb(light_color)))
plot_unif_kwargs.setdefault("alpha", 0.5)
plot_unif_kwargs.setdefault("linewidth", 0.6 * linewidth)
if ecdf:
n_data_points = loo_pit.size
plot_kwargs.setdefault("drawstyle", "steps-mid" if n_data_points < 100 else "default")
plot_unif_kwargs.setdefault("drawstyle", "steps-mid" if n_data_points < 100 else "default")
if ecdf_fill:
if fill_kwargs is None:
fill_kwargs = {}
fill_kwargs.setdefault("color", to_hex(hsv_to_rgb(light_color)))
fill_kwargs.setdefault("alpha", 0.5)
fill_kwargs.setdefault(
"step", "mid" if plot_kwargs["drawstyle"] == "steps-mid" else None
)
fill_kwargs.setdefault("label", "{:.3g}% credible interval".format(credible_interval))
elif use_hdi:
if hdi_kwargs is None:
hdi_kwargs = {}
hdi_kwargs.setdefault("color", to_hex(hsv_to_rgb(light_color)))
hdi_kwargs.setdefault("alpha", 0.35)
hdi_kwargs.setdefault("label", "Uniform HDI")
if ax is None:
_, ax = plt.subplots(1, 1, figsize=figsize, **backend_kwargs)
if ecdf:
ax.plot(
np.hstack((0, loo_pit, 1)), np.hstack((0, loo_pit - loo_pit_ecdf, 0)), **plot_kwargs
)
if ecdf_fill:
ax.fill_between(unif_ecdf, p975 - unif_ecdf, p025 - unif_ecdf, **fill_kwargs)
else:
ax.plot(unif_ecdf, p975 - unif_ecdf, unif_ecdf, p025 - unif_ecdf, **plot_unif_kwargs)
else:
x_ss = np.empty((n_unif, len(loo_pit_kde)))
u_dens = np.empty((n_unif, len(loo_pit_kde)))
if use_hdi:
ax.axhspan(*hdi_odds, **hdi_kwargs)
else:
for idx in range(n_unif):
unif_density, xmin, xmax = _fast_kde(unif[idx, :])
x_s = np.linspace(xmin, xmax, len(unif_density))
x_ss[idx] = x_s
u_dens[idx] = unif_density
ax.plot(x_ss.T, u_dens.T, **plot_unif_kwargs)
ax.plot(x_vals, loo_pit_kde, **plot_kwargs)
ax.set_xlim(0, 1)
ax.set_ylim(0, None)
ax.tick_params(labelsize=xt_labelsize)
if legend:
if not (use_hdi or (ecdf and ecdf_fill)):
label = "{:.3g}% credible interval".format(credible_interval) if ecdf else "Uniform"
ax.plot([], label=label, **plot_unif_kwargs)
ax.legend()
if backend_show(show):
plt.show()
return ax
| def plot_loo_pit(
ax,
figsize,
ecdf,
loo_pit,
loo_pit_ecdf,
unif_ecdf,
p975,
p025,
fill_kwargs,
ecdf_fill,
use_hdi,
x_vals,
hdi_kwargs,
hdi_odds,
n_unif,
unif,
plot_unif_kwargs,
loo_pit_kde,
legend,
y_hat,
y,
color,
textsize,
credible_interval,
plot_kwargs,
backend_kwargs,
show,
):
"""Matplotlib loo pit plot."""
if backend_kwargs is None:
backend_kwargs = {}
backend_kwargs = {
**backend_kwarg_defaults(),
**backend_kwargs,
}
(figsize, _, _, xt_labelsize, linewidth, _) = _scale_fig_size(figsize, textsize, 1, 1)
plot_kwargs = matplotlib_kwarg_dealiaser(plot_kwargs, "plot")
plot_kwargs["color"] = to_hex(color)
plot_kwargs.setdefault("linewidth", linewidth * 1.4)
if isinstance(y, str):
label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y)
elif isinstance(y, DataArray) and y.name is not None:
label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y.name)
elif isinstance(y_hat, str):
label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y_hat)
elif isinstance(y_hat, DataArray):
label = ("{} LOO-PIT ECDF" if ecdf else "{} LOO-PIT").format(y_hat.name)
else:
label = "LOO-PIT ECDF" if ecdf else "LOO-PIT"
plot_kwargs.setdefault("label", label)
plot_kwargs.setdefault("zorder", 5)
plot_unif_kwargs = matplotlib_kwarg_dealiaser(plot_unif_kwargs, "plot")
light_color = rgb_to_hsv(to_rgb(plot_kwargs.get("color")))
light_color[1] /= 2 # pylint: disable=unsupported-assignment-operation
light_color[2] += (1 - light_color[2]) / 2 # pylint: disable=unsupported-assignment-operation
plot_unif_kwargs.setdefault("color", to_hex(hsv_to_rgb(light_color)))
plot_unif_kwargs.setdefault("alpha", 0.5)
plot_unif_kwargs.setdefault("linewidth", 0.6 * linewidth)
if ecdf:
n_data_points = loo_pit.size
plot_kwargs.setdefault("drawstyle", "steps-mid" if n_data_points < 100 else "default")
plot_unif_kwargs.setdefault("drawstyle", "steps-mid" if n_data_points < 100 else "default")
if ecdf_fill:
if fill_kwargs is None:
fill_kwargs = {}
fill_kwargs.setdefault("color", to_hex(hsv_to_rgb(light_color)))
fill_kwargs.setdefault("alpha", 0.5)
fill_kwargs.setdefault(
"step", "mid" if plot_kwargs["drawstyle"] == "steps-mid" else None
)
fill_kwargs.setdefault("label", "{:.3g}% credible interval".format(credible_interval))
elif use_hdi:
if hdi_kwargs is None:
hdi_kwargs = {}
hdi_kwargs.setdefault("color", to_hex(hsv_to_rgb(light_color)))
hdi_kwargs.setdefault("alpha", 0.35)
hdi_kwargs.setdefault("label", "Uniform HDI")
if ax is None:
_, ax = plt.subplots(1, 1, figsize=figsize, **backend_kwargs)
if ecdf:
ax.plot(
np.hstack((0, loo_pit, 1)), np.hstack((0, loo_pit - loo_pit_ecdf, 0)), **plot_kwargs
)
if ecdf_fill:
ax.fill_between(unif_ecdf, p975 - unif_ecdf, p025 - unif_ecdf, **fill_kwargs)
else:
ax.plot(unif_ecdf, p975 - unif_ecdf, unif_ecdf, p025 - unif_ecdf, **plot_unif_kwargs)
else:
x_ss = np.empty((n_unif, len(loo_pit_kde)))
u_dens = np.empty((n_unif, len(loo_pit_kde)))
if use_hdi:
ax.axhspan(*hdi_odds, **hdi_kwargs)
else:
for idx in range(n_unif):
unif_density, xmin, xmax = _fast_kde(unif[idx, :])
x_s = np.linspace(xmin, xmax, len(unif_density))
x_ss[idx] = x_s
u_dens[idx] = unif_density
ax.plot(x_ss.T, u_dens.T, **plot_unif_kwargs)
ax.plot(x_vals, loo_pit_kde, **plot_kwargs)
ax.set_xlim(0, 1)
ax.set_ylim(0, None)
ax.tick_params(labelsize=xt_labelsize)
if legend:
if not (use_hdi or (ecdf and ecdf_fill)):
label = "{:.3g}% credible interval".format(credible_interval) if ecdf else "Uniform"
ax.plot([], label=label, **plot_unif_kwargs)
ax.legend()
if backend_show(show):
plt.show()
return ax
|
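The visible change in this row is the added `y.name is not None` guard on the DataArray branch of the label logic. A tiny self-contained sketch of what that guard avoids, assuming numpy and xarray are available (the variable names are made up; only the label format string mirrors the row):

# Illustrative sketch only: an unnamed DataArray has name None, which would
# otherwise be formatted into the label as the literal string "None".
import numpy as np
import xarray as xr

def make_label(y):
    if isinstance(y, xr.DataArray) and y.name is not None:
        return "{} LOO-PIT".format(y.name)
    return "LOO-PIT"

print(make_label(xr.DataArray(np.arange(3), name="obs")))  # obs LOO-PIT
print(make_label(xr.DataArray(np.arange(3))))              # LOO-PIT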
24,076 | def getTextInfoSpeech( # noqa: C901
info: textInfos.TextInfo,
useCache: Union[bool, SpeakTextInfoState] = True,
formatConfig: Dict[str, bool] = None,
unit: Optional[str] = None,
reason: OutputReason = OutputReason.QUERY,
_prefixSpeechCommand: Optional[SpeechCommand] = None,
onlyInitialFields: bool = False,
suppressBlanks: bool = False
) -> Generator[SpeechSequence, None, bool]:
onlyCache = reason == OutputReason.ONLYCACHE
if isinstance(useCache,SpeakTextInfoState):
speakTextInfoState=useCache
elif useCache:
speakTextInfoState=SpeakTextInfoState(info.obj)
else:
speakTextInfoState=None
autoLanguageSwitching=config.conf['speech']['autoLanguageSwitching']
extraDetail=unit in (textInfos.UNIT_CHARACTER,textInfos.UNIT_WORD)
if not formatConfig:
formatConfig=config.conf["documentFormatting"]
formatConfig=formatConfig.copy()
if extraDetail:
formatConfig['extraDetail']=True
reportIndentation=unit==textInfos.UNIT_LINE and ( formatConfig["reportLineIndentation"] or formatConfig["reportLineIndentationWithTones"])
# For performance reasons, when navigating by paragraph or table cell, spelling errors will not be announced.
if unit in (textInfos.UNIT_PARAGRAPH, textInfos.UNIT_CELL) and reason == OutputReason.CARET:
formatConfig['reportSpellingErrors']=False
#Fetch the last controlFieldStack, or make a blank one
controlFieldStackCache=speakTextInfoState.controlFieldStackCache if speakTextInfoState else []
formatFieldAttributesCache=speakTextInfoState.formatFieldAttributesCache if speakTextInfoState else {}
textWithFields=info.getTextWithFields(formatConfig)
# We don't care about node bounds, especially when comparing fields.
# Remove them.
for command in textWithFields:
if not isinstance(command,textInfos.FieldCommand):
continue
field=command.field
if not field:
continue
try:
del field["_startOfNode"]
except KeyError:
pass
try:
del field["_endOfNode"]
except KeyError:
pass
#Make a new controlFieldStack and formatField from the textInfo's initialFields
newControlFieldStack=[]
newFormatField=textInfos.FormatField()
initialFields=[]
for field in textWithFields:
if isinstance(field,textInfos.FieldCommand) and field.command in ("controlStart","formatChange"):
initialFields.append(field.field)
else:
break
if len(initialFields)>0:
del textWithFields[0:len(initialFields)]
endFieldCount=0
for field in reversed(textWithFields):
if isinstance(field,textInfos.FieldCommand) and field.command=="controlEnd":
endFieldCount+=1
else:
break
if endFieldCount>0:
del textWithFields[0-endFieldCount:]
for field in initialFields:
if isinstance(field,textInfos.ControlField):
newControlFieldStack.append(field)
elif isinstance(field,textInfos.FormatField):
newFormatField.update(field)
else:
raise ValueError("unknown field: %s"%field)
#Calculate how many fields in the old and new controlFieldStacks are the same
commonFieldCount=0
for count in range(min(len(newControlFieldStack),len(controlFieldStackCache))):
# #2199: When comparing controlFields try using uniqueID if it exists before resorting to compairing the entire dictionary
oldUniqueID=controlFieldStackCache[count].get('uniqueID')
newUniqueID=newControlFieldStack[count].get('uniqueID')
if ((oldUniqueID is not None or newUniqueID is not None) and newUniqueID==oldUniqueID) or (newControlFieldStack[count]==controlFieldStackCache[count]):
commonFieldCount+=1
else:
break
speechSequence: SpeechSequence = []
# #2591: Only if the reason is not focus, Speak the exit of any controlFields not in the new stack.
# We don't do this for focus because hearing "out of list", etc. isn't useful when tabbing or using quick navigation and makes navigation less efficient.
if reason not in [OutputReason.FOCUS, OutputReason.QUICKNAV]:
endingBlock=False
for count in reversed(range(commonFieldCount,len(controlFieldStackCache))):
fieldSequence = info.getControlFieldSpeech(
controlFieldStackCache[count],
controlFieldStackCache[0:count],
"end_removedFromControlFieldStack",
formatConfig,
extraDetail,
reason=reason
)
if fieldSequence:
speechSequence.extend(fieldSequence)
if not endingBlock and reason == OutputReason.SAYALL:
endingBlock=bool(int(controlFieldStackCache[count].get('isBlock',0)))
if endingBlock:
speechSequence.append(EndUtteranceCommand())
# The TextInfo should be considered blank if we are only exiting fields (i.e. we aren't
# entering any new fields and there is no text).
shouldConsiderTextInfoBlank = True
if _prefixSpeechCommand is not None:
assert isinstance(_prefixSpeechCommand, SpeechCommand)
speechSequence.append(_prefixSpeechCommand)
#Get speech text for any fields that are in both controlFieldStacks, if extra detail is not requested
if not extraDetail:
for count in range(commonFieldCount):
field=newControlFieldStack[count]
fieldSequence = info.getControlFieldSpeech(
field,
newControlFieldStack[0:count],
"start_inControlFieldStack",
formatConfig,
extraDetail,
reason=reason
)
if fieldSequence:
speechSequence.extend(fieldSequence)
shouldConsiderTextInfoBlank = False
if field.get("role")==controlTypes.Role.MATH:
shouldConsiderTextInfoBlank = False
_extendSpeechSequence_addMathForTextInfo(speechSequence, info, field)
# When true, we are inside a clickable field, and should therefore not announce any more new clickable fields
inClickable=False
#Get speech text for any fields in the new controlFieldStack that are not in the old controlFieldStack
for count in range(commonFieldCount,len(newControlFieldStack)):
field=newControlFieldStack[count]
if not inClickable and formatConfig['reportClickable']:
states=field.get('states')
if states and controlTypes.State.CLICKABLE in states:
# We entered the most outer clickable, so announce it, if we won't be announcing anything else interesting for this field
presCat=field.getPresentationCategory(newControlFieldStack[0:count],formatConfig,reason)
if not presCat or presCat is field.PRESCAT_LAYOUT:
speechSequence.append(controlTypes.State.CLICKABLE.displayString)
shouldConsiderTextInfoBlank = False
inClickable=True
fieldSequence = info.getControlFieldSpeech(
field,
newControlFieldStack[0:count],
"start_addedToControlFieldStack",
formatConfig,
extraDetail,
reason=reason
)
if fieldSequence:
speechSequence.extend(fieldSequence)
shouldConsiderTextInfoBlank = False
if field.get("role")==controlTypes.Role.MATH:
shouldConsiderTextInfoBlank = False
_extendSpeechSequence_addMathForTextInfo(speechSequence, info, field)
commonFieldCount+=1
#Fetch the text for format field attributes that have changed between what was previously cached, and this textInfo's initialFormatField.
fieldSequence = info.getFormatFieldSpeech(
newFormatField,
formatFieldAttributesCache,
formatConfig,
reason=reason,
unit=unit,
extraDetail=extraDetail,
initialFormat=True
)
if fieldSequence:
speechSequence.extend(fieldSequence)
language = None
if autoLanguageSwitching:
language=newFormatField.get('language')
speechSequence.append(LangChangeCommand(language))
lastLanguage=language
def isControlEndFieldCommand(x):
return isinstance(x, textInfos.FieldCommand) and x.command == "controlEnd"
isWordOrCharUnit = unit in (textInfos.UNIT_CHARACTER, textInfos.UNIT_WORD)
if onlyInitialFields or (
isWordOrCharUnit
and len(textWithFields) > 0
and len(textWithFields[0].strip() if not textWithFields[0].isspace() else textWithFields[0]) == 1
and all(isControlEndFieldCommand(x) for x in itertools.islice(textWithFields, 1, None))
):
if not onlyCache:
if onlyInitialFields or any(isinstance(x, str) for x in speechSequence):
yield speechSequence
if not onlyInitialFields:
spellingSequence = list(getSpellingSpeech(
textWithFields[0],
locale=language
))
logBadSequenceTypes(spellingSequence)
yield spellingSequence
if useCache:
speakTextInfoState.controlFieldStackCache=newControlFieldStack
speakTextInfoState.formatFieldAttributesCache=formatFieldAttributesCache
if not isinstance(useCache,SpeakTextInfoState):
speakTextInfoState.updateObj()
return False
# Similar to before, but If the most inner clickable is exited, then we allow announcing clickable for the next lot of clickable fields entered.
inClickable=False
#Move through the field commands, getting speech text for all controlStarts, controlEnds and formatChange commands
#But also keep newControlFieldStack up to date as we will need it for the ends
# Add any text to a separate list, as it must be handled differently.
#Also make sure that LangChangeCommand objects are added before any controlField or formatField speech
relativeSpeechSequence=[]
inTextChunk=False
allIndentation=""
indentationDone=False
for command in textWithFields:
if isinstance(command,str):
# Text should break a run of clickables
inClickable=False
if reportIndentation and not indentationDone:
indentation,command=splitTextIndentation(command)
# Combine all indentation into one string for later processing.
allIndentation+=indentation
if command:
# There was content after the indentation, so there is no more indentation.
indentationDone=True
if command:
if inTextChunk:
relativeSpeechSequence[-1]+=command
else:
relativeSpeechSequence.append(command)
inTextChunk=True
elif isinstance(command,textInfos.FieldCommand):
newLanguage=None
if command.command=="controlStart":
# Control fields always start a new chunk, even if they have no field text.
inTextChunk=False
fieldSequence = []
if not inClickable and formatConfig['reportClickable']:
states=command.field.get('states')
if states and controlTypes.State.CLICKABLE in states:
# We have entered an outer most clickable or entered a new clickable after exiting a previous one
# Announce it if there is nothing else interesting about the field, but not if the user turned it off.
presCat=command.field.getPresentationCategory(newControlFieldStack[0:],formatConfig,reason)
if not presCat or presCat is command.field.PRESCAT_LAYOUT:
fieldSequence.append(controlTypes.State.CLICKABLE.displayString)
inClickable=True
fieldSequence.extend(info.getControlFieldSpeech(
command.field,
newControlFieldStack,
"start_relative",
formatConfig,
extraDetail,
reason=reason
))
newControlFieldStack.append(command.field)
elif command.command=="controlEnd":
# Exiting a controlField should break a run of clickables
inClickable=False
# Control fields always start a new chunk, even if they have no field text.
inTextChunk=False
fieldSequence = info.getControlFieldSpeech(
newControlFieldStack[-1],
newControlFieldStack[0:-1],
"end_relative",
formatConfig,
extraDetail,
reason=reason
)
del newControlFieldStack[-1]
if commonFieldCount>len(newControlFieldStack):
commonFieldCount=len(newControlFieldStack)
elif command.command=="formatChange":
fieldSequence = info.getFormatFieldSpeech(
command.field,
formatFieldAttributesCache,
formatConfig,
reason=reason,
unit=unit,
extraDetail=extraDetail
)
if fieldSequence:
inTextChunk=False
if autoLanguageSwitching:
newLanguage=command.field.get('language')
if lastLanguage!=newLanguage:
# The language has changed, so this starts a new text chunk.
inTextChunk=False
if not inTextChunk:
if fieldSequence:
if autoLanguageSwitching and lastLanguage is not None:
# Fields must be spoken in the default language.
relativeSpeechSequence.append(LangChangeCommand(None))
lastLanguage=None
relativeSpeechSequence.extend(fieldSequence)
if command.command=="controlStart" and command.field.get("role")==controlTypes.Role.MATH:
_extendSpeechSequence_addMathForTextInfo(relativeSpeechSequence, info, command.field)
if autoLanguageSwitching and newLanguage!=lastLanguage:
relativeSpeechSequence.append(LangChangeCommand(newLanguage))
lastLanguage=newLanguage
if reportIndentation and speakTextInfoState and allIndentation!=speakTextInfoState.indentationCache:
indentationSpeech=getIndentationSpeech(allIndentation, formatConfig)
if autoLanguageSwitching and speechSequence[-1].lang is not None:
# Indentation must be spoken in the default language,
# but the initial format field specified a different language.
# Insert the indentation before the LangChangeCommand.
langChange = speechSequence.pop()
speechSequence.extend(indentationSpeech)
speechSequence.append(langChange)
else:
speechSequence.extend(indentationSpeech)
if speakTextInfoState: speakTextInfoState.indentationCache=allIndentation
# Don't add this text if it is blank.
relativeBlank=True
for x in relativeSpeechSequence:
if isinstance(x,str) and not isBlank(x):
relativeBlank=False
break
if not relativeBlank:
speechSequence.extend(relativeSpeechSequence)
shouldConsiderTextInfoBlank = False
#Finally get speech text for any fields left in new controlFieldStack that are common with the old controlFieldStack (for closing), if extra detail is not requested
if autoLanguageSwitching and lastLanguage is not None:
speechSequence.append(
LangChangeCommand(None)
)
lastLanguage=None
if not extraDetail:
for count in reversed(range(min(len(newControlFieldStack),commonFieldCount))):
fieldSequence = info.getControlFieldSpeech(
newControlFieldStack[count],
newControlFieldStack[0:count],
"end_inControlFieldStack",
formatConfig,
extraDetail,
reason=reason
)
if fieldSequence:
speechSequence.extend(fieldSequence)
shouldConsiderTextInfoBlank = False
# If there is nothing that should cause the TextInfo to be considered
# non-blank, blank should be reported, unless we are doing a say all.
if not suppressBlanks and reason != OutputReason.SAYALL and shouldConsiderTextInfoBlank:
# Translators: This is spoken when the line is considered blank.
speechSequence.append(_("blank"))
#Cache a copy of the new controlFieldStack for future use
if useCache:
speakTextInfoState.controlFieldStackCache=list(newControlFieldStack)
speakTextInfoState.formatFieldAttributesCache=formatFieldAttributesCache
if not isinstance(useCache,SpeakTextInfoState):
speakTextInfoState.updateObj()
if reason == OutputReason.ONLYCACHE or not speechSequence:
return False
yield speechSequence
return True
| def cancelDelayedPhoneticDescriptionIfPending() -> None:
inClickable=False
#Get speech text for any fields in the new controlFieldStack that are not in the old controlFieldStack
for count in range(commonFieldCount,len(newControlFieldStack)):
field=newControlFieldStack[count]
if not inClickable and formatConfig['reportClickable']:
states=field.get('states')
if states and controlTypes.State.CLICKABLE in states:
# We entered the most outer clickable, so announce it, if we won't be announcing anything else interesting for this field
presCat=field.getPresentationCategory(newControlFieldStack[0:count],formatConfig,reason)
if not presCat or presCat is field.PRESCAT_LAYOUT:
speechSequence.append(controlTypes.State.CLICKABLE.displayString)
shouldConsiderTextInfoBlank = False
inClickable=True
fieldSequence = info.getControlFieldSpeech(
field,
newControlFieldStack[0:count],
"start_addedToControlFieldStack",
formatConfig,
extraDetail,
reason=reason
)
if fieldSequence:
speechSequence.extend(fieldSequence)
shouldConsiderTextInfoBlank = False
if field.get("role")==controlTypes.Role.MATH:
shouldConsiderTextInfoBlank = False
_extendSpeechSequence_addMathForTextInfo(speechSequence, info, field)
commonFieldCount+=1
#Fetch the text for format field attributes that have changed between what was previously cached, and this textInfo's initialFormatField.
fieldSequence = info.getFormatFieldSpeech(
newFormatField,
formatFieldAttributesCache,
formatConfig,
reason=reason,
unit=unit,
extraDetail=extraDetail,
initialFormat=True
)
if fieldSequence:
speechSequence.extend(fieldSequence)
language = None
if autoLanguageSwitching:
language=newFormatField.get('language')
speechSequence.append(LangChangeCommand(language))
lastLanguage=language
def isControlEndFieldCommand(x):
return isinstance(x, textInfos.FieldCommand) and x.command == "controlEnd"
isWordOrCharUnit = unit in (textInfos.UNIT_CHARACTER, textInfos.UNIT_WORD)
if onlyInitialFields or (
isWordOrCharUnit
and len(textWithFields) > 0
and len(textWithFields[0].strip() if not textWithFields[0].isspace() else textWithFields[0]) == 1
and all(isControlEndFieldCommand(x) for x in itertools.islice(textWithFields, 1, None))
):
if not onlyCache:
if onlyInitialFields or any(isinstance(x, str) for x in speechSequence):
yield speechSequence
if not onlyInitialFields:
spellingSequence = list(getSpellingSpeech(
textWithFields[0],
locale=language
))
logBadSequenceTypes(spellingSequence)
yield spellingSequence
if useCache:
speakTextInfoState.controlFieldStackCache=newControlFieldStack
speakTextInfoState.formatFieldAttributesCache=formatFieldAttributesCache
if not isinstance(useCache,SpeakTextInfoState):
speakTextInfoState.updateObj()
return False
# Similar to before, but If the most inner clickable is exited, then we allow announcing clickable for the next lot of clickable fields entered.
inClickable=False
#Move through the field commands, getting speech text for all controlStarts, controlEnds and formatChange commands
#But also keep newControlFieldStack up to date as we will need it for the ends
# Add any text to a separate list, as it must be handled differently.
#Also make sure that LangChangeCommand objects are added before any controlField or formatField speech
relativeSpeechSequence=[]
inTextChunk=False
allIndentation=""
indentationDone=False
for command in textWithFields:
if isinstance(command,str):
# Text should break a run of clickables
inClickable=False
if reportIndentation and not indentationDone:
indentation,command=splitTextIndentation(command)
# Combine all indentation into one string for later processing.
allIndentation+=indentation
if command:
# There was content after the indentation, so there is no more indentation.
indentationDone=True
if command:
if inTextChunk:
relativeSpeechSequence[-1]+=command
else:
relativeSpeechSequence.append(command)
inTextChunk=True
elif isinstance(command,textInfos.FieldCommand):
newLanguage=None
if command.command=="controlStart":
# Control fields always start a new chunk, even if they have no field text.
inTextChunk=False
fieldSequence = []
if not inClickable and formatConfig['reportClickable']:
states=command.field.get('states')
if states and controlTypes.State.CLICKABLE in states:
# We have entered an outer most clickable or entered a new clickable after exiting a previous one
# Announce it if there is nothing else interesting about the field, but not if the user turned it off.
presCat=command.field.getPresentationCategory(newControlFieldStack[0:],formatConfig,reason)
if not presCat or presCat is command.field.PRESCAT_LAYOUT:
fieldSequence.append(controlTypes.State.CLICKABLE.displayString)
inClickable=True
fieldSequence.extend(info.getControlFieldSpeech(
command.field,
newControlFieldStack,
"start_relative",
formatConfig,
extraDetail,
reason=reason
))
newControlFieldStack.append(command.field)
elif command.command=="controlEnd":
# Exiting a controlField should break a run of clickables
inClickable=False
# Control fields always start a new chunk, even if they have no field text.
inTextChunk=False
fieldSequence = info.getControlFieldSpeech(
newControlFieldStack[-1],
newControlFieldStack[0:-1],
"end_relative",
formatConfig,
extraDetail,
reason=reason
)
del newControlFieldStack[-1]
if commonFieldCount>len(newControlFieldStack):
commonFieldCount=len(newControlFieldStack)
elif command.command=="formatChange":
fieldSequence = info.getFormatFieldSpeech(
command.field,
formatFieldAttributesCache,
formatConfig,
reason=reason,
unit=unit,
extraDetail=extraDetail
)
if fieldSequence:
inTextChunk=False
if autoLanguageSwitching:
newLanguage=command.field.get('language')
if lastLanguage!=newLanguage:
# The language has changed, so this starts a new text chunk.
inTextChunk=False
if not inTextChunk:
if fieldSequence:
if autoLanguageSwitching and lastLanguage is not None:
# Fields must be spoken in the default language.
relativeSpeechSequence.append(LangChangeCommand(None))
lastLanguage=None
relativeSpeechSequence.extend(fieldSequence)
if command.command=="controlStart" and command.field.get("role")==controlTypes.Role.MATH:
_extendSpeechSequence_addMathForTextInfo(relativeSpeechSequence, info, command.field)
if autoLanguageSwitching and newLanguage!=lastLanguage:
relativeSpeechSequence.append(LangChangeCommand(newLanguage))
lastLanguage=newLanguage
if reportIndentation and speakTextInfoState and allIndentation!=speakTextInfoState.indentationCache:
indentationSpeech=getIndentationSpeech(allIndentation, formatConfig)
if autoLanguageSwitching and speechSequence[-1].lang is not None:
# Indentation must be spoken in the default language,
# but the initial format field specified a different language.
# Insert the indentation before the LangChangeCommand.
langChange = speechSequence.pop()
speechSequence.extend(indentationSpeech)
speechSequence.append(langChange)
else:
speechSequence.extend(indentationSpeech)
if speakTextInfoState: speakTextInfoState.indentationCache=allIndentation
# Don't add this text if it is blank.
relativeBlank=True
for x in relativeSpeechSequence:
if isinstance(x,str) and not isBlank(x):
relativeBlank=False
break
if not relativeBlank:
speechSequence.extend(relativeSpeechSequence)
shouldConsiderTextInfoBlank = False
#Finally get speech text for any fields left in new controlFieldStack that are common with the old controlFieldStack (for closing), if extra detail is not requested
if autoLanguageSwitching and lastLanguage is not None:
speechSequence.append(
LangChangeCommand(None)
)
lastLanguage=None
if not extraDetail:
for count in reversed(range(min(len(newControlFieldStack),commonFieldCount))):
fieldSequence = info.getControlFieldSpeech(
newControlFieldStack[count],
newControlFieldStack[0:count],
"end_inControlFieldStack",
formatConfig,
extraDetail,
reason=reason
)
if fieldSequence:
speechSequence.extend(fieldSequence)
shouldConsiderTextInfoBlank = False
# If there is nothing that should cause the TextInfo to be considered
# non-blank, blank should be reported, unless we are doing a say all.
if not suppressBlanks and reason != OutputReason.SAYALL and shouldConsiderTextInfoBlank:
# Translators: This is spoken when the line is considered blank.
speechSequence.append(_("blank"))
#Cache a copy of the new controlFieldStack for future use
if useCache:
speakTextInfoState.controlFieldStackCache=list(newControlFieldStack)
speakTextInfoState.formatFieldAttributesCache=formatFieldAttributesCache
if not isinstance(useCache,SpeakTextInfoState):
speakTextInfoState.updateObj()
if reason == OutputReason.ONLYCACHE or not speechSequence:
return False
yield speechSequence
return True
|
55,851 | def test_load_duplicate_keys_top() -> None:
from yaml.constructor import ConstructorError
try:
with tempfile.NamedTemporaryFile(delete=False) as fp:
fp.write("a:\n b: 1\na:\n b: 2\n".encode("utf-8"))
with pytest.raises(ConstructorError):
OmegaConf.load(fp.name)
finally:
os.unlink(fp.name)
| def test_load_duplicate_keys_top() -> None:
from yaml.constructor import ConstructorError
try:
with tempfile.NamedTemporaryFile(delete=False) as fp:
content = """
a:
b: 1
a:
b: 2
"""
fp.write(content.encode("utf-8"))
with pytest.raises(ConstructorError):
OmegaConf.load(fp.name)
finally:
os.unlink(fp.name)
|
44,161 | def pattern_matching(circuit_dag, pattern_dag):
r"""Function that applies the pattern matching algorithm and returns the list of maximal matches.
Args:
circuit_dag (.CommutationDAG): A commutation DAG representing the circuit to be optimized.
pattern_dag(.CommutationDAG): A commutation DAG representing the pattern.
Returns:
list(Match): the list of maximal matches.
**Example**
First let's consider the following circuit
.. code-block:: python
def circuit():
qml.S(wires=0)
qml.PauliZ(wires=0)
qml.S(wires=1)
qml.CZ(wires=[0, 1])
qml.S(wires=1)
qml.S(wires=2)
qml.CZ(wires=[1, 2])
qml.S(wires=2)
return qml.expval(qml.PauliX(wires=0))
where we want to find all maximal matches of a pattern containing a sequence of two ``pennylane.S`` gates and
a ``pennylane.PauliZ`` gate:
.. code-block:: python
with qml.tape.QuantumTape() as pattern:
qml.S(wires=0)
qml.S(wires=0)
qml.PauliZ(wires=0)
>>> circuit_dag = qml.commutation_dag(circuit)()
>>> pattern_dag = qml.commutation_dag(pattern)()
>>> all_max_matches = qml.pattern_matching(circuit_dag, pattern_dag)
    It is possible to access the matches by looping through the list. The first integer indices represent the gates
    in the pattern and the second integers the gates in the circuit (by order of appearance).
>>> for match_conf in all_max_matches:
... print(match_conf.match)
[[0, 0], [2, 1]]
[[0, 2], [1, 4]]
[[0, 4], [1, 2]]
[[0, 5], [1, 7]]
[[0, 7], [1, 5]]
**Reference:**
[1] Iten, R., Moyard, R., Metger, T., Sutter, D. and Woerner, S., 2022.
Exact and practical pattern matching for quantum circuit optimization.
`doi.org/10.1145/3498325 <https://dl.acm.org/doi/abs/10.1145/3498325>`_
"""
# Match list
match_list = []
# Loop through all possible initial matches
for node_c, node_p in itertools.product(circuit_dag.get_nodes(), pattern_dag.get_nodes()):
# Initial matches between two identical gates (No qubits comparison)
if _compare_operation_without_qubits(node_c[1], node_p[1]):
# Fix qubits from the first (target fixed and control restrained)
not_fixed_qubits_confs = _not_fixed_qubits(
circuit_dag.num_wires, node_c[1].wires, pattern_dag.num_wires - len(node_p[1].wires)
)
            # Loop over all possible qubit configurations given the first match constraints
for not_fixed_qubits_conf in not_fixed_qubits_confs:
for not_fixed_qubits_conf_permuted in itertools.permutations(not_fixed_qubits_conf):
for first_match_qubits_conf in _first_match_qubits(
node_c[1], node_p[1], pattern_dag.num_wires
):
# Qubits mapping between circuit and pattern
qubits_conf = _merge_first_match_and_permutation(
first_match_qubits_conf, not_fixed_qubits_conf_permuted
)
# Update wires, target_wires, control_wires
wires, target_wires, control_wires = _update_qubits(
circuit_dag, qubits_conf
)
# Forward match part of the algorithm
forward = ForwardMatch(
circuit_dag,
pattern_dag,
node_c[0],
node_p[0],
wires,
target_wires,
control_wires,
)
forward.run_forward_match()
# Backward match part of the algorithm
backward = BackwardMatch(
circuit_dag,
pattern_dag,
qubits_conf,
forward.match,
forward.circuit_matched_with,
forward.circuit_blocked,
forward.pattern_matched_with,
node_c[0],
node_p[0],
wires,
control_wires,
target_wires,
)
backward.run_backward_match()
_add_match(match_list, backward.match_final)
match_list.sort(key=lambda x: len(x.match), reverse=True)
# Extract maximal matches and optimizes the circuit for compatible maximal matches
if match_list:
maximal = MaximalMatches(match_list)
maximal.run_maximal_matches()
max_matches = maximal.max_match_list
return max_matches
return match_list
| def pattern_matching(circuit_dag, pattern_dag):
r"""Function that applies the pattern matching algorithm and returns the list of maximal matches.
Args:
circuit_dag (.CommutationDAG): A commutation DAG representing the circuit to be optimized.
pattern_dag(.CommutationDAG): A commutation DAG representing the pattern.
Returns:
list(Match): the list of maximal matches.
**Example**
First let's consider the following circuit
.. code-block:: python
def circuit():
qml.S(wires=0)
qml.PauliZ(wires=0)
qml.S(wires=1)
qml.CZ(wires=[0, 1])
qml.S(wires=1)
qml.S(wires=2)
qml.CZ(wires=[1, 2])
qml.S(wires=2)
return qml.expval(qml.PauliX(wires=0))
where we want to find all maximal matches of a pattern containing a sequence of two ``pennylane.S`` gates and
a ``pennylane.PauliZ`` gate:
.. code-block:: python
def pattern():
qml.S(wires=0)
qml.S(wires=0)
qml.PauliZ(wires=0)
>>> circuit_dag = qml.commutation_dag(circuit)()
>>> pattern_dag = qml.commutation_dag(pattern)()
>>> all_max_matches = qml.pattern_matching(circuit_dag, pattern_dag)
    It is possible to access the matches by looping through the list. The first integer indices represent the gates
    in the pattern and the second integers the gates in the circuit (by order of appearance).
>>> for match_conf in all_max_matches:
... print(match_conf.match)
[[0, 0], [2, 1]]
[[0, 2], [1, 4]]
[[0, 4], [1, 2]]
[[0, 5], [1, 7]]
[[0, 7], [1, 5]]
**Reference:**
[1] Iten, R., Moyard, R., Metger, T., Sutter, D. and Woerner, S., 2022.
Exact and practical pattern matching for quantum circuit optimization.
`doi.org/10.1145/3498325 <https://dl.acm.org/doi/abs/10.1145/3498325>`_
"""
# Match list
match_list = []
# Loop through all possible initial matches
for node_c, node_p in itertools.product(circuit_dag.get_nodes(), pattern_dag.get_nodes()):
# Initial matches between two identical gates (No qubits comparison)
if _compare_operation_without_qubits(node_c[1], node_p[1]):
# Fix qubits from the first (target fixed and control restrained)
not_fixed_qubits_confs = _not_fixed_qubits(
circuit_dag.num_wires, node_c[1].wires, pattern_dag.num_wires - len(node_p[1].wires)
)
            # Loop over all possible qubit configurations given the first match constraints
for not_fixed_qubits_conf in not_fixed_qubits_confs:
for not_fixed_qubits_conf_permuted in itertools.permutations(not_fixed_qubits_conf):
for first_match_qubits_conf in _first_match_qubits(
node_c[1], node_p[1], pattern_dag.num_wires
):
# Qubits mapping between circuit and pattern
qubits_conf = _merge_first_match_and_permutation(
first_match_qubits_conf, not_fixed_qubits_conf_permuted
)
# Update wires, target_wires, control_wires
wires, target_wires, control_wires = _update_qubits(
circuit_dag, qubits_conf
)
# Forward match part of the algorithm
forward = ForwardMatch(
circuit_dag,
pattern_dag,
node_c[0],
node_p[0],
wires,
target_wires,
control_wires,
)
forward.run_forward_match()
# Backward match part of the algorithm
backward = BackwardMatch(
circuit_dag,
pattern_dag,
qubits_conf,
forward.match,
forward.circuit_matched_with,
forward.circuit_blocked,
forward.pattern_matched_with,
node_c[0],
node_p[0],
wires,
control_wires,
target_wires,
)
backward.run_backward_match()
_add_match(match_list, backward.match_final)
match_list.sort(key=lambda x: len(x.match), reverse=True)
# Extract maximal matches and optimizes the circuit for compatible maximal matches
if match_list:
maximal = MaximalMatches(match_list)
maximal.run_maximal_matches()
max_matches = maximal.max_match_list
return max_matches
return match_list
|
57,933 | def get_user_login_profile(args, aws_client):
client = aws_client.aws_session(
service=SERVICE,
role_arn=args.get('roleArn'),
role_session_name=args.get('roleSessionName'),
role_session_duration=args.get('roleSessionDuration'),
)
user_name = args.get('userName')
kwargs = {
'UserName': user_name
}
response = client.get_login_profile(**kwargs)
user_profile = response['LoginProfile']
data = ({
'UserName': user_profile.get('UserName', None),
'LoginProfile': {
'CreateDate': user_profile.get('CreateDate', None),
'PasswordResetRequired': user_profile.get('PasswordResetRequired', None)
}
})
ec = {'AWS.IAM.Users(val.UserName && val.UserName === obj.UserName)': data}
human_readable = tableToMarkdown('AWS IAM Login Profile for user {}'.format(user_name),
t=data.get('LoginProfile'),
headers=['CreateDate', 'PasswordResetRequired'],
removeNull=True,
headerTransform=pascalToSpace)
return_outputs(human_readable, ec)
| def get_user_login_profile(args, aws_client):
client = aws_client.aws_session(
service=SERVICE,
role_arn=args.get('roleArn'),
role_session_name=args.get('roleSessionName'),
role_session_duration=args.get('roleSessionDuration'),
)
user_name = args.get('userName')
kwargs = {
'UserName': user_name
}
response = client.get_login_profile(**kwargs)
user_profile = response['LoginProfile']
data = {
'UserName': user_profile.get('UserName'),
'LoginProfile': {
'CreateDate': user_profile.get('CreateDate'),
'PasswordResetRequired': user_profile.get('PasswordResetRequired')
}
}
ec = {'AWS.IAM.Users(val.UserName && val.UserName === obj.UserName)': data}
human_readable = tableToMarkdown('AWS IAM Login Profile for user {}'.format(user_name),
t=data.get('LoginProfile'),
headers=['CreateDate', 'PasswordResetRequired'],
removeNull=True,
headerTransform=pascalToSpace)
return_outputs(human_readable, ec)
|
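The modified column of this row mainly drops the redundant explicit `None` defaults and the extra parentheses around the dict literal. For reference, `dict.get(key)` already falls back to `None`, which a minimal toy example confirms:

# Illustrative sketch only: .get(key) and .get(key, None) are equivalent.
profile = {"UserName": "jdoe"}

assert profile.get("CreateDate") is None
assert profile.get("CreateDate", None) is None   # same behaviour, extra noise
assert profile.get("UserName") == "jdoe"
print("dict.get falls back to None when the key is missing")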
54,103 | def aggregate_to_substations(n, config, aggregation_strategies=dict(), buses_i=None):
# can be used to aggregate a selection of buses to electrically closest neighbors
# if no buses are given, nodes that are no substations or without offshore connection are aggregated
if buses_i is None:
logger.info("Aggregating buses that are no substations or have no valid offshore connection")
buses_i = list(set(n.buses.index)-set(n.generators.bus)-set(n.loads.bus))
weight = pd.concat({'Line': n.lines.length/n.lines.s_nom.clip(1e-3),
'Link': n.links.length/n.links.p_nom.clip(1e-3)})
adj = n.adjacency_matrix(branch_components=['Line', 'Link'], weights=weight)
bus_indexer = n.buses.index.get_indexer(buses_i)
dist = pd.DataFrame(dijkstra(adj, directed=False, indices=bus_indexer), buses_i, n.buses.index)
dist[buses_i] = np.inf # bus in buses_i should not be assigned to different bus in buses_i
for c in n.buses.country.unique():
incountry_b = n.buses.country == c
dist.loc[incountry_b, ~incountry_b] = np.inf
busmap = n.buses.index.to_series()
busmap.loc[buses_i] = dist.idxmin(1)
# default aggregation strategies must be specified within the function, otherwise (when defaults are passed in
# the function's definition) they get lost in case custom values for different variables are specified in the config
bus_strategies = dict(country=_make_consense("Bus", "country"))
bus_strategies.update(aggregation_strategies.get("buses", {}))
generator_strategies = aggregation_strategies.get("generators", {"p_nom_max": "sum"})
# this snippet supports compatibility of PyPSA and PyPSA-EUR:
if "p_nom_max" in generator_strategies:
if generator_strategies["p_nom_max"] == "min": generator_strategies["p_nom_max"] = np.min
clustering = get_clustering_from_busmap(n, busmap,
bus_strategies=bus_strategies,
aggregate_generators_weighted=True,
aggregate_generators_carriers=None,
aggregate_one_ports=["Load", "StorageUnit"],
line_length_factor=1.0,
generator_strategies=generator_strategies,
scale_link_capital_costs=False)
return clustering.network, busmap
| def aggregate_to_substations(n, config, aggregation_strategies=dict(), buses_i=None):
# can be used to aggregate a selection of buses to electrically closest neighbors
# if no buses are given, nodes that are no substations or without offshore connection are aggregated
if buses_i is None:
logger.info("Aggregating buses that are no substations or have no valid offshore connection")
buses_i = list(set(n.buses.index)-set(n.generators.bus)-set(n.loads.bus))
weight = pd.concat({'Line': n.lines.length/n.lines.s_nom.clip(1e-3),
'Link': n.links.length/n.links.p_nom.clip(1e-3)})
adj = n.adjacency_matrix(branch_components=['Line', 'Link'], weights=weight)
bus_indexer = n.buses.index.get_indexer(buses_i)
dist = pd.DataFrame(dijkstra(adj, directed=False, indices=bus_indexer), buses_i, n.buses.index)
dist[buses_i] = np.inf # bus in buses_i should not be assigned to different bus in buses_i
for c in n.buses.country.unique():
incountry_b = n.buses.country == c
dist.loc[incountry_b, ~incountry_b] = np.inf
busmap = n.buses.index.to_series()
busmap.loc[buses_i] = dist.idxmin(1)
# default aggregation strategies must be specified within the function, otherwise (when defaults are passed in
# the function's definition) they get lost in case custom values for different variables are specified in the config
bus_strategies = dict(country=_make_consense("Bus", "country"))
bus_strategies.update(aggregation_strategies.get("buses", {}))
generator_strategies = aggregation_strategies.get("generators", {"p_nom_max": "sum"})
# this snippet supports compatibility of PyPSA and PyPSA-EUR:
if generator_strategies.get("p_nom_max") == "min":
generator_strategies["p_nom_max"] = pd.Series.min
clustering = get_clustering_from_busmap(n, busmap,
bus_strategies=bus_strategies,
aggregate_generators_weighted=True,
aggregate_generators_carriers=None,
aggregate_one_ports=["Load", "StorageUnit"],
line_length_factor=1.0,
generator_strategies=generator_strategies,
scale_link_capital_costs=False)
return clustering.network, busmap
|
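This row replaces `np.min` with `pd.Series.min` as the callable substituted for a `"p_nom_max": "min"` strategy. A small self-contained sketch, with invented column names, showing that a string, a NumPy reduction, and the Series method all drive a pandas groupby aggregation to the same result (deprecation warnings may differ between pandas versions):

# Illustrative sketch only: aggregation strategies as a string vs. callables.
import numpy as np
import pandas as pd

df = pd.DataFrame({"cluster": ["a", "a", "b"], "p_nom_max": [10.0, 4.0, 7.0]})

for strategy in ("min", np.min, pd.Series.min):
    out = df.groupby("cluster").agg({"p_nom_max": strategy})
    print(strategy, out["p_nom_max"].tolist())  # [4.0, 7.0] each time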
31,190 | def test_module(client):
ts_from = ts_to = round(float(datetime.timestamp(datetime.utcnow())))
result = client.get_incidents(ts_from, ts_to)
if not result.get('success'):
raise DemistoException(result['message'])
demisto.results("ok")
| def test_module(client):
ts_from = ts_to = round(float(datetime.timestamp(datetime.utcnow())))
result = client.get_incidents(ts_from, ts_to)
if not result.get('success'):
raise DemistoException(result['message'])
return "ok"
|
14,622 | def add_unseen_labels(train_label_dict, test_label_list):
"""
Merge test set labels that not seen in the training data with seen ones.
Parameters
----------
train_label_dict : dict
Dictionary mapping training set class labels to class indices.
test_label_list : list
List containing labels in the test set.
Returns
-------
train_and_test_label_dict : dict
        Dictionary mapping merged labels from both the training and test set
to indices.
"""
# get the list of labels that were in the training set
train_label_list = list(train_label_dict.keys())
# identify any unseen labels in the test set
unseen_test_label_list = [label for label in test_label_list
if label not in train_label_list]
# create a new dictionary for these unseen labels with label indices
# for them starting _after_ those for the training set labels
unseen_label_dict = {label: i for i, label in enumerate(unseen_test_label_list,
start=len(train_label_list))}
# combine the train label dictionary with this unseen label one & return
train_and_test_label_dict = train_label_dict.copy()
train_and_test_label_dict.update(unseen_label_dict)
return train_and_test_label_dict
| def add_unseen_labels(train_label_dict, test_label_list):
"""
Merge test set labels that are not seen in the training data with seen ones.
Parameters
----------
train_label_dict : dict
Dictionary mapping training set class labels to class indices.
test_label_list : list
List containing labels in the test set.
Returns
-------
train_and_test_label_dict : dict
        Dictionary mapping merged labels from both the training and test set
to indices.
"""
# get the list of labels that were in the training set
train_label_list = list(train_label_dict.keys())
# identify any unseen labels in the test set
unseen_test_label_list = [label for label in test_label_list
if label not in train_label_list]
# create a new dictionary for these unseen labels with label indices
# for them starting _after_ those for the training set labels
unseen_label_dict = {label: i for i, label in enumerate(unseen_test_label_list,
start=len(train_label_list))}
# combine the train label dictionary with this unseen label one & return
train_and_test_label_dict = train_label_dict.copy()
train_and_test_label_dict.update(unseen_label_dict)
return train_and_test_label_dict
|
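The two columns of this row differ only in the docstring wording; the merging logic is identical. A quick usage sketch with toy labels, assuming the `add_unseen_labels` function shown above is in scope:

# Illustrative sketch only: unseen test labels receive indices after the training ones.
train_label_dict = {"cat": 0, "dog": 1}
test_label_list = ["dog", "bird", "fish"]

merged = add_unseen_labels(train_label_dict, test_label_list)
print(merged)  # {'cat': 0, 'dog': 1, 'bird': 2, 'fish': 3}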
2,013 | def _get_predictions(clf_fitted, X, label_encoder_):
"""Returns predictions for `X` and index of classes present in `X`.
    For predictions, the `decision_function` method of `clf_fitted` is used.
    If this does not exist, the `predict_proba` method is used.
Parameters
----------
clf_fitted : Estimator instance
Fitted classifier instance.
X : array-like
Sample data used for the predictions.
label_encoder_ : LabelEncoder instance
LabelEncoder instance fitted on all the targets.
Returns
-------
df : array-like, shape (X.shape[0], len(clf_fitted.classes_))
The predictions. Note array is of shape (X.shape[0], 1) when there are
2 classes.
idx_pos_class : array-like, shape (n_classes,)
Indices of the classes present in `X`.
"""
if hasattr(clf_fitted, "decision_function"):
df = clf_fitted.decision_function(X)
if df.ndim == 1:
df = df[:, np.newaxis]
elif hasattr(clf_fitted, "predict_proba"):
df = clf_fitted.predict_proba(X)
if len(label_encoder_.classes_) == 2:
df = df[:, 1:]
else:
raise RuntimeError("'base_estimator' has no 'decision_function' or "
"'predict_proba' method.")
idx_pos_class = label_encoder_.transform(clf_fitted.classes_)
return df, idx_pos_class
| def _get_predictions(clf_fitted, X, label_encoder_):
"""Returns predictions for `X` and index of classes present in `X`.
    For predictions, the `decision_function` method of `clf_fitted` is used.
    If this does not exist, the `predict_proba` method is used.
Parameters
----------
clf_fitted : Estimator instance
Fitted classifier instance.
X : array-like
Sample data used for the predictions.
label_encoder_ : LabelEncoder instance
LabelEncoder instance fitted on all the targets.
Returns
-------
df : array-like, shape (X.shape[0], len(clf_fitted.classes_))
The predictions. Note array is of shape (X.shape[0], 1) when there are
2 classes.
pos_class_indices : array-like, shape (n_classes,)
Indices of the classes present in `X`.
"""
if hasattr(clf_fitted, "decision_function"):
df = clf_fitted.decision_function(X)
if df.ndim == 1:
df = df[:, np.newaxis]
elif hasattr(clf_fitted, "predict_proba"):
df = clf_fitted.predict_proba(X)
if len(label_encoder_.classes_) == 2:
df = df[:, 1:]
else:
raise RuntimeError("'base_estimator' has no 'decision_function' or "
"'predict_proba' method.")
idx_pos_class = label_encoder_.transform(clf_fitted.classes_)
return df, idx_pos_class
|
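For the binary `decision_function` branch that both columns of this row share, the 1-D output is promoted to a single column before being returned. A short scikit-learn sketch of that shape handling, assuming scikit-learn is installed (the toy data is made up):

# Illustrative sketch only: a binary decision_function result is 1-D and is
# reshaped to one column, mirroring the df[:, np.newaxis] branch above.
import numpy as np
from sklearn.linear_model import LogisticRegression

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])
clf = LogisticRegression().fit(X, y)

scores = clf.decision_function(X)
print(scores.shape)                 # (4,)
print(scores[:, np.newaxis].shape)  # (4, 1)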
24,268 | def test_discovery(aggregator):
host = socket.gethostbyname(common.HOST)
network = ipaddress.ip_network(u'{}/29'.format(host), strict=False).with_prefixlen
check_tags = ['snmp_device:{}'.format(host), 'snmp_profile:profile1']
instance = {
'name': 'snmp_conf',
# Make sure the check handles bytes
'network_address': network.encode('utf-8'),
'port': common.PORT,
'community_string': 'public',
'retries': 0,
'discovery_interval': 0,
}
init_config = {
'profiles': {
'profile1': {'definition': {'metrics': common.SUPPORTED_METRIC_TYPES, 'sysobjectid': '1.3.6.1.4.1.8072.*'}}
}
}
check = SnmpCheck('snmp', init_config, [instance])
try:
for _ in range(30):
check.check(instance)
if len(aggregator.metric_names) > 1:
break
time.sleep(1)
aggregator.reset()
finally:
check._running = False
del check
for metric in common.SUPPORTED_METRIC_TYPES:
metric_name = "snmp." + metric['name']
aggregator.assert_metric(metric_name, tags=check_tags, count=1)
aggregator.assert_metric('snmp.sysUpTimeInstance')
aggregator.assert_metric('snmp.discovered_devices_count', tags=['network:{}'.format(network)])
aggregator.assert_all_metrics_covered()
| def test_discovery(aggregator):
host = socket.gethostbyname(common.HOST)
network = ipaddress.ip_network(u'{}/29'.format(host), strict=False).with_prefixlen
check_tags = ['snmp_device:{}'.format(host), 'snmp_profile:profile1']
instance = {
'name': 'snmp_conf',
# Make sure the check handles bytes
'network_address': network.encode('utf-8'),
'port': common.PORT,
'community_string': 'public',
'retries': 0,
'discovery_interval': 0,
}
init_config = {
'profiles': {
'profile1': {'definition': {'metrics': common.SUPPORTED_METRIC_TYPES, 'sysobjectid': '1.3.6.1.4.1.8072.*'}}
}
}
check = SnmpCheck('snmp', init_config, [instance])
try:
for _ in range(30):
check.check(instance)
if len(aggregator.metric_names) > 1:
break
time.sleep(1)
aggregator.reset()
finally:
check._running = False
del check # This is what the Agent would do when unscheduling the check.
for metric in common.SUPPORTED_METRIC_TYPES:
metric_name = "snmp." + metric['name']
aggregator.assert_metric(metric_name, tags=check_tags, count=1)
aggregator.assert_metric('snmp.sysUpTimeInstance')
aggregator.assert_metric('snmp.discovered_devices_count', tags=['network:{}'.format(network)])
aggregator.assert_all_metrics_covered()
|
46,613 | def main():
# Warning: keep in sync with twisted/plugins/pyrdp_plugin.py
parser = argparse.ArgumentParser()
parser.add_argument("target", help="IP:port of the target RDP machine (ex: 192.168.1.10:3390)")
parser.add_argument("-l", "--listen", help="Port number to listen on (default: 3389)", default=3389)
parser.add_argument("-o", "--output", help="Output folder", default="pyrdp_output")
parser.add_argument("-i", "--destination-ip", help="Destination IP address of the PyRDP player.If not specified, RDP events are not sent over the network.")
parser.add_argument("-d", "--destination-port", help="Listening port of the PyRDP player (default: 3000).", default=3000)
parser.add_argument("-k", "--private-key", help="Path to private key (for SSL)")
parser.add_argument("-c", "--certificate", help="Path to certificate (for SSL)")
parser.add_argument("-u", "--username", help="Username that will replace the client's username", default=None)
parser.add_argument("-p", "--password", help="Password that will replace the client's password", default=None)
parser.add_argument("-L", "--log-level", help="Console logging level. Logs saved to file are always verbose.", default="INFO", choices=["INFO", "DEBUG", "WARNING", "ERROR", "CRITICAL"])
parser.add_argument("-F", "--log-filter", help="Only show logs from this logger name (accepts '*' wildcards)", default="")
parser.add_argument("-s", "--sensor-id", help="Sensor ID (to differentiate multiple instances of the MITM where logs are aggregated at one place)", default="PyRDP")
parser.add_argument("--payload", help="Command to run automatically upon connection", default=None)
parser.add_argument("--payload-powershell", help="PowerShell command to run automatically upon connection", default=None)
parser.add_argument("--payload-powershell-file", help="PowerShell script to run automatically upon connection (as -EncodedCommand)", default=None)
parser.add_argument("--payload-delay", help="Time to wait after a new connection before sending the payload, in milliseconds", default=None)
parser.add_argument("--payload-duration", help="Amount of time for which input / output should be dropped, in milliseconds. This can be used to hide the payload screen.", default=None)
parser.add_argument("--crawl", help="Enable automatic shared drive scraping", action="store_true")
parser.add_argument("--crawler-match-file", help="File to be used by the crawler to chose what to download when scraping the client shared drives.", default=None)
parser.add_argument("--crawler-ignore-file", help="File to be used by the crawler to chose what folders to avoid when scraping the client shared drives.", default=None)
parser.add_argument("--no-replay", help="Disable replay recording", action="store_true")
parser.add_argument("--gdi", help="Allow GDI passthrough (No video decoding)", action="store_true")
args = parser.parse_args()
outDir = Path(args.output)
outDir.mkdir(exist_ok = True)
logLevel = getattr(logging, args.log_level)
pyrdpLogger = prepareLoggers(logLevel, args.log_filter, args.sensor_id, outDir)
targetHost, targetPort = parseTarget(args.target)
key, certificate = validateKeyAndCertificate(args.private_key, args.certificate)
listenPort = int(args.listen)
config = MITMConfig()
config.targetHost = targetHost
config.targetPort = targetPort
config.privateKeyFileName = key
config.certificateFileName = certificate
config.attackerHost = args.destination_ip
config.attackerPort = int(args.destination_port)
config.replacementUsername = args.username
config.replacementPassword = args.password
config.outDir = outDir
config.enableCrawler = args.crawl
config.crawlerMatchFileName = args.crawler_match_file
config.crawlerIgnoreFileName = args.crawler_ignore_file
config.recordReplays = not args.no_replay
config.allowGDI = args.gdi
payload = None
powershell = None
if int(args.payload is not None) + int(args.payload_powershell is not None) + int(args.payload_powershell_file is not None) > 1:
pyrdpLogger.error("Only one of --payload, --payload-powershell and --payload-powershell-file may be supplied.")
sys.exit(1)
if args.payload is not None:
payload = args.payload
pyrdpLogger.info("Using payload: %(payload)s", {"payload": args.payload})
elif args.payload_powershell is not None:
powershell = args.payload_powershell
pyrdpLogger.info("Using powershell payload: %(payload)s", {"payload": args.payload_powershell})
elif args.payload_powershell_file is not None:
if not os.path.exists(args.payload_powershell_file):
pyrdpLogger.error("Powershell file %(path)s does not exist.", {"path": args.payload_powershell_file})
sys.exit(1)
try:
with open(args.payload_powershell_file, "r") as f:
powershell = f.read()
except IOError as e:
pyrdpLogger.error("Error when trying to read powershell file: %(error)s", {"error": e})
sys.exit(1)
pyrdpLogger.info("Using payload from powershell file: %(path)s", {"path": args.payload_powershell_file})
if powershell is not None:
payload = "powershell -EncodedCommand " + b64encode(powershell.encode("utf-16le")).decode()
if payload is not None:
if args.payload_delay is None:
pyrdpLogger.error("--payload-delay must be provided if a payload is provided.")
sys.exit(1)
if args.payload_duration is None:
pyrdpLogger.error("--payload-duration must be provided if a payload is provided.")
sys.exit(1)
try:
config.payloadDelay = int(args.payload_delay)
except ValueError:
pyrdpLogger.error("Invalid payload delay. Payload delay must be an integral number of milliseconds.")
sys.exit(1)
if config.payloadDelay < 0:
pyrdpLogger.error("Payload delay must not be negative.")
sys.exit(1)
if config.payloadDelay < 1000:
pyrdpLogger.warning("You have provided a payload delay of less than 1 second. We recommend you use a slightly longer delay to make sure it runs properly.")
try:
config.payloadDuration = int(args.payload_duration)
except ValueError:
pyrdpLogger.error("Invalid payload duration. Payload duration must be an integral number of milliseconds.")
sys.exit(1)
if config.payloadDuration < 0:
pyrdpLogger.error("Payload duration must not be negative.")
sys.exit(1)
config.payload = payload
elif args.payload_delay is not None:
pyrdpLogger.error("--payload-delay was provided but no payload was set.")
sys.exit(1)
logConfiguration(config)
reactor.listenTCP(listenPort, MITMServerFactory(config))
pyrdpLogger.info("MITM Server listening on port %(port)d", {"port": listenPort})
reactor.run()
pyrdpLogger.info("MITM terminated")
logConfiguration(config)
| def main():
# Warning: keep in sync with twisted/plugins/pyrdp_plugin.py
parser = argparse.ArgumentParser()
parser.add_argument("target", help="IP:port of the target RDP machine (ex: 192.168.1.10:3390)")
parser.add_argument("-l", "--listen", help="Port number to listen on (default: 3389)", default=3389)
parser.add_argument("-o", "--output", help="Output folder", default="pyrdp_output")
parser.add_argument("-i", "--destination-ip", help="Destination IP address of the PyRDP player. If not specified, RDP events are not sent over the network.")
parser.add_argument("-d", "--destination-port", help="Listening port of the PyRDP player (default: 3000).", default=3000)
parser.add_argument("-k", "--private-key", help="Path to private key (for SSL)")
parser.add_argument("-c", "--certificate", help="Path to certificate (for SSL)")
parser.add_argument("-u", "--username", help="Username that will replace the client's username", default=None)
parser.add_argument("-p", "--password", help="Password that will replace the client's password", default=None)
parser.add_argument("-L", "--log-level", help="Console logging level. Logs saved to file are always verbose.", default="INFO", choices=["INFO", "DEBUG", "WARNING", "ERROR", "CRITICAL"])
parser.add_argument("-F", "--log-filter", help="Only show logs from this logger name (accepts '*' wildcards)", default="")
parser.add_argument("-s", "--sensor-id", help="Sensor ID (to differentiate multiple instances of the MITM where logs are aggregated at one place)", default="PyRDP")
parser.add_argument("--payload", help="Command to run automatically upon connection", default=None)
parser.add_argument("--payload-powershell", help="PowerShell command to run automatically upon connection", default=None)
parser.add_argument("--payload-powershell-file", help="PowerShell script to run automatically upon connection (as -EncodedCommand)", default=None)
parser.add_argument("--payload-delay", help="Time to wait after a new connection before sending the payload, in milliseconds", default=None)
parser.add_argument("--payload-duration", help="Amount of time for which input / output should be dropped, in milliseconds. This can be used to hide the payload screen.", default=None)
parser.add_argument("--crawl", help="Enable automatic shared drive scraping", action="store_true")
parser.add_argument("--crawler-match-file", help="File to be used by the crawler to choose what to download when scraping the client shared drives.", default=None)
parser.add_argument("--crawler-ignore-file", help="File to be used by the crawler to choose what folders to avoid when scraping the client shared drives.", default=None)
parser.add_argument("--no-replay", help="Disable replay recording", action="store_true")
parser.add_argument("--gdi-passthrough", help="Allow GDI passthrough (No video decoding)", action="store_true")
args = parser.parse_args()
outDir = Path(args.output)
outDir.mkdir(exist_ok = True)
logLevel = getattr(logging, args.log_level)
pyrdpLogger = prepareLoggers(logLevel, args.log_filter, args.sensor_id, outDir)
targetHost, targetPort = parseTarget(args.target)
key, certificate = validateKeyAndCertificate(args.private_key, args.certificate)
listenPort = int(args.listen)
config = MITMConfig()
config.targetHost = targetHost
config.targetPort = targetPort
config.privateKeyFileName = key
config.certificateFileName = certificate
config.attackerHost = args.destination_ip
config.attackerPort = int(args.destination_port)
config.replacementUsername = args.username
config.replacementPassword = args.password
config.outDir = outDir
config.enableCrawler = args.crawl
config.crawlerMatchFileName = args.crawler_match_file
config.crawlerIgnoreFileName = args.crawler_ignore_file
config.recordReplays = not args.no_replay
config.allowGDI = args.gdi_passthrough
payload = None
powershell = None
if int(args.payload is not None) + int(args.payload_powershell is not None) + int(args.payload_powershell_file is not None) > 1:
pyrdpLogger.error("Only one of --payload, --payload-powershell and --payload-powershell-file may be supplied.")
sys.exit(1)
if args.payload is not None:
payload = args.payload
pyrdpLogger.info("Using payload: %(payload)s", {"payload": args.payload})
elif args.payload_powershell is not None:
powershell = args.payload_powershell
pyrdpLogger.info("Using powershell payload: %(payload)s", {"payload": args.payload_powershell})
elif args.payload_powershell_file is not None:
if not os.path.exists(args.payload_powershell_file):
pyrdpLogger.error("Powershell file %(path)s does not exist.", {"path": args.payload_powershell_file})
sys.exit(1)
try:
with open(args.payload_powershell_file, "r") as f:
powershell = f.read()
except IOError as e:
pyrdpLogger.error("Error when trying to read powershell file: %(error)s", {"error": e})
sys.exit(1)
pyrdpLogger.info("Using payload from powershell file: %(path)s", {"path": args.payload_powershell_file})
if powershell is not None:
payload = "powershell -EncodedCommand " + b64encode(powershell.encode("utf-16le")).decode()
if payload is not None:
if args.payload_delay is None:
pyrdpLogger.error("--payload-delay must be provided if a payload is provided.")
sys.exit(1)
if args.payload_duration is None:
pyrdpLogger.error("--payload-duration must be provided if a payload is provided.")
sys.exit(1)
try:
config.payloadDelay = int(args.payload_delay)
except ValueError:
pyrdpLogger.error("Invalid payload delay. Payload delay must be an integral number of milliseconds.")
sys.exit(1)
if config.payloadDelay < 0:
pyrdpLogger.error("Payload delay must not be negative.")
sys.exit(1)
if config.payloadDelay < 1000:
pyrdpLogger.warning("You have provided a payload delay of less than 1 second. We recommend you use a slightly longer delay to make sure it runs properly.")
try:
config.payloadDuration = int(args.payload_duration)
except ValueError:
pyrdpLogger.error("Invalid payload duration. Payload duration must be an integral number of milliseconds.")
sys.exit(1)
if config.payloadDuration < 0:
pyrdpLogger.error("Payload duration must not be negative.")
sys.exit(1)
config.payload = payload
elif args.payload_delay is not None:
pyrdpLogger.error("--payload-delay was provided but no payload was set.")
sys.exit(1)
logConfiguration(config)
reactor.listenTCP(listenPort, MITMServerFactory(config))
pyrdpLogger.info("MITM Server listening on port %(port)d", {"port": listenPort})
reactor.run()
pyrdpLogger.info("MITM terminated")
logConfiguration(config)
|
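The -EncodedCommand payload assembled above base64-encodes the UTF-16LE bytes of the PowerShell script. A minimal standalone sketch of that transformation (the helper name is illustrative, not part of PyRDP):
from base64 import b64encode

def encode_powershell_payload(script: str) -> str:
    # PowerShell's -EncodedCommand expects base64 over the UTF-16LE encoding of the script.
    return "powershell -EncodedCommand " + b64encode(script.encode("utf-16le")).decode()

print(encode_powershell_payload("Get-Process"))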
53,876 | def load_nox_module(global_config: Namespace) -> Union[types.ModuleType, int]:
"""Load the user's noxfile and return the module object for it.
.. note::
This task has two side effects; it makes ``global_config.noxfile``
an absolute path, and changes the working directory of the process.
Args:
global_config (.nox.main.GlobalConfig): The global config.
Returns:
module: The module designated by the Noxfile path.
"""
try:
# Save the absolute path to the Noxfile.
# This will inoculate it if nox changes paths because of an implicit
# or explicit chdir (like the one below).
global_config.noxfile = os.path.realpath(
# Be sure to expand variables
os.path.expandvars(global_config.noxfile)
)
noxfile_parent_dir = os.path.realpath(os.path.dirname(global_config.noxfile))
# Check ``nox.needs_version`` by parsing the AST.
check_nox_version(global_config.noxfile)
# Move to the path where the Noxfile is.
# This will ensure that the Noxfile's path is on sys.path, and that
# import-time path resolutions work the way the Noxfile author would
# guess.
os.chdir(noxfile_parent_dir)
return importlib.machinery.SourceFileLoader(
"user_nox_module", global_config.noxfile
).load_module()
except (VersionCheckFailed, InvalidVersionSpecifier) as error:
logger.error(str(error))
return 2
except FileNotFoundError:
logger.error(f"noxfile.py not found in {noxfile_parent_dir!r}.")
return 2
except (IOError, OSError):
logger.exception("Failed to load Noxfile {}".format(global_config.noxfile))
return 2
| def load_nox_module(global_config: Namespace) -> Union[types.ModuleType, int]:
"""Load the user's noxfile and return the module object for it.
.. note::
This task has two side effects; it makes ``global_config.noxfile``
an absolute path, and changes the working directory of the process.
Args:
global_config (.nox.main.GlobalConfig): The global config.
Returns:
module: The module designated by the Noxfile path.
"""
try:
# Save the absolute path to the Noxfile.
# This will inoculate it if nox changes paths because of an implicit
# or explicit chdir (like the one below).
global_config.noxfile = os.path.realpath(
# Be sure to expand variables
os.path.expandvars(global_config.noxfile)
)
noxfile_parent_dir = os.path.realpath(os.path.dirname(global_config.noxfile))
# Check ``nox.needs_version`` by parsing the AST.
check_nox_version(global_config.noxfile)
# Move to the path where the Noxfile is.
# This will ensure that the Noxfile's path is on sys.path, and that
# import-time path resolutions work the way the Noxfile author would
# guess.
os.chdir(noxfile_parent_dir)
return importlib.machinery.SourceFileLoader(
"user_nox_module", global_config.noxfile
).load_module()
except (VersionCheckFailed, InvalidVersionSpecifier) as error:
logger.error(str(error))
return 2
except FileNotFoundError:
logger.error("Failed to load Noxfile {}: no such file".format(global_config.noxfile))
return 2
except (IOError, OSError):
logger.exception("Failed to load Noxfile {}".format(global_config.noxfile))
return 2
|
29,914 | def test_scatter_output_filenames(tmpdir: py.path.local) -> None:
"""Check that when a scatter step produces same named output the final output is renamed correctly."""
cwd = tmpdir.chdir()
rtc = RuntimeContext()
rtc.outdir = str(cwd)
factory = cwltool.factory.Factory(runtime_context=rtc)
output_names = ['output.txt', 'output.txt_2', 'output.txt_3']
try:
scatter_workflow = factory.make(get_data("tests/scatter_numbers.cwl"))
result = scatter_workflow(range=3)
assert(
'output' in result
)
locations = [element['location'] for element in result['output']]
assert (
locations[0].endswith('output.txt') and
locations[1].endswith('output.txt_2') and
locations[2].endswith('output.txt_3')
), "Locations {} do not end with {}".format(locations, output_names)
finally:
for filename in output_names:
if os.path.exists(filename):
os.remove(filename)
| def test_scatter_output_filenames(tmpdir: py.path.local) -> None:
"""If a scatter step produces identically named output then confirm that the final output is renamed correctly."""
cwd = tmpdir.chdir()
rtc = RuntimeContext()
rtc.outdir = str(cwd)
factory = cwltool.factory.Factory(runtime_context=rtc)
output_names = ['output.txt', 'output.txt_2', 'output.txt_3']
try:
scatter_workflow = factory.make(get_data("tests/scatter_numbers.cwl"))
result = scatter_workflow(range=3)
assert(
'output' in result
)
locations = [element['location'] for element in result['output']]
assert (
locations[0].endswith('output.txt') and
locations[1].endswith('output.txt_2') and
locations[2].endswith('output.txt_3')
), "Locations {} do not end with {}".format(locations, output_names)
finally:
for filename in output_names:
if os.path.exists(filename):
os.remove(filename)
|
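The assertions above keep the failure message outside the parenthesized condition; if the message is placed inside the parentheses, assert receives a non-empty tuple, which is always truthy, so the check can never fail. A minimal demonstration of the pitfall:
# A non-empty tuple is truthy, so this "assertion" always passes even though 1 != 2:
assert (1 == 2, "this message accidentally turns the condition into a 2-tuple")
# The correct form evaluates the condition and attaches the message separately:
# assert 1 == 2, "this one would raise AssertionError"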
34,665 | def suggest_nlu_data(args: argparse.Namespace) -> None:
"""Load NLU training & evaluation data, paraphrases, an existing classification report, its corresponding config
file and suggest additional training
examples.
Args:
args: Commandline arguments
"""
nlu_training_data = rasa.shared.nlu.training_data.loading.load_data(
args.nlu_training_data
)
nlu_evaluation_data = rasa.shared.nlu.training_data.loading.load_data(
args.nlu_evaluation_data
)
paraphrases = rasa.shared.nlu.training_data.loading.load_data(args.paraphrases)
classification_report = rasa.shared.utils.io.read_json_file(
args.nlu_classification_report
)
random.seed(args.random_seed)
# Determine low data, low performing and frequently confused intents
num_intents = len(nlu_training_data.intents)
avg_size = int(
reduce(
lambda acc, num: acc + (num / num_intents),
nlu_training_data.number_of_examples_per_intent.values(),
0,
)
)
low_data_intents = sorted(
nlu_training_data.number_of_examples_per_intent.items(),
key=operator.itemgetter(1),
)[: args.num_intents]
low_precision_intents = sorted(
map(
lambda k: (k, classification_report[k]["precision"]),
nlu_training_data.intents,
),
key=operator.itemgetter(1),
)[: args.num_intents]
low_recall_intents = sorted(
map(
lambda k: (k, classification_report[k]["recall"]), nlu_training_data.intents
),
key=operator.itemgetter(1),
)[: args.num_intents]
low_f1_intents = sorted(
map(
lambda k: (k, classification_report[k]["f1-score"]),
nlu_training_data.intents,
),
key=operator.itemgetter(1),
)[: args.num_intents]
freq_confused_intents = sorted(
map(
lambda k: (k, sum(classification_report[k]["confused_with"].values())),
nlu_training_data.intents,
),
key=operator.itemgetter(1),
reverse=True,
)[: args.num_intents]
pooled_intents = (
set(map(lambda tp: tp[0], low_data_intents))
| set(map(lambda tp: tp[0], low_precision_intents))
| set(map(lambda tp: tp[0], low_recall_intents))
| set(map(lambda tp: tp[0], low_f1_intents))
| set(map(lambda tp: tp[0], freq_confused_intents))
)
# Retrieve paraphrase pool and training data pool
paraphrase_pool = _create_paraphrase_pool(
paraphrases, pooled_intents, args.paraphrase_score_threshold
)
training_data_pool, training_data_vocab_per_intent = _create_training_data_pool(
nlu_training_data, pooled_intents
)
# Build augmentation pools based on the maximum vocabulary expansion criterion ("diverse") and random sampling
max_vocab_expansion = _build_diverse_augmentation_pool(
paraphrase_pool, training_data_vocab_per_intent
)
random_expansion = _build_random_augmentation_pool(paraphrase_pool)
# Build augmentation training set
augmented_data_diverse, augmented_data_random = _build_augmentation_training_sets(
nlu_training_data,
training_data_pool,
random_expansion,
max_vocab_expansion,
pooled_intents,
avg_size,
)
# Store training data files
output_directory_diverse = os.path.join(args.out, "augmentation_diverse")
if not os.path.exists(output_directory_diverse):
os.makedirs(output_directory_diverse)
output_directory_random = os.path.join(args.out, "augmentation_random")
if not os.path.exists(output_directory_random):
os.makedirs(output_directory_random)
out_file_diverse = os.path.join(
output_directory_diverse, "train_augmented_diverse.yml"
)
augmented_data_diverse.persist_nlu(filename=out_file_diverse)
out_file_random = os.path.join(
output_directory_random, "train_augmented_random.yml"
)
augmented_data_random.persist_nlu(filename=out_file_random)
# Train NLU models on diverse and random augmentation sets
model_path_diverse = train_nlu(
config=args.config,
nlu_data=out_file_diverse,
output=output_directory_diverse,
domain=args.domain,
)
model_path_random = train_nlu(
config=args.config,
nlu_data=out_file_random,
output=output_directory_random,
domain=args.domain,
)
# Evaluate NLU models on NLU evaluation data
unpacked_model_path_diverse = get_model(model_path_diverse)
nlu_model_path_diverse = os.path.join(unpacked_model_path_diverse, "nlu")
interpreter = Interpreter.load(nlu_model_path_diverse)
interpreter.pipeline = remove_pretrained_extractors(interpreter.pipeline)
(intent_results, *_) = get_eval_data(interpreter, nlu_evaluation_data)
intent_report_diverse = create_intent_report(
intent_results=intent_results,
add_confused_labels_to_report=True,
metrics_as_dict=True,
)
intent_errors_diverse = extract_intent_errors_from_results(
intent_results=intent_results
)
rasa.shared.utils.io.dump_obj_as_json_to_file(
os.path.join(output_directory_diverse, "intent_errors.json"),
intent_errors_diverse,
)
unpacked_model_random = get_model(model_path_random)
nlu_model_path_random = os.path.join(unpacked_model_random, "nlu")
interpreter = Interpreter.load(nlu_model_path_random)
interpreter.pipeline = remove_pretrained_extractors(interpreter.pipeline)
(intent_results, *_) = get_eval_data(interpreter, nlu_evaluation_data)
intent_report_random = create_intent_report(
intent_results=intent_results,
add_confused_labels_to_report=True,
metrics_as_dict=True,
)
intent_errors_random = extract_intent_errors_from_results(
intent_results=intent_results
)
rasa.shared.utils.io.dump_obj_as_json_to_file(
os.path.join(output_directory_random, "intent_errors.json"),
intent_errors_random,
)
# Retrieve intents for which performance has changed
changed_intents_diverse = (
_get_intents_with_performance_changes(
classification_report,
intent_report_diverse.report,
nlu_training_data.intents,
)
- pooled_intents
)
changed_intents_random = (
_get_intents_with_performance_changes(
classification_report,
intent_report_random.report,
nlu_training_data.intents,
)
- pooled_intents
)
# Create and update result reports
report_tuple = _create_augmentation_summary(
pooled_intents,
changed_intents_diverse,
classification_report,
intent_report_diverse.report,
)
intent_summary_diverse = report_tuple[0]
intent_report_diverse.report.update(report_tuple[1])
report_tuple = _create_augmentation_summary(
pooled_intents,
changed_intents_random,
classification_report,
intent_report_random.report,
)
intent_summary_random = report_tuple[0]
intent_report_random.report.update(report_tuple[1])
# Store reports to file
rasa.shared.utils.io.dump_obj_as_json_to_file(
os.path.join(output_directory_diverse, "intent_report.json"),
intent_report_diverse.report,
)
rasa.shared.utils.io.dump_obj_as_json_to_file(
os.path.join(output_directory_random, "intent_report.json"),
intent_report_random.report,
)
# Plot the summary reports
_plot_summary_reports(
intent_summary_diverse,
intent_summary_random,
changed_intents_diverse,
changed_intents_random,
output_directory_diverse,
output_directory_random,
)
telemetry.track_data_suggest()
| def suggest_nlu_data(args: argparse.Namespace) -> None:
"""Load NLU training & evaluation data, paraphrases, an existing classification report, its corresponding config
file and suggest additional training
examples.
Args:
args: Command-line arguments.
"""
nlu_training_data = rasa.shared.nlu.training_data.loading.load_data(
args.nlu_training_data
)
nlu_evaluation_data = rasa.shared.nlu.training_data.loading.load_data(
args.nlu_evaluation_data
)
paraphrases = rasa.shared.nlu.training_data.loading.load_data(args.paraphrases)
classification_report = rasa.shared.utils.io.read_json_file(
args.nlu_classification_report
)
random.seed(args.random_seed)
# Determine low data, low performing and frequently confused intents
num_intents = len(nlu_training_data.intents)
avg_size = int(
reduce(
lambda acc, num: acc + (num / num_intents),
nlu_training_data.number_of_examples_per_intent.values(),
0,
)
)
low_data_intents = sorted(
nlu_training_data.number_of_examples_per_intent.items(),
key=operator.itemgetter(1),
)[: args.num_intents]
low_precision_intents = sorted(
map(
lambda k: (k, classification_report[k]["precision"]),
nlu_training_data.intents,
),
key=operator.itemgetter(1),
)[: args.num_intents]
low_recall_intents = sorted(
map(
lambda k: (k, classification_report[k]["recall"]), nlu_training_data.intents
),
key=operator.itemgetter(1),
)[: args.num_intents]
low_f1_intents = sorted(
map(
lambda k: (k, classification_report[k]["f1-score"]),
nlu_training_data.intents,
),
key=operator.itemgetter(1),
)[: args.num_intents]
freq_confused_intents = sorted(
map(
lambda k: (k, sum(classification_report[k]["confused_with"].values())),
nlu_training_data.intents,
),
key=operator.itemgetter(1),
reverse=True,
)[: args.num_intents]
pooled_intents = (
set(map(lambda tp: tp[0], low_data_intents))
| set(map(lambda tp: tp[0], low_precision_intents))
| set(map(lambda tp: tp[0], low_recall_intents))
| set(map(lambda tp: tp[0], low_f1_intents))
| set(map(lambda tp: tp[0], freq_confused_intents))
)
# Retrieve paraphrase pool and training data pool
paraphrase_pool = _create_paraphrase_pool(
paraphrases, pooled_intents, args.paraphrase_score_threshold
)
training_data_pool, training_data_vocab_per_intent = _create_training_data_pool(
nlu_training_data, pooled_intents
)
# Build augmentation pools based on the maximum vocabulary expansion criterion ("diverse") and random sampling
max_vocab_expansion = _build_diverse_augmentation_pool(
paraphrase_pool, training_data_vocab_per_intent
)
random_expansion = _build_random_augmentation_pool(paraphrase_pool)
# Build augmentation training set
augmented_data_diverse, augmented_data_random = _build_augmentation_training_sets(
nlu_training_data,
training_data_pool,
random_expansion,
max_vocab_expansion,
pooled_intents,
avg_size,
)
# Store training data files
output_directory_diverse = os.path.join(args.out, "augmentation_diverse")
if not os.path.exists(output_directory_diverse):
os.makedirs(output_directory_diverse)
output_directory_random = os.path.join(args.out, "augmentation_random")
if not os.path.exists(output_directory_random):
os.makedirs(output_directory_random)
out_file_diverse = os.path.join(
output_directory_diverse, "train_augmented_diverse.yml"
)
augmented_data_diverse.persist_nlu(filename=out_file_diverse)
out_file_random = os.path.join(
output_directory_random, "train_augmented_random.yml"
)
augmented_data_random.persist_nlu(filename=out_file_random)
# Train NLU models on diverse and random augmentation sets
model_path_diverse = train_nlu(
config=args.config,
nlu_data=out_file_diverse,
output=output_directory_diverse,
domain=args.domain,
)
model_path_random = train_nlu(
config=args.config,
nlu_data=out_file_random,
output=output_directory_random,
domain=args.domain,
)
# Evaluate NLU models on NLU evaluation data
unpacked_model_path_diverse = get_model(model_path_diverse)
nlu_model_path_diverse = os.path.join(unpacked_model_path_diverse, "nlu")
interpreter = Interpreter.load(nlu_model_path_diverse)
interpreter.pipeline = remove_pretrained_extractors(interpreter.pipeline)
(intent_results, *_) = get_eval_data(interpreter, nlu_evaluation_data)
intent_report_diverse = create_intent_report(
intent_results=intent_results,
add_confused_labels_to_report=True,
metrics_as_dict=True,
)
intent_errors_diverse = extract_intent_errors_from_results(
intent_results=intent_results
)
rasa.shared.utils.io.dump_obj_as_json_to_file(
os.path.join(output_directory_diverse, "intent_errors.json"),
intent_errors_diverse,
)
unpacked_model_random = get_model(model_path_random)
nlu_model_path_random = os.path.join(unpacked_model_random, "nlu")
interpreter = Interpreter.load(nlu_model_path_random)
interpreter.pipeline = remove_pretrained_extractors(interpreter.pipeline)
(intent_results, *_) = get_eval_data(interpreter, nlu_evaluation_data)
intent_report_random = create_intent_report(
intent_results=intent_results,
add_confused_labels_to_report=True,
metrics_as_dict=True,
)
intent_errors_random = extract_intent_errors_from_results(
intent_results=intent_results
)
rasa.shared.utils.io.dump_obj_as_json_to_file(
os.path.join(output_directory_random, "intent_errors.json"),
intent_errors_random,
)
# Retrieve intents for which performance has changed
changed_intents_diverse = (
_get_intents_with_performance_changes(
classification_report,
intent_report_diverse.report,
nlu_training_data.intents,
)
- pooled_intents
)
changed_intents_random = (
_get_intents_with_performance_changes(
classification_report,
intent_report_random.report,
nlu_training_data.intents,
)
- pooled_intents
)
# Create and update result reports
report_tuple = _create_augmentation_summary(
pooled_intents,
changed_intents_diverse,
classification_report,
intent_report_diverse.report,
)
intent_summary_diverse = report_tuple[0]
intent_report_diverse.report.update(report_tuple[1])
report_tuple = _create_augmentation_summary(
pooled_intents,
changed_intents_random,
classification_report,
intent_report_random.report,
)
intent_summary_random = report_tuple[0]
intent_report_random.report.update(report_tuple[1])
# Store reports to file
rasa.shared.utils.io.dump_obj_as_json_to_file(
os.path.join(output_directory_diverse, "intent_report.json"),
intent_report_diverse.report,
)
rasa.shared.utils.io.dump_obj_as_json_to_file(
os.path.join(output_directory_random, "intent_report.json"),
intent_report_random.report,
)
# Plot the summary reports
_plot_summary_reports(
intent_summary_diverse,
intent_summary_random,
changed_intents_diverse,
changed_intents_random,
output_directory_diverse,
output_directory_random,
)
telemetry.track_data_suggest()
|
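The reduce used above to compute avg_size is a running mean of the per-intent example counts. Assuming the counts mapping is non-empty, the same quantity can be written as a plain mean (the counts below are illustrative):
counts = {"greet": 40, "goodbye": 25, "affirm": 10}  # illustrative per-intent example counts
num_intents = len(counts)
avg_size = int(sum(counts.values()) / num_intents)  # same mean the reduce-based expression computes
print(avg_size)  # 25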
44,398 | def _divide_by_sqrt_n_samp(oks, samples):
"""
divide Oβ±Όβ by βn
"""
n_samp = samples.shape[0] * mpi.n_nodes # MPI
sqrt_n = float(np.sqrt(n_samp, dtype=x.dtype)) # enforce weak type
return jax.tree_map(lambda x: x / sqrt_n, oks)
| def _divide_by_sqrt_n_samp(oks, samples):
"""
divide Oβ±Όβ by βn
"""
n_samp = samples.shape[0] * mpi.n_nodes # MPI
sqrt_n = math.sqrt(n_samp) # enforce weak type
return jax.tree_map(lambda x: x / sqrt_n, oks)
|
6,930 | def import_file_by_path(path, force=False, data_import=False, pre_process=None, ignore_version=None,
reset_permissions=False, for_sync=False):
if not frappe.flags.dt:
frappe.flags.dt = []
try:
docs = read_doc_from_file(path)
except IOError:
print(path + " missing")
return
curr_hash = md5(path)
if docs:
if not isinstance(docs, list):
docs = [docs]
for doc in docs:
if not force:
try:
db_hash = frappe.db.get_value(doc["doctype"], doc["name"], "migration_hash")
except Exception:
frappe.flags.dt += [doc["doctype"]]
db_hash = None
if not db_hash:
db_modified = frappe.db.get_value(doc["doctype"], doc["name"], "modified")
if db_modified and doc.get("modified") == get_datetime_str(db_modified):
return False
if curr_hash == db_hash:
return False
original_modified = doc.get("modified")
import_doc(
docdict=doc,
force=force,
data_import=data_import,
pre_process=pre_process,
ignore_version=ignore_version,
reset_permissions=reset_permissions,
path=path,
)
if doc["doctype"] == "DocType":
if doc["name"] == "DocType":
Doctype_table=frappe.qb.DocType("DocType")
frappe.qb.update(Doctype_table).set(Doctype_table.migration_hash, curr_hash).where(Doctype_table.name == "DocType").run()
else:
frappe.db.set_value(doc["doctype"], doc["name"], "migration_hash", curr_hash)
if original_modified:
update_modified(original_modified, doc)
return True
| def import_file_by_path(path, force=False, data_import=False, pre_process=None, ignore_version=None,
reset_permissions=False, for_sync=False):
frappe.flags.dt = frappe.flags.dt or []
try:
docs = read_doc_from_file(path)
except IOError:
print(path + " missing")
return
curr_hash = md5(path)
if docs:
if not isinstance(docs, list):
docs = [docs]
for doc in docs:
if not force:
try:
db_hash = frappe.db.get_value(doc["doctype"], doc["name"], "migration_hash")
except Exception:
frappe.flags.dt += [doc["doctype"]]
db_hash = None
if not db_hash:
db_modified = frappe.db.get_value(doc["doctype"], doc["name"], "modified")
if db_modified and doc.get("modified") == get_datetime_str(db_modified):
return False
if curr_hash == db_hash:
return False
original_modified = doc.get("modified")
import_doc(
docdict=doc,
force=force,
data_import=data_import,
pre_process=pre_process,
ignore_version=ignore_version,
reset_permissions=reset_permissions,
path=path,
)
if doc["doctype"] == "DocType":
if doc["name"] == "DocType":
Doctype_table=frappe.qb.DocType("DocType")
frappe.qb.update(Doctype_table).set(Doctype_table.migration_hash, curr_hash).where(Doctype_table.name == "DocType").run()
else:
frappe.db.set_value(doc["doctype"], doc["name"], "migration_hash", curr_hash)
if original_modified:
update_modified(original_modified, doc)
return True
|
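The modified version above only adds 1e-15 to the denominator. Since the Dice coefficient is 2*TP / (2*TP + FP + FN), an all-zero confusion matrix would otherwise divide by zero; a small framework-free sketch of the guarded formula (the function name is illustrative):
def dice(tp, fp_plus_fn, eps=1e-15):
    # Dice coefficient with an epsilon guarding the empty-confusion-matrix case.
    return (2.0 * tp) / (2.0 * tp + fp_plus_fn + eps)

print(dice(8, 4))  # 0.8 (the epsilon is negligible here)
print(dice(0, 0))  # 0.0 instead of a ZeroDivisionError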
40,672 | def dice_coefficient(cm, ignore_index=None):
"""Calculates Dice Coefficient for a given Confusion Matrix.
Args:
cm (ConfusionMatrix): instance of confusion matrix metric
ignore_index (int, optional): index to ignore, e.g. background index
"""
if not isinstance(cm, ConfusionMatrix):
raise TypeError("Argument cm should be instance of ConfusionMatrix, but given {}".format(type(cm)))
if ignore_index is not None:
if not (isinstance(ignore_index, numbers.Integral) and 0 <= ignore_index < cm.num_classes):
raise ValueError("ignore_index should be non-negative integer, but given {}".format(ignore_index))
tp = cm.confusion_matrix.diag().sum()
fp_plus_fn = cm.confusion_matrix.sum() - tp
return (2.0 * tp) / (2.0 * tp + fp_plus_fn)
| def dice_coefficient(cm, ignore_index=None):
"""Calculates Dice Coefficient for a given Confusion Matrix.
Args:
cm (ConfusionMatrix): instance of confusion matrix metric
ignore_index (int, optional): index to ignore, e.g. background index
"""
if not isinstance(cm, ConfusionMatrix):
raise TypeError("Argument cm should be instance of ConfusionMatrix, but given {}".format(type(cm)))
if ignore_index is not None:
if not (isinstance(ignore_index, numbers.Integral) and 0 <= ignore_index < cm.num_classes):
raise ValueError("ignore_index should be non-negative integer, but given {}".format(ignore_index))
tp = cm.confusion_matrix.diag().sum()
fp_plus_fn = cm.confusion_matrix.sum() - tp
return (2.0 * tp) / (2.0 * tp + fp_plus_fn + 1e-15)
|
40,277 | def test_data():
torch_geometric.set_debug(True)
x = torch.tensor([[1, 3, 5], [2, 4, 6]], dtype=torch.float).t()
edge_index = torch.tensor([[0, 0, 1, 1, 2], [1, 1, 0, 2, 1]])
data = Data(x=x, edge_index=edge_index).to(torch.device('cpu'))
N = data.num_nodes
assert N == 3
assert data.x.tolist() == x.tolist()
assert data['x'].tolist() == x.tolist()
assert sorted(data.keys) == ['edge_index', 'x']
assert len(data) == 2
assert 'x' in data and 'edge_index' in data and 'pos' not in data
D = data.to_dict()
assert len(D) == 2
assert 'x' in D and 'edge_index' in D
D = data.to_namedtuple()
assert len(D) == 2
assert D.x is not None and D.edge_index is not None
assert data.__cat_dim__('x', data.x) == 0
assert data.__cat_dim__('edge_index', data.edge_index) == -1
assert data.__inc__('x', data.x) == 0
assert data.__inc__('edge_index', data.edge_index) == data.num_nodes
assert not data.x.is_contiguous()
data.contiguous()
assert data.x.is_contiguous()
assert not data.is_coalesced()
data = data.coalesce()
assert data.is_coalesced()
clone = data.clone()
assert clone != data
assert len(clone) == len(data)
assert clone.x.tolist() == data.x.tolist()
assert clone.edge_index.tolist() == data.edge_index.tolist()
# test to_heterogenous
hetero_data = data.to_heterogeneous()
assert torch.allclose(data.x, hetero_data.node_stores[0]['x'])
assert torch.allclose(data.edge_index,
hetero_data.edge_stores[0]['edge_index'])
data.edge_type = torch.tensor([0, 0, 1, 0])
hetero_data = data.to_heterogeneous()
assert torch.allclose(data.x, hetero_data.node_stores[0]['x'])
assert [3, 1] == [i.edge_index.size(1) for i in hetero_data.edge_stores]
data.edge_type = None
data['x'] = x + 1
assert data.x.tolist() == (x + 1).tolist()
assert str(data) == 'Data(x=[3, 2], edge_index=[2, 4])'
dictionary = {'x': data.x, 'edge_index': data.edge_index}
data = Data.from_dict(dictionary)
assert sorted(data.keys) == ['edge_index', 'x']
assert not data.has_isolated_nodes()
assert not data.has_self_loops()
assert data.is_undirected()
assert not data.is_directed()
assert data.num_nodes == 3
assert data.num_edges == 4
assert data.num_faces is None
assert data.num_node_features == 2
assert data.num_features == 2
data.edge_attr = torch.randn(data.num_edges, 2)
assert data.num_edge_features == 2
data.edge_attr = None
data.x = None
assert data.num_nodes == 3
data.edge_index = None
assert data.num_nodes is None
assert data.num_edges == 0
data.num_nodes = 4
assert data.num_nodes == 4
data = Data(x=x, attribute=x)
assert len(data) == 2
assert data.x.tolist() == x.tolist()
assert data.attribute.tolist() == x.tolist()
face = torch.tensor([[0, 1], [1, 2], [2, 3]])
data = Data(num_nodes=4, face=face)
assert data.num_faces == 2
assert data.num_nodes == 4
data = Data(title='test')
assert str(data) == "Data(title='test')"
assert data.num_node_features == 0
assert data.num_edge_features == 0
torch_geometric.set_debug(False)
| def test_data():
torch_geometric.set_debug(True)
x = torch.tensor([[1, 3, 5], [2, 4, 6]], dtype=torch.float).t()
edge_index = torch.tensor([[0, 0, 1, 1, 2], [1, 1, 0, 2, 1]])
data = Data(x=x, edge_index=edge_index).to(torch.device('cpu'))
N = data.num_nodes
assert N == 3
assert data.x.tolist() == x.tolist()
assert data['x'].tolist() == x.tolist()
assert sorted(data.keys) == ['edge_index', 'x']
assert len(data) == 2
assert 'x' in data and 'edge_index' in data and 'pos' not in data
D = data.to_dict()
assert len(D) == 2
assert 'x' in D and 'edge_index' in D
D = data.to_namedtuple()
assert len(D) == 2
assert D.x is not None and D.edge_index is not None
assert data.__cat_dim__('x', data.x) == 0
assert data.__cat_dim__('edge_index', data.edge_index) == -1
assert data.__inc__('x', data.x) == 0
assert data.__inc__('edge_index', data.edge_index) == data.num_nodes
assert not data.x.is_contiguous()
data.contiguous()
assert data.x.is_contiguous()
assert not data.is_coalesced()
data = data.coalesce()
assert data.is_coalesced()
clone = data.clone()
assert clone != data
assert len(clone) == len(data)
assert clone.x.tolist() == data.x.tolist()
assert clone.edge_index.tolist() == data.edge_index.tolist()
# test to_heterogenous
hetero_data = data.to_heterogeneous()
assert torch.allclose(data.x, hetero_data['0'].x)
assert torch.allclose(data.edge_index,
hetero_data.edge_stores[0]['edge_index'])
data.edge_type = torch.tensor([0, 0, 1, 0])
hetero_data = data.to_heterogeneous()
assert torch.allclose(data.x, hetero_data.node_stores[0]['x'])
assert [3, 1] == [i.edge_index.size(1) for i in hetero_data.edge_stores]
data.edge_type = None
data['x'] = x + 1
assert data.x.tolist() == (x + 1).tolist()
assert str(data) == 'Data(x=[3, 2], edge_index=[2, 4])'
dictionary = {'x': data.x, 'edge_index': data.edge_index}
data = Data.from_dict(dictionary)
assert sorted(data.keys) == ['edge_index', 'x']
assert not data.has_isolated_nodes()
assert not data.has_self_loops()
assert data.is_undirected()
assert not data.is_directed()
assert data.num_nodes == 3
assert data.num_edges == 4
assert data.num_faces is None
assert data.num_node_features == 2
assert data.num_features == 2
data.edge_attr = torch.randn(data.num_edges, 2)
assert data.num_edge_features == 2
data.edge_attr = None
data.x = None
assert data.num_nodes == 3
data.edge_index = None
assert data.num_nodes is None
assert data.num_edges == 0
data.num_nodes = 4
assert data.num_nodes == 4
data = Data(x=x, attribute=x)
assert len(data) == 2
assert data.x.tolist() == x.tolist()
assert data.attribute.tolist() == x.tolist()
face = torch.tensor([[0, 1], [1, 2], [2, 3]])
data = Data(num_nodes=4, face=face)
assert data.num_faces == 2
assert data.num_nodes == 4
data = Data(title='test')
assert str(data) == "Data(title='test')"
assert data.num_node_features == 0
assert data.num_edge_features == 0
torch_geometric.set_debug(False)
|
18,788 | def config_prefer_upstream(args):
"""Generate a packages config based on the configuration of all upstream
installs."""
scope = args.scope
if scope is None:
scope = spack.config.default_modify_scope('packages')
specs = spack.store.db.query(installed=[InstallStatuses.INSTALLED])
pref_specs = []
for spec in specs:
upstream = None
try:
upstream = spec.package.installed_upstream
except spack.repo.UnknownNamespaceError as err:
tty.die(
"Could not find package when checking spec {0} ({1}). "
"This is usually due to your Spack instance not being "
"configured to know about the upstream's repositories."
.format(spec.name, err.message)
)
if (upstream and not args.local) or (not upstream and args.local):
pref_specs.append(spec)
conflicting_variants = set()
pkgs = {}
for spec in pref_specs:
# Collect all the upstream compilers and versions for this package.
pkg = pkgs.get(spec.name, {
'version': [],
'compiler': [],
})
pkgs[spec.name] = pkg
# We have no existing variant if this is our first added version.
existing_variants = pkg.get('variants',
None if not pkg['version'] else '')
version = spec.version.string
if version not in pkg['version']:
pkg['version'].append(version)
compiler = str(spec.compiler)
if compiler not in pkg['compiler']:
pkg['compiler'].append(compiler)
# Get and list all the variants that differ from the default.
variants = []
for var_name, variant in spec.variants.items():
if (var_name in ['patches']
or var_name not in spec.package.variants):
continue
if variant.value != spec.package.variants[var_name].default:
variants.append(str(variant))
variants.sort()
variants = ' '.join(variants)
if spec.name not in conflicting_variants:
# Only specify the variants if there's a single variant
# set across all versions/compilers.
if existing_variants is not None and existing_variants != variants:
conflicting_variants.add(spec.name)
del pkg['variants']
elif variants:
pkg['variants'] = variants
if conflicting_variants:
tty.warn(
"The following packages have multiple conflicting upstream "
"specs. You may have to specify, by "
"concretized hash, which spec you want when building "
"packages that depend on them:\n - {0}"
.format("\n - ".join(sorted(conflicting_variants))))
# Simply write the config to the specified file.
existing = spack.config.get('packages', scope=scope)
new = spack.config.merge_yaml(existing, pkgs)
spack.config.set('packages', new, scope)
config_file = spack.config.config.get_config_filename(scope, 'packages')
tty.msg("Updated config at {0}".format(config_file))
| def config_prefer_upstream(args):
"""Generate a packages config based on the configuration of all upstream
installs."""
scope = args.scope
if scope is None:
scope = spack.config.default_modify_scope('packages')
specs = spack.store.db.query(installed=True)
pref_specs = []
for spec in specs:
upstream = None
try:
upstream = spec.package.installed_upstream
except spack.repo.UnknownNamespaceError as err:
tty.die(
"Could not find package when checking spec {0} ({1}). "
"This is usually due to your Spack instance not being "
"configured to know about the upstream's repositories."
.format(spec.name, err.message)
)
if (upstream and not args.local) or (not upstream and args.local):
pref_specs.append(spec)
conflicting_variants = set()
pkgs = {}
for spec in pref_specs:
# Collect all the upstream compilers and versions for this package.
pkg = pkgs.get(spec.name, {
'version': [],
'compiler': [],
})
pkgs[spec.name] = pkg
# We have no existing variant if this is our first added version.
existing_variants = pkg.get('variants',
None if not pkg['version'] else '')
version = spec.version.string
if version not in pkg['version']:
pkg['version'].append(version)
compiler = str(spec.compiler)
if compiler not in pkg['compiler']:
pkg['compiler'].append(compiler)
# Get and list all the variants that differ from the default.
variants = []
for var_name, variant in spec.variants.items():
if (var_name in ['patches']
or var_name not in spec.package.variants):
continue
if variant.value != spec.package.variants[var_name].default:
variants.append(str(variant))
variants.sort()
variants = ' '.join(variants)
if spec.name not in conflicting_variants:
# Only specify the variants if there's a single variant
# set across all versions/compilers.
if existing_variants is not None and existing_variants != variants:
conflicting_variants.add(spec.name)
del pkg['variants']
elif variants:
pkg['variants'] = variants
if conflicting_variants:
tty.warn(
"The following packages have multiple conflicting upstream "
"specs. You may have to specify, by "
"concretized hash, which spec you want when building "
"packages that depend on them:\n - {0}"
.format("\n - ".join(sorted(conflicting_variants))))
# Simply write the config to the specified file.
existing = spack.config.get('packages', scope=scope)
new = spack.config.merge_yaml(existing, pkgs)
spack.config.set('packages', new, scope)
config_file = spack.config.config.get_config_filename(scope, section)
tty.msg("Updated config at {0}".format(config_file))
|
33,988 | def _pack_dir(
source_dir: str, files_stats: Optional[Dict[str, Tuple[float, int]]] = None
) -> io.BytesIO:
"""Pack whole directory contents into a uncompressed tarfile.
This function accepts a ``files_stats`` argument. If given, only files
whose stats differ from these stats will be packed.
The main use case for this is that we can collect information about files
already existing in the target directory, and only pack files that have
been updated. This is similar to how cloud syncing utilities decide
which files to transfer.
Args:
source_dir: Path to local directory to pack into tarfile.
files_stats: Dict of relative filenames mapping to a tuple of
(mtime, filesize). Only files that differ from these stats
will be packed.
Returns:
Tarfile as a stream object.
"""
stream = io.BytesIO()
with tarfile.open(fileobj=stream, mode="w", format=tarfile.PAX_FORMAT) as tar:
if not files_stats:
# If no `files_stats` is passed, pack whole directory
tar.add(source_dir, arcname="", recursive=True)
else:
# Otherwise, only pack differing files
tar.add(source_dir, arcname="", recursive=False)
for root, dirs, files in os.walk(source_dir, topdown=False):
rel_root = os.path.relpath(root, source_dir)
# Always add all directories
for dir in dirs:
key = os.path.join(rel_root, dir)
tar.add(os.path.join(source_dir, key), arcname=key, recursive=False)
# Add files where our information differs
for file in files:
key = os.path.join(rel_root, file)
stat = os.lstat(os.path.join(source_dir, key))
file_stat = stat.st_mtime, stat.st_size
if key not in files_stats or file_stat != files_stats[key]:
tar.add(os.path.join(source_dir, key), arcname=key)
return stream
| def _pack_dir(
source_dir: str, files_stats: Optional[Dict[str, Tuple[float, int]]] = None
) -> io.BytesIO:
"""Pack whole directory contents into an uncompressed tarfile.
This function accepts a ``files_stats`` argument. If given, only files
whose stats differ from these stats will be packed.
The main use case for this is that we can collect information about files
already existing in the target directory, and only pack files that have
been updated. This is similar to how cloud syncing utilities decide
which files to transfer.
Args:
source_dir: Path to local directory to pack into tarfile.
files_stats: Dict of relative filenames mapping to a tuple of
(mtime, filesize). Only files that differ from these stats
will be packed.
Returns:
Tarfile as a stream object.
"""
stream = io.BytesIO()
with tarfile.open(fileobj=stream, mode="w", format=tarfile.PAX_FORMAT) as tar:
if not files_stats:
# If no `files_stats` is passed, pack whole directory
tar.add(source_dir, arcname="", recursive=True)
else:
# Otherwise, only pack differing files
tar.add(source_dir, arcname="", recursive=False)
for root, dirs, files in os.walk(source_dir, topdown=False):
rel_root = os.path.relpath(root, source_dir)
# Always add all directories
for dir in dirs:
key = os.path.join(rel_root, dir)
tar.add(os.path.join(source_dir, key), arcname=key, recursive=False)
# Add files where our information differs
for file in files:
key = os.path.join(rel_root, file)
stat = os.lstat(os.path.join(source_dir, key))
file_stat = stat.st_mtime, stat.st_size
if key not in files_stats or file_stat != files_stats[key]:
tar.add(os.path.join(source_dir, key), arcname=key)
return stream
|
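The files_stats mapping described in the docstring above can be collected by walking the target directory with the same relative-path keys that _pack_dir uses; a sketch under that assumption (the helper name is illustrative):
import os
from typing import Dict, Tuple

def collect_file_stats(target_dir: str) -> Dict[str, Tuple[float, int]]:
    # Map each relative file path to (mtime, size) so only changed files are re-packed.
    stats = {}
    for root, _dirs, files in os.walk(target_dir):
        rel_root = os.path.relpath(root, target_dir)
        for name in files:
            key = os.path.join(rel_root, name)
            st = os.lstat(os.path.join(target_dir, key))
            stats[key] = (st.st_mtime, st.st_size)
    return stats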
517 | def _can_send_test_report(report_id, user, domain):
try:
report = ReportNotification.get(report_id)
except ResourceNotFound:
return False
if report.domain != domain:
return False
if user._id != report.owner._id and not user.is_domain_admin(domain):
return False
return True
| def _can_send_test_report(report_id, user, domain):
try:
report = ReportNotification.get(report_id)
except ResourceNotFound:
return False
return _can_delete_scheduled_report(report, user, domain)
|
4,572 | def _set_view_plot_surf_plotly(hemi, view):
"""Helper function for plot_surf with plotly engine.
This function checks the selected hemisphere and view, and
returns the cameras view.
"""
hemis = np.array(['left', 'right'])
if hemi not in hemis:
raise ValueError(f"'hemi' must be one of {VALID_HEMISPHERES}")
if view == 'anterior':
cameras_view = 'front'
elif view == 'posterior':
cameras_view = 'back'
elif view == 'dorsal':
cameras_view = 'top'
elif view == 'ventral':
cameras_view = 'bottom'
elif view == 'lateral':
cameras_view = hemi
elif view == 'medial':
cameras_view = hemis[hemis != hemi][0]
else:
raise ValueError(f"view must be one of {VALID_VIEWS}")
return cameras_view
| def _set_view_plot_surf_plotly(hemi, view):
"""Helper function for plot_surf with plotly engine.
This function checks the selected hemisphere and view, and
returns the cameras view.
"""
if hemi not in VALID_HEMISPHERES:
raise ValueError(f"'hemi' must be one of {VALID_HEMISPHERES}")
if view == 'anterior':
cameras_view = 'front'
elif view == 'posterior':
cameras_view = 'back'
elif view == 'dorsal':
cameras_view = 'top'
elif view == 'ventral':
cameras_view = 'bottom'
elif view == 'lateral':
cameras_view = hemi
elif view == 'medial':
cameras_view = 'left' if hemi == 'right' else 'right'
else:
raise ValueError(f"view must be one of {VALID_VIEWS}")
return cameras_view
|
31,195 | def reopen_incidents_command(client, args):
incident_ids = args.get('incident_ids')
result = client.reopen_incidents(incident_ids)
if not result.get('success'):
raise DemistoException(result['message'])
msg = result.get('message')
markdown = "### " + msg
return CommandResults(
readable_output=markdown,
outputs_prefix='LogPoint.Incidents.reopen',
outputs_key_field='',
outputs=msg
)
| def reopen_incidents_command(client, args):
incident_ids = argToList(args.get('incident_ids'))
result = client.reopen_incidents(incident_ids)
if not result.get('success'):
raise DemistoException(result['message'])
msg = result.get('message')
markdown = "### " + msg
return CommandResults(
readable_output=markdown,
outputs_prefix='LogPoint.Incidents.reopen',
outputs_key_field='',
outputs=msg
)
|
26,432 | def load_font(prefix, ttf_filename, charmap_filename, directory=None):
"""
Loads a font file and the associated charmap.
If ``directory`` is None, the files will be looked for in the qtawesome ``fonts``
directory.
Parameters
----------
prefix: str
Prefix string to be used when accessing a given font set
ttf_filename: str
Ttf font filename
charmap_filename: str
Character map filename
directory: str or None, optional
Directory path for font and charmap files
Example
-------
If you want to load a font ``myicon.ttf`` with a ``myicon-charmap.json``
charmap added to the qtawesome ``fonts`` directory (usually located at
``</path/to/lib/python>/site-packages/qtawesome/fonts/``) you can use::
qta.load_font(
'myicon',
'myicon.ttf',
'myicon-charmap.json'
)
However, if you want to load a font ``myicon.ttf`` with a
``myicon-charmap.json`` charmap located in a specific path outside the
qtawesome ``fonts`` directory like for example ``/path/to/myproject/fonts``
you can use::
qta.load_font(
'myicon',
'myicon.ttf',
'myicon-charmap.json',
directory='/path/to/myproject/fonts'
)
"""
return _instance().load_font(prefix, ttf_filename, charmap_filename, directory)
| def load_font(prefix, ttf_filename, charmap_filename, directory=None):
"""
Loads a font file and the associated charmap.
If ``directory`` is None, the files will be looked for in the qtawesome ``fonts``
directory.
Parameters
----------
prefix: str
Prefix string to be used when accessing a given font set
ttf_filename: str
Ttf font filename
charmap_filename: str
Character map filename
directory: str or None, optional
Directory path for font and charmap files
Example
-------
If you want to load a font ``myicon.ttf`` with a ``myicon-charmap.json``
charmap added to the qtawesome ``fonts`` directory (usually located at
``</path/to/lib/python>/site-packages/qtawesome/fonts/``) you can use::
qta.load_font(
'myicon',
'myicon.ttf',
'myicon-charmap.json'
)
However, if you want to load a font ``myicon.ttf`` with a
``myicon-charmap.json`` charmap located in a specific path outside the
qtawesome ``fonts`` directory like for example ``/path/to/myproject/fonts``
you can use::
qta.load_font(
'myicon',
'myicon.ttf',
'myicon-charmap.json',
directory='/path/to/myproject/fonts'
)
"""
return _instance().load_font(prefix, ttf_filename, charmap_filename, directory)
|
57,576 | def _worker_shared_memory(index, env_fn, pipe, parent_pipe, shared_memory, error_queue):
assert shared_memory is not None
env = env_fn()
observation_space = env.observation_space
parent_pipe.close()
try:
while True:
command, data = pipe.recv()
if command == "reset":
observation = env.reset()
write_to_shared_memory(
index, observation, shared_memory, observation_space
)
pipe.send((None, True))
elif command == "step":
observation, reward, done, info = env.step(data)
if done:
observation = env.reset()
write_to_shared_memory(
index, observation, shared_memory, observation_space
)
pipe.send(((None, reward, done, info), True))
elif command == "seed":
env.seed(data)
pipe.send((None, True))
elif command == "close":
pipe.send((None, True))
break
elif command == "_call":
name, args, kwargs = data
if name in ["reset", "step", "seed", "close"]:
raise ValueError(
"Trying to call function `{0}` with "
"`_call`. Use `{0}` directly instead.".format(name)
)
function = getattr(env, name)
if callable(function):
pipe.send((function(*args, **kwargs), True))
else:
pipe.send((function, True))
elif command == "_setattr":
name, value = data
setattr(env, name, value)
pipe.send((None, True))
elif command == "_check_observation_space":
pipe.send((data == observation_space, True))
else:
raise RuntimeError(
"Received unknown command `{0}`. Must "
"be one of {{`reset`, `step`, `seed`, `close`, `_call`, "
"`_setattr`, `_check_observation_space`}}.".format(command)
)
except (KeyboardInterrupt, Exception):
error_queue.put((index,) + sys.exc_info()[:2])
pipe.send((None, False))
finally:
env.close()
| def _worker_shared_memory(index, env_fn, pipe, parent_pipe, shared_memory, error_queue):
assert shared_memory is not None
env = env_fn()
observation_space = env.observation_space
parent_pipe.close()
try:
while True:
command, data = pipe.recv()
if command == "reset":
observation = env.reset()
write_to_shared_memory(
index, observation, shared_memory, observation_space
)
pipe.send((None, True))
elif command == "step":
observation, reward, done, info = env.step(data)
if done:
observation = env.reset()
write_to_shared_memory(
index, observation, shared_memory, observation_space
)
pipe.send(((None, reward, done, info), True))
elif command == "seed":
env.seed(data)
pipe.send((None, True))
elif command == "close":
pipe.send((None, True))
break
elif command == "_call":
name, args, kwargs = data
if name in ["reset", "step", "seed", "close"]:
raise ValueError(
f"Trying to call function `{name}` with "
f"`_call`. Use `{name}` directly instead."
)
function = getattr(env, name)
if callable(function):
pipe.send((function(*args, **kwargs), True))
else:
pipe.send((function, True))
elif command == "_setattr":
name, value = data
setattr(env, name, value)
pipe.send((None, True))
elif command == "_check_observation_space":
pipe.send((data == observation_space, True))
else:
raise RuntimeError(
"Received unknown command `{0}`. Must "
"be one of {{`reset`, `step`, `seed`, `close`, `_call`, "
"`_setattr`, `_check_observation_space`}}.".format(command)
)
except (KeyboardInterrupt, Exception):
error_queue.put((index,) + sys.exc_info()[:2])
pipe.send((None, False))
finally:
env.close()
|
39,597 | def _default_max_backend_connections():
try:
import psutil
except ImportError:
total_mem = 1024 * 1024 * 1024
if sys.platform.startswith("linux"):
with open('/proc/meminfo', 'rb') as f:
for line in f:
if line.startswith(b'MemTotal'):
total_mem = int(line.split()[1]) * 1024
break
else:
total_mem = psutil.virtual_memory().total
return int(total_mem / 1024 / 1024 / 100 * 8)
| def _compute_default_max_backend_connections():
try:
import psutil
except ImportError:
total_mem = 1024 * 1024 * 1024
if sys.platform.startswith("linux"):
with open('/proc/meminfo', 'rb') as f:
for line in f:
if line.startswith(b'MemTotal'):
total_mem = int(line.split()[1]) * 1024
break
else:
total_mem = psutil.virtual_memory().total
return int(total_mem / 1024 / 1024 / 100 * 8)
|
58,914 | def launch_mapdl(
exec_file=None,
run_location=None,
jobname="file",
nproc=2,
ram=None,
mode=None,
override=False,
loglevel="ERROR",
additional_switches="",
start_timeout=120,
port=None,
cleanup_on_exit=True,
start_instance=None,
ip=None,
clear_on_connect=True,
log_apdl=None,
verbose_mapdl=False,
license_server_check=True,
license_type=None,
print_com=False,
**kwargs,
) -> _MapdlCore:
"""Start MAPDL locally.
Parameters
----------
exec_file : str, optional
The location of the MAPDL executable. Will use the cached
location when left at the default ``None``.
run_location : str, optional
MAPDL working directory. Defaults to a temporary working
directory. If directory doesn't exist, will create one.
jobname : str, optional
MAPDL jobname. Defaults to ``'file'``.
nproc : int, optional
Number of processors. Defaults to 2.
ram : float, optional
Fixed amount of memory to request for MAPDL. If ``None``,
then MAPDL will use as much as available on the host machine.
mode : str, optional
Mode to launch MAPDL. Must be one of the following:
- ``'grpc'``
- ``'corba'``
- ``'console'``
The ``'grpc'`` mode is available on ANSYS 2021R1 or newer and
provides the best performance and stability. The ``'corba'``
mode is available from v17.0 and newer and is given legacy
support. This mode requires the additional
``ansys_corba`` module. Finally, the ``'console'`` mode
is for legacy use on Linux only, prior to v17.0. This console
mode is pending deprecation.
override : bool, optional
Attempts to delete the lock file at the run_location.
Useful when a prior MAPDL session has exited prematurely and
the lock file has not been deleted.
loglevel : str, optional
Sets which messages are printed to the console. ``'INFO'``
prints out all ANSYS messages, ``'WARNING``` prints only
messages containing ANSYS warnings, and ``'ERROR'`` logs only
error messages.
additional_switches : str, optional
Additional switches for MAPDL, for example ``'aa_r'``, the
academic research license, would be added with:
- ``additional_switches="-aa_r"``
Avoid adding switches like -i -o or -b as these are already
included to start up the MAPDL server. See the notes
section for additional details.
start_timeout : float, optional
Maximum allowable time to connect to the MAPDL server.
port : int
Port to launch MAPDL gRPC on. Final port will be the first
port available after (or including) this port. Defaults to
50052. You can also override the default behavior of this
keyword argument with the environment variable
``PYMAPDL_PORT=<VALID PORT>``
custom_bin : str, optional
Path to the MAPDL custom executable. On release 2020R2 on
Linux, if ``None``, will check to see if you have
``ansys.mapdl_bin`` installed and use that executable.
cleanup_on_exit : bool, optional
Exit MAPDL when python exits or the mapdl Python instance is
garbage collected.
start_instance : bool, optional
When False, connect to an existing MAPDL instance at ``ip``
and ``port``, which default to ``'127.0.0.1'`` at 50052.
Otherwise, launch a local instance of MAPDL. You can also
override the default behavior of this keyword argument with
the environment variable ``PYMAPDL_START_INSTANCE=FALSE``.
ip : bool, optional
Used only when ``start_instance`` is ``False``. If provided,
it will force ``start_instance`` to be ``False``.
You can also provide a hostname as an alternative to an IP address.
Defaults to ``'127.0.0.1'``. You can also override the
default behavior of this keyword argument with the
environment variable "PYMAPDL_IP=FALSE".
clear_on_connect : bool, optional
Defaults to ``True``, giving you a fresh environment when
connecting to MAPDL. If ``start_instance`` is specified,
it defaults to ``False``.
log_apdl : str, optional
Enables logging every APDL command to the local disk. This
can be used to "record" all the commands that are sent to
MAPDL via PyMAPDL so a script can be run within MAPDL without
PyMAPDL. This string is the path of the output file (e.g.
``log_apdl='pymapdl_log.txt'``). By default this is disabled.
remove_temp_files : bool, optional
Removes temporary files on exit. Default ``False``.
verbose_mapdl : bool, optional
Enable printing of all output when launching and running
MAPDL. This should be used for debugging only as output can
be tracked within pymapdl. Default ``False``.
license_server_check : bool, optional
Check if the license server is available if MAPDL fails to
start. Only available on ``mode='grpc'``. Defaults ``True``.
license_type : str, optional
Enable license type selection. You can input a string for its
license name (for example ``'meba'`` or ``'ansys'``) or its description
("enterprise solver" or "enterprise" respectively).
You can also use legacy licenses (for example ``'aa_t_a'``) but it will
also raise a warning. If it is not used (``None``), no specific license
will be requested, being up to the license server to provide a specific
license type. Default is ``None``.
print_com : bool, optional
Print the command ``/COM`` arguments to the standard output.
Default ``False``.
Returns
-------
ansys.mapdl.core.mapdl._MapdlCore
An instance of Mapdl. Type depends on the selected ``mode``.
Notes
-----
These are the MAPDL switch options as of 2020R2 applicable for
running MAPDL as a service via gRPC. Excluded switches such as
``"-j"`` are either not applicable or are set via keyword arguments.
\-acc <device>
Enables the use of GPU hardware. See GPU
Accelerator Capability in the Parallel Processing Guide for more
information.
\-amfg
Enables the additive manufacturing capability. Requires
an additive manufacturing license. For general information about
this feature, see AM Process Simulation in ANSYS Workbench.
\-ansexe <executable>
Activates a custom mechanical APDL executable.
In the ANSYS Workbench environment, activates a custom
Mechanical APDL executable.
\-custom <executable>
Calls a custom Mechanical APDL executable
See Running Your Custom Executable in the Programmer's Reference
for more information.
\-db value
Initial memory allocation
Defines the portion of workspace (memory) to be used as the
initial allocation for the database. The default is 1024
MB. Specify a negative number to force a fixed size throughout
the run; useful on small memory systems.
\-dis
Enables Distributed ANSYS
See the Parallel Processing Guide for more information.
\-dvt
Enables ANSYS DesignXplorer advanced task (add-on).
Requires DesignXplorer.
\-l <language>
Specifies a language file to use other than English
This option is valid only if you have a translated message file
in an appropriately named subdirectory in
``/ansys_inc/v201/ansys/docu`` or
``Program Files\\ANSYS\\Inc\\V201\\ANSYS\\docu``
\-m <workspace>
Specifies the total size of the workspace
Workspace (memory) in megabytes used for the initial
allocation. If you omit the ``-m`` option, the default is 2 GB
(2048 MB). Specify a negative number to force a fixed size
throughout the run.
\-machines <IP>
Specifies the distributed machines
Machines on which to run a Distributed ANSYS analysis. See
Starting Distributed ANSYS in the Parallel Processing Guide for
more information.
\-mpi <value>
Specifies the type of MPI to use.
See the Parallel Processing Guide for more information.
\-mpifile <appfile>
Specifies an existing MPI file
Specifies an existing MPI file (appfile) to be used in a
Distributed ANSYS run. See Using MPI Files in the Parallel
Processing Guide for more information.
\-na <value>
Specifies the number of GPU accelerator devices
Number of GPU devices per machine or compute node when running
with the GPU accelerator feature. See GPU Accelerator Capability
in the Parallel Processing Guide for more information.
\-name <value>
Defines Mechanical APDL parameters
Set mechanical APDL parameters at program start-up. The parameter
name must be at least two characters long. For details about
parameters, see the ANSYS Parametric Design Language Guide.
\-p <productname>
ANSYS session product
Defines the ANSYS session product that will run during the
session. For more detailed information about the ``-p`` option,
see Selecting an ANSYS Product via the Command Line.
\-ppf <license feature name>
HPC license
Specifies which HPC license to use during a parallel processing
run. See HPC Licensing in the Parallel Processing Guide for more
information.
\-smp
Enables shared-memory parallelism.
See the Parallel Processing Guide for more information.
If the environment is configured to use `PyPIM <https://pypim.docs.pyansys.com>`_
and ``start_instance`` is ``True``, then starting the instance will be delegated to PyPIM.
In this event, most of the options will be ignored and the server side configuration will
be used.
Examples
--------
Launch MAPDL using the best protocol.
>>> from ansys.mapdl.core import launch_mapdl
>>> mapdl = launch_mapdl()
Run MAPDL with shared memory parallel and specify the location of
the Ansys binary.
>>> exec_file = 'C:/Program Files/ANSYS Inc/v201/ansys/bin/win64/ANSYS201.exe'
>>> mapdl = launch_mapdl(exec_file, additional_switches='-smp')
Connect to an existing instance of MAPDL at IP 192.168.1.30 and
port 50001. This is only available using the latest ``'grpc'``
mode.
>>> mapdl = launch_mapdl(start_instance=False, ip='192.168.1.30',
... port=50001)
Force the usage of the CORBA protocol.
>>> mapdl = launch_mapdl(mode='corba')
Run MAPDL using the console mode (available only on Linux).
>>> mapdl = launch_mapdl('/ansys_inc/v194/ansys/bin/ansys194',
... mode='console')
"""
# These parameters are partially used for unit testing
set_no_abort = kwargs.get("set_no_abort", True)
if ip is None:
ip = os.environ.get("PYMAPDL_IP", LOCALHOST)
else: # pragma: no cover
start_instance = False
ip = socket.gethostbyname(ip) # Converting ip or hostname to ip
check_valid_ip(ip) # double check
if port is None:
port = int(os.environ.get("PYMAPDL_PORT", MAPDL_DEFAULT_PORT))
check_valid_port(port)
# Start MAPDL with PyPIM if the environment is configured for it
# and the user did not pass a directive on how to launch it.
if _HAS_PIM and exec_file is None and pypim.is_configured():
LOG.info("Starting MAPDL remotely. The startup configuration will be ignored.")
return launch_remote_mapdl(cleanup_on_exit=cleanup_on_exit)
# connect to an existing instance if enabled
if start_instance is None:
start_instance = check_valid_start_instance(
os.environ.get("PYMAPDL_START_INSTANCE", True)
)
# special handling when building the gallery outside of CI. This
# creates an instance of mapdl the first time if PYMAPDL start instance
# is False.
if pymapdl.BUILDING_GALLERY: # pragma: no cover
# launch an instance of pymapdl if it does not already exist and
# we're allowed to start instances
if start_instance and GALLERY_INSTANCE[0] is None:
mapdl = launch_mapdl(
start_instance=True,
cleanup_on_exit=False,
loglevel=loglevel,
set_no_abort=set_no_abort,
)
GALLERY_INSTANCE[0] = {"ip": mapdl._ip, "port": mapdl._port}
return mapdl
# otherwise, connect to the existing gallery instance if available
elif GALLERY_INSTANCE[0] is not None:
mapdl = MapdlGrpc(
ip=GALLERY_INSTANCE[0]["ip"],
port=GALLERY_INSTANCE[0]["port"],
cleanup_on_exit=False,
loglevel=loglevel,
set_no_abort=set_no_abort,
)
if clear_on_connect:
mapdl.clear()
return mapdl
# finally, if running on CI/CD, connect to the default instance
else:
mapdl = MapdlGrpc(
ip=ip,
port=port,
cleanup_on_exit=False,
loglevel=loglevel,
set_no_abort=set_no_abort,
)
if clear_on_connect:
mapdl.clear()
return mapdl
if not start_instance:
if clear_on_connect is None:
clear_on_connect = False
mapdl = MapdlGrpc(
ip=ip,
port=port,
cleanup_on_exit=False,
loglevel=loglevel,
set_no_abort=set_no_abort,
)
if clear_on_connect:
mapdl.clear()
return mapdl
# verify executable
if exec_file is None:
# Load cached path
exec_file = get_ansys_path()
if exec_file is None:
raise FileNotFoundError(
"Invalid exec_file path or cannot load cached "
"mapdl path. Enter one manually by specifying "
"exec_file="
)
else: # verify ansys exists at this location
if not os.path.isfile(exec_file):
raise FileNotFoundError(
f'Invalid MAPDL executable at "{exec_file}"\n'
"Enter one manually using exec_file="
)
# verify run location
if run_location is None:
temp_dir = tempfile.gettempdir()
run_location = os.path.join(temp_dir, "ansys_%s" % random_string(10))
if not os.path.isdir(run_location):
try:
os.mkdir(run_location)
except:
raise RuntimeError(
"Unable to create the temporary working "
f'directory "{run_location}"\n'
"Please specify run_location="
)
else:
if not os.path.isdir(run_location):
raise FileNotFoundError(f'"{run_location}" is not a valid directory')
# verify no lock file and the mode is valid
check_lock_file(run_location, jobname, override)
mode = check_mode(mode, _version_from_path(exec_file))
# cache start parameters
additional_switches = _validate_add_sw(
additional_switches, exec_file, kwargs.pop("force_intel", False)
)
if isinstance(license_type, str):
# In newer license server versions an invalid license name just gets discarded and produces no effect or warning.
# For example:
# ```bash
# mapdl.exe -p meba    # works fine because 'meba' is a valid license in ALLOWABLE_LICENSES.
# mapdl.exe -p yoyoyo  # The -p flag is ignored and the default license is used.
# ```
#
# Older versions may raise an error instead, but this is not confirmed.
license_type = license_type.lower().strip()
if "enterprise" in license_type and "solver" not in license_type:
license_type = "ansys"
elif "enterprise" in license_type and "solver" in license_type:
license_type = "meba"
elif "premium" in license_type:
license_type = "mech_2"
elif "pro" in license_type:
license_type = "mech_1"
elif license_type not in ALLOWABLE_LICENSES:
allow_lics = [f"'{each}'" for each in ALLOWABLE_LICENSES]
warn_text = (
f"The keyword argument 'license_type' value ('{license_type}') is not a recognized license name or has been deprecated.\n"
+ "PyMAPDL will still try to use it, but in older versions you might experience problems connecting to the server.\n"
+ f"Recognized license names: {' '.join(allow_lics)}"
)
warnings.warn(warn_text, UserWarning)
additional_switches += " -p " + license_type
LOG.debug(
f"Using specified license name '{license_type}' in the 'license_type' keyword argument."
)
elif "-p " in additional_switches:
# There is already a license request in additional switches.
license_type = re.findall(r"-p \b(\w*)", additional_switches)[
0
] # getting only the first product license.
if license_type not in ALLOWABLE_LICENSES:
allow_lics = [f"'{each}'" for each in ALLOWABLE_LICENSES]
warn_text = (
f"The additional switch product value ('-p {license_type}') is not a recognized license name or has been deprecated.\n"
+ "PyMAPDL will still try to use it, but in older versions you might experience problems connecting to the server.\n"
+ f"Recognized license names: {' '.join(allow_lics)}"
)
warnings.warn(warn_text, UserWarning)
LOG.warning(warn_text)
LOG.debug(
f"Using specified license name '{license_type}' in the additional switches parameter."
)
elif license_type is not None:
raise TypeError("The argument 'license_type' only accepts str or None.")
start_parm = {
"exec_file": exec_file,
"run_location": run_location,
"additional_switches": additional_switches,
"jobname": jobname,
"nproc": nproc,
"print_com": print_com,
}
if mode in ["console", "corba"]:
start_parm["start_timeout"] = start_timeout
else:
start_parm["ram"] = ram
start_parm["override"] = override
start_parm["timeout"] = start_timeout
# Check the license server
if license_server_check:
# configure the license-check timeout to be 90% of the MAPDL
# startup timeout.
lic_check = LicenseChecker(timeout=start_timeout * 0.9, verbose=verbose_mapdl)
lic_check.start()
try:
if mode == "console":
from ansys.mapdl.core.mapdl_console import MapdlConsole
mapdl = MapdlConsole(loglevel=loglevel, log_apdl=log_apdl, **start_parm)
elif mode == "corba":
try:
# pending deprecation to ansys-mapdl-corba
from ansys.mapdl.core.mapdl_corba import MapdlCorba
except ImportError:
raise ImportError(
"To use this feature, install the MAPDL CORBA package"
" with:\n\npip install ansys_corba"
) from None
broadcast = kwargs.get("log_broadcast", False)
mapdl = MapdlCorba(
loglevel=loglevel,
log_apdl=log_apdl,
log_broadcast=broadcast,
verbose=verbose_mapdl,
**start_parm,
)
elif mode == "grpc":
port, actual_run_location = launch_grpc(
port=port, verbose=verbose_mapdl, ip=ip, **start_parm
)
mapdl = MapdlGrpc(
ip=ip,
port=port,
cleanup_on_exit=cleanup_on_exit,
loglevel=loglevel,
set_no_abort=set_no_abort,
remove_temp_files=kwargs.pop("remove_temp_files", False),
log_apdl=log_apdl,
**start_parm,
)
if run_location is None:
mapdl._path = actual_run_location
except Exception as exception:
# Failed to launch for some reason. Check if failure was due
# to the license check
if license_server_check:
lic_check.check()
# pass
raise exception
return mapdl
| def launch_mapdl(
exec_file=None,
run_location=None,
jobname="file",
nproc=2,
ram=None,
mode=None,
override=False,
loglevel="ERROR",
additional_switches="",
start_timeout=120,
port=None,
cleanup_on_exit=True,
start_instance=None,
ip=None,
clear_on_connect=True,
log_apdl=None,
verbose_mapdl=False,
license_server_check=True,
license_type=None,
print_com=False,
**kwargs,
) -> _MapdlCore:
"""Start MAPDL locally.
Parameters
----------
exec_file : str, optional
The location of the MAPDL executable. Will use the cached
location when left at the default ``None``.
run_location : str, optional
MAPDL working directory. Defaults to a temporary working
directory. If directory doesn't exist, will create one.
jobname : str, optional
MAPDL jobname. Defaults to ``'file'``.
nproc : int, optional
Number of processors. Defaults to 2.
ram : float, optional
Fixed amount of memory to request for MAPDL. If ``None``,
then MAPDL will use as much as available on the host machine.
mode : str, optional
Mode to launch MAPDL. Must be one of the following:
- ``'grpc'``
- ``'corba'``
- ``'console'``
The ``'grpc'`` mode is available on ANSYS 2021R1 or newer and
provides the best performance and stability. The ``'corba'``
mode is available from v17.0 and newer and is given legacy
support. This mode requires the additional
``ansys_corba`` module. Finally, the ``'console'`` mode
is for legacy use on Linux only, prior to v17.0. This console
mode is pending deprecation.
override : bool, optional
Attempts to delete the lock file at the run_location.
Useful when a prior MAPDL session has exited prematurely and
the lock file has not been deleted.
loglevel : str, optional
Sets which messages are printed to the console. ``'INFO'``
prints out all ANSYS messages, ``'WARNING'`` prints only
messages containing ANSYS warnings, and ``'ERROR'`` logs only
error messages.
additional_switches : str, optional
Additional switches for MAPDL, for example ``'aa_r'``, the
academic research license, would be added with:
- ``additional_switches="-aa_r"``
Avoid adding switches like -i -o or -b as these are already
included to start up the MAPDL server. See the notes
section for additional details.
start_timeout : float, optional
Maximum allowable time to connect to the MAPDL server.
port : int
Port to launch MAPDL gRPC on. Final port will be the first
port available after (or including) this port. Defaults to
50052. You can also override the default behavior of this
keyword argument with the environment variable
``PYMAPDL_PORT=<VALID PORT>``
custom_bin : str, optional
Path to the MAPDL custom executable. On release 2020R2 on
Linux, if ``None``, will check to see if you have
``ansys.mapdl_bin`` installed and use that executable.
cleanup_on_exit : bool, optional
Exit MAPDL when python exits or the mapdl Python instance is
garbage collected.
start_instance : bool, optional
When False, connect to an existing MAPDL instance at ``ip``
and ``port``, which default to ``'127.0.0.1'`` at 50052.
Otherwise, launch a local instance of MAPDL. You can also
override the default behavior of this keyword argument with
the environment variable ``PYMAPDL_START_INSTANCE=FALSE``.
ip : str, optional
Used only when ``start_instance`` is ``False``. If provided,
it will force ``start_instance`` to be ``False``.
You can also provide a hostname as an alternative to an IP address.
Defaults to ``'127.0.0.1'``. You can also override the
default behavior of this keyword argument with the
environment variable ``PYMAPDL_IP=<IP>``.
clear_on_connect : bool, optional
Defaults to ``True``, giving you a fresh environment when
connecting to MAPDL. When ``start_instance`` is specified
it defaults to ``False``.
log_apdl : str, optional
Enables logging every APDL command to the local disk. This
can be used to "record" all the commands that are sent to
MAPDL via PyMAPDL so a script can be run within MAPDL without
PyMAPDL. This string is the path of the output file (e.g.
``log_apdl='pymapdl_log.txt'``). By default this is disabled.
remove_temp_files : bool, optional
Removes temporary files on exit. Default ``False``.
verbose_mapdl : bool, optional
Enable printing of all output when launching and running
MAPDL. This should be used for debugging only as output can
be tracked within pymapdl. Default ``False``.
license_server_check : bool, optional
Check if the license server is available if MAPDL fails to
start. Only available on ``mode='grpc'``. Defaults ``True``.
license_type : str, optional
Enable license type selection. You can input a string for its
license name (for example ``'meba'`` or ``'ansys'``) or its description
("enterprise solver" or "enterprise" respectively).
You can also use legacy licenses (for example ``'aa_t_a'``) but it will
also raise a warning. If it is not used (``None``), no specific license
will be requested, being up to the license server to provide a specific
license type. Default is ``None``.
print_com : bool, optional
Print the command ``/COM`` arguments to the standard output.
Default ``False``.
Returns
-------
ansys.mapdl.core.mapdl._MapdlCore
An instance of Mapdl. Type depends on the selected ``mode``.
Notes
-----
These are the MAPDL switch options as of 2020R2 applicable for
running MAPDL as a service via gRPC. Excluded switches such as
``"-j"`` are either not applicable or are set via keyword arguments.
\-acc <device>
Enables the use of GPU hardware. See GPU
Accelerator Capability in the Parallel Processing Guide for more
information.
\-amfg
Enables the additive manufacturing capability. Requires
an additive manufacturing license. For general information about
this feature, see AM Process Simulation in ANSYS Workbench.
\-ansexe <executable>
Activates a custom mechanical APDL executable.
In the ANSYS Workbench environment, activates a custom
Mechanical APDL executable.
\-custom <executable>
Calls a custom Mechanical APDL executable
See Running Your Custom Executable in the Programmer's Reference
for more information.
\-db value
Initial memory allocation
Defines the portion of workspace (memory) to be used as the
initial allocation for the database. The default is 1024
MB. Specify a negative number to force a fixed size throughout
the run; useful on small memory systems.
\-dis
Enables Distributed ANSYS
See the Parallel Processing Guide for more information.
\-dvt
Enables ANSYS DesignXplorer advanced task (add-on).
Requires DesignXplorer.
\-l <language>
Specifies a language file to use other than English
This option is valid only if you have a translated message file
in an appropriately named subdirectory in
``/ansys_inc/v201/ansys/docu`` or
``Program Files\\ANSYS\\Inc\\V201\\ANSYS\\docu``
\-m <workspace>
Specifies the total size of the workspace
Workspace (memory) in megabytes used for the initial
allocation. If you omit the ``-m`` option, the default is 2 GB
(2048 MB). Specify a negative number to force a fixed size
throughout the run.
\-machines <IP>
Specifies the distributed machines
Machines on which to run a Distributed ANSYS analysis. See
Starting Distributed ANSYS in the Parallel Processing Guide for
more information.
\-mpi <value>
Specifies the type of MPI to use.
See the Parallel Processing Guide for more information.
\-mpifile <appfile>
Specifies an existing MPI file
Specifies an existing MPI file (appfile) to be used in a
Distributed ANSYS run. See Using MPI Files in the Parallel
Processing Guide for more information.
\-na <value>
Specifies the number of GPU accelerator devices
Number of GPU devices per machine or compute node when running
with the GPU accelerator feature. See GPU Accelerator Capability
in the Parallel Processing Guide for more information.
\-name <value>
Defines Mechanical APDL parameters
Set mechanical APDL parameters at program start-up. The parameter
name must be at least two characters long. For details about
parameters, see the ANSYS Parametric Design Language Guide.
\-p <productname>
ANSYS session product
Defines the ANSYS session product that will run during the
session. For more detailed information about the ``-p`` option,
see Selecting an ANSYS Product via the Command Line.
\-ppf <license feature name>
HPC license
Specifies which HPC license to use during a parallel processing
run. See HPC Licensing in the Parallel Processing Guide for more
information.
\-smp
Enables shared-memory parallelism.
See the Parallel Processing Guide for more information.
If the environment is configured to use `PyPIM <https://pypim.docs.pyansys.com>`_
and ``start_instance`` is ``True``, then starting the instance will be delegated to PyPIM.
In this event, most of the options will be ignored and the server side configuration will
be used.
Examples
--------
Launch MAPDL using the best protocol.
>>> from ansys.mapdl.core import launch_mapdl
>>> mapdl = launch_mapdl()
Run MAPDL with shared memory parallel and specify the location of
the Ansys binary.
>>> exec_file = 'C:/Program Files/ANSYS Inc/v201/ansys/bin/win64/ANSYS201.exe'
>>> mapdl = launch_mapdl(exec_file, additional_switches='-smp')
Connect to an existing instance of MAPDL at IP 192.168.1.30 and
port 50001. This is only available using the latest ``'grpc'``
mode.
>>> mapdl = launch_mapdl(start_instance=False, ip='192.168.1.30',
... port=50001)
Force the usage of the CORBA protocol.
>>> mapdl = launch_mapdl(mode='corba')
Run MAPDL using the console mode (available only on Linux).
>>> mapdl = launch_mapdl('/ansys_inc/v194/ansys/bin/ansys194',
... mode='console')
"""
# These parameters are partially used for unit testing
set_no_abort = kwargs.get("set_no_abort", True)
if ip is None:
ip = os.environ.get("PYMAPDL_IP", LOCALHOST)
else: # pragma: no cover
start_instance = False
ip = socket.gethostbyname(ip) # Converting ip or hostname to ip
check_valid_ip(ip) # double check
if port is None:
port = int(os.environ.get("PYMAPDL_PORT", MAPDL_DEFAULT_PORT))
check_valid_port(port)
# Start MAPDL with PyPIM if the environment is configured for it
# and the user did not pass a directive on how to launch it.
if _HAS_PIM and exec_file is None and pypim.is_configured():
LOG.info("Starting MAPDL remotely. The startup configuration will be ignored.")
return launch_remote_mapdl(cleanup_on_exit=cleanup_on_exit)
# connect to an existing instance if enabled
if start_instance is None:
start_instance = check_valid_start_instance(
os.environ.get("PYMAPDL_START_INSTANCE", True)
)
# special handling when building the gallery outside of CI. This
# creates an instance of mapdl the first time if PYMAPDL start instance
# is False.
if pymapdl.BUILDING_GALLERY: # pragma: no cover
# launch an instance of pymapdl if it does not already exist and
# we're allowed to start instances
if start_instance and GALLERY_INSTANCE[0] is None:
mapdl = launch_mapdl(
start_instance=True,
cleanup_on_exit=False,
loglevel=loglevel,
set_no_abort=set_no_abort,
)
GALLERY_INSTANCE[0] = {"ip": mapdl._ip, "port": mapdl._port}
return mapdl
# otherwise, connect to the existing gallery instance if available
elif GALLERY_INSTANCE[0] is not None:
mapdl = MapdlGrpc(
ip=GALLERY_INSTANCE[0]["ip"],
port=GALLERY_INSTANCE[0]["port"],
cleanup_on_exit=False,
loglevel=loglevel,
set_no_abort=set_no_abort,
)
if clear_on_connect:
mapdl.clear()
return mapdl
# finally, if running on CI/CD, connect to the default instance
else:
mapdl = MapdlGrpc(
ip=ip,
port=port,
cleanup_on_exit=False,
loglevel=loglevel,
set_no_abort=set_no_abort,
)
if clear_on_connect:
mapdl.clear()
return mapdl
if not start_instance:
if clear_on_connect is None:
clear_on_connect = False
mapdl = MapdlGrpc(
ip=ip,
port=port,
cleanup_on_exit=False,
loglevel=loglevel,
set_no_abort=set_no_abort,
)
if clear_on_connect:
mapdl.clear()
return mapdl
# verify executable
if exec_file is None:
# Load cached path
exec_file = get_ansys_path()
if exec_file is None:
raise FileNotFoundError(
"Invalid exec_file path or cannot load cached "
"mapdl path. Enter one manually by specifying "
"exec_file="
)
else: # verify ansys exists at this location
if not os.path.isfile(exec_file):
raise FileNotFoundError(
f'Invalid MAPDL executable at "{exec_file}"\n'
"Enter one manually using exec_file="
)
# verify run location
if run_location is None:
temp_dir = tempfile.gettempdir()
run_location = os.path.join(temp_dir, "ansys_%s" % random_string(10))
if not os.path.isdir(run_location):
try:
os.mkdir(run_location)
except:
raise RuntimeError(
"Unable to create the temporary working "
f'directory "{run_location}"\n'
"Please specify run_location="
)
else:
if not os.path.isdir(run_location):
raise FileNotFoundError(f'"{run_location}" is not a valid directory')
# verify no lock file and the mode is valid
check_lock_file(run_location, jobname, override)
mode = check_mode(mode, _version_from_path(exec_file))
# cache start parameters
additional_switches = _validate_add_sw(
additional_switches, exec_file, kwargs.pop("force_intel", False)
)
if isinstance(license_type, str):
# In newer license server versions an invalid license name just gets discarded and produces no effect or warning.
# For example:
# ```bash
# mapdl.exe -p meba    # works fine because 'meba' is a valid license in ALLOWABLE_LICENSES.
# mapdl.exe -p yoyoyo  # The -p flag is ignored and the default license is used.
# ```
#
# Older versions may raise an error instead, but this is not confirmed.
license_type = license_type.lower().strip()
if "enterprise" in license_type and "solver" not in license_type:
license_type = "ansys"
elif "enterprise" in license_type and "solver" in license_type:
license_type = "meba"
elif "premium" in license_type:
license_type = "mech_2"
elif "pro" in license_type:
license_type = "mech_1"
elif license_type not in ALLOWABLE_LICENSES:
allow_lics = [f"'{each}'" for each in ALLOWABLE_LICENSES]
warn_text = (
f"The keyword argument 'license_type' value ('{license_type}') is not a recognized license name or has been deprecated.\n"
+ "PyMAPDL will still try to use it, but in older versions you might experience problems connecting to the server.\n"
+ f"Recognized license names: {' '.join(allow_lics)}"
)
warnings.warn(warn_text, UserWarning)
additional_switches += " -p " + license_type
LOG.debug(
f"Using specified license name '{license_type}' in the 'license_type' keyword argument."
)
elif "-p " in additional_switches:
# There is already a license request in additional switches.
license_type = re.findall(r"-p \b(\w*)", additional_switches)[
0
] # getting only the first product license.
if license_type not in ALLOWABLE_LICENSES:
allow_lics = [f"'{each}'" for each in ALLOWABLE_LICENSES]
warn_text = (
f"The additional switch product value ('-p {license_type}') is not a recognized license name or has been deprecated.\n"
+ "PyMAPDL will still try to use it, but in older versions you might experience problems connecting to the server.\n"
+ f"Recognized license names: {' '.join(allow_lics)}"
)
warnings.warn(warn_text, UserWarning)
LOG.warning(warn_text)
LOG.debug(
f"Using specified license name '{license_type}' in the additional switches parameter."
)
elif license_type is not None:
raise TypeError("The argument 'license_type' only accepts str or None.")
start_parm = {
"exec_file": exec_file,
"run_location": run_location,
"additional_switches": additional_switches,
"jobname": jobname,
"nproc": nproc,
"print_com": print_com,
}
if mode in ["console", "corba"]:
start_parm["start_timeout"] = start_timeout
else:
start_parm["ram"] = ram
start_parm["override"] = override
start_parm["timeout"] = start_timeout
# Check the license server
if license_server_check:
# configure the license-check timeout to be 90% of the MAPDL
# startup timeout.
lic_check = LicenseChecker(timeout=start_timeout * 0.9, verbose=verbose_mapdl)
lic_check.start()
try:
if mode == "console":
from ansys.mapdl.core.mapdl_console import MapdlConsole
mapdl = MapdlConsole(loglevel=loglevel, log_apdl=log_apdl, **start_parm)
elif mode == "corba":
try:
# pending deprecation to ansys-mapdl-corba
from ansys.mapdl.core.mapdl_corba import MapdlCorba
except ImportError:
raise ImportError(
"To use this feature, install the MAPDL CORBA package"
" with:\n\npip install ansys_corba"
) from None
broadcast = kwargs.get("log_broadcast", False)
mapdl = MapdlCorba(
loglevel=loglevel,
log_apdl=log_apdl,
log_broadcast=broadcast,
verbose=verbose_mapdl,
**start_parm,
)
elif mode == "grpc":
port, actual_run_location = launch_grpc(
port=port, verbose=verbose_mapdl, ip=ip, **start_parm
)
mapdl = MapdlGrpc(
ip=ip,
port=port,
cleanup_on_exit=cleanup_on_exit,
loglevel=loglevel,
set_no_abort=set_no_abort,
remove_temp_files=kwargs.pop("remove_temp_files", False),
log_apdl=log_apdl,
**start_parm,
)
if run_location is None:
mapdl._path = actual_run_location
except Exception as exception:
# Failed to launch for some reason. Check if failure was due
# to the license check
if license_server_check:
lic_check.check()
# pass
raise exception
return mapdl
|
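The pair above documents two environment-variable overrides, ``PYMAPDL_START_INSTANCE`` and ``PYMAPDL_PORT``, that are read inside ``launch_mapdl``. A minimal usage sketch, assuming ``ansys-mapdl-core`` is installed and a MAPDL gRPC server is already listening on the chosen port (50055 is an arbitrary example value):

```python
import os

# Both variables are read by launch_mapdl() itself, so they must be set
# before the call (values here are example assumptions).
os.environ["PYMAPDL_START_INSTANCE"] = "FALSE"
os.environ["PYMAPDL_PORT"] = "50055"

from ansys.mapdl.core import launch_mapdl

# Connects to an existing server at 127.0.0.1:50055 instead of launching MAPDL.
mapdl = launch_mapdl()
print(mapdl)
```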
20,386 | def streams_list_from_dict(streams):
"Converts a streams dictionary into a streams list"
params = {}
for k, v in streams.items():
if 'panel' in sys.modules:
from panel.depends import param_value_if_widget
v = param_value_if_widget(v)
if (isinstance(v, param.Parameter)
and (isinstance(v.owner, param.Parameterized)
or issubclass(v.owner, param.Parameterized))):
params[k] = v
else:
raise TypeError('Cannot handle value %r in streams dictionary' % v)
return Params.from_params(params)
| def streams_list_from_dict(streams):
"Converts a streams dictionary into a streams list"
params = {}
for k, v in streams.items():
if 'panel' in sys.modules:
from panel.depends import param_value_if_widget
v = param_value_if_widget(v)
if isinstance(v, param.Parameter) and v.owner is not None:
params[k] = v
else:
raise TypeError('Cannot handle value %r in streams dictionary' % v)
return Params.from_params(params)
|
48,855 | def _get_dag_defaults(dag: Optional["DAG"], task_group: Optional["TaskGroup"]) -> Tuple[dict, ParamsDict]:
if not dag:
return {}, ParamsDict()
dag_args = copy.copy(dag.default_args)
dag_params = copy.deepcopy(dag.params)
if task_group:
if not isinstance(task_group.default_args, Dict) and task_group.default_args is not None:
raise TypeError("default_args must be a dictionary")
else:
dag_args.update(task_group.default_args)
return dag_args, dag_params
| def _get_dag_defaults(dag: Optional["DAG"], task_group: Optional["TaskGroup"]) -> Tuple[dict, ParamsDict]:
if not dag:
return {}, ParamsDict()
dag_args = copy.copy(dag.default_args)
dag_params = copy.deepcopy(dag.params)
if task_group:
if task_group.default_args and not isinstance(task_group.default_args, collections.abc.Mapping):
raise TypeError("default_args must be a mapping")
dag_args.update(task_group.default_args)
return dag_args, dag_params
|
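The change in the pair above replaces an ``isinstance(..., Dict)`` test with ``collections.abc.Mapping``. A small standalone sketch of the practical difference; ``MappingProxyType`` is used only as an example of a mapping that is not a ``dict``:

```python
import collections.abc
from types import MappingProxyType

candidates = [
    {"owner": "airflow"},                    # a plain dict
    MappingProxyType({"owner": "airflow"}),  # a read-only mapping, not a dict
]

for obj in candidates:
    print(
        type(obj).__name__,
        isinstance(obj, dict),                     # what the original check amounts to
        isinstance(obj, collections.abc.Mapping),  # what the modified check accepts
    )
# dict True True
# mappingproxy False True
```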
30,336 | def get_group():
"""
retrieve a single Group
"""
group_type = demisto.args().get('group_type')
group_id = int(demisto.args().get('group_id'))
response = get_group_request(group_type, group_id)
if group_type == 'adversaries':
data = response.get('data', {}).get('adversarie', {})
if group_type == 'campaigns':
data = response.get('data', {}).get('campaign', {})
if group_type == 'documents':
data = response.get('data', {}).get('document', {})
if group_type == 'emails':
data = response.get('data', {}).get('email', {})
if group_type == 'events':
data = response.get('data', {}).get('event', {})
if group_type == 'incidents':
data = response.get('data', {}).get('incident', {})
if group_type == 'intrusionSets':
data = response.get('data', {}).get('intrusionSet', {})
if group_type == 'reports':
data = response.get('data', {}).get('report', {})
if group_type == 'signatures':
data = response.get('data', {}).get('signature', {})
if group_type == 'threats':
data = response.get('data', {}).get('threat', {})
if response.get('status') == 'Success':
contents = {
'ID': data.get('id'),
'Name': data.get('name'),
'Owner': data.get('owner'),
'DateAdded': data.get('dateAdded'),
'EventDate': data.get('eventDate'),
'Status': data.get('status')
}
else:
return_error(response.get('message'))
context = {
'TC.Group(val.ID && val.ID === obj.ID)': contents
}
return_outputs(
tableToMarkdown('Group information', contents, removeNull=True),
context
)
| def get_group():
"""
retrieve a single Group
"""
group_type = demisto.args().get('group_type')
group_id = int(demisto.args().get('group_id'))
response = get_group_request(group_type, group_id)
if group_type == 'adversaries':
data = response.get('data', {}).get('adversarie', {})
if group_type == 'campaigns':
data = response.get('data', {}).get('campaign', {})
if group_type == 'documents':
data = response.get('data', {}).get('document', {})
if group_type == 'emails':
data = response.get('data', {}).get('email', {})
if group_type == 'events':
data = response.get('data', {}).get('event', {})
if group_type == 'incidents':
data = response.get('data', {}).get('incident', {})
if group_type == 'intrusionSets':
data = response.get('data', {}).get('intrusionSet', {})
if group_type == 'reports':
data = response.get('data', {}).get('report', {})
if group_type == 'signatures':
data = response.get('data', {}).get('signature', {})
if group_type == 'threats':
data = response.get('data', {}).get('threat', {})
if response.get('status') == 'Success':
contents = {
'ID': data.get('id'),
'Name': data.get('name'),
'Owner': data.get('owner'),
'DateAdded': data.get('dateAdded'),
'EventDate': data.get('eventDate'),
'Status': data.get('status')
}
else:
return_error(response.get('message'))
context = {
'TC.Group(val.ID && val.ID === obj.ID)': contents
}
return_outputs(
tableToMarkdown('ThreatConnect Group information', contents, removeNull=True),
context
)
|
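Both versions above resolve the plural ``group_type`` argument to the singular key of the API response through a chain of ``if`` statements. A hedged sketch of the same lookup written as a table; this is an illustrative alternative, not the integration's code, ``extract_group_data`` is a hypothetical helper name, and the ``'adversarie'`` key is kept exactly as it appears in the entry:

```python
# Maps the plural group_type argument to the singular key of the API response.
GROUP_TYPE_TO_KEY = {
    'adversaries': 'adversarie',
    'campaigns': 'campaign',
    'documents': 'document',
    'emails': 'email',
    'events': 'event',
    'incidents': 'incident',
    'intrusionSets': 'intrusionSet',
    'reports': 'report',
    'signatures': 'signature',
    'threats': 'threat',
}


def extract_group_data(response: dict, group_type: str) -> dict:
    """Return the group payload for the given group_type, or {} if unknown."""
    key = GROUP_TYPE_TO_KEY.get(group_type)
    if key is None:
        return {}
    return response.get('data', {}).get(key, {})
```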
49,032 | def maybe_deferred_to_future(d: Deferred) -> Union[Deferred, Future]:
""" Converts a Deferred to something that can be awaited in a callback or other user coroutine.
If the asyncio reactor is installed, coroutines are wrapped into Futures, and only Futures can be
awaited inside them. Otherwise, coroutines are wrapped into Deferreds and Deferreds can be awaited
directly inside them.
"""
if not is_asyncio_reactor_installed():
return d
else:
return deferred_to_future(d)
| def maybe_deferred_to_future(d: Deferred) -> Union[Deferred, Future]:
"""Return *d* as an object that can be awaited from a :ref:`Scrapy callable
defined as a coroutine <coroutine-support>`.
What you can await in Scrapy callables defined as coroutines depends on the
value of :setting:`TWISTED_REACTOR`:
- When not using the asyncio reactor, you can only await on
:class:`~twisted.internet.defer.Deferred` objects.
- When :ref:`using the asyncio reactor <install-asyncio>`, you can only
await on :class:`asyncio.Future` objects.
If you want to write code that uses ``Deferred`` objects but works with any
reactor, use this function on all ``Deferred`` objects::
class MySpider(Spider):
...
async def parse(self, response):
d = treq.get('https://example.com/additional')
extra_response = await maybe_deferred_to_future(d)
"""
if not is_asyncio_reactor_installed():
return d
else:
return deferred_to_future(d)
|
39,296 | def test_extract_surface():
# create a single hexahedral cell
lin_pts = np.array([[-1, -1, -1], # node 0
[ 1, -1, -1], # node 1
[ 1, 1, -1], # node 2
[-1, 1, -1], # node 3
[-1, -1, 1], # node 4
[ 1, -1, 1], # node 5
[ 1, 1, 1], # node 6
[-1, 1, 1]], np.double) # node 7
quad_pts = np.array([
(lin_pts[1] + lin_pts[0])/2.0,
(lin_pts[1] + lin_pts[2])/2.0,
(lin_pts[2] + lin_pts[3])/2.0,
(lin_pts[3] + lin_pts[0])/2.0,
(lin_pts[4] + lin_pts[5])/2.0,
(lin_pts[5] + lin_pts[6])/2.0,
(lin_pts[6] + lin_pts[7])/2.0,
(lin_pts[7] + lin_pts[4])/2.0,
(lin_pts[0] + lin_pts[4])/2.0,
(lin_pts[1] + lin_pts[5])/2.0,
(lin_pts[2] + lin_pts[6])/2.0,
(lin_pts[3] + lin_pts[7])/2.0], np.double)
# introduce a minor variation to the location of the mid-side points
quad_pts += np.random.random(quad_pts.shape)*0.25
pts = np.vstack((lin_pts, quad_pts))
cells = np.asarray([[20, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]], dtype=np.int64)
celltypes = np.array([VTK_QUADRATIC_HEXAHEDRON])
if pyvista._vtk.VTK9:
grid = pyvista.UnstructuredGrid(cells, celltypes, pts)
else:
grid = pyvista.UnstructuredGrid(np.array([0]), cells, celltypes, pts)
# expect each face to be divided 6 times since's it has a midside node
surf = grid.extract_surface()
assert surf.n_faces == 36
# expect each face to be divided several more times than the linear extraction
surf_subdivided = grid.extract_surface(subdivision=5)
assert surf_subdivided.n_faces > surf.n_faces
| def test_extract_surface():
# create a single hexahedral cell
lin_pts = np.array([[-1, -1, -1], # node 0
[ 1, -1, -1], # node 1
[ 1, 1, -1], # node 2
[-1, 1, -1], # node 3
[-1, -1, 1], # node 4
[ 1, -1, 1], # node 5
[ 1, 1, 1], # node 6
[-1, 1, 1]], np.double) # node 7
quad_pts = np.array([
(lin_pts[1] + lin_pts[0])/2.0,
(lin_pts[1] + lin_pts[2])/2.0,
(lin_pts[2] + lin_pts[3])/2.0,
(lin_pts[3] + lin_pts[0])/2.0,
(lin_pts[4] + lin_pts[5])/2.0,
(lin_pts[5] + lin_pts[6])/2.0,
(lin_pts[6] + lin_pts[7])/2.0,
(lin_pts[7] + lin_pts[4])/2.0,
(lin_pts[0] + lin_pts[4])/2.0,
(lin_pts[1] + lin_pts[5])/2.0,
(lin_pts[2] + lin_pts[6])/2.0,
(lin_pts[3] + lin_pts[7])/2.0], np.double)
# introduce a minor variation to the location of the mid-side points
quad_pts += np.random.random(quad_pts.shape)*0.25
pts = np.vstack((lin_pts, quad_pts))
cells = np.asarray([[20, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]], dtype=np.int64)
celltypes = np.array([VTK_QUADRATIC_HEXAHEDRON])
if pyvista._vtk.VTK9:
grid = pyvista.UnstructuredGrid(cells, celltypes, pts)
else:
grid = pyvista.UnstructuredGrid(np.array([0]), cells, celltypes, pts)
# expect each face to be divided 6 times since it has a midside node
surf = grid.extract_surface()
assert surf.n_faces == 36
# expect each face to be divided several more times than the linear extraction
surf_subdivided = grid.extract_surface(subdivision=5)
assert surf_subdivided.n_faces > surf.n_faces
|
32,317 | def get_upload_data(packs_results_file_path: str, stage: str) -> Tuple[dict, dict, dict, dict]:
""" Loads the packs_results.json file to get the successful and failed packs together with uploaded images dicts
Args:
packs_results_file_path (str): The path to the file
stage (str): can be BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING or
BucketUploadFlow.UPLOAD_PACKS_TO_MARKETPLACE_STORAGE
Returns:
dict: The successful packs dict
dict: The failed packs dict
dict : The successful private packs dict
dict: The images data dict
"""
if os.path.exists(packs_results_file_path):
packs_results_file = load_json(packs_results_file_path)
stage_data: dict = packs_results_file.get(stage, {})
successful_packs_dict = stage_data.get(BucketUploadFlow.SUCCESSFUL_PACKS, {})
failed_packs_dict = stage_data.get(BucketUploadFlow.FAILED_PACKS, {})
successful_private_packs_dict = stage_data.get(BucketUploadFlow.SUCCESSFUL_PRIVATE_PACKS, {})
images_data_dict = stage_data.get(BucketUploadFlow.IMAGES, {})
return successful_packs_dict, failed_packs_dict, successful_private_packs_dict, images_data_dict
return {}, {}, {}, {}
| def get_upload_data(packs_results_file_path: str, stage: str) -> Tuple[dict, dict, dict, dict]:
""" Loads the packs_results.json file to get the successful and failed packs together with uploaded images dicts
Args:
packs_results_file_path (str): The path to the file
stage (str): can be BucketUploadFlow.PREPARE_CONTENT_FOR_TESTING or
BucketUploadFlow.UPLOAD_PACKS_TO_MARKETPLACE_STORAGE
Returns:
dict: The successful packs dict
dict: The failed packs dict
dict : The successful private packs dict
dict: The images data dict
"""
if os.path.exists(packs_results_file_path):
packs_results_file = load_json(packs_results_file_path)
stage_data: dict = packs_results_file.get(stage, {})
successful_packs_dict = stage_data.get(BucketUploadFlow.SUCCESSFUL_PACKS, {})
failed_packs_dict = stage_data.get(BucketUploadFlow.FAILED_PACKS, {})
successful_private_packs_dict = stage_data.get(BucketUploadFlow.SUCCESSFUL_PRIVATE_PACKS, {})
images_data_dict = stage_data.get(BucketUploadFlow.IMAGES, {})
return successful_packs_dict, failed_packs_dict, successful_private_packs_dict, images_data_dict
return {}, {}, {}, {}
|
15,356 | def _extract_blueprint_from_community_topic(
url,
topic,
) -> ImportedBlueprint:
"""Extract a blueprint from a community post JSON.
Async friendly.
"""
block_content = None
blueprint = None
post = topic["post_stream"]["posts"][0]
for match in COMMUNITY_CODE_BLOCK.finditer(post["cooked"]):
block_syntax, block_content = match.groups()
if block_syntax not in ("auto", "yaml"):
continue
block_content = block_content.strip()
try:
data = yaml.parse_yaml(block_content)
except HomeAssistantError:
if block_syntax == "yaml":
raise
continue
if not is_blueprint_config(data):
continue
blueprint = Blueprint(data)
break
if blueprint is None:
return None
return ImportedBlueprint(url, topic["slug"], block_content, blueprint)
| def _extract_blueprint_from_community_topic(
url,
topic,
) -> Optional[ImportedBlueprint]:
"""Extract a blueprint from a community post JSON.
Async friendly.
"""
block_content = None
blueprint = None
post = topic["post_stream"]["posts"][0]
for match in COMMUNITY_CODE_BLOCK.finditer(post["cooked"]):
block_syntax, block_content = match.groups()
if block_syntax not in ("auto", "yaml"):
continue
block_content = block_content.strip()
try:
data = yaml.parse_yaml(block_content)
except HomeAssistantError:
if block_syntax == "yaml":
raise
continue
if not is_blueprint_config(data):
continue
blueprint = Blueprint(data)
break
if blueprint is None:
return None
return ImportedBlueprint(url, topic["slug"], block_content, blueprint)
|
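The only change in the pair above is the return annotation, ``ImportedBlueprint`` becoming ``Optional[ImportedBlueprint]``, since the function can return ``None``. A minimal sketch of what the annotation buys; the function and names here are hypothetical:

```python
from typing import Optional


def first_yaml_block(blocks: list) -> Optional[str]:
    """Return the first 'auto' or 'yaml' block content, or None if absent."""
    for syntax, content in blocks:
        if syntax in ("auto", "yaml"):
            return content
    # With a plain `-> str` annotation a type checker such as mypy would
    # flag this return; Optional[str] makes the None path explicit.
    return None
```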
39,634 | def convert_weights_disk_format(params):
for name, param in params.items():
if name.endswith("_weight"):
scaling_name = name[0:-6] + "scaling"
if scaling_name in params:
b_max = mx.nd.contrib.intgemm_maxabsolute(param.data())
params[scaling_name].set_data(b_max / 127.0)
quantized = mx.nd.contrib.intgemm_prepare_data(param.data(), b_max)
param.set_data(quantized)
param.dtype = C.DTYPE_INT8
| def convert_weights_disk_format(params: mx.gluon.parameter.ParameterDict):
for name, param in params.items():
if name.endswith("_weight"):
scaling_name = name[0:-6] + "scaling"
if scaling_name in params:
b_max = mx.nd.contrib.intgemm_maxabsolute(param.data())
params[scaling_name].set_data(b_max / 127.0)
quantized = mx.nd.contrib.intgemm_prepare_data(param.data(), b_max)
param.set_data(quantized)
param.dtype = C.DTYPE_INT8
|
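The function above stores an int8 copy of each ``_weight`` tensor together with a per-tensor scaling factor of ``max|w| / 127``. A numpy sketch of that arithmetic only; it does not reproduce MXNet's ``intgemm`` kernels or their data layout:

```python
import numpy as np


def quantize_int8(weights: np.ndarray):
    """Symmetric int8 quantization: scaling = max|w| / 127."""
    b_max = np.abs(weights).max()
    scaling = b_max / 127.0
    quantized = np.clip(np.round(weights / scaling), -127, 127).astype(np.int8)
    return quantized, scaling


def dequantize(quantized: np.ndarray, scaling: float) -> np.ndarray:
    return quantized.astype(np.float32) * scaling


w = np.random.uniform(-0.5, 0.5, size=(4, 8)).astype(np.float32)
q, s = quantize_int8(w)
# Round-trip error is bounded by half a quantization step.
print(np.max(np.abs(dequantize(q, s) - w)) <= s / 2 + 1e-6)
```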
31,125 | def ip_details_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
ip command: Returns IP details for a list of IPs
"""
ip_addresses_string = args.get('ip')
ip_addresses_array = argToList(ip_addresses_string)
invalid_ips = []
for ip_address in ip_addresses_array: # Check for Valid IP Inputs
if not is_ip_valid(ip_address, accept_v6_ips=True):
invalid_ips.append(ip_address)
if invalid_ips:
return_warning('The following IP Addresses were found invalid: {}'.format(', '.join(invalid_ips)))
enhanced = argToBoolean(args.get('enhanced'))
response = client.get_ip_details(ip_addresses_array, enhanced)
ip_list = response.get("data", {}).get("results", {})
ip_data_list, ip_standard_list = [], []
for ip_data in ip_list:
score = to_dbot_score(ip_data.get("score", 0))
dbot_score = Common.DBotScore(
indicator=ip_data.get("name2"),
indicator_type=DBotScoreType.IP,
integration_name='CTIX',
score=score
)
ip_standard_context = Common.IP(
ip=ip_data.get("name2"),
asn=ip_data.get("asn"),
dbot_score=dbot_score
)
ip_standard_list.append(ip_standard_context)
ip_data_list.append(ip_data)
readable_output = tableToMarkdown('IP List', ip_data_list, removeNull=True)
return CommandResults(
readable_output=readable_output,
outputs_prefix='CTIX.IP',
outputs_key_field='name2',
outputs=ip_data_list,
indicators=ip_standard_list
)
| def ip_details_command(client: Client, args: Dict[str, Any]) -> CommandResults:
"""
ip command: Returns IP details for a list of IPs
"""
ip_addresses_string = args.get('ip')
ip_addresses_array = argToList(ip_addresses_string)
invalid_ips = []
for ip_address in ip_addresses_array: # Check for Valid IP Inputs
if not is_ip_valid(ip_address, accept_v6_ips=True):
invalid_ips.append(ip_address)
if invalid_ips:
return_warning('The following IP Addresses were found invalid: {}'.format(', '.join(invalid_ips)), exit=len(invalid_ips) == len(ip_addresses_array))
enhanced = argToBoolean(args.get('enhanced'))
response = client.get_ip_details(ip_addresses_array, enhanced)
ip_list = response.get("data", {}).get("results", {})
ip_data_list, ip_standard_list = [], []
for ip_data in ip_list:
score = to_dbot_score(ip_data.get("score", 0))
dbot_score = Common.DBotScore(
indicator=ip_data.get("name2"),
indicator_type=DBotScoreType.IP,
integration_name='CTIX',
score=score
)
ip_standard_context = Common.IP(
ip=ip_data.get("name2"),
asn=ip_data.get("asn"),
dbot_score=dbot_score
)
ip_standard_list.append(ip_standard_context)
ip_data_list.append(ip_data)
readable_output = tableToMarkdown('IP List', ip_data_list, removeNull=True)
return CommandResults(
readable_output=readable_output,
outputs_prefix='CTIX.IP',
outputs_key_field='name2',
outputs=ip_data_list,
indicators=ip_standard_list
)
|
23,180 | def _cov_agg(_t, levels, ddof, std=False, sort=False):
sums = []
muls = []
counts = []
# sometimes we get a series back from the concat combiner
t = list(_t)
cols = t[0][0].columns
for x, mul, n, col_mapping in t:
sums.append(x)
muls.append(mul)
counts.append(n)
col_mapping = col_mapping
total_sums = concat(sums).groupby(level=levels, sort=sort).sum()
total_muls = concat(muls).groupby(level=levels, sort=sort).sum()
total_counts = concat(counts).groupby(level=levels).sum()
result = (
concat([total_sums, total_muls, total_counts], axis=1)
.groupby(level=levels)
.apply(_cov_finalizer, cols=cols, std=std)
)
inv_col_mapping = {v: k for k, v in col_mapping.items()}
idx_vals = result.index.names
idx_mapping = list()
# when index is None we probably have selected a particular column
# df.groupby('a')[['b']].cov()
if len(idx_vals) == 1 and all(n is None for n in idx_vals):
idx_vals = list(inv_col_mapping.keys() - set(total_sums.columns))
for _, val in enumerate(idx_vals):
idx_name = inv_col_mapping.get(val, val)
idx_mapping.append(idx_name)
if len(result.columns.levels[0]) < len(col_mapping):
# removing index from col_mapping (produces incorrect multiindexes)
try:
col_mapping.pop(idx_name)
except KeyError:
# when slicing the col_map will not have the index
pass
keys = list(col_mapping.keys())
for level in range(len(result.columns.levels)):
result.columns = result.columns.set_levels(keys, level=level)
result.index.set_names(idx_mapping, inplace=True)
# stacking can lead to a sorted index
s_result = result.stack(dropna=False)
assert is_dataframe_like(s_result)
return s_result
| def _cov_agg(_t, levels, ddof, std=False, sort=False):
sums = []
muls = []
counts = []
# sometimes we get a series back from the concat combiner
t = list(_t)
cols = t[0][0].columns
for x, mul, n, col_mapping in t:
sums.append(x)
muls.append(mul)
counts.append(n)
col_mapping = col_mapping
total_sums = concat(sums).groupby(level=levels, sort=sort).sum()
total_muls = concat(muls).groupby(level=levels, sort=sort).sum()
total_counts = concat(counts).groupby(level=levels).sum()
result = (
concat([total_sums, total_muls, total_counts], axis=1)
.groupby(level=levels)
.apply(_cov_finalizer, cols=cols, std=std)
)
inv_col_mapping = {v: k for k, v in col_mapping.items()}
idx_vals = result.index.names
idx_mapping = list()
# when index is None we probably have selected a particular column
# df.groupby('a')[['b']].cov()
if len(idx_vals) == 1 and all(n is None for n in idx_vals):
idx_vals = list(inv_col_mapping.keys() - set(total_sums.columns))
for val in idx_vals:
idx_name = inv_col_mapping.get(val, val)
idx_mapping.append(idx_name)
if len(result.columns.levels[0]) < len(col_mapping):
# removing index from col_mapping (produces incorrect multiindexes)
try:
col_mapping.pop(idx_name)
except KeyError:
# when slicing the col_map will not have the index
pass
keys = list(col_mapping.keys())
for level in range(len(result.columns.levels)):
result.columns = result.columns.set_levels(keys, level=level)
result.index.set_names(idx_mapping, inplace=True)
# stacking can lead to a sorted index
s_result = result.stack(dropna=False)
assert is_dataframe_like(s_result)
return s_result
|
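Both versions of ``_cov_agg`` above combine per-chunk sums, pairwise-product sums and counts before finalizing the covariance. A standalone numpy sketch of that combination step; it shows the formula only, not dask's groupby machinery:

```python
import numpy as np


def chunk_stats(x, y):
    # Per-chunk statistics are additive, so chunks can be combined freely.
    return x.sum(), y.sum(), (x * y).sum(), len(x)


def cov_from_chunks(chunks, ddof=1):
    sx = sum(c[0] for c in chunks)
    sy = sum(c[1] for c in chunks)
    sxy = sum(c[2] for c in chunks)
    n = sum(c[3] for c in chunks)
    # cov(x, y) = (sum(x*y) - sum(x) * sum(y) / n) / (n - ddof)
    return (sxy - sx * sy / n) / (n - ddof)


rng = np.random.default_rng(0)
x, y = rng.normal(size=1000), rng.normal(size=1000)
chunks = [chunk_stats(x[i:i + 250], y[i:i + 250]) for i in range(0, 1000, 250)]
print(np.isclose(cov_from_chunks(chunks), np.cov(x, y, ddof=1)[0, 1]))  # True
```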
832 | def periodicity(f, symbol, check=False):
"""
Tests the given function for periodicity in the given symbol.
Parameters
==========
f : Expr.
The concerned function.
symbol : Symbol
The variable for which the period is to be determined.
check : Boolean, optional
The flag to verify whether the value being returned is a period or not.
Returns
=======
period
The period of the function is returned.
`None` is returned when the function is aperiodic or has a complex period.
The value of `0` is returned as the period of a constant function.
Raises
======
NotImplementedError
The value of the period computed cannot be verified.
Notes
=====
Currently, we do not support functions with a complex period.
The period of functions having complex periodic values such
as `exp`, `sinh` is evaluated to `None`.
The value returned might not be the "fundamental" period of the given
function i.e. it may not be the smallest periodic value of the function.
The verification of the period through the `check` flag is not reliable
due to internal simplification of the given expression. Hence, it is set
to `False` by default.
If `f` has `period` attribute as trigonometric functions do, it will be used
to determine the periodicity.
Examples
========
>>> from sympy import Symbol, sin, cos, tan, exp, Function, pi, S
>>> from sympy.calculus.util import periodicity
>>> x = Symbol('x')
>>> f = sin(x) + sin(2*x) + sin(3*x)
>>> periodicity(f, x)
2*pi
>>> periodicity(sin(x)*cos(x), x)
pi
>>> periodicity(exp(tan(2*x) - 1), x)
pi/2
>>> periodicity(sin(4*x)**cos(2*x), x)
pi
>>> periodicity(exp(x), x)
>>> class F(Function):
... nargs = 1
... def _period(self, general_period, symbol): # This emulates trigonometric function's method.
... arg = self.args[0]
... if not arg.has(symbol):
... return S.Zero
... if arg == symbol:
... return general_period
... if symbol is arg.free_symbols:
... if arg.is_Mul:
... g, h = arg.as_independent(symbol)
... if h == symbol:
... return general_period/abs(g)
... if arg.is_Add:
... a, h = arg.as_independent(symbol)
... g, h = h.as_independent(symbol, as_Add=False)
... if h == symbol:
... return general_period/abs(g)
... raise NotImplementedError("Use the periodicity function instead.")
... def period(self, symbol):
... return self._period(2*pi, symbol)
>>> periodicity(F(x), x)
2*pi
"""
from sympy.core.mod import Mod
from sympy.core.relational import Relational
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.complexes import Abs
from sympy.functions.elementary.trigonometric import (
TrigonometricFunction, sin, cos, csc, sec)
from sympy.simplify.simplify import simplify
from sympy.solvers.decompogen import decompogen
from sympy.polys.polytools import degree
f = _sympify(f)
temp = Dummy('x', real=True)
f = f.subs(symbol, temp)
symbol = temp
def _check(orig_f, period):
'''Return the checked period or raise an error.'''
new_f = orig_f.subs(symbol, symbol + period)
if new_f.equals(orig_f):
return period
else:
raise NotImplementedError(filldedent('''
The period of the given function cannot be verified.
When `%s` was replaced with `%s + %s` in `%s`, the result
was `%s` which was not recognized as being the same as
the original function.
So either the period was wrong or the two forms were
not recognized as being equal.
Set check=False to obtain the value.''' %
(symbol, symbol, period, orig_f, new_f)))
orig_f = f
period = None
if isinstance(f, Relational):
f = f.lhs - f.rhs
f = simplify(f)
if symbol not in f.free_symbols:
return S.Zero
if hasattr(f, 'period'):
try:
period = f.period(symbol)
except NotImplementedError:
pass
if isinstance(f, Abs):
arg = f.args[0]
if isinstance(arg, (sec, csc, cos)):
# all but tan and cot might have a
# period that is half as large
# so recast as sin
arg = sin(arg.args[0])
period = periodicity(arg, symbol)
if period is not None and isinstance(arg, sin):
# the argument of Abs was a trigonometric other than
# cot or tan; test to see if the half-period
# is valid. Abs(arg) has behaviour equivalent to
# orig_f, so use that for test:
orig_f = Abs(arg)
try:
return _check(orig_f, period/2)
except NotImplementedError as err:
if check:
raise NotImplementedError(err)
# else let new orig_f and period be
# checked below
if isinstance(f, exp):
if re(f).has(f) or im(f).has(f): # Avoid infinite loop
period = periodicity(f.args[0], symbol)
elif im(f) != 0:
period_real = periodicity(re(f), symbol)
period_imag = periodicity(im(f), symbol)
if period_real is not None and period_imag is not None:
period = lcim([period_real, period_imag])
if f.is_Pow:
base, expo = f.args
base_has_sym = base.has(symbol)
expo_has_sym = expo.has(symbol)
if base_has_sym and not expo_has_sym:
period = periodicity(base, symbol)
elif expo_has_sym and not base_has_sym:
period = periodicity(expo, symbol)
else:
period = _periodicity(f.args, symbol)
elif f.is_Mul:
coeff, g = f.as_independent(symbol, as_Add=False)
if isinstance(g, TrigonometricFunction) or coeff is not S.One:
period = periodicity(g, symbol)
else:
period = _periodicity(g.args, symbol)
elif f.is_Add:
k, g = f.as_independent(symbol)
if k is not S.Zero:
return periodicity(g, symbol)
period = _periodicity(g.args, symbol)
elif isinstance(f, Mod):
a, n = f.args
if a == symbol:
period = n
elif isinstance(a, TrigonometricFunction):
period = periodicity(a, symbol)
elif hasattr(a, 'period'):
try:
period = periodicity(a, symbol)
except NotImplementedError:
pass
#check if 'f' is linear in 'symbol'
elif (a.is_polynomial(symbol) and degree(a, symbol) == 1 and
symbol not in n.free_symbols):
period = Abs(n / a.diff(symbol))
elif period is None:
from sympy.solvers.decompogen import compogen
g_s = decompogen(f, symbol)
num_of_gs = len(g_s)
if num_of_gs > 1:
for index, g in enumerate(reversed(g_s)):
start_index = num_of_gs - 1 - index
g = compogen(g_s[start_index:], symbol)
if g != orig_f and g != f: # Fix for issue 12620
period = periodicity(g, symbol)
if period is not None:
break
if period is not None:
if check:
return _check(orig_f, period)
return period
return None
| def periodicity(f, symbol, check=False):
"""
Tests the given function for periodicity in the given symbol.
Parameters
==========
f : Expr.
The concerned function.
symbol : Symbol
The variable for which the period is to be determined.
check : Boolean, optional
The flag to verify whether the value being returned is a period or not.
Returns
=======
period
The period of the function is returned.
`None` is returned when the function is aperiodic or has a complex period.
The value of `0` is returned as the period of a constant function.
Raises
======
NotImplementedError
The value of the period computed cannot be verified.
Notes
=====
Currently, we do not support functions with a complex period.
The period of functions having complex periodic values such
as `exp`, `sinh` is evaluated to `None`.
The value returned might not be the "fundamental" period of the given
function i.e. it may not be the smallest periodic value of the function.
The verification of the period through the `check` flag is not reliable
due to internal simplification of the given expression. Hence, it is set
to `False` by default.
If `f` has `period` attribute as trigonometric functions do, it will be used
to determine the periodicity.
Examples
========
>>> from sympy import Symbol, sin, cos, tan, exp, Function, pi, S
>>> from sympy.calculus.util import periodicity
>>> x = Symbol('x')
>>> f = sin(x) + sin(2*x) + sin(3*x)
>>> periodicity(f, x)
2*pi
>>> periodicity(sin(x)*cos(x), x)
pi
>>> periodicity(exp(tan(2*x) - 1), x)
pi/2
>>> periodicity(sin(4*x)**cos(2*x), x)
pi
>>> periodicity(exp(x), x)
>>> class F(Function):
... nargs = 1
... def _period(self, general_period, symbol): # This emulates trigonometric function's method.
... arg = self.args[0]
... if not arg.has(symbol):
... return S.Zero
... if arg == symbol:
... return general_period
... if symbol in arg.free_symbols:
... if arg.is_Mul:
... g, h = arg.as_independent(symbol)
... if h == symbol:
... return general_period/abs(g)
... if arg.is_Add:
... a, h = arg.as_independent(symbol)
... g, h = h.as_independent(symbol, as_Add=False)
... if h == symbol:
... return general_period/abs(g)
... raise NotImplementedError("Use the periodicity function instead.")
... def period(self, symbol):
... return self._period(2*pi, symbol)
>>> periodicity(F(x), x)
2*pi
"""
from sympy.core.mod import Mod
from sympy.core.relational import Relational
from sympy.functions.elementary.exponential import exp
from sympy.functions.elementary.complexes import Abs
from sympy.functions.elementary.trigonometric import (
TrigonometricFunction, sin, cos, csc, sec)
from sympy.simplify.simplify import simplify
from sympy.solvers.decompogen import decompogen
from sympy.polys.polytools import degree
f = _sympify(f)
temp = Dummy('x', real=True)
f = f.subs(symbol, temp)
symbol = temp
def _check(orig_f, period):
'''Return the checked period or raise an error.'''
new_f = orig_f.subs(symbol, symbol + period)
if new_f.equals(orig_f):
return period
else:
raise NotImplementedError(filldedent('''
The period of the given function cannot be verified.
When `%s` was replaced with `%s + %s` in `%s`, the result
was `%s` which was not recognized as being the same as
the original function.
So either the period was wrong or the two forms were
not recognized as being equal.
Set check=False to obtain the value.''' %
(symbol, symbol, period, orig_f, new_f)))
orig_f = f
period = None
if isinstance(f, Relational):
f = f.lhs - f.rhs
f = simplify(f)
if symbol not in f.free_symbols:
return S.Zero
if hasattr(f, 'period'):
try:
period = f.period(symbol)
except NotImplementedError:
pass
if isinstance(f, Abs):
arg = f.args[0]
if isinstance(arg, (sec, csc, cos)):
# all but tan and cot might have
# a period that is half as large
# so recast as sin
arg = sin(arg.args[0])
period = periodicity(arg, symbol)
if period is not None and isinstance(arg, sin):
# the argument of Abs was a trigonometric other than
# cot or tan; test to see if the half-period
# is valid. Abs(arg) has behaviour equivalent to
# orig_f, so use that for test:
orig_f = Abs(arg)
try:
return _check(orig_f, period/2)
except NotImplementedError as err:
if check:
raise NotImplementedError(err)
# else let new orig_f and period be
# checked below
if isinstance(f, exp):
if re(f).has(f) or im(f).has(f): # Avoid infinite loop
period = periodicity(f.args[0], symbol)
elif im(f) != 0:
period_real = periodicity(re(f), symbol)
period_imag = periodicity(im(f), symbol)
if period_real is not None and period_imag is not None:
period = lcim([period_real, period_imag])
if f.is_Pow:
base, expo = f.args
base_has_sym = base.has(symbol)
expo_has_sym = expo.has(symbol)
if base_has_sym and not expo_has_sym:
period = periodicity(base, symbol)
elif expo_has_sym and not base_has_sym:
period = periodicity(expo, symbol)
else:
period = _periodicity(f.args, symbol)
elif f.is_Mul:
coeff, g = f.as_independent(symbol, as_Add=False)
if isinstance(g, TrigonometricFunction) or coeff is not S.One:
period = periodicity(g, symbol)
else:
period = _periodicity(g.args, symbol)
elif f.is_Add:
k, g = f.as_independent(symbol)
if k is not S.Zero:
return periodicity(g, symbol)
period = _periodicity(g.args, symbol)
elif isinstance(f, Mod):
a, n = f.args
if a == symbol:
period = n
elif isinstance(a, TrigonometricFunction):
period = periodicity(a, symbol)
elif hasattr(a, 'period'):
try:
period = periodicity(a, symbol)
except NotImplementedError:
pass
#check if 'f' is linear in 'symbol'
elif (a.is_polynomial(symbol) and degree(a, symbol) == 1 and
symbol not in n.free_symbols):
period = Abs(n / a.diff(symbol))
elif period is None:
from sympy.solvers.decompogen import compogen
g_s = decompogen(f, symbol)
num_of_gs = len(g_s)
if num_of_gs > 1:
for index, g in enumerate(reversed(g_s)):
start_index = num_of_gs - 1 - index
g = compogen(g_s[start_index:], symbol)
if g != orig_f and g != f: # Fix for issue 12620
period = periodicity(g, symbol)
if period is not None:
break
if period is not None:
if check:
return _check(orig_f, period)
return period
return None
|
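A minimal usage sketch for the periodicity routine above, assuming a stock SymPy installation; the first and last expressions come from the docstring, and Abs(cos(x)) is added only to exercise the Abs branch.
from sympy import Symbol, sin, cos, exp, Abs
from sympy.calculus.util import periodicity

x = Symbol('x')
# Sum of sinusoids: the overall period is the least common multiple of the parts.
print(periodicity(sin(x) + sin(2*x) + sin(3*x), x))   # 2*pi
# The Abs branch above recasts cos as sin and verifies the half period.
print(periodicity(Abs(cos(x)), x))                    # pi
# Functions with a complex period, such as exp, evaluate to None.
print(periodicity(exp(x), x))                         # None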
57,138 | def validate(pending_deletion_request):
"""Checks that the domain object is valid.
Raises:
ValidationError. The field pseudonymizable_entity_mappings
contains wrong key.
"""
for key in pending_deletion_request.pseudonymizable_entity_mappings.keys():
if key not in [name.value for name in models.NAMES]:
raise utils.ValidationError(
'pseudonymizable_entity_mappings contain wrong key')
def validate_pending_deletion_request(pending_deletion_request):
"""Checks that the pending deletion request is valid.
Raises:
ValidationError. The field pseudonymizable_entity_mappings
contains wrong key.
"""
for key in pending_deletion_request.pseudonymizable_entity_mappings.keys():
if key not in [name.value for name in models.NAMES]:
raise utils.ValidationError(
'pseudonymizable_entity_mappings contain wrong key')
|
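The validator above is a key-membership check against an allowed enum of model names. A self-contained sketch of the same pattern follows; ModelNames, ValidationError and validate_mappings are stand-ins for the Oppia names, not the real API.
import enum

class ModelNames(enum.Enum):       # stand-in for models.NAMES
    USER = 'user'
    EXPLORATION = 'exploration'

class ValidationError(Exception):  # stand-in for utils.ValidationError
    pass

def validate_mappings(mappings):
    """Reject any mapping keyed by a value outside the allowed enum."""
    allowed = {name.value for name in ModelNames}
    for key in mappings:
        if key not in allowed:
            raise ValidationError(
                'pseudonymizable_entity_mappings contain wrong key')

validate_mappings({'user': {}, 'exploration': {}})  # passes silently
# validate_mappings({'collection': {}})             # would raise ValidationError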
26,431 | def light(app):
""" Apply Light Theme to the Qt application instance.
Args:
app (QApplication): QApplication instance.
"""
lightPalette = QPalette()
# base
lightPalette.setColor(QPalette.WindowText, QColor(0, 0, 0))
lightPalette.setColor(QPalette.Button, QColor(240, 240, 240))
lightPalette.setColor(QPalette.Light, QColor(180, 180, 180))
lightPalette.setColor(QPalette.Midlight, QColor(200, 200, 200))
lightPalette.setColor(QPalette.Dark, QColor(225, 225, 225))
lightPalette.setColor(QPalette.Text, QColor(0, 0, 0))
lightPalette.setColor(QPalette.BrightText, QColor(0, 0, 0))
lightPalette.setColor(QPalette.ButtonText, QColor(0, 0, 0))
lightPalette.setColor(QPalette.Base, QColor(237, 237, 237))
lightPalette.setColor(QPalette.Window, QColor(240, 240, 240))
lightPalette.setColor(QPalette.Shadow, QColor(20, 20, 20))
lightPalette.setColor(QPalette.Highlight, QColor(76, 163, 224))
lightPalette.setColor(QPalette.HighlightedText, QColor(0, 0, 0))
lightPalette.setColor(QPalette.Link, QColor(0, 162, 232))
lightPalette.setColor(QPalette.AlternateBase, QColor(225, 225, 225))
lightPalette.setColor(QPalette.ToolTipBase, QColor(240, 240, 240))
lightPalette.setColor(QPalette.ToolTipText, QColor(0, 0, 0))
lightPalette.setColor(QPalette.LinkVisited, QColor(222, 222, 222))
# disabled
lightPalette.setColor(QPalette.Disabled, QPalette.WindowText,
QColor(115, 115, 115))
lightPalette.setColor(QPalette.Disabled, QPalette.Text,
QColor(115, 115, 115))
lightPalette.setColor(QPalette.Disabled, QPalette.ButtonText,
QColor(115, 115, 115))
lightPalette.setColor(QPalette.Disabled, QPalette.Highlight,
QColor(190, 190, 190))
lightPalette.setColor(QPalette.Disabled, QPalette.HighlightedText,
QColor(115, 115, 115))
app.style().unpolish(app)
app.setPalette(lightPalette)
app.setStyle('Fusion')
| def light(app):
""" Apply Light Theme to the Qt application instance.
Args:
app (QApplication): QApplication instance.
"""
light_palette = QPalette()
# base
light_palette.setColor(QPalette.WindowText, QColor(0, 0, 0))
light_palette.setColor(QPalette.Button, QColor(240, 240, 240))
light_palette.setColor(QPalette.Light, QColor(180, 180, 180))
light_palette.setColor(QPalette.Midlight, QColor(200, 200, 200))
light_palette.setColor(QPalette.Dark, QColor(225, 225, 225))
light_palette.setColor(QPalette.Text, QColor(0, 0, 0))
light_palette.setColor(QPalette.BrightText, QColor(0, 0, 0))
light_palette.setColor(QPalette.ButtonText, QColor(0, 0, 0))
light_palette.setColor(QPalette.Base, QColor(237, 237, 237))
light_palette.setColor(QPalette.Window, QColor(240, 240, 240))
light_palette.setColor(QPalette.Shadow, QColor(20, 20, 20))
light_palette.setColor(QPalette.Highlight, QColor(76, 163, 224))
light_palette.setColor(QPalette.HighlightedText, QColor(0, 0, 0))
light_palette.setColor(QPalette.Link, QColor(0, 162, 232))
light_palette.setColor(QPalette.AlternateBase, QColor(225, 225, 225))
light_palette.setColor(QPalette.ToolTipBase, QColor(240, 240, 240))
light_palette.setColor(QPalette.ToolTipText, QColor(0, 0, 0))
light_palette.setColor(QPalette.LinkVisited, QColor(222, 222, 222))
# disabled
light_palette.setColor(QPalette.Disabled, QPalette.WindowText,
QColor(115, 115, 115))
light_palette.setColor(QPalette.Disabled, QPalette.Text,
QColor(115, 115, 115))
light_palette.setColor(QPalette.Disabled, QPalette.ButtonText,
QColor(115, 115, 115))
light_palette.setColor(QPalette.Disabled, QPalette.Highlight,
QColor(190, 190, 190))
light_palette.setColor(QPalette.Disabled, QPalette.HighlightedText,
QColor(115, 115, 115))
app.style().unpolish(app)
app.setPalette(light_palette)
app.setStyle('Fusion')
|
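A usage sketch for the light() theme helper above, assuming PyQt5 and assuming the module defining light() imports QPalette and QColor from the same binding; the widget is arbitrary.
import sys
from PyQt5.QtWidgets import QApplication, QPushButton

app = QApplication(sys.argv)
light(app)                         # installs the light palette and the Fusion style
button = QPushButton('Themed button')
button.show()
sys.exit(app.exec_())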
51,612 | def apply_thresholds(input, thresholds, choices):
"""Makes a choice based on an input and thresholds.
From list of ``choices``, it selects one of them based on a list of
inputs, depending on the position of each ``input`` whithin a list of
``thresholds``. It does so for each ``input`` provided.
Args:
input: A list of inputs to make a choice.
thresholds: A list of thresholds to choose.
choices: A list of the possible choices.
Returns:
:obj:`numpy.ndarray` of :obj:`float`:
A list of the choices made.
Raises:
:exc:`AssertionError`: When the number of ``thresholds`` (t) and the
number of choices (c) are not either t == c or t == c - 1.
Examples:
>>> apply_thresholds(np.array([4]), [5, 7], [10, 15, 20])
array([10])
>>> apply_thresholds(np.array([5]), [5, 7], [10, 15, 20])
array([10])
>>> apply_thresholds(np.array([6]), [5, 7], [10, 15, 20])
array([15])
>>> apply_thresholds(np.array([8]), [5, 7], [10, 15, 20])
array([20])
>>> apply_thresholds(np.array([10]), [5, 7, 9], [10, 15, 20])
array([0])
"""
condlist = [input <= threshold for threshold in thresholds]
if len(condlist) == len(choices) - 1:
# If a choice is provided for input > highest threshold, last condition must be true to return it.
condlist += [True]
assert len(condlist) == len(choices), \
"apply_thresholds must be called with the same number of thresholds than choices, or one more choice"
return numpy.select(condlist, choices)
| def apply_thresholds(input, thresholds, choices):
"""Makes a choice based on an input and thresholds.
From a list of ``choices``, this function selects one of these values based on a list
of inputs, depending on the value of each ``input`` within a list of
``thresholds``.
Args:
input: A list of inputs to make a choice.
thresholds: A list of thresholds to choose.
choices: A list of the possible choices.
Returns:
:obj:`numpy.ndarray` of :obj:`float`:
A list of the choices made.
Raises:
:exc:`AssertionError`: When the number of ``thresholds`` (t) and the
number of choices (c) are not either t == c or t == c - 1.
Examples:
>>> apply_thresholds(np.array([4]), [5, 7], [10, 15, 20])
array([10])
>>> apply_thresholds(np.array([5]), [5, 7], [10, 15, 20])
array([10])
>>> apply_thresholds(np.array([6]), [5, 7], [10, 15, 20])
array([15])
>>> apply_thresholds(np.array([8]), [5, 7], [10, 15, 20])
array([20])
>>> apply_thresholds(np.array([10]), [5, 7, 9], [10, 15, 20])
array([0])
"""
condlist = [input <= threshold for threshold in thresholds]
if len(condlist) == len(choices) - 1:
# If a choice is provided for input > highest threshold, last condition must be true to return it.
condlist += [True]
assert len(condlist) == len(choices), \
"apply_thresholds must be called with the same number of thresholds than choices, or one more choice"
return numpy.select(condlist, choices)
|
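The selection mechanics in isolation: numpy.select returns, element-wise, the first choice whose condition holds and falls back to 0 where none do, which is why the last docstring example yields array([0]).
import numpy as np

inputs = np.array([4, 6, 8, 10])
thresholds = [5, 7, 9]
choices = [10, 15, 20]
condlist = [inputs <= t for t in thresholds]  # one boolean mask per threshold
print(np.select(condlist, choices))           # [10 15 20  0]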
25,129 | def rmdir(store, path: Path = None):
"""Remove all items under the given path. If `store` provides a `rmdir` method,
this will be called, otherwise will fall back to implementation via the
`MutableMapping` interface."""
path = normalize_storage_path(path)
if hasattr(store, 'rmdir'):
# pass through
store.rmdir(path)
elif hasattr(store.fs, 'rmdir'):
store.fs.rmdir(path)
else:
# slow version, delete one key at a time
_rmdir_from_keys(store, path)
| def rmdir(store, path: Path = None):
"""Remove all items under the given path. If `store` provides a `rmdir` method,
this will be called, otherwise will fall back to implementation via the
`MutableMapping` interface."""
path = normalize_storage_path(path)
if hasattr(store, 'rmdir'):
# pass through
store.rmdir(path)
elif hasattr(store, 'fs') and hasattr(store.fs, 'rmdir'):
store.fs.rmdir(path)
else:
# slow version, delete one key at a time
_rmdir_from_keys(store, path)
|
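When the store has neither its own rmdir nor an fs.rmdir, the function above falls back to deleting keys one at a time through the MutableMapping interface. A stand-in sketch of that fallback over a plain dict store (not the library's actual _rmdir_from_keys) follows.
def _rmdir_from_keys_sketch(store, path=""):
    # Delete every key that lives under the given path prefix.
    prefix = path + "/" if path else ""
    for key in list(store.keys()):
        if key.startswith(prefix):
            del store[key]

store = {"group/a": b"1", "group/b": b"2", "other": b"3"}
_rmdir_from_keys_sketch(store, "group")
print(store)   # {'other': b'3'}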
55,968 | def convert_dataset_for_tensorflow(dataset, non_label_column_names, batch_size, labels, dataset_mode, drop_remainder):
"""Converts a Hugging Face dataset to a Tensorflow Dataset or Sequence object. We usually only want a Dataset when
we're training on TPU, as we get the nice feature of variable-length batches when we put the data in a Sequence
object instead."""
def densify_ragged_batch(features, labels=None):
features = {
feature: ragged_tensor.to_tensor(shape=batch_shape[feature]) for feature, ragged_tensor in features.items()
}
if labels is None:
return features
else:
return features, labels
feature_keys = list(set(dataset.features.keys()) - set(non_label_column_names + ["label"]))
if dataset_mode == "variable_batch":
batch_shape = {key: None for key in feature_keys}
data = {key: tf.ragged.constant(dataset[key]) for key in feature_keys}
if labels:
labels = tf.convert_to_tensor(np.array(dataset["label"]))
tf_dataset = tf.data.Dataset.from_tensor_slices((data, labels))
else:
tf_dataset = tf.data.Dataset.from_tensor_slices(data)
tf_dataset = (
tf_dataset.shuffle(buffer_size=len(dataset))
.batch(batch_size=batch_size, drop_remainder=drop_remainder)
.map(densify_ragged_batch)
)
return tf_dataset
elif dataset_mode == "constant_batch":
data = {key: tf.ragged.constant(dataset[key]) for key in feature_keys}
batch_shape = {
key: tf.concat(([batch_size], ragged_tensor.bounding_shape()[1:]), axis=0)
for key, ragged_tensor in data.items()
}
if labels:
labels = tf.convert_to_tensor(np.array(dataset["label"]))
tf_dataset = tf.data.Dataset.from_tensor_slices((data, labels))
else:
tf_dataset = tf.data.Dataset.from_tensor_slices(data)
tf_dataset = (
tf_dataset.shuffle(buffer_size=len(dataset))
.batch(batch_size=batch_size, drop_remainder=drop_remainder)
.map(densify_ragged_batch)
)
return tf_dataset
else:
raise ValueError("Unknown dataset mode!")
| def convert_dataset_for_tensorflow(dataset, non_label_column_names, batch_size, labels, dataset_mode, drop_remainder):
"""
Converts a Hugging Face dataset to a Tensorflow Dataset or Sequence object. We usually only want a Dataset when
we're training on TPU, as we get the nice feature of variable-length batches when we put the data in a Sequence
object instead.
"""
def densify_ragged_batch(features, labels=None):
features = {
feature: ragged_tensor.to_tensor(shape=batch_shape[feature]) for feature, ragged_tensor in features.items()
}
if labels is None:
return features
else:
return features, labels
feature_keys = list(set(dataset.features.keys()) - set(non_label_column_names + ["label"]))
if dataset_mode == "variable_batch":
batch_shape = {key: None for key in feature_keys}
data = {key: tf.ragged.constant(dataset[key]) for key in feature_keys}
if labels:
labels = tf.convert_to_tensor(np.array(dataset["label"]))
tf_dataset = tf.data.Dataset.from_tensor_slices((data, labels))
else:
tf_dataset = tf.data.Dataset.from_tensor_slices(data)
tf_dataset = (
tf_dataset.shuffle(buffer_size=len(dataset))
.batch(batch_size=batch_size, drop_remainder=drop_remainder)
.map(densify_ragged_batch)
)
return tf_dataset
elif dataset_mode == "constant_batch":
data = {key: tf.ragged.constant(dataset[key]) for key in feature_keys}
batch_shape = {
key: tf.concat(([batch_size], ragged_tensor.bounding_shape()[1:]), axis=0)
for key, ragged_tensor in data.items()
}
if labels:
labels = tf.convert_to_tensor(np.array(dataset["label"]))
tf_dataset = tf.data.Dataset.from_tensor_slices((data, labels))
else:
tf_dataset = tf.data.Dataset.from_tensor_slices(data)
tf_dataset = (
tf_dataset.shuffle(buffer_size=len(dataset))
.batch(batch_size=batch_size, drop_remainder=drop_remainder)
.map(densify_ragged_batch)
)
return tf_dataset
else:
raise ValueError("Unknown dataset mode!")
|
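The densify step in isolation, assuming TensorFlow 2.x; the feature name input_ids is made up. Passing shape=None pads each ragged feature to its own bounding shape (the variable_batch case), while a fixed shape reproduces the constant_batch case.
import tensorflow as tf

features = {"input_ids": tf.ragged.constant([[1, 2], [3, 4, 5], [6]])}
# variable_batch: pad only to the bounding shape of this batch
dense = {k: v.to_tensor(shape=None) for k, v in features.items()}
print(dense["input_ids"].shape)                              # (3, 3)
# constant_batch: pad every batch to the same per-feature shape
print(features["input_ids"].to_tensor(shape=[3, 6]).shape)   # (3, 6)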
30,797 | def link_forensics_artifacts_name_command(file_names, client: Client, args: dict) -> CommandResults:
event_id = args.get("event_id", 0)
if len(file_names) > 0:
outputs = {
'eventId': int(event_id),
'Artifacts': file_names
}
return CommandResults(
outputs_prefix='Illusive.Event',
outputs_key_field='eventId',
outputs=outputs
)
else:
readable_output = f'### event id {event_id} has no artifacts'
return CommandResults(
readable_output=readable_output,
outputs_prefix='Illusive.Event',
outputs_key_field='eventId',
outputs={'eventId': int(event_id)}
)
| def link_forensics_artifacts_name_command(file_names: list, client: Client, args: dict) -> CommandResults:
event_id = args.get("event_id", 0)
if len(file_names) > 0:
outputs = {
'eventId': int(event_id),
'Artifacts': file_names
}
return CommandResults(
outputs_prefix='Illusive.Event',
outputs_key_field='eventId',
outputs=outputs
)
else:
readable_output = f'### event id {event_id} has no artifacts'
return CommandResults(
readable_output=readable_output,
outputs_prefix='Illusive.Event',
outputs_key_field='eventId',
outputs={'eventId': int(event_id)}
)
|
40,205 | def intersection_polyline_plane(polyline, plane, expected_number_of_intersections=None, tol=1e-6):
"""Calculate the intersection point of a plane with a polyline.
Return a list of intersection points.
By default it will allow two intersections. Reduce expected_number_of_intersections to speed up.
Parameters
----------
polyline : compas.geometry.Polyline
polyline to test intersection
plane : compas.Geometry.Plane
plane to compute intersection
tol : float, optional
A tolerance for membership verification.
Default is ``1e-6``.
Returns
-------
intersection_pts : list of compas.geometry.Point
if there are intersection points, return point(s) in a list
"""
if not expected_number_of_intersections:
expected_number_of_intersections = len(polyline)
intersection_points = []
max_iter = 0
for segment in pairwise(polyline):
pt = intersection_segment_plane(segment, plane, tol)
if pt and max_iter < expected_number_of_intersections:
intersection_points.append(pt)
max_iter += 1
else:
break
if len(intersection_points) > 0:
return intersection_points
return None
| def intersection_polyline_plane(polyline, plane, expected_number_of_intersections=None, tol=1e-6):
"""Calculate the intersection point of a plane with a polyline.
Return a list of intersection points.
By default it will allow as many intersections as there are polyline points. Reduce expected_number_of_intersections to speed up.
Parameters
----------
polyline : compas.geometry.Polyline
polyline to test intersection
plane : compas.geometry.Plane
plane to compute intersection
tol : float, optional
A tolerance for membership verification.
Default is ``1e-6``.
Returns
-------
list of :class:`compas.geometry.Point`
if there are intersection points, return point(s) in a list
"""
if not expected_number_of_intersections:
expected_number_of_intersections = len(polyline)
intersection_points = []
max_iter = 0
for segment in pairwise(polyline):
pt = intersection_segment_plane(segment, plane, tol)
if pt and max_iter < expected_number_of_intersections:
intersection_points.append(pt)
max_iter += 1
else:
break
if len(intersection_points) > 0:
return intersection_points
return None
|
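The segment walk relies on pairwise iteration over consecutive polyline points. compas ships its own pairwise helper; itertools.pairwise (Python 3.10+) behaves the same for this purpose, as in the short sketch below.
from itertools import pairwise   # Python 3.10+; compas provides an equivalent helper

polyline = [(0, 0), (1, 0), (1, 1), (0, 1)]
for a, b in pairwise(polyline):
    print(a, '->', b)
# (0, 0) -> (1, 0)
# (1, 0) -> (1, 1)
# (1, 1) -> (0, 1)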
45,397 | def concatenate(dfs):
"""
Concatenate pandas DataFrames with saving 'category' dtype.
Parameters
----------
dfs : list
List of pandas DataFrames to concatenate.
Returns
-------
pandas.DataFrame
A pandas DataFrame.
"""
categoricals_column_names = set.intersection(
*[set(df.select_dtypes("category").columns.tolist()) for df in dfs]
)
for column_name in categoricals_column_names:
# Build a list of all columns in all dfs with name column_name.
categorical_columns_with_name = []
for df in dfs:
categorical_columns_in_df = df[column_name]
# Fast path for when the column name is unique.
if type(categorical_columns_in_df) == pandas.Series:
categorical_columns_with_name.append(categorical_columns_in_df)
else:
# If the column name is repeated, df[column_name] gives a
# a dataframe with all matching columns instead of a series.
categorical_columns_with_name.extend(
categorical_columns_in_df.iloc[:, i]
for i in range(len(categorical_columns_in_df.columns))
)
# Make a new category unioning all columns with the current name.
categories = union_categoricals(categorical_columns_with_name).categories
# Replace all columns having the current name with the new category.
for df in dfs:
categorical_columns_in_df = df[column_name]
# Fast path for when the column name is unique.
if type(categorical_columns_in_df) == pandas.Series:
df[column_name] = pandas.Categorical(
df[column_name], categories=categories
)
else:
for i in range(len(categorical_columns_in_df.columns)):
df.iloc[:, i] = pandas.Categorical(
df.iloc[:, i], categories=categories
)
return pandas.concat(dfs)
| def concatenate(dfs):
"""
Concatenate pandas DataFrames with saving 'category' dtype.
Parameters
----------
dfs : list
List of pandas DataFrames to concatenate.
Returns
-------
pandas.DataFrame
A pandas DataFrame.
"""
categoricals_column_names = set.intersection(
*[set(df.select_dtypes("category").columns.tolist()) for df in dfs]
)
for column_name in categoricals_column_names:
# Build a list of all columns in all dfs with name column_name.
categorical_columns_with_name = []
for df in dfs:
categorical_columns_in_df = df[column_name]
# Fast path for when the column name is unique.
if type(categorical_columns_in_df) == pandas.Series:
categorical_columns_with_name.append(categorical_columns_in_df)
else:
# If the column name is repeated, df[column_name] gives a
# dataframe with all matching columns instead of a series.
categorical_columns_with_name.extend(
categorical_columns_in_df.iloc[:, i]
for i in range(len(categorical_columns_in_df.columns))
)
# Make a new category unioning all columns with the current name.
categories = union_categoricals(categorical_columns_with_name).categories
# Replace all columns having the current name with the new category.
for df in dfs:
categorical_columns_in_df = df[column_name]
# Fast path for when the column name is unique.
if isinstance(categorical_columns_in_df, pandas.Series):
df[column_name] = pandas.Categorical(
df[column_name], categories=categories
)
else:
for i in range(len(categorical_columns_in_df.columns)):
df.iloc[:, i] = pandas.Categorical(
df.iloc[:, i], categories=categories
)
return pandas.concat(dfs)
|
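Why the category union matters, in a minimal pandas-only example: concatenating 'category' columns whose categories differ silently falls back to object dtype, which is exactly what the helper above avoids.
import pandas as pd
from pandas.api.types import union_categoricals

a = pd.DataFrame({"c": pd.Categorical(["x", "y"])})
b = pd.DataFrame({"c": pd.Categorical(["y", "z"])})
print(pd.concat([a, b])["c"].dtype)     # object: the categories differ

cats = union_categoricals([a["c"], b["c"]]).categories
a["c"] = pd.Categorical(a["c"], categories=cats)
b["c"] = pd.Categorical(b["c"], categories=cats)
print(pd.concat([a, b])["c"].dtype)     # category: dtype is preserved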
58,541 | def sac_actor_critic_loss(
policy: Policy, model: ModelV2, dist_class: Type[TFActionDistribution],
train_batch: SampleBatch) -> Union[TensorType, List[TensorType]]:
"""Constructs the loss for the Soft Actor Critic.
Args:
policy (Policy): The Policy to calculate the loss for.
model (ModelV2): The Model to calculate the loss for.
dist_class (Type[ActionDistribution]: The action distr. class.
train_batch (SampleBatch): The training data.
Returns:
Union[TensorType, List[TensorType]]: A single loss tensor or a list
of loss tensors.
"""
# Should be True only for debugging purposes (e.g. test cases)!
deterministic = policy.config["_deterministic_loss"]
# Get the base model output from the train batch.
model_out_t, _ = model({
"obs": train_batch[SampleBatch.CUR_OBS],
"is_training": policy._get_is_training_placeholder(),
}, [], None)
# Get the base model output from the next observations in the train batch.
model_out_tp1, _ = model({
"obs": train_batch[SampleBatch.NEXT_OBS],
"is_training": policy._get_is_training_placeholder(),
}, [], None)
# Get the target model's base outputs from the next observations in the
# train batch.
target_model_out_tp1, _ = policy.target_model({
"obs": train_batch[SampleBatch.NEXT_OBS],
"is_training": policy._get_is_training_placeholder(),
}, [], None)
# Discrete actions case.
if model.discrete:
# Get all action probs directly from pi and form their logp.
log_pis_t = tf.nn.log_softmax(model.get_policy_output(model_out_t), -1)
policy_t = tf.math.exp(log_pis_t)
log_pis_tp1 = tf.nn.log_softmax(
model.get_policy_output(model_out_tp1), -1)
policy_tp1 = tf.math.exp(log_pis_tp1)
# Q-values.
q_t = model.get_q_values(model_out_t)
# Target Q-values.
q_tp1 = policy.target_model.get_q_values(target_model_out_tp1)
if policy.config["twin_q"]:
twin_q_t = model.get_twin_q_values(model_out_t)
twin_q_tp1 = policy.target_model.get_twin_q_values(
target_model_out_tp1)
q_tp1 = tf.reduce_min((q_tp1, twin_q_tp1), axis=0)
q_tp1 -= model.alpha * log_pis_tp1
# Actually selected Q-values (from the actions batch).
one_hot = tf.one_hot(
train_batch[SampleBatch.ACTIONS], depth=q_t.shape.as_list()[-1])
q_t_selected = tf.reduce_sum(q_t * one_hot, axis=-1)
if policy.config["twin_q"]:
twin_q_t_selected = tf.reduce_sum(twin_q_t * one_hot, axis=-1)
# Discrete case: "Best" means weighted by the policy (prob) outputs.
q_tp1_best = tf.reduce_sum(tf.multiply(policy_tp1, q_tp1), axis=-1)
q_tp1_best_masked = \
(1.0 - tf.cast(train_batch[SampleBatch.DONES], tf.float32)) * \
q_tp1_best
# Continuous actions case.
else:
# Sample simgle actions from distribution.
action_dist_class = _get_dist_class(policy.config, policy.action_space)
action_dist_t = action_dist_class(
model.get_policy_output(model_out_t), policy.model)
policy_t = action_dist_t.sample() if not deterministic else \
action_dist_t.deterministic_sample()
log_pis_t = tf.expand_dims(action_dist_t.logp(policy_t), -1)
action_dist_tp1 = action_dist_class(
model.get_policy_output(model_out_tp1), policy.model)
policy_tp1 = action_dist_tp1.sample() if not deterministic else \
action_dist_tp1.deterministic_sample()
log_pis_tp1 = tf.expand_dims(action_dist_tp1.logp(policy_tp1), -1)
# Q-values for the actually selected actions.
q_t = model.get_q_values(model_out_t, tf.cast(train_batch[SampleBatch.ACTIONS], tf.float32))
if policy.config["twin_q"]:
twin_q_t = model.get_twin_q_values(
model_out_t, tf.cast(train_batch[SampleBatch.ACTIONS], tf.float32))
# Q-values for current policy in given current state.
q_t_det_policy = model.get_q_values(model_out_t, policy_t)
if policy.config["twin_q"]:
twin_q_t_det_policy = model.get_twin_q_values(
model_out_t, policy_t)
q_t_det_policy = tf.reduce_min(
(q_t_det_policy, twin_q_t_det_policy), axis=0)
# target q network evaluation
q_tp1 = policy.target_model.get_q_values(target_model_out_tp1,
policy_tp1)
if policy.config["twin_q"]:
twin_q_tp1 = policy.target_model.get_twin_q_values(
target_model_out_tp1, policy_tp1)
# Take min over both twin-NNs.
q_tp1 = tf.reduce_min((q_tp1, twin_q_tp1), axis=0)
q_t_selected = tf.squeeze(q_t, axis=len(q_t.shape) - 1)
if policy.config["twin_q"]:
twin_q_t_selected = tf.squeeze(twin_q_t, axis=len(q_t.shape) - 1)
q_tp1 -= model.alpha * log_pis_tp1
q_tp1_best = tf.squeeze(input=q_tp1, axis=len(q_tp1.shape) - 1)
q_tp1_best_masked = (1.0 - tf.cast(train_batch[SampleBatch.DONES],
tf.float32)) * q_tp1_best
# Compute RHS of bellman equation for the Q-loss (critic(s)).
q_t_selected_target = tf.stop_gradient(
tf.cast(train_batch[SampleBatch.REWARDS], tf.float32) +
policy.config["gamma"]**policy.config["n_step"] * q_tp1_best_masked)
# Compute the TD-error (potentially clipped).
base_td_error = tf.math.abs(q_t_selected - q_t_selected_target)
if policy.config["twin_q"]:
twin_td_error = tf.math.abs(twin_q_t_selected - q_t_selected_target)
td_error = 0.5 * (base_td_error + twin_td_error)
else:
td_error = base_td_error
# Calculate one or two critic losses (2 in the twin_q case).
prio_weights = tf.cast(train_batch[PRIO_WEIGHTS], tf.float32)
critic_loss = [tf.reduce_mean(prio_weights * huber_loss(base_td_error))]
if policy.config["twin_q"]:
critic_loss.append(
tf.reduce_mean(prio_weights * huber_loss(twin_td_error)))
# Alpha- and actor losses.
# Note: In the papers, alpha is used directly, here we take the log.
# Discrete case: Multiply the action probs as weights with the original
# loss terms (no expectations needed).
if model.discrete:
alpha_loss = tf.reduce_mean(
tf.reduce_sum(
tf.multiply(
tf.stop_gradient(policy_t), -model.log_alpha *
tf.stop_gradient(log_pis_t + model.target_entropy)),
axis=-1))
actor_loss = tf.reduce_mean(
tf.reduce_sum(
tf.multiply(
# NOTE: No stop_grad around policy output here
# (compare with q_t_det_policy for continuous case).
policy_t,
model.alpha * log_pis_t - tf.stop_gradient(q_t)),
axis=-1))
else:
alpha_loss = -tf.reduce_mean(
model.log_alpha *
tf.stop_gradient(log_pis_t + model.target_entropy))
actor_loss = tf.reduce_mean(model.alpha * log_pis_t - q_t_det_policy)
# Save for stats function.
policy.policy_t = policy_t
policy.q_t = q_t
policy.td_error = td_error
policy.actor_loss = actor_loss
policy.critic_loss = critic_loss
policy.alpha_loss = alpha_loss
policy.alpha_value = model.alpha
policy.target_entropy = model.target_entropy
# In a custom apply op we handle the losses separately, but return them
# combined in one loss here.
return actor_loss + tf.math.add_n(critic_loss) + alpha_loss
| def sac_actor_critic_loss(
policy: Policy, model: ModelV2, dist_class: Type[TFActionDistribution],
train_batch: SampleBatch) -> Union[TensorType, List[TensorType]]:
"""Constructs the loss for the Soft Actor Critic.
Args:
policy (Policy): The Policy to calculate the loss for.
model (ModelV2): The Model to calculate the loss for.
dist_class (Type[ActionDistribution]: The action distr. class.
train_batch (SampleBatch): The training data.
Returns:
Union[TensorType, List[TensorType]]: A single loss tensor or a list
of loss tensors.
"""
# Should be True only for debugging purposes (e.g. test cases)!
deterministic = policy.config["_deterministic_loss"]
# Get the base model output from the train batch.
model_out_t, _ = model({
"obs": train_batch[SampleBatch.CUR_OBS],
"is_training": policy._get_is_training_placeholder(),
}, [], None)
# Get the base model output from the next observations in the train batch.
model_out_tp1, _ = model({
"obs": train_batch[SampleBatch.NEXT_OBS],
"is_training": policy._get_is_training_placeholder(),
}, [], None)
# Get the target model's base outputs from the next observations in the
# train batch.
target_model_out_tp1, _ = policy.target_model({
"obs": train_batch[SampleBatch.NEXT_OBS],
"is_training": policy._get_is_training_placeholder(),
}, [], None)
# Discrete actions case.
if model.discrete:
# Get all action probs directly from pi and form their logp.
log_pis_t = tf.nn.log_softmax(model.get_policy_output(model_out_t), -1)
policy_t = tf.math.exp(log_pis_t)
log_pis_tp1 = tf.nn.log_softmax(
model.get_policy_output(model_out_tp1), -1)
policy_tp1 = tf.math.exp(log_pis_tp1)
# Q-values.
q_t = model.get_q_values(model_out_t)
# Target Q-values.
q_tp1 = policy.target_model.get_q_values(target_model_out_tp1)
if policy.config["twin_q"]:
twin_q_t = model.get_twin_q_values(model_out_t)
twin_q_tp1 = policy.target_model.get_twin_q_values(
target_model_out_tp1)
q_tp1 = tf.reduce_min((q_tp1, twin_q_tp1), axis=0)
q_tp1 -= model.alpha * log_pis_tp1
# Actually selected Q-values (from the actions batch).
one_hot = tf.one_hot(
train_batch[SampleBatch.ACTIONS], depth=q_t.shape.as_list()[-1])
q_t_selected = tf.reduce_sum(q_t * one_hot, axis=-1)
if policy.config["twin_q"]:
twin_q_t_selected = tf.reduce_sum(twin_q_t * one_hot, axis=-1)
# Discrete case: "Best" means weighted by the policy (prob) outputs.
q_tp1_best = tf.reduce_sum(tf.multiply(policy_tp1, q_tp1), axis=-1)
q_tp1_best_masked = \
(1.0 - tf.cast(train_batch[SampleBatch.DONES], tf.float32)) * \
q_tp1_best
# Continuous actions case.
else:
# Sample single actions from the distribution.
action_dist_class = _get_dist_class(policy.config, policy.action_space)
action_dist_t = action_dist_class(
model.get_policy_output(model_out_t), policy.model)
policy_t = action_dist_t.sample() if not deterministic else \
action_dist_t.deterministic_sample()
log_pis_t = tf.expand_dims(action_dist_t.logp(policy_t), -1)
action_dist_tp1 = action_dist_class(
model.get_policy_output(model_out_tp1), policy.model)
policy_tp1 = action_dist_tp1.sample() if not deterministic else \
action_dist_tp1.deterministic_sample()
log_pis_tp1 = tf.expand_dims(action_dist_tp1.logp(policy_tp1), -1)
# Q-values for the actually selected actions.
q_t = model.get_q_values(
model_out_t, tf.cast(train_batch[SampleBatch.ACTIONS], tf.float32))
if policy.config["twin_q"]:
twin_q_t = model.get_twin_q_values(
model_out_t, tf.cast(train_batch[SampleBatch.ACTIONS], tf.float32))
# Q-values for current policy in given current state.
q_t_det_policy = model.get_q_values(model_out_t, policy_t)
if policy.config["twin_q"]:
twin_q_t_det_policy = model.get_twin_q_values(
model_out_t, policy_t)
q_t_det_policy = tf.reduce_min(
(q_t_det_policy, twin_q_t_det_policy), axis=0)
# target q network evaluation
q_tp1 = policy.target_model.get_q_values(target_model_out_tp1,
policy_tp1)
if policy.config["twin_q"]:
twin_q_tp1 = policy.target_model.get_twin_q_values(
target_model_out_tp1, policy_tp1)
# Take min over both twin-NNs.
q_tp1 = tf.reduce_min((q_tp1, twin_q_tp1), axis=0)
q_t_selected = tf.squeeze(q_t, axis=len(q_t.shape) - 1)
if policy.config["twin_q"]:
twin_q_t_selected = tf.squeeze(twin_q_t, axis=len(q_t.shape) - 1)
q_tp1 -= model.alpha * log_pis_tp1
q_tp1_best = tf.squeeze(input=q_tp1, axis=len(q_tp1.shape) - 1)
q_tp1_best_masked = (1.0 - tf.cast(train_batch[SampleBatch.DONES],
tf.float32)) * q_tp1_best
# Compute RHS of bellman equation for the Q-loss (critic(s)).
q_t_selected_target = tf.stop_gradient(
tf.cast(train_batch[SampleBatch.REWARDS], tf.float32) +
policy.config["gamma"]**policy.config["n_step"] * q_tp1_best_masked)
# Compute the TD-error (potentially clipped).
base_td_error = tf.math.abs(q_t_selected - q_t_selected_target)
if policy.config["twin_q"]:
twin_td_error = tf.math.abs(twin_q_t_selected - q_t_selected_target)
td_error = 0.5 * (base_td_error + twin_td_error)
else:
td_error = base_td_error
# Calculate one or two critic losses (2 in the twin_q case).
prio_weights = tf.cast(train_batch[PRIO_WEIGHTS], tf.float32)
critic_loss = [tf.reduce_mean(prio_weights * huber_loss(base_td_error))]
if policy.config["twin_q"]:
critic_loss.append(
tf.reduce_mean(prio_weights * huber_loss(twin_td_error)))
# Alpha- and actor losses.
# Note: In the papers, alpha is used directly, here we take the log.
# Discrete case: Multiply the action probs as weights with the original
# loss terms (no expectations needed).
if model.discrete:
alpha_loss = tf.reduce_mean(
tf.reduce_sum(
tf.multiply(
tf.stop_gradient(policy_t), -model.log_alpha *
tf.stop_gradient(log_pis_t + model.target_entropy)),
axis=-1))
actor_loss = tf.reduce_mean(
tf.reduce_sum(
tf.multiply(
# NOTE: No stop_grad around policy output here
# (compare with q_t_det_policy for continuous case).
policy_t,
model.alpha * log_pis_t - tf.stop_gradient(q_t)),
axis=-1))
else:
alpha_loss = -tf.reduce_mean(
model.log_alpha *
tf.stop_gradient(log_pis_t + model.target_entropy))
actor_loss = tf.reduce_mean(model.alpha * log_pis_t - q_t_det_policy)
# Save for stats function.
policy.policy_t = policy_t
policy.q_t = q_t
policy.td_error = td_error
policy.actor_loss = actor_loss
policy.critic_loss = critic_loss
policy.alpha_loss = alpha_loss
policy.alpha_value = model.alpha
policy.target_entropy = model.target_entropy
# In a custom apply op we handle the losses separately, but return them
# combined in one loss here.
return actor_loss + tf.math.add_n(critic_loss) + alpha_loss
|
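The critic target in isolation: a NumPy sketch of the soft Bellman backup computed above, with made-up batch values; it mirrors the q_t_selected_target and td_error lines, not RLlib's API.
import numpy as np

gamma, n_step, alpha = 0.99, 1, 0.2
rewards      = np.array([1.0, 0.5])
dones        = np.array([0.0, 1.0])
q_tp1_best   = np.array([10.0, 12.0])   # min over the twin target critics
log_pis_tp1  = np.array([-1.2, -0.8])   # log pi(a'|s') of the sampled next action
q_tp1_soft   = q_tp1_best - alpha * log_pis_tp1           # entropy-regularized value
target       = rewards + gamma**n_step * (1.0 - dones) * q_tp1_soft
q_t_selected = np.array([9.0, 1.0])     # critic output for the taken actions
td_error     = np.abs(q_t_selected - target)
print(target, td_error)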
7,463 | def deprecated(since, message='', name='', alternative='', pending=False,
obj_type=None, warning_type=AstropyDeprecationWarning):
"""
Used to mark a function or class as deprecated.
To mark an attribute as deprecated, use `deprecated_attribute`.
Parameters
------------
since : str
The release at which this API became deprecated. This is
required.
message : str, optional
Override the default deprecation message. The format
specifier ``func`` may be used for the name of the function,
and ``alternative`` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. ``obj_type`` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated function or class; if not provided
the name is automatically determined from the passed in
function or class, though this is useful in the case of
renamed functions, where the new function is just assigned to
the name of the deprecated function. For example::
def new_function():
...
oldFunction = new_function
alternative : str, optional
An alternative function or class name that the user may use in
place of the deprecated object. The deprecation warning will
tell the user about this alternative if provided.
pending : bool, optional
If True, uses a AstropyPendingDeprecationWarning instead of a
AstropyDeprecationWarning.
obj_type : str, optional
The type of this object, if the automatically determined one
needs to be overridden.
warning_tyoe : warning
Warning to be issued.
Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`.
"""
method_types = (classmethod, staticmethod, types.MethodType)
def deprecate_doc(old_doc, message):
"""
Returns a given docstring with a deprecation message prepended
to it.
"""
if not old_doc:
old_doc = ''
old_doc = textwrap.dedent(old_doc).strip('\n')
new_doc = (('\n.. deprecated:: {since}'
'\n {message}\n\n'.format(
**{'since': since, 'message': message.strip()})) + old_doc)
if not old_doc:
# This is to prevent a spurious 'unexpected unindent' warning from
# docutils when the original docstring was blank.
new_doc += r'\ '
return new_doc
def get_function(func):
"""
Given a function or classmethod (or other function wrapper type), get
the function object.
"""
if isinstance(func, method_types):
func = func.__func__
return func
def deprecate_function(func, message, warning_type):
"""
Returns a wrapped function that displays ``warning_type``
when it is called.
"""
if isinstance(func, method_types):
func_wrapper = type(func)
else:
func_wrapper = lambda f: f
func = get_function(func)
def deprecated_func(*args, **kwargs):
if pending:
category = AstropyPendingDeprecationWarning
else:
category = warning_type
warnings.warn(message, category, stacklevel=2)
return func(*args, **kwargs)
# If this is an extension function, we can't call
# functools.wraps on it, but we normally don't care.
# This crazy way to get the type of a wrapper descriptor is
# straight out of the Python 3.3 inspect module docs.
if type(func) is not type(str.__dict__['__add__']): # nopep8
deprecated_func = functools.wraps(func)(deprecated_func)
deprecated_func.__doc__ = deprecate_doc(
deprecated_func.__doc__, message)
return func_wrapper(deprecated_func)
def deprecate_class(cls, message, warning_type):
"""
Update the docstring and wrap the ``__init__`` in-place (or ``__new__``
if the class or any of the bases overrides ``__new__``) so it will give
a deprecation warning when an instance is created.
This won't work for extension classes because these can't be modified
in-place and the alternatives don't work in the general case:
- Using a new class that looks and behaves like the original doesn't
work because the __new__ method of extension types usually makes sure
that it's the same class or a subclass.
- Subclassing the class and return the subclass can lead to problems
with pickle and will look weird in the Sphinx docs.
"""
cls.__doc__ = deprecate_doc(cls.__doc__, message)
if cls.__new__ is object.__new__:
cls.__init__ = deprecate_function(get_function(cls.__init__),
message, warning_type)
else:
cls.__new__ = deprecate_function(get_function(cls.__new__),
message, warning_type)
return cls
def deprecate(obj, message=message, name=name, alternative=alternative,
pending=pending, warning_type=warning_type):
if obj_type is None:
if isinstance(obj, type):
obj_type_name = 'class'
elif inspect.isfunction(obj):
obj_type_name = 'function'
elif inspect.ismethod(obj) or isinstance(obj, method_types):
obj_type_name = 'method'
else:
obj_type_name = 'object'
else:
obj_type_name = obj_type
if not name:
name = get_function(obj).__name__
altmessage = ''
if not message or type(message) is type(deprecate):
if pending:
message = ('The {func} {obj_type} will be deprecated in a '
'future version.')
else:
message = ('The {func} {obj_type} is deprecated and may '
'be removed in a future version.')
if alternative:
altmessage = '\n Use {} instead.'.format(alternative)
message = ((message.format(**{
'func': name,
'name': name,
'alternative': alternative,
'obj_type': obj_type_name})) +
altmessage)
if isinstance(obj, type):
return deprecate_class(obj, message, warning_type)
else:
return deprecate_function(obj, message, warning_type)
if type(message) is type(deprecate):
return deprecate(message)
return deprecate
| def deprecated(since, message='', name='', alternative='', pending=False,
obj_type=None, warning_type=AstropyDeprecationWarning):
"""
Used to mark a function or class as deprecated.
To mark an attribute as deprecated, use `deprecated_attribute`.
Parameters
------------
since : str
The release at which this API became deprecated. This is
required.
message : str, optional
Override the default deprecation message. The format
specifier ``func`` may be used for the name of the function,
and ``alternative`` may be used in the deprecation message
to insert the name of an alternative to the deprecated
function. ``obj_type`` may be used to insert a friendly name
for the type of object being deprecated.
name : str, optional
The name of the deprecated function or class; if not provided
the name is automatically determined from the passed in
function or class, though this is useful in the case of
renamed functions, where the new function is just assigned to
the name of the deprecated function. For example::
def new_function():
...
oldFunction = new_function
alternative : str, optional
An alternative function or class name that the user may use in
place of the deprecated object. The deprecation warning will
tell the user about this alternative if provided.
pending : bool, optional
If True, uses a AstropyPendingDeprecationWarning instead of a
AstropyDeprecationWarning.
obj_type : str, optional
The type of this object, if the automatically determined one
needs to be overridden.
warning_type : warning
Warning to be issued.
Default is `~astropy.utils.exceptions.AstropyDeprecationWarning`.
"""
method_types = (classmethod, staticmethod, types.MethodType)
def deprecate_doc(old_doc, message):
"""
Returns a given docstring with a deprecation message prepended
to it.
"""
if not old_doc:
old_doc = ''
old_doc = textwrap.dedent(old_doc).strip('\n')
new_doc = (('\n.. deprecated:: {since}'
'\n {message}\n\n'.format(
**{'since': since, 'message': message.strip()})) + old_doc)
if not old_doc:
# This is to prevent a spurious 'unexpected unindent' warning from
# docutils when the original docstring was blank.
new_doc += r'\ '
return new_doc
def get_function(func):
"""
Given a function or classmethod (or other function wrapper type), get
the function object.
"""
if isinstance(func, method_types):
func = func.__func__
return func
def deprecate_function(func, message, warning_type):
"""
Returns a wrapped function that displays ``warning_type``
when it is called.
"""
if isinstance(func, method_types):
func_wrapper = type(func)
else:
func_wrapper = lambda f: f
func = get_function(func)
def deprecated_func(*args, **kwargs):
if pending:
category = AstropyPendingDeprecationWarning
else:
category = warning_type
warnings.warn(message, category, stacklevel=2)
return func(*args, **kwargs)
# If this is an extension function, we can't call
# functools.wraps on it, but we normally don't care.
# This crazy way to get the type of a wrapper descriptor is
# straight out of the Python 3.3 inspect module docs.
if type(func) is not type(str.__dict__['__add__']): # nopep8
deprecated_func = functools.wraps(func)(deprecated_func)
deprecated_func.__doc__ = deprecate_doc(
deprecated_func.__doc__, message)
return func_wrapper(deprecated_func)
def deprecate_class(cls, message, warning_type):
"""
Update the docstring and wrap the ``__init__`` in-place (or ``__new__``
if the class or any of the bases overrides ``__new__``) so it will give
a deprecation warning when an instance is created.
This won't work for extension classes because these can't be modified
in-place and the alternatives don't work in the general case:
- Using a new class that looks and behaves like the original doesn't
work because the __new__ method of extension types usually makes sure
that it's the same class or a subclass.
- Subclassing the class and return the subclass can lead to problems
with pickle and will look weird in the Sphinx docs.
"""
cls.__doc__ = deprecate_doc(cls.__doc__, message)
if cls.__new__ is object.__new__:
cls.__init__ = deprecate_function(get_function(cls.__init__),
message, warning_type)
else:
cls.__new__ = deprecate_function(get_function(cls.__new__),
message, warning_type)
return cls
def deprecate(obj, message=message, name=name, alternative=alternative,
pending=pending, warning_type=warning_type):
if obj_type is None:
if isinstance(obj, type):
obj_type_name = 'class'
elif inspect.isfunction(obj):
obj_type_name = 'function'
elif inspect.ismethod(obj) or isinstance(obj, method_types):
obj_type_name = 'method'
else:
obj_type_name = 'object'
else:
obj_type_name = obj_type
if not name:
name = get_function(obj).__name__
altmessage = ''
if not message or type(message) is type(deprecate):
if pending:
message = ('The {func} {obj_type} will be deprecated in a '
'future version.')
else:
message = ('The {func} {obj_type} is deprecated and may '
'be removed in a future version.')
if alternative:
altmessage = '\n Use {} instead.'.format(alternative)
message = ((message.format(**{
'func': name,
'name': name,
'alternative': alternative,
'obj_type': obj_type_name})) +
altmessage)
if isinstance(obj, type):
return deprecate_class(obj, message, warning_type)
else:
return deprecate_function(obj, message, warning_type)
if type(message) is type(deprecate):
return deprecate(message)
return deprecate
|
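A usage sketch for the decorator above; in astropy it is importable as astropy.utils.decorators.deprecated, and the decorated function here is made up for illustration.
import warnings

@deprecated('1.2', alternative='new_compute')
def old_compute(x):
    """Square a number the old way."""
    return x * x

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    old_compute(3)
print(caught[0].category.__name__)                    # AstropyDeprecationWarning
print('.. deprecated:: 1.2' in old_compute.__doc__)   # True: note prepended to the docstring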
54,349 | def main(root_data, output_data):
# Curate the contents of the dataset to keep only folders and sort them
contents_ds = [subdir for subdir in os.listdir(root_data) if os.path.isdir(os.path.join(root_data, subdir))]
contents_ds.sort()
# Loop across contents of each subdirectory
for subdir in contents_ds:
# Define subject id
sub_id = "sub-demoMouse" + subdir.split('_')[3]
# Define sample id
sample_id = subdir.split('_')[4]
# Get the path of each subdirectory
path_subdir = os.path.join(root_data, subdir)
# Get the contents of each subdirectory
contents_subdir = os.listdir(path_subdir)
# Define final bids subject id
sub_bids_full = sub_id + "_sample-" + sample_id
# Loop across the contents of each subdirectory
for file in contents_subdir:
# Get the path of each file
path_file_in = os.path.join(path_subdir, file)
# Check if the filename corresponds to the one in the images dictionary
if file in images:
# Most files go into the subject's data folder
path_sub_id_dir_out = os.path.join(output_data, sub_id, 'microscopy')
# Define the output file path
path_file_out = os.path.join(path_sub_id_dir_out, sub_bids_full + images[file])
# Check if the filename corresponds to the one in the derivatives dictionary
elif file in der:
# Derivatives go somewhere else
path_sub_id_dir_out = os.path.join(output_data, 'derivatives', 'labels', sub_id, 'microscopy')
# Define the output file path
path_file_out = os.path.join(path_sub_id_dir_out, sub_bids_full + der[file])
else:
# not a file we recognize
continue
# Create output subdirecotries and copy files to output
os.makedirs(os.path.dirname(path_file_out), exist_ok=True)
shutil.copyfile(path_file_in, path_file_out)
# Generate subject list
sub_list = sorted(d for d in os.listdir(output_data) if d.startswith("sub-"))
# Now that everything is curated, fill in the metadata
for sub_id in sub_list:
create_json_sidecar(output_data, sub_id)
# Create participants.tsv and samples.tsv
with open(output_data + '/samples.tsv', 'w') as samples, \
open(output_data + '/participants.tsv', 'w') as participants:
tsv_writer_samples = csv.writer(samples, delimiter='\t', lineterminator='\n')
tsv_writer_samples.writerow(["sample_id", "participant_id", "sample_type"])
tsv_writer_participants = csv.writer(participants, delimiter='\t', lineterminator='\n')
tsv_writer_participants.writerow(["participant_id", "species"])
for subject in sub_list:
row_sub = []
row_sub.append(subject)
row_sub.append('mus musculus')
tsv_writer_participants.writerow(row_sub)
subject_samples = sorted(glob.glob(os.path.join(output_data, subject, 'microscopy', '*.png')))
for file_sample in subject_samples:
row_sub_samples = []
row_sub_samples.append(os.path.basename(file_sample).split('_')[1])
row_sub_samples.append(subject)
row_sub_samples.append('tissue')
tsv_writer_samples.writerow(row_sub_samples)
# Create dataset_description.json
dataset_description = {"Name": "data_axondeepseg_tem",
"BIDSVersion": "1.7.0",
"License": "MIT"
}
with open(output_data + '/dataset_description.json', 'w') as json_file:
json.dump(dataset_description, json_file, indent=4)
# Create dataset_description.json for derivatives/labels
dataset_description_derivatives = {"Name": "data_axondeepseg_tem labels",
"BIDSVersion": "1.7.0",
"PipelineDescription": {
"Name": "Axon and myelin manual segmentation labels"
}}
with open(output_data + '/derivatives/labels/dataset_description.json', 'w') as json_file:
json.dump(dataset_description_derivatives, json_file, indent=4)
# Create participants.json
data_json = {
"participant_id": {
"Description": "Unique participant ID"
},
"species": {
"Description": "Binomial species name from the NCBI Taxonomy (https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi)"
}
}
with open(output_data + '/participants.json', 'w') as json_file:
json.dump(data_json, json_file, indent=4)
# Create samples.json
data_json = {
"sample_id": {
"Description": "Sample ID"
},
"participant_id": {
"Description": "Participant ID from whom tissue samples have been acquired"
},
"sample_type": {
"Description": "Type of sample from ENCODE Biosample Type (https://www.encodeproject.org/profiles/biosample_type)"
}
}
with open(output_data + '/samples.json', 'w') as json_file:
json.dump(data_json, json_file, indent=4)
# Create README
with open(output_data + '/README', 'w') as readme_file:
print(dedent("""\
- Generate on 2022-03-09
- Created for demo purposes"""), file=readme_file)
| def main(root_data, output_data):
# Curate the contents of the dataset to keep only folders and sort them
contents_ds = [subdir for subdir in os.listdir(root_data) if os.path.isdir(os.path.join(root_data, subdir))]
contents_ds.sort()
# Loop across contents of each subdirectory
for subdir in contents_ds:
# Define subject id
sub_id = "sub-demoMouse" + subdir.split('_')[3]
# Define sample id
sample_id = subdir.split('_')[4]
# Get the path of each subdirectory
path_subdir = os.path.join(root_data, subdir)
# Get the contents of each subdirectory
contents_subdir = os.listdir(path_subdir)
# Define final bids subject id
sub_bids_full = sub_id + "_sample-" + sample_id
# Loop across the contents of each subdirectory
for file in contents_subdir:
# Get the path of each file
path_file_in = os.path.join(path_subdir, file)
# Check if the filename corresponds to the one in the images dictionary
if file in images:
# Most files go into the subject's data folder
path_sub_id_dir_out = os.path.join(output_data, sub_id, 'microscopy')
# Define the output file path
path_file_out = os.path.join(path_sub_id_dir_out, sub_bids_full + images[file])
# Check if the filename corresponds to the one in the derivatives dictionary
elif file in der:
# Derivatives go somewhere else
path_sub_id_dir_out = os.path.join(output_data, 'derivatives', 'labels', sub_id, 'microscopy')
# Define the output file path
path_file_out = os.path.join(path_sub_id_dir_out, sub_bids_full + der[file])
else:
# not a file we recognize
continue
# Create output subdirectories and copy files to output
os.makedirs(os.path.dirname(path_file_out), exist_ok=True)
shutil.copyfile(path_file_in, path_file_out)
# Generate subject list
sub_list = sorted(d for d in os.listdir(output_data) if d.startswith("sub-"))
# Now that everything is curated, fill in the metadata
for sub_id in sub_list:
create_json_sidecar(output_data, sub_id)
# Create participants.tsv and samples.tsv
with open(output_data + '/samples.tsv', 'w') as samples, \
open(output_data + '/participants.tsv', 'w') as participants:
tsv_writer_samples = csv.writer(samples, delimiter='\t', lineterminator='\n')
tsv_writer_samples.writerow(["sample_id", "participant_id", "sample_type"])
tsv_writer_participants = csv.writer(participants, delimiter='\t', lineterminator='\n')
tsv_writer_participants.writerow(["participant_id", "species"])
for subject in sub_list:
row_sub = []
row_sub.append(subject)
row_sub.append('mus musculus')
tsv_writer_participants.writerow(row_sub)
subject_samples = sorted(glob.glob(os.path.join(output_data, subject, 'microscopy', '*.png')))
for file_sample in subject_samples:
row_sub_samples = []
row_sub_samples.append(os.path.basename(file_sample).split('_')[1])
row_sub_samples.append(subject)
row_sub_samples.append('tissue')
tsv_writer_samples.writerow(row_sub_samples)
# Create dataset_description.json
dataset_description = {"Name": "demo dataset",
"BIDSVersion": "1.7.0"
}
with open(output_data + '/dataset_description.json', 'w') as json_file:
json.dump(dataset_description, json_file, indent=4)
# Create dataset_description.json for derivatives/labels
dataset_description_derivatives = {"Name": "data_axondeepseg_tem labels",
"BIDSVersion": "1.7.0",
"PipelineDescription": {
"Name": "Axon and myelin manual segmentation labels"
}}
with open(output_data + '/derivatives/labels/dataset_description.json', 'w') as json_file:
json.dump(dataset_description_derivatives, json_file, indent=4)
# Create participants.json
data_json = {
"participant_id": {
"Description": "Unique participant ID"
},
"species": {
"Description": "Binomial species name from the NCBI Taxonomy (https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi)"
}
}
with open(output_data + '/participants.json', 'w') as json_file:
json.dump(data_json, json_file, indent=4)
# Create samples.json
data_json = {
"sample_id": {
"Description": "Sample ID"
},
"participant_id": {
"Description": "Participant ID from whom tissue samples have been acquired"
},
"sample_type": {
"Description": "Type of sample from ENCODE Biosample Type (https://www.encodeproject.org/profiles/biosample_type)"
}
}
with open(output_data + '/samples.json', 'w') as json_file:
json.dump(data_json, json_file, indent=4)
# Create README
with open(output_data + '/README', 'w') as readme_file:
print(dedent("""\
- Generated on 2022-03-09
- Created for demo purposes"""), file=readme_file)
|
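The TSV writing in isolation: csv.writer with a tab delimiter produces the BIDS-style participants table built above; the subject id is made up.
import csv, io

buf = io.StringIO()
writer = csv.writer(buf, delimiter='\t', lineterminator='\n')
writer.writerow(["participant_id", "species"])
writer.writerow(["sub-demoMouse01", "mus musculus"])
print(buf.getvalue())
# participant_id    species
# sub-demoMouse01   mus musculus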
7,630 | def test_equal():
obstime = 'B1955'
sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, obstime=obstime)
sc2 = SkyCoord([1, 20]*u.deg, [3, 4]*u.deg, obstime=obstime)
# Compare arrays and scalars
eq = sc1 == sc2
ne = sc1 != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
assert sc1[0] == sc2[0] # (numpy True not Python True)
assert not (sc1[0] != sc2[0])
# Broadcasting
eq = sc1[0] == sc2
ne = sc1[0] != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
# With diff only in velocity
sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, radial_velocity=[1, 2]*u.km/u.s)
sc2 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, radial_velocity=[1, 20]*u.km/u.s)
eq = sc1 == sc2
ne = sc1 != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
assert sc1[0] == sc2[0]
assert not (sc1[0] != sc2[0])
| def test_equal():
obstime = 'B1955'
sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, obstime=obstime)
sc2 = SkyCoord([1, 20]*u.deg, [3, 4]*u.deg, obstime=obstime)
# Compare arrays and scalars
eq = sc1 == sc2
ne = sc1 != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
assert sc1[0] == sc2[0]
assert not (sc1[0] != sc2[0])
# Broadcasting
eq = sc1[0] == sc2
ne = sc1[0] != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
# With diff only in velocity
sc1 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, radial_velocity=[1, 2]*u.km/u.s)
sc2 = SkyCoord([1, 2]*u.deg, [3, 4]*u.deg, radial_velocity=[1, 20]*u.km/u.s)
eq = sc1 == sc2
ne = sc1 != sc2
assert np.all(eq == [True, False])
assert np.all(ne == [False, True])
assert sc1[0] == sc2[0]
assert not (sc1[0] != sc2[0])
|
7,895 | def _vectfit_xs(energy, ce_xs, mts, rtol=1e-3, atol=1e-5, orders=None,
n_vf_iter=30, log=False, path_out=None, **kwargs):
r"""Convert point-wise cross section to multipole data via Vector Fitting.
Parameters
----------
energy : np.ndarray
Energy array
ce_xs : np.ndarray
Point-wise cross sections to be fitted
mts : Iterable of int
Reaction list
rtol : float, optional
Relative error tolerance
atol : float, optional
Absolute error tolerance
orders : Iterable of int, optional
A list of orders (number of poles) to be searched
n_vf_iter : int, optional
Number of maximum VF iterations
log : bool or int, optional
Whether to print running logs
path_out : str, optional
Path to save the figures
**kwargs
Additional keyword arguments
Returns
-------
Tuple
(poles, residues)
"""
# import vectfit package: https://github.com/liangjg/vectfit
import vectfit as vf
ne = energy.size
nmt = len(mts)
if ce_xs.shape != (nmt, ne):
raise ValueError('Inconsistent cross section data.')
# construct test data: interpolate xs with finer grids
N_FINER = 10
ne_test = (ne-1)*N_FINER + 1
test_energy = np.interp(np.arange(ne_test),
np.arange(ne_test, step=N_FINER), energy)
test_energy[[0, -1]] = energy[[0, -1]] # avoid numerical issue
test_xs_ref = np.zeros((nmt, ne_test))
for i in range(nmt):
test_xs_ref[i] = np.interp(test_energy, energy, ce_xs[i])
if log:
print("Energy: {:.3e} to {:.3e} eV ({} points)".format(
energy[0], energy[-1], ne))
# inputs
f = ce_xs * energy # sigma*E
s = np.sqrt(energy) # sqrt(E)
test_s = np.sqrt(test_energy)
weight = 1.0/f
# very small cross sections can lead to huge weights, which will harm the
# fitting accuracy
MIN_CROSS_SECTION = 1e-7
for i in range(nmt):
if np.all(ce_xs[i]<=MIN_CROSS_SECTION):
weight[i] = 1.0
elif np.any(ce_xs[i]<=MIN_CROSS_SECTION):
weight[i, ce_xs[i]<=MIN_CROSS_SECTION] = \
max(weight[i, ce_xs[i]>MIN_CROSS_SECTION])
# detect peaks (resonances) and determine VF order search range
peaks, _ = find_peaks(ce_xs[0]+ce_xs[1])
n_peaks = peaks.size
if orders is not None:
# make sure orders are even integers
orders = list(set([int(i/2)*2 for i in orders if i>=2]))
else:
lowest_order = max(2, 2*n_peaks)
highest_order = max(200, 4*n_peaks)
orders = list(range(lowest_order, highest_order+1, 2))
if log:
print("Found {} peaks".format(n_peaks))
print("Fitting orders from {} to {}".format(orders[0], orders[-1]))
# perform VF with increasing orders
found_ideal = False
    n_discarded = 0  # for acceleration, number of discarded searches
best_quality = best_ratio = -np.inf
for i, order in enumerate(orders):
if log:
print("Order={}({}/{})".format(order, i, len(orders)))
# initial guessed poles
poles = np.linspace(s[0], s[-1], order//2)
poles = poles + poles*0.01j
poles = np.sort(np.append(poles, np.conj(poles)))
found_better = False
# fitting iteration
for i_vf in range(n_vf_iter):
if log >= DETAILED_LOGGING:
print("VF iteration {}/{}".format(i_vf+1, n_vf_iter))
# call vf
try:
poles, residues, cf, f_fit, rms = vf.vectfit(f, s, poles, weight)
except:
break
# convert real pole to conjugate pairs
n_real_poles = 0
new_poles = []
for p in poles:
p_r, p_i = np.real(p), np.imag(p)
if (s[0] <= p_r <= s[-1]) and p_i == 0.:
new_poles += [p_r+p_r*0.01j, p_r-p_r*0.01j]
n_real_poles += 1
else:
new_poles += [p]
new_poles = np.array(new_poles)
# re-calculate residues if poles changed
if n_real_poles > 0:
if log >= DETAILED_LOGGING:
print(" # real poles: {}".format(n_real_poles))
new_poles, residues, cf, f_fit, rms = \
vf.vectfit(f, s, new_poles, weight, skip_pole=True)
# assess the result on test grid
test_xs = vf.evaluate(test_s, new_poles, residues) / test_energy
abserr = np.abs(test_xs - test_xs_ref)
relerr = abserr / test_xs_ref
if np.any(np.isnan(abserr)):
maxre, ratio, ratio2 = np.inf, -np.inf, -np.inf
elif np.all(abserr <= atol):
maxre, ratio, ratio2 = 0., 1., 1.
else:
maxre = np.max(relerr[abserr > atol])
ratio = np.sum((relerr<rtol) | (abserr<atol)) / relerr.size
ratio2 = np.sum((relerr<10*rtol) | (abserr<atol)) / relerr.size
quality = ratio + ratio2 - min(0.1*maxre, 1) - 0.001*new_poles.size
if np.any(test_xs < -atol):
quality = -np.inf
if log >= DETAILED_LOGGING:
print(" # poles: {}".format(new_poles.size))
print(" Max relative error: {:.3f}%".format(maxre*100))
print(" Satisfaction: {:.1f}%, {:.1f}%".format(ratio*100, ratio2*100))
print(" Quality: {:.2f}".format(quality))
if quality > best_quality:
if log >= DETAILED_LOGGING:
print(" Best by far!")
found_better = True
best_quality, best_ratio = quality, ratio
best_poles, best_residues = new_poles, residues
best_test_xs, best_relerr = test_xs, relerr
if best_ratio >= 1.0:
if log:
print("Found ideal results. Stop!")
found_ideal = True
break
else:
if log >= DETAILED_LOGGING:
print(" Discarded!")
if found_ideal:
break
# acceleration
if found_better:
n_discarded = 0
else:
if order > max(2*n_peaks, 50) and best_ratio > 0.7:
n_discarded += 1
if n_discarded >= 10 or (n_discarded >= 5 and best_ratio > 0.9):
if log >= DETAILED_LOGGING:
print("Couldn't get better results. Stop!")
break
# merge conjugate poles
real_idx = []
conj_idx = []
found_conj = False
for i, p in enumerate(best_poles):
if found_conj:
found_conj = False
continue
if np.imag(p) == 0.:
real_idx.append(i)
else:
            if i + 1 < best_poles.size and np.conj(p) == best_poles[i+1]:
found_conj = True
conj_idx.append(i)
else:
raise RuntimeError("Complex poles are not conjugate!")
if log:
print("Found {} real poles and {} conjugate complex pairs.".format(
len(real_idx), len(conj_idx)))
mp_poles = best_poles[real_idx+conj_idx]
mp_residues = np.concatenate((best_residues[:, real_idx],
best_residues[:, conj_idx]*2), axis=1)/1j
if log:
print("Final number of poles: {}".format(mp_poles.size))
if path_out:
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
if not os.path.exists(path_out):
os.makedirs(path_out)
for i, mt in enumerate(mts):
fig, ax1 = plt.subplots()
lns1 = ax1.loglog(test_energy, test_xs_ref[i], 'g', label="ACE xs")
lns2 = ax1.loglog(test_energy, best_test_xs[i], 'b', label="VF xs")
ax2 = ax1.twinx()
lns3 = ax2.loglog(test_energy, best_relerr[i], 'r',
label="Relative error", alpha=0.5)
lns = lns1 + lns2 + lns3
labels = [l.get_label() for l in lns]
ax1.legend(lns, labels, loc='best')
ax1.set_xlabel('energy (eV)')
ax1.set_ylabel('cross section (b)', color='b')
ax1.tick_params('y', colors='b')
ax2.set_ylabel('relative error', color='r')
ax2.tick_params('y', colors='r')
plt.title("MT {} vectfitted with {} poles".format(mt, mp_poles.size))
fig.tight_layout()
fig_file = os.path.join(path_out, "{:.0f}-{:.0f}_MT{}.png".format(
energy[0], energy[-1], mt))
plt.savefig(fig_file)
plt.close()
if log:
print("Saved figure: {}".format(fig_file))
return (mp_poles, mp_residues)
| def _vectfit_xs(energy, ce_xs, mts, rtol=1e-3, atol=1e-5, orders=None,
n_vf_iter=30, log=False, path_out=None, **kwargs):
r"""Convert point-wise cross section to multipole data via Vector Fitting.
Parameters
----------
energy : np.ndarray
Energy array
ce_xs : np.ndarray
Point-wise cross sections to be fitted
mts : Iterable of int
Reaction list
rtol : float, optional
Relative error tolerance
atol : float, optional
Absolute error tolerance
orders : Iterable of int, optional
A list of orders (number of poles) to be searched
n_vf_iter : int, optional
Number of maximum VF iterations
log : bool or int, optional
Whether to print running logs
path_out : str, optional
Path to save the figures
**kwargs
Additional keyword arguments
Returns
-------
Tuple
(poles, residues)
"""
# import vectfit package: https://github.com/liangjg/vectfit
import vectfit as vf
ne = energy.size
nmt = len(mts)
if ce_xs.shape != (nmt, ne):
raise ValueError('Inconsistent cross section data.')
# construct test data: interpolate xs with finer grids
N_FINER = 10
ne_test = (ne-1)*N_FINER + 1
test_energy = np.interp(np.arange(ne_test),
np.arange(ne_test, step=N_FINER), energy)
test_energy[[0, -1]] = energy[[0, -1]] # avoid numerical issue
test_xs_ref = np.zeros((nmt, ne_test))
for i in range(nmt):
test_xs_ref[i] = np.interp(test_energy, energy, ce_xs[i])
if log:
print("Energy: {:.3e} to {:.3e} eV ({} points)".format(
energy[0], energy[-1], ne))
# inputs
f = ce_xs * energy # sigma*E
s = np.sqrt(energy) # sqrt(E)
test_s = np.sqrt(test_energy)
weight = 1.0/f
# very small cross sections can lead to huge weights, which will harm the
# fitting accuracy
MIN_CROSS_SECTION = 1e-7
for i in range(nmt):
if np.all(ce_xs[i]<=MIN_CROSS_SECTION):
weight[i] = 1.0
elif np.any(ce_xs[i]<=MIN_CROSS_SECTION):
weight[i, ce_xs[i]<=MIN_CROSS_SECTION] = \
max(weight[i, ce_xs[i]>MIN_CROSS_SECTION])
# detect peaks (resonances) and determine VF order search range
peaks, _ = find_peaks(ce_xs[0]+ce_xs[1])
n_peaks = peaks.size
if orders is not None:
# make sure orders are even integers
orders = list(set([int(i/2)*2 for i in orders if i >= 2]))
else:
lowest_order = max(2, 2*n_peaks)
highest_order = max(200, 4*n_peaks)
orders = list(range(lowest_order, highest_order+1, 2))
if log:
print("Found {} peaks".format(n_peaks))
print("Fitting orders from {} to {}".format(orders[0], orders[-1]))
# perform VF with increasing orders
found_ideal = False
    n_discarded = 0  # for acceleration, number of discarded searches
best_quality = best_ratio = -np.inf
for i, order in enumerate(orders):
if log:
print("Order={}({}/{})".format(order, i, len(orders)))
# initial guessed poles
poles = np.linspace(s[0], s[-1], order//2)
poles = poles + poles*0.01j
poles = np.sort(np.append(poles, np.conj(poles)))
found_better = False
# fitting iteration
for i_vf in range(n_vf_iter):
if log >= DETAILED_LOGGING:
print("VF iteration {}/{}".format(i_vf+1, n_vf_iter))
# call vf
try:
poles, residues, cf, f_fit, rms = vf.vectfit(f, s, poles, weight)
except:
break
# convert real pole to conjugate pairs
n_real_poles = 0
new_poles = []
for p in poles:
p_r, p_i = np.real(p), np.imag(p)
if (s[0] <= p_r <= s[-1]) and p_i == 0.:
new_poles += [p_r+p_r*0.01j, p_r-p_r*0.01j]
n_real_poles += 1
else:
new_poles += [p]
new_poles = np.array(new_poles)
# re-calculate residues if poles changed
if n_real_poles > 0:
if log >= DETAILED_LOGGING:
print(" # real poles: {}".format(n_real_poles))
new_poles, residues, cf, f_fit, rms = \
vf.vectfit(f, s, new_poles, weight, skip_pole=True)
# assess the result on test grid
test_xs = vf.evaluate(test_s, new_poles, residues) / test_energy
abserr = np.abs(test_xs - test_xs_ref)
relerr = abserr / test_xs_ref
if np.any(np.isnan(abserr)):
maxre, ratio, ratio2 = np.inf, -np.inf, -np.inf
elif np.all(abserr <= atol):
maxre, ratio, ratio2 = 0., 1., 1.
else:
maxre = np.max(relerr[abserr > atol])
ratio = np.sum((relerr<rtol) | (abserr<atol)) / relerr.size
ratio2 = np.sum((relerr<10*rtol) | (abserr<atol)) / relerr.size
quality = ratio + ratio2 - min(0.1*maxre, 1) - 0.001*new_poles.size
if np.any(test_xs < -atol):
quality = -np.inf
if log >= DETAILED_LOGGING:
print(" # poles: {}".format(new_poles.size))
print(" Max relative error: {:.3f}%".format(maxre*100))
print(" Satisfaction: {:.1f}%, {:.1f}%".format(ratio*100, ratio2*100))
print(" Quality: {:.2f}".format(quality))
if quality > best_quality:
if log >= DETAILED_LOGGING:
print(" Best by far!")
found_better = True
best_quality, best_ratio = quality, ratio
best_poles, best_residues = new_poles, residues
best_test_xs, best_relerr = test_xs, relerr
if best_ratio >= 1.0:
if log:
print("Found ideal results. Stop!")
found_ideal = True
break
else:
if log >= DETAILED_LOGGING:
print(" Discarded!")
if found_ideal:
break
# acceleration
if found_better:
n_discarded = 0
else:
if order > max(2*n_peaks, 50) and best_ratio > 0.7:
n_discarded += 1
if n_discarded >= 10 or (n_discarded >= 5 and best_ratio > 0.9):
if log >= DETAILED_LOGGING:
print("Couldn't get better results. Stop!")
break
# merge conjugate poles
real_idx = []
conj_idx = []
found_conj = False
for i, p in enumerate(best_poles):
if found_conj:
found_conj = False
continue
if np.imag(p) == 0.:
real_idx.append(i)
else:
            if i + 1 < best_poles.size and np.conj(p) == best_poles[i+1]:
found_conj = True
conj_idx.append(i)
else:
raise RuntimeError("Complex poles are not conjugate!")
if log:
print("Found {} real poles and {} conjugate complex pairs.".format(
len(real_idx), len(conj_idx)))
mp_poles = best_poles[real_idx+conj_idx]
mp_residues = np.concatenate((best_residues[:, real_idx],
best_residues[:, conj_idx]*2), axis=1)/1j
if log:
print("Final number of poles: {}".format(mp_poles.size))
if path_out:
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
if not os.path.exists(path_out):
os.makedirs(path_out)
for i, mt in enumerate(mts):
fig, ax1 = plt.subplots()
lns1 = ax1.loglog(test_energy, test_xs_ref[i], 'g', label="ACE xs")
lns2 = ax1.loglog(test_energy, best_test_xs[i], 'b', label="VF xs")
ax2 = ax1.twinx()
lns3 = ax2.loglog(test_energy, best_relerr[i], 'r',
label="Relative error", alpha=0.5)
lns = lns1 + lns2 + lns3
labels = [l.get_label() for l in lns]
ax1.legend(lns, labels, loc='best')
ax1.set_xlabel('energy (eV)')
ax1.set_ylabel('cross section (b)', color='b')
ax1.tick_params('y', colors='b')
ax2.set_ylabel('relative error', color='r')
ax2.tick_params('y', colors='r')
plt.title("MT {} vectfitted with {} poles".format(mt, mp_poles.size))
fig.tight_layout()
fig_file = os.path.join(path_out, "{:.0f}-{:.0f}_MT{}.png".format(
energy[0], energy[-1], mt))
plt.savefig(fig_file)
plt.close()
if log:
print("Saved figure: {}".format(fig_file))
return (mp_poles, mp_residues)
|
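
A minimal sketch of the accuracy check that _vectfit_xs performs on its refined test grid, reusing the same vectfit package (https://github.com/liangjg/vectfit) and the same convention that the fit targets sigma*E as a function of s = sqrt(E); the helper name check_fit and the standalone framing are illustrative, not part of the original module.

import numpy as np
import vectfit as vf

def check_fit(energy, xs_ref, poles, residues, rtol=1e-3, atol=1e-5):
    # poles/residues are raw vf.vectfit outputs (before conjugate-pair merging)
    s = np.sqrt(energy)
    xs_fit = vf.evaluate(s, poles, residues) / energy  # undo the sigma*E scaling
    abserr = np.abs(xs_fit - xs_ref)
    relerr = abserr / xs_ref
    # fraction of points meeting either tolerance, as in the quality metric above
    return np.sum((relerr < rtol) | (abserr < atol)) / relerr.size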
31,032 | def test():
""" Test Function to test validity of access and refresh tokens"""
report_json, status_code = get_dlp_report('1')
if status_code in [200, 204]:
return_results("ok")
else:
raise Exception("Integration test failed: Unexpected status ({})".format(status_code))
| def test():
""" Test Function to test validity of access and refresh tokens"""
report_json, status_code = get_dlp_report('1')
if status_code in [200, 204]:
return_results("ok")
else:
raise DemistoException("Integration test failed: Unexpected status ({})".format(status_code))
|
31,934 | def fetch_notables(service, cache_object=None, enrich_notables=False):
last_run_data = demisto.getLastRun()
if not last_run_data:
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease first run')
last_run_time = last_run_data and 'time' in last_run_data and last_run_data['time']
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease last run is:\n {}'.format(last_run_data))
dem_params = demisto.params()
occurred_look_behind = int(dem_params.get('occurrence_look_behind', 15) or 15)
extensive_log('[SplunkPyPreRelease] occurrence look behind is: {}'.format(occurred_look_behind))
occured_start_time, now = get_fetch_start_times(dem_params, service, last_run_time, occurred_look_behind)
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease last run time: {}, now: {}'.format(last_run_time, now))
default_batch_size = int(dem_params.get('batch_size', 200))
batch_size = last_run_data.get('batch_size') or default_batch_size
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease batch size is : {}'.format(batch_size))
kwargs_oneshot = build_fetch_kwargs(dem_params, batch_size, occured_start_time, now)
fetch_query = build_fetch_query(dem_params)
oneshotsearch_results = service.jobs.oneshot(fetch_query, **kwargs_oneshot) # type: ignore
reader = results.ResultsReader(oneshotsearch_results)
last_run_fetched_ids = last_run_data.get('found_incidents_ids', {})
incidents = [] # type: List[Dict]
notables = []
incident_ids_to_add = {}
for item in reader:
if len(incidents) >= FETCH_LIMIT:
break
extensive_log('[SplunkPyPreRelease] Incident data before parsing to notable: {}'.format(item))
notable_incident = Notable(data=item)
inc = notable_incident.to_incident()
extensive_log('[SplunkPyPreRelease] Incident data after parsing to notable: {}'.format(inc))
incident_id = create_incident_custom_id(inc)
if incident_id not in last_run_fetched_ids:
incident_ids_to_add[incident_id] = splunk_time_to_datetime(inc["occurred"]).strftime(SPLUNK_TIME_FORMAT)
# Save the occurrence time of each event in datetime format
incidents.append(inc)
notables.append(notable_incident)
else:
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease - Dropped incident {} due to duplication.'.format(incident_id))
extensive_log('[SplunkPyPreRelease] Size of last_run_fetched_ids before adding new IDs: {}'.format(len(last_run_fetched_ids)))
for incident_id in incident_ids_to_add:
last_run_fetched_ids[incident_id] = incident_ids_to_add[incident_id]
# Adding the new incidents with the occurrence time.
extensive_log(
'[SplunkPyPreRelease] Size of last_run_fetched_ids after adding new IDs: {}'.format(len(last_run_fetched_ids)))
last_run_fetched_ids = remove_old_incident_ids(last_run_fetched_ids, occured_start_time)
extensive_log('[SplunkPyPreRelease] Size of last_run_fetched_ids after '
'removing old IDs: {}'.format(len(last_run_fetched_ids)))
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease - incidents fetched on last run = {}'.format(last_run_fetched_ids))
debug_message = 'SplunkPyPreRelease - total number of incidents found: from {}\n to {}\n with the ' \
'query: {} is: {}.'.format(last_run_time, now, fetch_query, len(incidents))
extensive_log(debug_message)
if not enrich_notables:
demisto.incidents(incidents)
else:
cache_object.not_yet_submitted_notables += notables
if DUMMY not in last_run_data:
# we add dummy data to the last run to differentiate between the fetch-incidents triggered to the
# fetch-incidents running as part of "Pull from instance" in Classification & Mapping, as we don't
# want to add data to the integration context (which will ruin the logic of the cache object)
last_run_data.update({DUMMY: DUMMY})
if len(incidents) == 0:
next_run = get_next_start_time(last_run_time, now, False)
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease - Next run time with no incidents found: {}.'.format(next_run))
new_last_run = {
'time': next_run,
'found_incidents_ids': last_run_fetched_ids,
}
else:
if len(last_run_fetched_ids) + FETCH_LIMIT >= batch_size:
# If we almost saw all the events return from the query, we should increase the batch size to reach
# the new events.
batch_size += default_batch_size
latest_incident_fetched_time = get_latest_incident_time(incidents)
next_run = get_next_start_time(latest_incident_fetched_time, now, were_new_incidents_found=True)
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease - '
'Next run time with too many incidents: {}. Batch size: {}'.format(next_run, batch_size))
new_last_run = {
'time': next_run,
'found_incidents_ids': last_run_fetched_ids,
'batch_size': batch_size
}
last_run_data.update(new_last_run)
demisto.setLastRun(last_run_data)
| def fetch_notables(service, cache_object=None, enrich_notables=False):
last_run_data = demisto.getLastRun()
if not last_run_data:
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease first run')
last_run_time = last_run_data and 'time' in last_run_data and last_run_data['time']
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease last run is:\n {}'.format(last_run_data))
dem_params = demisto.params()
occurred_look_behind = int(dem_params.get('occurrence_look_behind', 15) or 15)
extensive_log('[SplunkPyPreRelease] occurrence look behind is: {}'.format(occurred_look_behind))
occured_start_time, now = get_fetch_start_times(dem_params, service, last_run_time, occurred_look_behind)
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease last run time: {}, now: {}'.format(last_run_time, now))
default_batch_size = int(dem_params.get('batch_size', 200))
batch_size = last_run_data.get('batch_size') or default_batch_size
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease batch size is : {}'.format(batch_size))
kwargs_oneshot = build_fetch_kwargs(dem_params, batch_size, occured_start_time, now)
fetch_query = build_fetch_query(dem_params)
oneshotsearch_results = service.jobs.oneshot(fetch_query, **kwargs_oneshot) # type: ignore
reader = results.ResultsReader(oneshotsearch_results)
last_run_fetched_ids = last_run_data.get('found_incidents_ids', {})
incidents = [] # type: List[Dict]
notables = []
incident_ids_to_add = {}
for item in reader:
if len(incidents) == FETCH_LIMIT:
break
extensive_log('[SplunkPyPreRelease] Incident data before parsing to notable: {}'.format(item))
notable_incident = Notable(data=item)
inc = notable_incident.to_incident()
extensive_log('[SplunkPyPreRelease] Incident data after parsing to notable: {}'.format(inc))
incident_id = create_incident_custom_id(inc)
if incident_id not in last_run_fetched_ids:
incident_ids_to_add[incident_id] = splunk_time_to_datetime(inc["occurred"]).strftime(SPLUNK_TIME_FORMAT)
# Save the occurrence time of each event in datetime format
incidents.append(inc)
notables.append(notable_incident)
else:
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease - Dropped incident {} due to duplication.'.format(incident_id))
extensive_log('[SplunkPyPreRelease] Size of last_run_fetched_ids before adding new IDs: {}'.format(len(last_run_fetched_ids)))
for incident_id in incident_ids_to_add:
last_run_fetched_ids[incident_id] = incident_ids_to_add[incident_id]
# Adding the new incidents with the occurrence time.
extensive_log(
'[SplunkPyPreRelease] Size of last_run_fetched_ids after adding new IDs: {}'.format(len(last_run_fetched_ids)))
last_run_fetched_ids = remove_old_incident_ids(last_run_fetched_ids, occured_start_time)
extensive_log('[SplunkPyPreRelease] Size of last_run_fetched_ids after '
'removing old IDs: {}'.format(len(last_run_fetched_ids)))
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease - incidents fetched on last run = {}'.format(last_run_fetched_ids))
debug_message = 'SplunkPyPreRelease - total number of incidents found: from {}\n to {}\n with the ' \
'query: {} is: {}.'.format(last_run_time, now, fetch_query, len(incidents))
extensive_log(debug_message)
if not enrich_notables:
demisto.incidents(incidents)
else:
cache_object.not_yet_submitted_notables += notables
if DUMMY not in last_run_data:
# we add dummy data to the last run to differentiate between the fetch-incidents triggered to the
# fetch-incidents running as part of "Pull from instance" in Classification & Mapping, as we don't
# want to add data to the integration context (which will ruin the logic of the cache object)
last_run_data.update({DUMMY: DUMMY})
if len(incidents) == 0:
next_run = get_next_start_time(last_run_time, now, False)
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease - Next run time with no incidents found: {}.'.format(next_run))
new_last_run = {
'time': next_run,
'found_incidents_ids': last_run_fetched_ids,
}
else:
if len(last_run_fetched_ids) + FETCH_LIMIT >= batch_size:
# If we almost saw all the events return from the query, we should increase the batch size to reach
# the new events.
batch_size += default_batch_size
latest_incident_fetched_time = get_latest_incident_time(incidents)
next_run = get_next_start_time(latest_incident_fetched_time, now, were_new_incidents_found=True)
extensive_log('[SplunkPyPreRelease] SplunkPyPreRelease - '
'Next run time with too many incidents: {}. Batch size: {}'.format(next_run, batch_size))
new_last_run = {
'time': next_run,
'found_incidents_ids': last_run_fetched_ids,
'batch_size': batch_size
}
last_run_data.update(new_last_run)
demisto.setLastRun(last_run_data)
|
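
The core of the fetch loop above is a cap-and-dedup pattern: stop once FETCH_LIMIT incidents are collected and skip IDs already seen in the previous run. A stripped-down sketch of that pattern follows; FETCH_LIMIT, make_id and the plain-dict bookkeeping are stand-ins for the module-level constant and helpers such as create_incident_custom_id.

FETCH_LIMIT = 200  # stand-in for the module-level constant

def dedup_incidents(items, last_run_fetched_ids, make_id):
    """Collect up to FETCH_LIMIT items whose IDs were not seen last run."""
    incidents, new_ids = [], {}
    for item in items:
        if len(incidents) == FETCH_LIMIT:
            break
        incident_id = make_id(item)
        if incident_id not in last_run_fetched_ids:
            new_ids[incident_id] = item.get("occurred")
            incidents.append(item)
    # merge newly seen IDs so the next run can skip them
    last_run_fetched_ids.update(new_ids)
    return incidents, last_run_fetched_ids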
5,825 | def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
equal_var=True, alternative="two-sided"):
r"""
T-test for means of two independent samples from descriptive statistics.
This is a test for the null hypothesis that two independent
samples have identical average (expected) values.
Parameters
----------
mean1 : array_like
The mean(s) of sample 1.
std1 : array_like
The unbiased estimate of the standard deviation(s) of sample 1 (i.e.
`ddof=1`).
nobs1 : array_like
The number(s) of observations of sample 1.
mean2 : array_like
The mean(s) of sample 2.
std2 : array_like
The unbiased estimate of the standard deviations(s) of sample 2 (i.e.
`ddof=1`).
nobs2 : array_like
The number(s) of observations of sample 2.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the means of the distributions are unequal.
* 'less': the mean of the first distribution is less than the
mean of the second distribution.
* 'greater': the mean of the first distribution is greater than the
mean of the second distribution.
.. versionadded:: 1.6.0
Returns
-------
statistic : float or array
The calculated t-statistics.
pvalue : float or array
The two-tailed p-value.
See Also
--------
scipy.stats.ttest_ind
Notes
-----
The statistic is calculated as ``(mean1 - mean2)/se``, where ``se`` is the
standard error. Therefore, the statistic will be positive when `mean1` is
greater than `mean2` and negative when `mean1` is less than `mean2`.
References
----------
.. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test
Examples
--------
Suppose we have the summary data for two samples, as follows (with the
Samples Variance being the unbiased estimate)::
Sample Sample
Size Mean Variance
Sample 1 13 15.0 87.5
Sample 2 11 12.0 39.0
Apply the t-test to this data (with the assumption that the population
variances are equal):
>>> from scipy.stats import ttest_ind_from_stats
>>> ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13,
... mean2=12.0, std2=np.sqrt(39.0), nobs2=11)
Ttest_indResult(statistic=0.9051358093310269, pvalue=0.3751996797581487)
For comparison, here is the data from which those summary statistics
were taken. With this data, we can compute the same result using
`scipy.stats.ttest_ind`:
>>> a = np.array([1, 3, 4, 6, 11, 13, 15, 19, 22, 24, 25, 26, 26])
>>> b = np.array([2, 4, 6, 9, 11, 13, 14, 15, 18, 19, 21])
>>> from scipy.stats import ttest_ind
>>> ttest_ind(a, b)
Ttest_indResult(statistic=0.905135809331027, pvalue=0.3751996797581486)
Suppose we instead have binary data and would like to apply a t-test to
compare the proportion of 1s in two independent groups::
Number of Sample Sample
Size ones Mean Variance
Sample 1 150 30 0.2 0.16
Sample 2 200 45 0.225 0.174375
The sample mean :math:`\hat{p}` is the proportion of ones in the sample
and the variance for a binary observation is estimated by
:math:`\hat{p}(1-\hat{p})`.
>>> ttest_ind_from_stats(mean1=0.2, std1=np.sqrt(0.16), nobs1=150,
... mean2=0.225, std2=np.sqrt(0.17437), nobs2=200)
Ttest_indResult(statistic=-0.564327545549774, pvalue=0.5728947691244874)
For comparison, we could compute the t statistic and p-value using
    arrays of 0s and 1s and `scipy.stats.ttest_ind`, as above.
>>> group1 = np.array([1]*30 + [0]*(150-30))
>>> group2 = np.array([1]*45 + [0]*(200-45))
>>> ttest_ind(group1, group2)
Ttest_indResult(statistic=-0.5627179589855622, pvalue=0.573989277115258)
"""
mean1 = np.asarray(mean1)
std1 = np.asarray(std1)
mean2 = np.asarray(mean2)
std2 = np.asarray(std2)
if equal_var:
df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
else:
df, denom = _unequal_var_ttest_denom(std1**2, nobs1,
std2**2, nobs2)
res = _ttest_ind_from_stats(mean1, mean2, denom, df, alternative)
return Ttest_indResult(*res)
| def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
equal_var=True, alternative="two-sided"):
r"""
T-test for means of two independent samples from descriptive statistics.
This is a test for the null hypothesis that two independent
samples have identical average (expected) values.
Parameters
----------
mean1 : array_like
The mean(s) of sample 1.
std1 : array_like
The unbiased estimate of the standard deviation(s) of sample 1 (i.e.
`ddof=1`).
nobs1 : array_like
The number(s) of observations of sample 1.
mean2 : array_like
The mean(s) of sample 2.
std2 : array_like
The unbiased estimate of the standard deviations(s) of sample 2 (i.e.
`ddof=1`).
nobs2 : array_like
The number(s) of observations of sample 2.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the means of the distributions are unequal.
* 'less': the mean of the first distribution is less than the
mean of the second distribution.
* 'greater': the mean of the first distribution is greater than the
mean of the second distribution.
.. versionadded:: 1.6.0
Returns
-------
statistic : float or array
The calculated t-statistics.
pvalue : float or array
The two-tailed p-value.
See Also
--------
scipy.stats.ttest_ind
Notes
-----
The statistic is calculated as ``(mean1 - mean2)/se``, where ``se`` is the
standard error. Therefore, the statistic will be positive when `mean1` is
greater than `mean2` and negative when `mean1` is less than `mean2`.
References
----------
.. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test
Examples
--------
Suppose we have the summary data for two samples, as follows (with the
Sample Variance being the unbiased estimate)::
Sample Sample
Size Mean Variance
Sample 1 13 15.0 87.5
Sample 2 11 12.0 39.0
Apply the t-test to this data (with the assumption that the population
variances are equal):
>>> from scipy.stats import ttest_ind_from_stats
>>> ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13,
... mean2=12.0, std2=np.sqrt(39.0), nobs2=11)
Ttest_indResult(statistic=0.9051358093310269, pvalue=0.3751996797581487)
For comparison, here is the data from which those summary statistics
were taken. With this data, we can compute the same result using
`scipy.stats.ttest_ind`:
>>> a = np.array([1, 3, 4, 6, 11, 13, 15, 19, 22, 24, 25, 26, 26])
>>> b = np.array([2, 4, 6, 9, 11, 13, 14, 15, 18, 19, 21])
>>> from scipy.stats import ttest_ind
>>> ttest_ind(a, b)
Ttest_indResult(statistic=0.905135809331027, pvalue=0.3751996797581486)
Suppose we instead have binary data and would like to apply a t-test to
compare the proportion of 1s in two independent groups::
Number of Sample Sample
Size ones Mean Variance
Sample 1 150 30 0.2 0.16
Sample 2 200 45 0.225 0.174375
The sample mean :math:`\hat{p}` is the proportion of ones in the sample
and the variance for a binary observation is estimated by
:math:`\hat{p}(1-\hat{p})`.
>>> ttest_ind_from_stats(mean1=0.2, std1=np.sqrt(0.16), nobs1=150,
... mean2=0.225, std2=np.sqrt(0.17437), nobs2=200)
Ttest_indResult(statistic=-0.564327545549774, pvalue=0.5728947691244874)
For comparison, we could compute the t statistic and p-value using
    arrays of 0s and 1s and `scipy.stats.ttest_ind`, as above.
>>> group1 = np.array([1]*30 + [0]*(150-30))
>>> group2 = np.array([1]*45 + [0]*(200-45))
>>> ttest_ind(group1, group2)
Ttest_indResult(statistic=-0.5627179589855622, pvalue=0.573989277115258)
"""
mean1 = np.asarray(mean1)
std1 = np.asarray(std1)
mean2 = np.asarray(mean2)
std2 = np.asarray(std2)
if equal_var:
df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
else:
df, denom = _unequal_var_ttest_denom(std1**2, nobs1,
std2**2, nobs2)
res = _ttest_ind_from_stats(mean1, mean2, denom, df, alternative)
return Ttest_indResult(*res)
|
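
As a cross-check of the example values quoted in the docstring, the equal-variance statistic can be reproduced by hand from the standard pooled-variance formula (assumed here to match _equal_var_ttest_denom, which is not shown in this row):

import numpy as np
from scipy.stats import ttest_ind_from_stats

mean1, var1, n1 = 15.0, 87.5, 13
mean2, var2, n2 = 12.0, 39.0, 11

# pooled variance and standard error for the equal_var=True branch
sp2 = ((n1 - 1) * var1 + (n2 - 1) * var2) / (n1 + n2 - 2)
se = np.sqrt(sp2 * (1 / n1 + 1 / n2))
t_manual = (mean1 - mean2) / se  # ~0.9051, df = n1 + n2 - 2 = 22

t_scipy, p_value = ttest_ind_from_stats(mean1, np.sqrt(var1), n1,
                                        mean2, np.sqrt(var2), n2)
assert np.isclose(t_manual, t_scipy)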
20,179 | def request_is_admin() -> bool:
if current_user.is_anonymous:
return False
if 'me' in request.path or current_user.sysadmin:
return True
return False
| def request_is_admin_or_self() -> bool:
if current_user.is_anonymous:
return False
if 'me' in request.path or current_user.sysadmin:
return True
return False
|
910 | def step_response(system, **kwargs):
r"""
Return the unit step response of a continuous-time system.
"""
x = Symbol("x")
expr = system.to_expr()/(system.var)
y = inverse_laplace_transform(expr, system.var, x)
return plot(y, (x, 0, 6), show=True, title="Unit Step Response", \
xlabel="Time (Seconds)", ylabel="Amplitude")
| def step_response(system, **kwargs):
r"""
Return the unit step response of a continuous-time system.
"""
x = Symbol("x")
expr = system.to_expr()/(system.var)
y = inverse_laplace_transform(expr, system.var, x)
return plot(y, (x, 0, 6), show=True, title="Unit Step Response", \
xlabel="Time (s)", ylabel="Amplitude")
|
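
For a concrete transfer function, the computation step_response performs reduces to one inverse Laplace transform of H(s)/s. A small standalone sketch with H(s) = 1/(s + 1); depending on the SymPy version the result may carry a Heaviside(t) factor:

from sympy import inverse_laplace_transform, symbols

s = symbols("s")
t = symbols("t", positive=True)

H = 1 / (s + 1)                             # example transfer function
y = inverse_laplace_transform(H / s, s, t)  # unit step response: 1 - exp(-t)
print(y)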
880 | def diophantine(eq, param=symbols("t", integer=True), syms=None,
permute=False):
"""
Simplify the solution procedure of diophantine equation ``eq`` by
converting it into a product of terms which should equal zero.
Explanation
===========
For example, when solving, `x^2 - y^2 = 0` this is treated as
`(x + y)(x - y) = 0` and `x + y = 0` and `x - y = 0` are solved
independently and combined. Each term is solved by calling
``diop_solve()``. (Although it is possible to call ``diop_solve()``
directly, one must be careful to pass an equation in the correct
form and to interpret the output correctly; ``diophantine()`` is
the public-facing function to use in general.)
Output of ``diophantine()`` is a set of tuples. The elements of the
tuple are the solutions for each variable in the equation and
are arranged according to the alphabetic ordering of the variables.
e.g. For an equation with two variables, `a` and `b`, the first
element of the tuple is the solution for `a` and the second for `b`.
Paramters
=========
``diophantine(eq, t, syms)``: Solve the diophantine
equation ``eq``.
``t`` is the optional parameter to be used by ``diop_solve()``.
``syms`` is an optional list of symbols which determines the
order of the elements in the returned tuple.
By default, only the base solution is returned. If ``permute`` is set to
True then permutations of the base solution and/or permutations of the
signs of the values will be returned when applicable.
Examples
========
>>> from sympy.solvers.diophantine import diophantine
>>> from sympy.abc import a, b
>>> eq = a**4 + b**4 - (2**4 + 3**4)
>>> diophantine(eq)
{(2, 3)}
>>> diophantine(eq, permute=True)
{(-3, -2), (-3, 2), (-2, -3), (-2, 3), (2, -3), (2, 3), (3, -2), (3, 2)}
Details
=======
``eq`` should be an expression which is assumed to be zero.
``t`` is the parameter to be used in the solution.
Examples
========
>>> from sympy.abc import x, y, z
>>> diophantine(x**2 - y**2)
{(t_0, -t_0), (t_0, t_0)}
>>> diophantine(x*(2*x + 3*y - z))
{(0, n1, n2), (t_0, t_1, 2*t_0 + 3*t_1)}
>>> diophantine(x**2 + 3*x*y + 4*x)
{(0, n1), (3*t_0 - 4, -t_0)}
See Also
========
diop_solve()
sympy.utilities.iterables.permute_signs
sympy.utilities.iterables.signed_permutations
"""
from sympy.utilities.iterables import (
subsets, permute_signs, signed_permutations)
eq = _sympify(eq)
if isinstance(eq, Eq):
eq = eq.lhs - eq.rhs
try:
var = list(eq.expand(force=True).free_symbols)
var.sort(key=default_sort_key)
if syms:
if not is_sequence(syms):
raise TypeError(
'syms should be given as a sequence, e.g. a list')
syms = [i for i in syms if i in var]
if syms != var:
dict_sym_index = dict(zip(syms, range(len(syms))))
return {tuple([t[dict_sym_index[i]] for i in var])
for t in diophantine(eq, param, permute=permute)}
n, d = eq.as_numer_denom()
if n.is_number:
return set()
if not d.is_number:
dsol = diophantine(d)
good = diophantine(n) - dsol
return {s for s in good if _mexpand(d.subs(zip(var, s)))}
else:
eq = n
eq = factor_terms(eq)
assert not eq.is_number
eq = eq.as_independent(*var, as_Add=False)[1]
p = Poly(eq)
assert not any(g.is_number for g in p.gens)
eq = p.as_expr()
assert eq.is_polynomial()
except (GeneratorsNeeded, AssertionError):
raise TypeError(filldedent('''
Equation should be a polynomial with Rational coefficients.'''))
# permute only sign
do_permute_signs = False
# permute sign and values
do_permute_signs_var = False
# permute few signs
permute_few_signs = False
try:
# if we know that factoring should not be attempted, skip
# the factoring step
v, c, t = classify_diop(eq)
# check for permute sign
if permute:
len_var = len(v)
permute_signs_for = [
GeneralSumOfSquares.name,
GeneralSumOfEvenPowers.name]
permute_signs_check = [
HomogeneousTernaryQuadratic.name,
HomogeneousTernaryQuadraticNormal.name,
BinaryQuadratic.name]
if t in permute_signs_for:
do_permute_signs_var = True
elif t in permute_signs_check:
# if all the variables in eq have even powers
# then do_permute_sign = True
if len_var == 3:
var_mul = list(subsets(v, 2))
# here var_mul is like [(x, y), (x, z), (y, z)]
xy_coeff = True
x_coeff = True
var1_mul_var2 = map(lambda a: a[0]*a[1], var_mul)
# if coeff(y*z), coeff(y*x), coeff(x*z) is not 0 then
# `xy_coeff` => True and do_permute_sign => False.
# Means no permuted solution.
for v1_mul_v2 in var1_mul_var2:
try:
coeff = c[v1_mul_v2]
except KeyError:
coeff = 0
xy_coeff = bool(xy_coeff) and bool(coeff)
var_mul = list(subsets(v, 1))
# here var_mul is like [(x,), (y, )]
for v1 in var_mul:
try:
coeff = c[v1[0]]
except KeyError:
coeff = 0
x_coeff = bool(x_coeff) and bool(coeff)
if not any([xy_coeff, x_coeff]):
# means only x**2, y**2, z**2, const is present
do_permute_signs = True
elif not x_coeff:
permute_few_signs = True
elif len_var == 2:
var_mul = list(subsets(v, 2))
# here var_mul is like [(x, y)]
xy_coeff = True
x_coeff = True
var1_mul_var2 = map(lambda x: x[0]*x[1], var_mul)
for v1_mul_v2 in var1_mul_var2:
try:
coeff = c[v1_mul_v2]
except KeyError:
coeff = 0
xy_coeff = bool(xy_coeff) and bool(coeff)
var_mul = list(subsets(v, 1))
# here var_mul is like [(x,), (y, )]
for v1 in var_mul:
try:
coeff = c[v1[0]]
except KeyError:
coeff = 0
x_coeff = bool(x_coeff) and bool(coeff)
if not any([xy_coeff, x_coeff]):
# means only x**2, y**2 and const is present
# so we can get more soln by permuting this soln.
do_permute_signs = True
elif not x_coeff:
# when coeff(x), coeff(y) is not present then signs of
# x, y can be permuted such that their sign are same
# as sign of x*y.
                        # e.g. 1. (x_val,y_val)=> (x_val,y_val), (-x_val,-y_val)
                        # 2. (-x_val, y_val)=> (-x_val,y_val), (x_val,-y_val)
permute_few_signs = True
if t == 'general_sum_of_squares':
# trying to factor such expressions will sometimes hang
terms = [(eq, 1)]
else:
raise TypeError
except (TypeError, NotImplementedError):
fl = factor_list(eq)
if fl[0].is_Rational and fl[0] != 1:
return diophantine(eq/fl[0], param=param, syms=syms, permute=permute)
terms = fl[1]
sols = set([])
for term in terms:
base, _ = term
var_t, _, eq_type = classify_diop(base, _dict=False)
_, base = signsimp(base, evaluate=False).as_coeff_Mul()
solution = diop_solve(base, param)
if eq_type in [
Linear.name,
HomogeneousTernaryQuadratic.name,
HomogeneousTernaryQuadraticNormal.name,
GeneralPythagorean.name]:
sols.add(merge_solution(var, var_t, solution))
elif eq_type in [
BinaryQuadratic.name,
GeneralSumOfSquares.name,
GeneralSumOfEvenPowers.name,
Univariate.name]:
for sol in solution:
sols.add(merge_solution(var, var_t, sol))
else:
raise NotImplementedError('unhandled type: %s' % eq_type)
# remove null merge results
if () in sols:
sols.remove(())
null = tuple([0]*len(var))
# if there is no solution, return trivial solution
if not sols and eq.subs(zip(var, null)).is_zero:
sols.add(null)
final_soln = set([])
for sol in sols:
if all(_is_int(s) for s in sol):
if do_permute_signs:
permuted_sign = set(permute_signs(sol))
final_soln.update(permuted_sign)
elif permute_few_signs:
lst = list(permute_signs(sol))
lst = list(filter(lambda x: x[0]*x[1] == sol[1]*sol[0], lst))
permuted_sign = set(lst)
final_soln.update(permuted_sign)
elif do_permute_signs_var:
permuted_sign_var = set(signed_permutations(sol))
final_soln.update(permuted_sign_var)
else:
final_soln.add(sol)
else:
final_soln.add(sol)
return final_soln
| def diophantine(eq, param=symbols("t", integer=True), syms=None,
permute=False):
"""
Simplify the solution procedure of diophantine equation ``eq`` by
converting it into a product of terms which should equal zero.
Explanation
===========
For example, when solving, `x^2 - y^2 = 0` this is treated as
`(x + y)(x - y) = 0` and `x + y = 0` and `x - y = 0` are solved
independently and combined. Each term is solved by calling
``diop_solve()``. (Although it is possible to call ``diop_solve()``
directly, one must be careful to pass an equation in the correct
form and to interpret the output correctly; ``diophantine()`` is
the public-facing function to use in general.)
Output of ``diophantine()`` is a set of tuples. The elements of the
tuple are the solutions for each variable in the equation and
are arranged according to the alphabetic ordering of the variables.
e.g. For an equation with two variables, `a` and `b`, the first
element of the tuple is the solution for `a` and the second for `b`.
Parameters
=========
``diophantine(eq, t, syms)``: Solve the diophantine
equation ``eq``.
``t`` is the optional parameter to be used by ``diop_solve()``.
``syms`` is an optional list of symbols which determines the
order of the elements in the returned tuple.
By default, only the base solution is returned. If ``permute`` is set to
True then permutations of the base solution and/or permutations of the
signs of the values will be returned when applicable.
Examples
========
>>> from sympy.solvers.diophantine import diophantine
>>> from sympy.abc import a, b
>>> eq = a**4 + b**4 - (2**4 + 3**4)
>>> diophantine(eq)
{(2, 3)}
>>> diophantine(eq, permute=True)
{(-3, -2), (-3, 2), (-2, -3), (-2, 3), (2, -3), (2, 3), (3, -2), (3, 2)}
Details
=======
``eq`` should be an expression which is assumed to be zero.
``t`` is the parameter to be used in the solution.
Examples
========
>>> from sympy.abc import x, y, z
>>> diophantine(x**2 - y**2)
{(t_0, -t_0), (t_0, t_0)}
>>> diophantine(x*(2*x + 3*y - z))
{(0, n1, n2), (t_0, t_1, 2*t_0 + 3*t_1)}
>>> diophantine(x**2 + 3*x*y + 4*x)
{(0, n1), (3*t_0 - 4, -t_0)}
See Also
========
diop_solve()
sympy.utilities.iterables.permute_signs
sympy.utilities.iterables.signed_permutations
"""
from sympy.utilities.iterables import (
subsets, permute_signs, signed_permutations)
eq = _sympify(eq)
if isinstance(eq, Eq):
eq = eq.lhs - eq.rhs
try:
var = list(eq.expand(force=True).free_symbols)
var.sort(key=default_sort_key)
if syms:
if not is_sequence(syms):
raise TypeError(
'syms should be given as a sequence, e.g. a list')
syms = [i for i in syms if i in var]
if syms != var:
dict_sym_index = dict(zip(syms, range(len(syms))))
return {tuple([t[dict_sym_index[i]] for i in var])
for t in diophantine(eq, param, permute=permute)}
n, d = eq.as_numer_denom()
if n.is_number:
return set()
if not d.is_number:
dsol = diophantine(d)
good = diophantine(n) - dsol
return {s for s in good if _mexpand(d.subs(zip(var, s)))}
else:
eq = n
eq = factor_terms(eq)
assert not eq.is_number
eq = eq.as_independent(*var, as_Add=False)[1]
p = Poly(eq)
assert not any(g.is_number for g in p.gens)
eq = p.as_expr()
assert eq.is_polynomial()
except (GeneratorsNeeded, AssertionError):
raise TypeError(filldedent('''
Equation should be a polynomial with Rational coefficients.'''))
# permute only sign
do_permute_signs = False
# permute sign and values
do_permute_signs_var = False
# permute few signs
permute_few_signs = False
try:
# if we know that factoring should not be attempted, skip
# the factoring step
v, c, t = classify_diop(eq)
# check for permute sign
if permute:
len_var = len(v)
permute_signs_for = [
GeneralSumOfSquares.name,
GeneralSumOfEvenPowers.name]
permute_signs_check = [
HomogeneousTernaryQuadratic.name,
HomogeneousTernaryQuadraticNormal.name,
BinaryQuadratic.name]
if t in permute_signs_for:
do_permute_signs_var = True
elif t in permute_signs_check:
# if all the variables in eq have even powers
# then do_permute_sign = True
if len_var == 3:
var_mul = list(subsets(v, 2))
# here var_mul is like [(x, y), (x, z), (y, z)]
xy_coeff = True
x_coeff = True
var1_mul_var2 = map(lambda a: a[0]*a[1], var_mul)
# if coeff(y*z), coeff(y*x), coeff(x*z) is not 0 then
# `xy_coeff` => True and do_permute_sign => False.
# Means no permuted solution.
for v1_mul_v2 in var1_mul_var2:
try:
coeff = c[v1_mul_v2]
except KeyError:
coeff = 0
xy_coeff = bool(xy_coeff) and bool(coeff)
var_mul = list(subsets(v, 1))
# here var_mul is like [(x,), (y, )]
for v1 in var_mul:
try:
coeff = c[v1[0]]
except KeyError:
coeff = 0
x_coeff = bool(x_coeff) and bool(coeff)
if not any([xy_coeff, x_coeff]):
# means only x**2, y**2, z**2, const is present
do_permute_signs = True
elif not x_coeff:
permute_few_signs = True
elif len_var == 2:
var_mul = list(subsets(v, 2))
# here var_mul is like [(x, y)]
xy_coeff = True
x_coeff = True
var1_mul_var2 = map(lambda x: x[0]*x[1], var_mul)
for v1_mul_v2 in var1_mul_var2:
try:
coeff = c[v1_mul_v2]
except KeyError:
coeff = 0
xy_coeff = bool(xy_coeff) and bool(coeff)
var_mul = list(subsets(v, 1))
# here var_mul is like [(x,), (y, )]
for v1 in var_mul:
try:
coeff = c[v1[0]]
except KeyError:
coeff = 0
x_coeff = bool(x_coeff) and bool(coeff)
if not any([xy_coeff, x_coeff]):
# means only x**2, y**2 and const is present
# so we can get more soln by permuting this soln.
do_permute_signs = True
elif not x_coeff:
# when coeff(x), coeff(y) is not present then signs of
# x, y can be permuted such that their sign are same
# as sign of x*y.
                        # e.g. 1. (x_val,y_val)=> (x_val,y_val), (-x_val,-y_val)
                        # 2. (-x_val, y_val)=> (-x_val,y_val), (x_val,-y_val)
permute_few_signs = True
if t == 'general_sum_of_squares':
# trying to factor such expressions will sometimes hang
terms = [(eq, 1)]
else:
raise TypeError
except (TypeError, NotImplementedError):
fl = factor_list(eq)
if fl[0].is_Rational and fl[0] != 1:
return diophantine(eq/fl[0], param=param, syms=syms, permute=permute)
terms = fl[1]
sols = set([])
for term in terms:
base, _ = term
var_t, _, eq_type = classify_diop(base, _dict=False)
_, base = signsimp(base, evaluate=False).as_coeff_Mul()
solution = diop_solve(base, param)
if eq_type in [
Linear.name,
HomogeneousTernaryQuadratic.name,
HomogeneousTernaryQuadraticNormal.name,
GeneralPythagorean.name]:
sols.add(merge_solution(var, var_t, solution))
elif eq_type in [
BinaryQuadratic.name,
GeneralSumOfSquares.name,
GeneralSumOfEvenPowers.name,
Univariate.name]:
for sol in solution:
sols.add(merge_solution(var, var_t, sol))
else:
raise NotImplementedError('unhandled type: %s' % eq_type)
# remove null merge results
if () in sols:
sols.remove(())
null = tuple([0]*len(var))
# if there is no solution, return trivial solution
if not sols and eq.subs(zip(var, null)).is_zero:
sols.add(null)
final_soln = set([])
for sol in sols:
if all(_is_int(s) for s in sol):
if do_permute_signs:
permuted_sign = set(permute_signs(sol))
final_soln.update(permuted_sign)
elif permute_few_signs:
lst = list(permute_signs(sol))
lst = list(filter(lambda x: x[0]*x[1] == sol[1]*sol[0], lst))
permuted_sign = set(lst)
final_soln.update(permuted_sign)
elif do_permute_signs_var:
permuted_sign_var = set(signed_permutations(sol))
final_soln.update(permuted_sign_var)
else:
final_soln.add(sol)
else:
final_soln.add(sol)
return final_soln
|
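
A small illustration of the two iterables helpers that drive the permute behaviour described in the docstring (both are imported inside diophantine itself); the printed sets are what these helpers are expected to yield for the base solution (2, 3):

from sympy.utilities.iterables import permute_signs, signed_permutations

# sign flips only, used when just the signs of a base solution may vary
print(set(permute_signs((2, 3))))
# {(2, 3), (-2, 3), (2, -3), (-2, -3)}

# sign flips combined with reordering, used e.g. for sums of even powers
print(set(signed_permutations((2, 3))))
# {(2, 3), (-2, 3), (2, -3), (-2, -3), (3, 2), (-3, 2), (3, -2), (-3, -2)}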
46,376 | def configure_buildah_container(container_name, working_dir=None, env_vars=None,
labels=None, annotations=None,
user=None, cmd=None, entrypoint=None,
ports=None, volumes=None,
debug=False):
"""
apply metadata on the container so they get inherited in an image
:param container_name: name of the container to work in
:param working_dir: str, path to a working directory within container image
:param labels: dict with labels
:param annotations: dict with annotations
:param env_vars: dict with env vars
:param cmd: str, command to run by default in the container
:param entrypoint: str, entrypoint script to configure for the container
:param user: str, username or uid; the container gets invoked with this user by default
:param ports: list of str, ports to expose from container by default
    :param volumes: list of str; paths within the container which have data stored outside
of the container
:param debug: bool, make buildah print debug info
"""
config_args = []
if working_dir:
config_args += ["--workingdir", working_dir]
if env_vars:
for k, v in env_vars.items():
config_args += ["-e", "%s=%s" % (k, v)]
if labels:
for k, v in labels.items():
config_args += ["-l", "%s=%s" % (k, v)]
if annotations:
for k, v in annotations.items():
config_args += ["--annotation", "%s=%s" % (k, v)]
if user:
if os.getuid() != 0:
print("Setting user in rootless mode will create issues")
if os.getuid() == 0:
config_args += ["--user", user]
if cmd:
config_args += ["--cmd", cmd]
if entrypoint:
config_args += ["--entrypoint", entrypoint]
if ports:
for p in ports:
config_args += ["-p", p]
if volumes:
for v in volumes:
config_args += ["-v", v]
if config_args:
buildah("config", config_args + [container_name], debug=debug)
return container_name
| def configure_buildah_container(container_name, working_dir=None, env_vars=None,
labels=None, annotations=None,
user=None, cmd=None, entrypoint=None,
ports=None, volumes=None,
debug=False):
"""
apply metadata on the container so they get inherited in an image
:param container_name: name of the container to work in
:param working_dir: str, path to a working directory within container image
:param labels: dict with labels
:param annotations: dict with annotations
:param env_vars: dict with env vars
:param cmd: str, command to run by default in the container
:param entrypoint: str, entrypoint script to configure for the container
:param user: str, username or uid; the container gets invoked with this user by default
:param ports: list of str, ports to expose from container by default
    :param volumes: list of str; paths within the container which have data stored outside
of the container
:param debug: bool, make buildah print debug info
"""
config_args = []
if working_dir:
config_args += ["--workingdir", working_dir]
if env_vars:
for k, v in env_vars.items():
config_args += ["-e", "%s=%s" % (k, v)]
if labels:
for k, v in labels.items():
config_args += ["-l", "%s=%s" % (k, v)]
if annotations:
for k, v in annotations.items():
config_args += ["--annotation", "%s=%s" % (k, v)]
if user:
if os.getuid() != 0:
print("Setting user in rootless mode will create issues")
else:
config_args += ["--user", user]
if cmd:
config_args += ["--cmd", cmd]
if entrypoint:
config_args += ["--entrypoint", entrypoint]
if ports:
for p in ports:
config_args += ["-p", p]
if volumes:
for v in volumes:
config_args += ["-v", v]
if config_args:
buildah("config", config_args + [container_name], debug=debug)
return container_name
|
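
A hypothetical usage sketch of configure_buildah_container; the container name and values are invented, and the buildah() helper it delegates to lives elsewhere in the module:

configure_buildah_container(
    "my-working-container",          # assumed name of an existing buildah working container
    working_dir="/app",
    env_vars={"PYTHONUNBUFFERED": "1"},
    labels={"maintainer": "example@example.com"},
    ports=["8080"],
    volumes=["/data"],
)
# roughly equivalent to running:
#   buildah config --workingdir /app -e PYTHONUNBUFFERED=1 \
#       -l maintainer=example@example.com -p 8080 -v /data my-working-container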
29,117 | def _save_audio_file(
raw_audio_file, filename, entity_type, entity_id, user_id):
"""Saves the given audio file in file system.
Args:
raw_audio_file: *. The raw audio data.
filename: str. The filename of the audio.
entity_type: str. The type of entity to which the audio belongs.
entity_id: str. The id of the entity to which the audio belongs.
user_id: str. The ID of the user saving the audio.
Raises:
Exception: If audio not supplied.
Exception: If the filename extension is unsupported.
"""
allowed_formats = list(feconf.ACCEPTED_AUDIO_EXTENSIONS.keys())
if not raw_audio_file:
raise Exception('No audio supplied')
dot_index = filename.rfind('.')
extension = filename[dot_index + 1:].lower()
if dot_index == -1 or dot_index == 0:
raise Exception(
'No filename extension: it should have '
'one of the following extensions: %s' % allowed_formats)
if extension not in feconf.ACCEPTED_AUDIO_EXTENSIONS:
raise Exception(
'Invalid filename extension: it should have '
'one of the following extensions: %s' % allowed_formats)
tempbuffer = python_utils.string_io()
tempbuffer.write(raw_audio_file)
tempbuffer.seek(0)
try:
# For every accepted extension, use the mutagen-specific
# constructor for that type. This will catch mismatched audio
# types e.g. uploading a flac file with an MP3 extension.
if extension == 'mp3':
audio = mp3.MP3(tempbuffer)
else:
audio = mutagen.File(tempbuffer)
except mutagen.MutagenError:
# The calls to mp3.MP3() versus mutagen.File() seem to behave
# differently upon not being able to interpret the audio.
# mp3.MP3() raises a MutagenError whereas mutagen.File()
# seems to return None. It's not clear if this is always
# the case. Occasionally, mutagen.File() also seems to
# raise a MutagenError.
raise Exception('Audio not recognized as a %s file' % extension)
tempbuffer.close()
if audio is None:
raise Exception('Audio not recognized as a %s file' % extension)
if audio.info.length > feconf.MAX_AUDIO_FILE_LENGTH_SEC:
raise Exception(
'Audio files must be under %s seconds in length. The uploaded '
'file is %.2f seconds long.' % (
feconf.MAX_AUDIO_FILE_LENGTH_SEC, audio.info.length))
if len(set(audio.mime).intersection(
set(feconf.ACCEPTED_AUDIO_EXTENSIONS[extension]))) == 0:
raise Exception(
'Although the filename extension indicates the file '
'is a %s file, it was not recognized as one. '
'Found mime types: %s' % (extension, audio.mime))
mimetype = audio.mime[0]
# For a strange, unknown reason, the audio variable must be
# deleted before opening cloud storage. If not, cloud storage
# throws a very mysterious error that entails a mutagen
# object being recursively passed around in app engine.
del audio
# Audio files are stored to the datastore in the dev env, and to GCS
# in production.
file_system_class = fs_services.get_entity_file_system_class()
fs = fs_domain.AbstractFileSystem(file_system_class(entity_type, entity_id))
fs.commit(user_id, 'audio/%s' % filename, raw_audio_file, mimetype=mimetype)
| def _save_audio_file(
raw_audio_file, filename, entity_type, entity_id, user_id):
"""Saves the given audio file in file system.
Args:
raw_audio_file: *. The raw audio data.
filename: str. The filename of the audio.
entity_type: str. The type of entity to which the audio belongs.
entity_id: str. The id of the entity to which the audio belongs.
user_id: str. The ID of the user saving the audio.
Raises:
Exception: If audio not supplied.
Exception: If the filename extension is unsupported.
"""
allowed_formats = list(feconf.ACCEPTED_AUDIO_EXTENSIONS.keys())
if not raw_audio_file:
raise Exception('No audio supplied')
dot_index = filename.rfind('.')
extension = filename[dot_index + 1:].lower()
if dot_index == -1 or dot_index == 0:
raise Exception(
'No filename extension provided. It should have '
'one of the following extensions: %s' % allowed_formats)
if extension not in feconf.ACCEPTED_AUDIO_EXTENSIONS:
raise Exception(
'Invalid filename extension: it should have '
'one of the following extensions: %s' % allowed_formats)
tempbuffer = python_utils.string_io()
tempbuffer.write(raw_audio_file)
tempbuffer.seek(0)
try:
# For every accepted extension, use the mutagen-specific
# constructor for that type. This will catch mismatched audio
# types e.g. uploading a flac file with an MP3 extension.
if extension == 'mp3':
audio = mp3.MP3(tempbuffer)
else:
audio = mutagen.File(tempbuffer)
except mutagen.MutagenError:
# The calls to mp3.MP3() versus mutagen.File() seem to behave
# differently upon not being able to interpret the audio.
# mp3.MP3() raises a MutagenError whereas mutagen.File()
# seems to return None. It's not clear if this is always
# the case. Occasionally, mutagen.File() also seems to
# raise a MutagenError.
raise Exception('Audio not recognized as a %s file' % extension)
tempbuffer.close()
if audio is None:
raise Exception('Audio not recognized as a %s file' % extension)
if audio.info.length > feconf.MAX_AUDIO_FILE_LENGTH_SEC:
raise Exception(
'Audio files must be under %s seconds in length. The uploaded '
'file is %.2f seconds long.' % (
feconf.MAX_AUDIO_FILE_LENGTH_SEC, audio.info.length))
if len(set(audio.mime).intersection(
set(feconf.ACCEPTED_AUDIO_EXTENSIONS[extension]))) == 0:
raise Exception(
'Although the filename extension indicates the file '
'is a %s file, it was not recognized as one. '
'Found mime types: %s' % (extension, audio.mime))
mimetype = audio.mime[0]
# For a strange, unknown reason, the audio variable must be
# deleted before opening cloud storage. If not, cloud storage
# throws a very mysterious error that entails a mutagen
# object being recursively passed around in app engine.
del audio
# Audio files are stored to the datastore in the dev env, and to GCS
# in production.
file_system_class = fs_services.get_entity_file_system_class()
fs = fs_domain.AbstractFileSystem(file_system_class(entity_type, entity_id))
fs.commit(user_id, 'audio/%s' % filename, raw_audio_file, mimetype=mimetype)
|
29,421 | def load(obj, env=None, silent=None, key=None):
"""Reads and loads in to "settings" a single key or all keys from vault
:param obj: the settings instance
:param env: settings env default='DYNACONF'
:param silent: if errors should raise
:param key: if defined load a single key, else load all in env
:return: None
"""
client = get_client(obj)
try:
if obj.VAULT_KV_VERSION_FOR_DYNACONF == 2:
dirs = client.secrets.kv.v2.list_secrets(
path=obj.VAULT_PATH_FOR_DYNACONF,
mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,
)["data"]["keys"]
else:
dirs = client.secrets.kv.v1.list_secrets(
path=obj.VAULT_PATH_FOR_DYNACONF,
mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,
)["data"]["keys"]
except InvalidPath:
# The given path is not a directory
dirs = []
    # First look for secrets in the environments-less store
if not obj.ENVIRONMENTS_FOR_DYNACONF:
        # By adding '', dynaconf will also read secrets from the environment-less
        # store, which are not written by `dynaconf write` to the Vault store
env_list = [obj.MAIN_ENV_FOR_DYNACONF.lower(), ""]
    # Finally, look for secrets in all the environments
else:
env_list = dirs + build_env_list(obj, env)
for env in env_list:
path = "/".join([obj.VAULT_PATH_FOR_DYNACONF, env])
try:
if obj.VAULT_KV_VERSION_FOR_DYNACONF == 2:
data = client.secrets.kv.v2.read_secret_version(
path, mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF
)
else:
data = client.secrets.kv.read_secret(
"data/" + path,
mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,
)
except InvalidPath:
# If the path doesn't exist, ignore it and set data to None
data = None
if data:
# There seems to be a data dict within a data dict,
# extract the inner data
data = data.get("data", {}).get("data", {})
try:
if data and key:
value = parse_conf_data(
data.get(key), tomlfy=True, box_settings=obj
)
if value:
obj.set(key, value)
elif data:
obj.update(data, loader_identifier=IDENTIFIER, tomlfy=True)
except Exception:
if silent:
return False
raise
| def load(obj, env=None, silent=None, key=None):
"""Reads and loads in to "settings" a single key or all keys from vault
:param obj: the settings instance
:param env: settings env default='DYNACONF'
:param silent: if errors should raise
:param key: if defined load a single key, else load all in env
:return: None
"""
client = get_client(obj)
try:
if obj.VAULT_KV_VERSION_FOR_DYNACONF == 2:
dirs = client.secrets.kv.v2.list_secrets(
path=obj.VAULT_PATH_FOR_DYNACONF,
mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,
)["data"]["keys"]
else:
dirs = client.secrets.kv.v1.list_secrets(
path=obj.VAULT_PATH_FOR_DYNACONF,
mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,
)["data"]["keys"]
except InvalidPath:
# The given path is not a directory
dirs = []
    # First look for secrets in the environment-less store
if not obj.ENVIRONMENTS_FOR_DYNACONF:
        # By adding '', dynaconf will also read secrets from the environment-less
        # store, which are not written by `dynaconf write` to the Vault store
env_list = [obj.MAIN_ENVIRONMENT_FOR_DYNACONF.lower(), ""]
    # Finally, look for secrets in all the environments
else:
env_list = dirs + build_env_list(obj, env)
for env in env_list:
path = "/".join([obj.VAULT_PATH_FOR_DYNACONF, env])
try:
if obj.VAULT_KV_VERSION_FOR_DYNACONF == 2:
data = client.secrets.kv.v2.read_secret_version(
path, mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF
)
else:
data = client.secrets.kv.read_secret(
"data/" + path,
mount_point=obj.VAULT_MOUNT_POINT_FOR_DYNACONF,
)
except InvalidPath:
# If the path doesn't exist, ignore it and set data to None
data = None
if data:
# There seems to be a data dict within a data dict,
# extract the inner data
data = data.get("data", {}).get("data", {})
try:
if data and key:
value = parse_conf_data(
data.get(key), tomlfy=True, box_settings=obj
)
if value:
obj.set(key, value)
elif data:
obj.update(data, loader_identifier=IDENTIFIER, tomlfy=True)
except Exception:
if silent:
return False
raise
|
34,836 | def _determine_model_name(
fixed_model_name: Optional[Text], training_type: TrainingType
) -> Text:
if fixed_model_name:
model_file = Path(fixed_model_name)
if not model_file.name.endswith(".tar.gz"):
return model_file.with_suffix(".tar.gz").name
return fixed_model_name
prefix = ""
if training_type in [TrainingType.CORE, TrainingType.NLU]:
prefix = f"{training_type.model_type}-"
time_format = "%Y%m%d-%H%M%S"
return f"{prefix}{time.strftime(time_format)}_{randomname.get_name()}.tar.gz"
| def _determine_model_name(
fixed_model_name: Optional[Text], training_type: TrainingType
) -> Text:
if fixed_model_name:
model_file = Path(fixed_model_name)
if not model_file.name.endswith(".tar.gz"):
return model_file.with_suffix(".tar.gz").name
return fixed_model_name
prefix = ""
if training_type in [TrainingType.CORE, TrainingType.NLU]:
prefix = f"{training_type.model_type}-"
time_format = "%Y%m%d-%H%M%S"
return f"{prefix}{time.strftime(time_format)}-{randomname.get_name()}.tar.gz"
|
40,118 | def create_meta_dict(fo: FileObject):
'''
Creates a dictionary with some fields from fo
'''
meta = {}
_add_firmware_only_fields(fo, meta)
_add_file_object_only_fields(fo, meta)
_add_general_information(fo, meta)
return meta
| def create_meta_dict(fo: FileObject):
'''
Creates a dictionary with the meta information contained in :class:`objects.file.FileObject` `fo`
'''
meta = {}
_add_firmware_only_fields(fo, meta)
_add_file_object_only_fields(fo, meta)
_add_general_information(fo, meta)
return meta
|
53,887 | def _get_valid_lonlats(vis: xr.DataArray) -> tuple[da.Array, da.Array]:
lons, lats = vis.attrs['area'].get_lonlats(chunks=vis.data.chunks)
lons = da.where(lons >= 1e30, np.nan, lons)
lats = da.where(lats >= 1e30, np.nan, lats)
return lons, lats
| def _get_valid_lonlats(data_arr: xr.DataArray) -> tuple[da.Array, da.Array]:
lons, lats = data_arr.attrs['area'].get_lonlats(chunks=data_arr.data.chunks)
lons = da.where(lons >= 1e30, np.nan, lons)
lats = da.where(lats >= 1e30, np.nan, lats)
return lons, lats
|
954 | def _solve_system(exprs, symbols, **flags):
if not exprs:
return False, []
if flags.pop('_split', True):
# Split the system into connected components
V = exprs
symsset = set(symbols)
exprsyms = {e: e.free_symbols & symsset for e in exprs}
E = []
sym_indices = {sym: i for i, sym in enumerate(symbols)}
for n, e1 in enumerate(exprs):
for e2 in exprs[:n]:
# Equations are connected if they share a symbol
if exprsyms[e1] & exprsyms[e2]:
E.append((e1, e2))
G = V, E
subexprs = connected_components(G)
if len(subexprs) > 1:
subsols = []
linear = True
for subexpr in subexprs:
subsyms = set()
for e in subexpr:
subsyms |= exprsyms[e]
subsyms = list(sorted(subsyms, key = lambda x: sym_indices[x]))
flags['_split'] = False # skip split step
_linear, subsol = _solve_system(subexpr, subsyms, **flags)
if linear:
linear = linear and _linear
if not isinstance(subsol, list):
subsol = [subsol]
subsols.append(subsol)
            # Full solution is the Cartesian product of the subsystems
sols = []
for soldicts in product(*subsols):
sols.append(dict(item for sd in soldicts
for item in sd.items()))
return linear, sols
polys = []
dens = set()
failed = []
result = False
linear = False
manual = flags.get('manual', False)
checkdens = check = flags.get('check', True)
for j, g in enumerate(exprs):
dens.update(_simple_dens(g, symbols))
i, d = _invert(g, *symbols)
g = d - i
g = g.as_numer_denom()[0]
if manual:
failed.append(g)
continue
poly = g.as_poly(*symbols, extension=True)
if poly is not None:
polys.append(poly)
else:
failed.append(g)
if not polys:
solved_syms = []
else:
if all(p.is_linear for p in polys):
n, m = len(polys), len(symbols)
matrix = zeros(n, m + 1)
for i, poly in enumerate(polys):
for monom, coeff in poly.terms():
try:
j = monom.index(1)
matrix[i, j] = coeff
except ValueError:
matrix[i, m] = -coeff
# returns a dictionary ({symbols: values}) or None
if flags.pop('particular', False):
result = minsolve_linear_system(matrix, *symbols, **flags)
else:
result = solve_linear_system(matrix, *symbols, **flags)
if failed:
if result:
solved_syms = list(result.keys())
else:
solved_syms = []
else:
linear = True
else:
if len(symbols) > len(polys):
free = set().union(*[p.free_symbols for p in polys])
free = list(ordered(free.intersection(symbols)))
got_s = set()
result = []
for syms in subsets(free, len(polys)):
try:
# returns [], None or list of tuples
res = solve_poly_system(polys, *syms)
if res:
for r in set(res):
skip = False
for r1 in r:
if got_s and any(ss in r1.free_symbols
for ss in got_s):
# sol depends on previously
# solved symbols: discard it
skip = True
if not skip:
got_s.update(syms)
result.append([dict(list(zip(syms, r)))])
except NotImplementedError:
pass
if got_s:
solved_syms = list(got_s)
else:
raise NotImplementedError('no valid subset found')
else:
try:
result = solve_poly_system(polys, *symbols)
if result:
solved_syms = symbols
# we don't know here if the symbols provided
# were given or not, so let solve resolve that.
# A list of dictionaries is going to always be
# returned from here.
result = [dict(list(zip(solved_syms, r))) for r in set(result)]
except NotImplementedError:
failed.extend([g.as_expr() for g in polys])
solved_syms = []
# convert None or [] to [{}]
result = result or [{}]
if failed:
# For each failed equation, see if we can solve for one of the
# remaining symbols from that equation. If so, we update the
# solution set and continue with the next failed equation,
# repeating until we are done or we get an equation that can't
# be solved.
def _ok_syms(e, sort=False):
rv = e.free_symbols & legal
# Solve first for symbols that have lower degree in the equation.
# Ideally we want to solve firstly for symbols that appear linearly
# with rational coefficients e.g. if e = x*y + z then we should
# solve for z first.
def key(sym):
ep = e.as_poly(sym)
if ep is None:
complexity = (S.Infinity, S.Infinity, S.Infinity)
else:
coeff_syms = ep.LC().free_symbols
complexity = (ep.degree(), len(coeff_syms & rv), len(coeff_syms))
return complexity + (default_sort_key(sym),)
if sort:
rv = sorted(rv, key=key)
return rv
legal = set(symbols) # what we are interested in
# sort so equation with the fewest potential symbols is first
u = Dummy() # used in solution checking
for eq in ordered(failed, lambda _: len(_ok_syms(_))):
newresult = []
bad_results = []
got_s = set()
hit = False
for r in result:
# update eq with everything that is known so far
eq2 = eq.subs(r)
# if check is True then we see if it satisfies this
# equation, otherwise we just accept it
if check and r:
b = checksol(u, u, eq2, minimal=True)
if b is not None:
# this solution is sufficient to know whether
# it is valid or not so we either accept or
# reject it, then continue
if b:
newresult.append(r)
else:
bad_results.append(r)
continue
# search for a symbol amongst those available that
# can be solved for
ok_syms = _ok_syms(eq2, sort=True)
if not ok_syms:
if r:
newresult.append(r)
break # skip as it's independent of desired symbols
for s in ok_syms:
try:
soln = _vsolve(eq2, s, **flags)
except NotImplementedError:
continue
# put each solution in r and append the now-expanded
# result in the new result list; use copy since the
# solution for s is being added in-place
for sol in soln:
if got_s and any(ss in sol.free_symbols for ss in got_s):
# sol depends on previously solved symbols: discard it
continue
rnew = r.copy()
for k, v in r.items():
rnew[k] = v.subs(s, sol)
# and add this new solution
rnew[s] = sol
# check that it is independent of previous solutions
iset = set(rnew.items())
for i in newresult:
if len(i) < len(iset) and not set(i.items()) - iset:
# this is a superset of a known solution that
# is smaller
break
else:
# keep it
newresult.append(rnew)
hit = True
got_s.add(s)
if not hit:
raise NotImplementedError('could not solve %s' % eq2)
else:
result = newresult
for b in bad_results:
if b in result:
result.remove(b)
if not result:
return False, []
# rely on linear system solvers to simplify
default_simplify = bool(failed) or not linear
if flags.get('simplify', default_simplify):
for r in result:
for k in r:
r[k] = simplify(r[k])
flags['simplify'] = False # don't need to do so in checksol now
if checkdens:
result = [r for r in result
if not any(checksol(d, r, **flags) for d in dens)]
if check and not linear:
result = [r for r in result
if not any(checksol(e, r, **flags) is False for e in exprs)]
result = [r for r in result if r]
return linear, result
| def _solve_system(exprs, symbols, **flags):
if not exprs:
return False, []
if flags.pop('_split', True):
# Split the system into connected components
V = exprs
symsset = set(symbols)
exprsyms = {e: e.free_symbols & symsset for e in exprs}
E = []
sym_indices = {sym: i for i, sym in enumerate(symbols)}
for n, e1 in enumerate(exprs):
for e2 in exprs[:n]:
# Equations are connected if they share a symbol
if exprsyms[e1] & exprsyms[e2]:
E.append((e1, e2))
G = V, E
subexprs = connected_components(G)
if len(subexprs) > 1:
subsols = []
linear = True
for subexpr in subexprs:
subsyms = set()
for e in subexpr:
subsyms |= exprsyms[e]
subsyms = list(sorted(subsyms, key = lambda x: sym_indices[x]))
flags['_split'] = False # skip split step
_linear, subsol = _solve_system(subexpr, subsyms, **flags)
if linear:
linear = linear and _linear
if not isinstance(subsol, list):
subsol = [subsol]
subsols.append(subsol)
            # Full solution is the Cartesian product of the subsystems
sols = []
for soldicts in product(*subsols):
sols.append(dict(item for sd in soldicts
for item in sd.items()))
return linear, sols
polys = []
dens = set()
failed = []
result = False
linear = False
manual = flags.get('manual', False)
checkdens = check = flags.get('check', True)
for j, g in enumerate(exprs):
dens.update(_simple_dens(g, symbols))
i, d = _invert(g, *symbols)
g = d - i
g = g.as_numer_denom()[0]
if manual:
failed.append(g)
continue
poly = g.as_poly(*symbols, extension=True)
if poly is not None:
polys.append(poly)
else:
failed.append(g)
if not polys:
solved_syms = []
else:
if all(p.is_linear for p in polys):
n, m = len(polys), len(symbols)
matrix = zeros(n, m + 1)
for i, poly in enumerate(polys):
for monom, coeff in poly.terms():
try:
j = monom.index(1)
matrix[i, j] = coeff
except ValueError:
matrix[i, m] = -coeff
# returns a dictionary ({symbols: values}) or None
if flags.pop('particular', False):
result = minsolve_linear_system(matrix, *symbols, **flags)
else:
result = solve_linear_system(matrix, *symbols, **flags)
result = [result] if result else []
if failed:
if result:
solved_syms = list(result.keys())
else:
solved_syms = []
else:
linear = True
else:
if len(symbols) > len(polys):
free = set().union(*[p.free_symbols for p in polys])
free = list(ordered(free.intersection(symbols)))
got_s = set()
result = []
for syms in subsets(free, len(polys)):
try:
# returns [], None or list of tuples
res = solve_poly_system(polys, *syms)
if res:
for r in set(res):
skip = False
for r1 in r:
if got_s and any(ss in r1.free_symbols
for ss in got_s):
# sol depends on previously
# solved symbols: discard it
skip = True
if not skip:
got_s.update(syms)
result.append([dict(list(zip(syms, r)))])
except NotImplementedError:
pass
if got_s:
solved_syms = list(got_s)
else:
raise NotImplementedError('no valid subset found')
else:
try:
result = solve_poly_system(polys, *symbols)
if result:
solved_syms = symbols
# we don't know here if the symbols provided
# were given or not, so let solve resolve that.
# A list of dictionaries is going to always be
# returned from here.
result = [dict(list(zip(solved_syms, r))) for r in set(result)]
except NotImplementedError:
failed.extend([g.as_expr() for g in polys])
solved_syms = []
# convert None or [] to [{}]
result = result or [{}]
if failed:
# For each failed equation, see if we can solve for one of the
# remaining symbols from that equation. If so, we update the
# solution set and continue with the next failed equation,
# repeating until we are done or we get an equation that can't
# be solved.
def _ok_syms(e, sort=False):
rv = e.free_symbols & legal
# Solve first for symbols that have lower degree in the equation.
# Ideally we want to solve firstly for symbols that appear linearly
# with rational coefficients e.g. if e = x*y + z then we should
# solve for z first.
def key(sym):
ep = e.as_poly(sym)
if ep is None:
complexity = (S.Infinity, S.Infinity, S.Infinity)
else:
coeff_syms = ep.LC().free_symbols
complexity = (ep.degree(), len(coeff_syms & rv), len(coeff_syms))
return complexity + (default_sort_key(sym),)
if sort:
rv = sorted(rv, key=key)
return rv
legal = set(symbols) # what we are interested in
# sort so equation with the fewest potential symbols is first
u = Dummy() # used in solution checking
for eq in ordered(failed, lambda _: len(_ok_syms(_))):
newresult = []
bad_results = []
got_s = set()
hit = False
for r in result:
# update eq with everything that is known so far
eq2 = eq.subs(r)
# if check is True then we see if it satisfies this
# equation, otherwise we just accept it
if check and r:
b = checksol(u, u, eq2, minimal=True)
if b is not None:
# this solution is sufficient to know whether
# it is valid or not so we either accept or
# reject it, then continue
if b:
newresult.append(r)
else:
bad_results.append(r)
continue
# search for a symbol amongst those available that
# can be solved for
ok_syms = _ok_syms(eq2, sort=True)
if not ok_syms:
if r:
newresult.append(r)
break # skip as it's independent of desired symbols
for s in ok_syms:
try:
soln = _vsolve(eq2, s, **flags)
except NotImplementedError:
continue
# put each solution in r and append the now-expanded
# result in the new result list; use copy since the
# solution for s is being added in-place
for sol in soln:
if got_s and any(ss in sol.free_symbols for ss in got_s):
# sol depends on previously solved symbols: discard it
continue
rnew = r.copy()
for k, v in r.items():
rnew[k] = v.subs(s, sol)
# and add this new solution
rnew[s] = sol
# check that it is independent of previous solutions
iset = set(rnew.items())
for i in newresult:
if len(i) < len(iset) and not set(i.items()) - iset:
# this is a superset of a known solution that
# is smaller
break
else:
# keep it
newresult.append(rnew)
hit = True
got_s.add(s)
if not hit:
raise NotImplementedError('could not solve %s' % eq2)
else:
result = newresult
for b in bad_results:
if b in result:
result.remove(b)
if not result:
return False, []
# rely on linear system solvers to simplify
default_simplify = bool(failed) or not linear
if flags.get('simplify', default_simplify):
for r in result:
for k in r:
r[k] = simplify(r[k])
flags['simplify'] = False # don't need to do so in checksol now
if checkdens:
result = [r for r in result
if not any(checksol(d, r, **flags) for d in dens)]
if check and not linear:
result = [r for r in result
if not any(checksol(e, r, **flags) is False for e in exprs)]
result = [r for r in result if r]
return linear, result
|
54,591 | def runner_asserter(inp, ref_subject, method, basis, tnm, scramble, frame):
qcprog = inp["call"]
qc_module_in = inp["qc_module"] # returns "<qcprog>"|"<qcprog>-<module>" # input-specified routing
qc_module_xptd = (
(qcprog + "-" + inp["xptd"]["qc_module"]) if inp.get("xptd", {}).get("qc_module", None) else None
) # expected routing
driver = inp["driver"]
reference = inp["reference"]
fcae = inp["fcae"]
if basis == "cfour-qz2p" and qcprog in ["gamess", "nwchem", "qchem"]:
pytest.skip(f"basis {basis} not available in {qcprog} library")
# <<< Molecule >>>
# 1. ref mol: `ref_subject` nicely oriented mol taken from standard_suite_ref.py
min_nonzero_coords = np.count_nonzero(np.abs(ref_subject.geometry) > 1.0e-10)
if scramble is None:
subject = ref_subject
ref2in_mill = compute_scramble(
len(subject.symbols), do_resort=False, do_shift=False, do_rotate=False, do_mirror=False
) # identity AlignmentMill
else:
subject, data = ref_subject.scramble(**scramble, do_test=False)
ref2in_mill = data["mill"]
# 2. input mol: `subject` now ready for `atin.molecule`. may have been scrambled away from nice ref orientation
# <<< Reference Values >>>
# ? precedence on these types -- not really applicable to qcng
mp2_type = inp.get("corl_type", inp["keywords"].get("mp2_type", "df")) # hard-code of read_options.cc MP2_TYPE
mp_type = inp.get("corl_type", inp["keywords"].get("mp_type", "conv")) # hard-code of read_options.cc MP_TYPE
ci_type = inp.get("corl_type", inp["keywords"].get("ci_type", "conv")) # hard-code of read_options.cc CI_TYPE
cc_type = inp.get("corl_type", inp["keywords"].get("cc_type", "conv")) # hard-code of read_options.cc CC_TYPE
corl_natural_values = {
"hf": "conv", # dummy to assure df/cd/conv scf_type refs available
"mp2": mp2_type,
"mp3": mp_type,
"mp4(sdq)": mp_type,
"mp4": mp_type,
"cisd": ci_type,
"qcisd": ci_type,
"qcisd(t)": ci_type,
"lccd": cc_type,
"lccsd": cc_type,
"ccd": cc_type,
"ccsd": cc_type,
"ccsd+t(ccsd)": cc_type,
"ccsd(t)": cc_type,
"a-ccsd(t)": cc_type,
"ccsdt-1a": cc_type,
"ccsdt-1b": cc_type,
"ccsdt-2": cc_type,
"ccsdt-3": cc_type,
"ccsdt": cc_type,
"ccsdt(q)": cc_type,
"ccsdtq": cc_type,
"pbe": "conv",
"b3lyp": "conv",
"b3lyp5": "conv",
}
corl_type = corl_natural_values[method]
natural_ref = {"conv": "pk", "df": "df", "cd": "cd"}
scf_type = inp["keywords"].get("scf_type", natural_ref[corl_type])
natural_values = {"pk": "pk", "direct": "pk", "df": "df", "mem_df": "df", "disk_df": "df", "cd": "cd"}
scf_type = natural_values[scf_type]
is_dft = method in ["pbe", "b3lyp", "b3lyp5"]
atol_e, rtol_e = 2.0e-7, 1.0e-16
atol_g, rtol_g = 5.0e-7, 2.0e-5
atol_h, rtol_h = 1.0e-5, 2.0e-5
if is_dft:
atol_g = 6.0e-6
chash = answer_hash(
system=subject.name,
basis=basis,
fcae=fcae,
scf_type=scf_type,
reference=reference,
corl_type=corl_type,
)
ref_block = std_suite[chash]
# check all calcs against conventional reference to looser tolerance
atol_conv = 1.0e-4
rtol_conv = 1.0e-3
chash_conv = answer_hash(
system=subject.name,
basis=basis,
fcae=fcae,
reference=reference,
corl_type="conv",
scf_type="pk",
)
ref_block_conv = std_suite[chash_conv]
# <<< Prepare Calculation and Call API >>>
atin = AtomicInput(
**{
"molecule": subject,
"driver": driver,
"model": {
"method": method,
"basis": inp.get("basis", "(auto)"),
},
"keywords": inp["keywords"],
}
)
local_options = {}
# local_options = {"nnodes": 1, "ncores": 1} # debug
if "error" in inp:
errtype, errmatch, reason = inp["error"]
with pytest.raises(errtype) as e:
qcng.compute(atin, qcprog, raise_error=True, return_dict=True, local_options=local_options)
assert re.search(errmatch, str(e.value)), f"Not found: {errtype} '{errmatch}' in {e.value}"
# _recorder(qcprog, qc_module_in, driver, method, reference, fcae, scf_type, corl_type, "error", "nyi: " + reason)
return
wfn = qcng.compute(atin, qcprog, raise_error=True, local_options=local_options)
print("WFN")
pp.pprint(wfn.dict())
qc_module_out = wfn.provenance.creator.lower()
if hasattr(wfn.provenance, "module"):
qc_module_out += "-" + wfn.provenance.module # returns "<qcprog>-<module>"
# assert 0, f"{qc_module_xptd=} {qc_module_in=} {qc_module_out=}" # debug
# 3. output mol: `wfn.molecule` after calc. orientation for nonscalar quantities may be different from `subject` if fix_=False
_, data = ref_subject.align(wfn.molecule, atoms_map=False, mols_align=True, verbose=0)
ref2out_mill = data["mill"]
if subject.fix_com and subject.fix_orientation:
with np.printoptions(precision=3, suppress=True):
assert compare_values(
subject.geometry, wfn.molecule.geometry, atol=5.0e-8
), f"coords: atres ({wfn.molecule.geometry}) != atin ({subject.geometry})" # 10 too much
assert (
ref_subject.fix_com
and ref_subject.fix_orientation
and subject.fix_com
and subject.fix_orientation
and wfn.molecule.fix_com
and wfn.molecule.fix_orientation
), f"fixed, so all T: {ref_subject.fix_com} {ref_subject.fix_orientation} {subject.fix_com} {subject.fix_orientation} {wfn.molecule.fix_com} {wfn.molecule.fix_orientation}"
ref_block = mill_qcvars(ref2in_mill, ref_block)
ref_block_conv = mill_qcvars(ref2in_mill, ref_block_conv)
else:
# this check assumes the qcprog will adjust an ugly Cartesian geometry into a pretty one (with more symmetry for computational efficiency).
# if qcprog doesn't have that behavior, it will need to be excused from this check.
with np.printoptions(precision=3, suppress=True):
assert compare(
min_nonzero_coords, np.count_nonzero(np.abs(wfn.molecule.geometry) > 1.0e-10), tnm + " !0 coords wfn"
), f"count !0 coords {wfn.molecule.geometry} != {min_nonzero_coords}"
assert (
(not ref_subject.fix_com)
and (not ref_subject.fix_orientation)
and (not subject.fix_com)
and (not subject.fix_orientation)
and (not wfn.molecule.fix_com)
and (not wfn.molecule.fix_orientation)
), f"free, so all F: {ref_subject.fix_com} {ref_subject.fix_orientation} {subject.fix_com} {subject.fix_orientation} {wfn.molecule.fix_com} {wfn.molecule.fix_orientation}"
ref_block = mill_qcvars(ref2out_mill, ref_block)
ref_block_conv = mill_qcvars(ref2out_mill, ref_block_conv)
# <<< Comparison Tests >>>
assert wfn.success is True
if qc_module_in != qcprog:
assert qc_module_out == qc_module_in, f"QC_MODULE used ({qc_module_out}) != requested ({qc_module_in})"
if qc_module_xptd:
assert qc_module_out == qc_module_xptd, f"QC_MODULE used ({qc_module_out}) != expected ({qc_module_xptd})"
# qcvars
contractual_args = [
qc_module_out,
driver,
reference,
method,
corl_type,
fcae,
]
asserter_args = [
[wfn.extras["qcvars"], wfn.properties],
ref_block,
[atol_e, atol_g, atol_h],
[rtol_e, rtol_g, rtol_h],
ref_block_conv,
atol_conv,
rtol_conv,
tnm,
]
def qcvar_assertions():
print("BLOCK", chash, contractual_args)
if method == "hf":
_asserter(asserter_args, contractual_args, contractual_hf)
elif method == "mp2":
_asserter(asserter_args, contractual_args, contractual_mp2)
elif method == "mp3":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_mp2p5)
_asserter(asserter_args, contractual_args, contractual_mp3)
elif method == "mp4(sdq)":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_mp2p5)
_asserter(asserter_args, contractual_args, contractual_mp3)
_asserter(asserter_args, contractual_args, contractual_mp4_prsdq_pr)
elif method == "mp4":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_mp2p5)
_asserter(asserter_args, contractual_args, contractual_mp3)
_asserter(asserter_args, contractual_args, contractual_mp4_prsdq_pr)
_asserter(asserter_args, contractual_args, contractual_mp4)
elif method == "cisd":
_asserter(asserter_args, contractual_args, contractual_cisd)
elif method == "qcisd":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_qcisd)
elif method == "qcisd(t)":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_qcisd)
_asserter(asserter_args, contractual_args, contractual_qcisd_prt_pr)
elif method == "lccd":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_lccd)
elif method == "lccsd":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_lccsd)
elif method == "ccd":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccd)
elif method == "ccsd":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsd)
elif method == "ccsd+t(ccsd)":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsd)
_asserter(asserter_args, contractual_args, contractual_ccsdpt_prccsd_pr)
elif method == "ccsd(t)":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsd)
_asserter(asserter_args, contractual_args, contractual_ccsd_prt_pr)
elif method == "a-ccsd(t)":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsd)
_asserter(asserter_args, contractual_args, contractual_accsd_prt_pr)
elif method == "ccsdt-1a":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdt1a)
elif method == "ccsdt-1b":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdt1b)
elif method == "ccsdt-2":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdt2)
elif method == "ccsdt-3":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdt3)
elif method == "ccsdt":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdt)
elif method == "ccsdt(q)":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdt)
_asserter(asserter_args, contractual_args, contractual_ccsdt_prq_pr)
elif method == "ccsdtq":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdtq)
# separations here for DFT appropriate when qcvars are labeled by functional
if "wrong" in inp:
errmatch, reason = inp["wrong"]
with pytest.raises(AssertionError) as e:
qcvar_assertions()
assert errmatch in str(e.value), f"Not found: AssertionError '{errmatch}' for '{reason}' in {e.value}"
# _recorder(qcprog, qc_module_out, driver, method, reference, fcae, scf_type, corl_type, "wrong", reason + f" First wrong at `{errmatch}`.")
pytest.xfail(reason)
# primary label checks
qcvar_assertions()
# aliases checks
asserter_args[0].pop() # checks not appropriate for properties
if is_dft:
_asserter(asserter_args, contractual_args, contractual_dft_current)
else:
_asserter(asserter_args, contractual_args, contractual_current)
# returns checks
if driver == "energy":
tf, errmsg = compare_values(
ref_block[f"{method.upper()} TOTAL ENERGY"],
wfn.return_result,
tnm + " wfn",
atol=atol_e,
rtol=rtol_e,
return_message=True,
quiet=True,
)
assert compare_values(
ref_block[f"{method.upper()} TOTAL ENERGY"], wfn.return_result, tnm + " wfn", atol=atol_e, rtol=rtol_e
), errmsg
elif driver == "gradient":
tf, errmsg = compare_values(
ref_block[f"{method.upper()} TOTAL GRADIENT"],
wfn.return_result,
tnm + " grad wfn",
atol=atol_g,
rtol=rtol_g,
return_message=True,
quiet=True,
)
assert compare_values(
ref_block[f"{method.upper()} TOTAL GRADIENT"],
wfn.return_result,
tnm + " grad wfn",
atol=atol_g,
rtol=rtol_g,
), errmsg
elif driver == "hessian":
tf, errmsg = compare_values(
ref_block[f"{method.upper()} TOTAL HESSIAN"],
wfn.return_result,
tnm + " hess wfn",
atol=atol_h,
rtol=rtol_h,
return_message=True,
quiet=True,
)
assert compare_values(
ref_block[f"{method.upper()} TOTAL HESSIAN"], wfn.return_result, tnm + " hess wfn", atol=atol_h, rtol=rtol_h
), errmsg
if driver in ["energy", "gradient", "hessian"]:
tf, errmsg = compare_values(
ref_block[f"{method.upper()} TOTAL ENERGY"],
wfn.properties.return_energy,
tnm + " prop",
atol=atol_e,
rtol=rtol_e,
return_message=True,
quiet=True,
)
assert compare_values(
ref_block[f"{method.upper()} TOTAL ENERGY"],
wfn.properties.return_energy,
tnm + " prop",
atol=atol_e,
rtol=rtol_e,
), errmsg
if driver in ["gradient"]: # , "hessian"]:
tf, errmsg = compare_values(
ref_block[f"{method.upper()} TOTAL GRADIENT"],
wfn.properties.return_gradient,
tnm + " grad prop",
atol=atol_g,
rtol=rtol_g,
return_message=True,
quiet=True,
)
assert compare_values(
ref_block[f"{method.upper()} TOTAL GRADIENT"],
wfn.properties.return_gradient,
tnm + " grad prop",
atol=atol_g,
rtol=rtol_g,
), errmsg
if driver == "hessian":
tf, errmsg = compare_values(
ref_block[f"{method.upper()} TOTAL HESSIAN"],
wfn.properties.return_hessian,
tnm + " hess prop",
atol=atol_h,
rtol=rtol_h,
return_message=True,
quiet=True,
)
assert compare_values(
ref_block[f"{method.upper()} TOTAL HESSIAN"],
wfn.properties.return_hessian,
tnm + " hess prop",
atol=atol_h,
rtol=rtol_h,
), errmsg
# generics
# yapf: disable
assert compare(ref_block["N BASIS FUNCTIONS"], wfn.properties.calcinfo_nbasis, tnm + " nbasis wfn"), f"nbasis {wfn.properties.calcinfo_nbasis} != {ref_block['N BASIS FUNCTIONS']}"
assert compare(ref_block["N MOLECULAR ORBITALS"], wfn.properties.calcinfo_nmo, tnm + " nmo wfn"), f"nmo {wfn.properties.calcinfo_nmo} != {ref_block['N MOLECULAR ORBITALS']}"
assert compare(ref_block["N ALPHA ELECTRONS"], wfn.properties.calcinfo_nalpha, tnm + " nalpha wfn"), f"nalpha {wfn.properties.calcinfo_nalpha} != {ref_block['N ALPHA ELECTRONS']}"
assert compare(ref_block["N BETA ELECTRONS"], wfn.properties.calcinfo_nbeta, tnm + " nbeta wfn"), f"nbeta {wfn.properties.calcinfo_nbeta} != {ref_block['N BETA ELECTRONS']}"
# yapf: enable
| def runner_asserter(inp, ref_subject, method, basis, tnm, scramble, frame):
qcprog = inp["call"]
qc_module_in = inp["qc_module"] # returns "<qcprog>"|"<qcprog>-<module>" # input-specified routing
qc_module_xptd = (
(qcprog + "-" + inp["xptd"]["qc_module"]) if inp.get("xptd", {}).get("qc_module", None) else None
) # expected routing
driver = inp["driver"]
reference = inp["reference"]
fcae = inp["fcae"]
if basis == "cfour-qz2p" and qcprog in ["gamess", "nwchem", "qchem"]:
pytest.skip(f"basis {basis} not available in {qcprog} library")
# <<< Molecule >>>
# 1. ref mol: `ref_subject` nicely oriented mol taken from standard_suite_ref.py
min_nonzero_coords = np.count_nonzero(np.abs(ref_subject.geometry) > 1.0e-10)
if scramble is None:
subject = ref_subject
ref2in_mill = compute_scramble(
len(subject.symbols), do_resort=False, do_shift=False, do_rotate=False, do_mirror=False
) # identity AlignmentMill
else:
subject, data = ref_subject.scramble(**scramble, do_test=False)
ref2in_mill = data["mill"]
# 2. input mol: `subject` now ready for `atin.molecule`. may have been scrambled away from nice ref orientation
# <<< Reference Values >>>
# ? precedence on these types -- not really applicable to qcng
mp2_type = inp.get("corl_type", inp["keywords"].get("mp2_type", "df")) # hard-code of read_options.cc MP2_TYPE
mp_type = inp.get("corl_type", inp["keywords"].get("mp_type", "conv")) # hard-code of read_options.cc MP_TYPE
ci_type = inp.get("corl_type", inp["keywords"].get("ci_type", "conv")) # hard-code of read_options.cc CI_TYPE
cc_type = inp.get("corl_type", inp["keywords"].get("cc_type", "conv")) # hard-code of read_options.cc CC_TYPE
corl_natural_values = {
"hf": "conv", # dummy to assure df/cd/conv scf_type refs available
"mp2": mp2_type,
"mp3": mp_type,
"mp4(sdq)": mp_type,
"mp4": mp_type,
"cisd": ci_type,
"qcisd": ci_type,
"qcisd(t)": ci_type,
"lccd": cc_type,
"lccsd": cc_type,
"ccd": cc_type,
"ccsd": cc_type,
"ccsd+t(ccsd)": cc_type,
"ccsd(t)": cc_type,
"a-ccsd(t)": cc_type,
"ccsdt-1a": cc_type,
"ccsdt-1b": cc_type,
"ccsdt-2": cc_type,
"ccsdt-3": cc_type,
"ccsdt": cc_type,
"ccsdt(q)": cc_type,
"ccsdtq": cc_type,
"pbe": "conv",
"b3lyp": "conv",
"b3lyp5": "conv",
}
corl_type = corl_natural_values[method]
natural_ref = {"conv": "pk", "df": "df", "cd": "cd"}
scf_type = inp["keywords"].get("scf_type", natural_ref[corl_type])
natural_values = {"pk": "pk", "direct": "pk", "df": "df", "mem_df": "df", "disk_df": "df", "cd": "cd"}
scf_type = natural_values[scf_type]
is_dft = method in ["pbe", "b3lyp", "b3lyp5"]
atol_e, rtol_e = 2.0e-7, 1.0e-16
atol_g, rtol_g = 5.0e-7, 2.0e-5
atol_h, rtol_h = 1.0e-5, 2.0e-5
if is_dft:
atol_g = 6.0e-6
chash = answer_hash(
system=subject.name,
basis=basis,
fcae=fcae,
scf_type=scf_type,
reference=reference,
corl_type=corl_type,
)
ref_block = std_suite[chash]
# check all calcs against conventional reference to looser tolerance
atol_conv = 1.0e-4
rtol_conv = 1.0e-3
chash_conv = answer_hash(
system=subject.name,
basis=basis,
fcae=fcae,
reference=reference,
corl_type="conv",
scf_type="pk",
)
ref_block_conv = std_suite[chash_conv]
# <<< Prepare Calculation and Call API >>>
atin = AtomicInput(
**{
"molecule": subject,
"driver": driver,
"model": {
"method": method,
"basis": inp.get("basis", "(auto)"),
},
"keywords": inp["keywords"],
}
)
# local_options = {} # debug with default (parallel)
local_options = {"nnodes": 1, "ncores": 1} # run serial by default
if "error" in inp:
errtype, errmatch, reason = inp["error"]
with pytest.raises(errtype) as e:
qcng.compute(atin, qcprog, raise_error=True, return_dict=True, local_options=local_options)
assert re.search(errmatch, str(e.value)), f"Not found: {errtype} '{errmatch}' in {e.value}"
# _recorder(qcprog, qc_module_in, driver, method, reference, fcae, scf_type, corl_type, "error", "nyi: " + reason)
return
wfn = qcng.compute(atin, qcprog, raise_error=True, local_options=local_options)
print("WFN")
pp.pprint(wfn.dict())
qc_module_out = wfn.provenance.creator.lower()
if hasattr(wfn.provenance, "module"):
qc_module_out += "-" + wfn.provenance.module # returns "<qcprog>-<module>"
# assert 0, f"{qc_module_xptd=} {qc_module_in=} {qc_module_out=}" # debug
# 3. output mol: `wfn.molecule` after calc. orientation for nonscalar quantities may be different from `subject` if fix_=False
_, data = ref_subject.align(wfn.molecule, atoms_map=False, mols_align=True, verbose=0)
ref2out_mill = data["mill"]
if subject.fix_com and subject.fix_orientation:
with np.printoptions(precision=3, suppress=True):
assert compare_values(
subject.geometry, wfn.molecule.geometry, atol=5.0e-8
), f"coords: atres ({wfn.molecule.geometry}) != atin ({subject.geometry})" # 10 too much
assert (
ref_subject.fix_com
and ref_subject.fix_orientation
and subject.fix_com
and subject.fix_orientation
and wfn.molecule.fix_com
and wfn.molecule.fix_orientation
), f"fixed, so all T: {ref_subject.fix_com} {ref_subject.fix_orientation} {subject.fix_com} {subject.fix_orientation} {wfn.molecule.fix_com} {wfn.molecule.fix_orientation}"
ref_block = mill_qcvars(ref2in_mill, ref_block)
ref_block_conv = mill_qcvars(ref2in_mill, ref_block_conv)
else:
# this check assumes the qcprog will adjust an ugly Cartesian geometry into a pretty one (with more symmetry for computational efficiency).
# if qcprog doesn't have that behavior, it will need to be excused from this check.
with np.printoptions(precision=3, suppress=True):
assert compare(
min_nonzero_coords, np.count_nonzero(np.abs(wfn.molecule.geometry) > 1.0e-10), tnm + " !0 coords wfn"
), f"count !0 coords {wfn.molecule.geometry} != {min_nonzero_coords}"
assert (
(not ref_subject.fix_com)
and (not ref_subject.fix_orientation)
and (not subject.fix_com)
and (not subject.fix_orientation)
and (not wfn.molecule.fix_com)
and (not wfn.molecule.fix_orientation)
), f"free, so all F: {ref_subject.fix_com} {ref_subject.fix_orientation} {subject.fix_com} {subject.fix_orientation} {wfn.molecule.fix_com} {wfn.molecule.fix_orientation}"
ref_block = mill_qcvars(ref2out_mill, ref_block)
ref_block_conv = mill_qcvars(ref2out_mill, ref_block_conv)
# <<< Comparison Tests >>>
assert wfn.success is True
if qc_module_in != qcprog:
assert qc_module_out == qc_module_in, f"QC_MODULE used ({qc_module_out}) != requested ({qc_module_in})"
if qc_module_xptd:
assert qc_module_out == qc_module_xptd, f"QC_MODULE used ({qc_module_out}) != expected ({qc_module_xptd})"
# qcvars
contractual_args = [
qc_module_out,
driver,
reference,
method,
corl_type,
fcae,
]
asserter_args = [
[wfn.extras["qcvars"], wfn.properties],
ref_block,
[atol_e, atol_g, atol_h],
[rtol_e, rtol_g, rtol_h],
ref_block_conv,
atol_conv,
rtol_conv,
tnm,
]
def qcvar_assertions():
print("BLOCK", chash, contractual_args)
if method == "hf":
_asserter(asserter_args, contractual_args, contractual_hf)
elif method == "mp2":
_asserter(asserter_args, contractual_args, contractual_mp2)
elif method == "mp3":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_mp2p5)
_asserter(asserter_args, contractual_args, contractual_mp3)
elif method == "mp4(sdq)":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_mp2p5)
_asserter(asserter_args, contractual_args, contractual_mp3)
_asserter(asserter_args, contractual_args, contractual_mp4_prsdq_pr)
elif method == "mp4":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_mp2p5)
_asserter(asserter_args, contractual_args, contractual_mp3)
_asserter(asserter_args, contractual_args, contractual_mp4_prsdq_pr)
_asserter(asserter_args, contractual_args, contractual_mp4)
elif method == "cisd":
_asserter(asserter_args, contractual_args, contractual_cisd)
elif method == "qcisd":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_qcisd)
elif method == "qcisd(t)":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_qcisd)
_asserter(asserter_args, contractual_args, contractual_qcisd_prt_pr)
elif method == "lccd":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_lccd)
elif method == "lccsd":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_lccsd)
elif method == "ccd":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccd)
elif method == "ccsd":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsd)
elif method == "ccsd+t(ccsd)":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsd)
_asserter(asserter_args, contractual_args, contractual_ccsdpt_prccsd_pr)
elif method == "ccsd(t)":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsd)
_asserter(asserter_args, contractual_args, contractual_ccsd_prt_pr)
elif method == "a-ccsd(t)":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsd)
_asserter(asserter_args, contractual_args, contractual_accsd_prt_pr)
elif method == "ccsdt-1a":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdt1a)
elif method == "ccsdt-1b":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdt1b)
elif method == "ccsdt-2":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdt2)
elif method == "ccsdt-3":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdt3)
elif method == "ccsdt":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdt)
elif method == "ccsdt(q)":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdt)
_asserter(asserter_args, contractual_args, contractual_ccsdt_prq_pr)
elif method == "ccsdtq":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdtq)
# separations here for DFT appropriate when qcvars are labeled by functional
if "wrong" in inp:
errmatch, reason = inp["wrong"]
with pytest.raises(AssertionError) as e:
qcvar_assertions()
assert errmatch in str(e.value), f"Not found: AssertionError '{errmatch}' for '{reason}' in {e.value}"
# _recorder(qcprog, qc_module_out, driver, method, reference, fcae, scf_type, corl_type, "wrong", reason + f" First wrong at `{errmatch}`.")
pytest.xfail(reason)
# primary label checks
qcvar_assertions()
# aliases checks
asserter_args[0].pop() # checks not appropriate for properties
if is_dft:
_asserter(asserter_args, contractual_args, contractual_dft_current)
else:
_asserter(asserter_args, contractual_args, contractual_current)
# returns checks
if driver == "energy":
tf, errmsg = compare_values(
ref_block[f"{method.upper()} TOTAL ENERGY"],
wfn.return_result,
tnm + " wfn",
atol=atol_e,
rtol=rtol_e,
return_message=True,
quiet=True,
)
assert compare_values(
ref_block[f"{method.upper()} TOTAL ENERGY"], wfn.return_result, tnm + " wfn", atol=atol_e, rtol=rtol_e
), errmsg
elif driver == "gradient":
tf, errmsg = compare_values(
ref_block[f"{method.upper()} TOTAL GRADIENT"],
wfn.return_result,
tnm + " grad wfn",
atol=atol_g,
rtol=rtol_g,
return_message=True,
quiet=True,
)
assert compare_values(
ref_block[f"{method.upper()} TOTAL GRADIENT"],
wfn.return_result,
tnm + " grad wfn",
atol=atol_g,
rtol=rtol_g,
), errmsg
elif driver == "hessian":
tf, errmsg = compare_values(
ref_block[f"{method.upper()} TOTAL HESSIAN"],
wfn.return_result,
tnm + " hess wfn",
atol=atol_h,
rtol=rtol_h,
return_message=True,
quiet=True,
)
assert compare_values(
ref_block[f"{method.upper()} TOTAL HESSIAN"], wfn.return_result, tnm + " hess wfn", atol=atol_h, rtol=rtol_h
), errmsg
if driver in ["energy", "gradient", "hessian"]:
tf, errmsg = compare_values(
ref_block[f"{method.upper()} TOTAL ENERGY"],
wfn.properties.return_energy,
tnm + " prop",
atol=atol_e,
rtol=rtol_e,
return_message=True,
quiet=True,
)
assert compare_values(
ref_block[f"{method.upper()} TOTAL ENERGY"],
wfn.properties.return_energy,
tnm + " prop",
atol=atol_e,
rtol=rtol_e,
), errmsg
if driver in ["gradient"]: # , "hessian"]:
tf, errmsg = compare_values(
ref_block[f"{method.upper()} TOTAL GRADIENT"],
wfn.properties.return_gradient,
tnm + " grad prop",
atol=atol_g,
rtol=rtol_g,
return_message=True,
quiet=True,
)
assert compare_values(
ref_block[f"{method.upper()} TOTAL GRADIENT"],
wfn.properties.return_gradient,
tnm + " grad prop",
atol=atol_g,
rtol=rtol_g,
), errmsg
if driver == "hessian":
tf, errmsg = compare_values(
ref_block[f"{method.upper()} TOTAL HESSIAN"],
wfn.properties.return_hessian,
tnm + " hess prop",
atol=atol_h,
rtol=rtol_h,
return_message=True,
quiet=True,
)
assert compare_values(
ref_block[f"{method.upper()} TOTAL HESSIAN"],
wfn.properties.return_hessian,
tnm + " hess prop",
atol=atol_h,
rtol=rtol_h,
), errmsg
# generics
# yapf: disable
assert compare(ref_block["N BASIS FUNCTIONS"], wfn.properties.calcinfo_nbasis, tnm + " nbasis wfn"), f"nbasis {wfn.properties.calcinfo_nbasis} != {ref_block['N BASIS FUNCTIONS']}"
assert compare(ref_block["N MOLECULAR ORBITALS"], wfn.properties.calcinfo_nmo, tnm + " nmo wfn"), f"nmo {wfn.properties.calcinfo_nmo} != {ref_block['N MOLECULAR ORBITALS']}"
assert compare(ref_block["N ALPHA ELECTRONS"], wfn.properties.calcinfo_nalpha, tnm + " nalpha wfn"), f"nalpha {wfn.properties.calcinfo_nalpha} != {ref_block['N ALPHA ELECTRONS']}"
assert compare(ref_block["N BETA ELECTRONS"], wfn.properties.calcinfo_nbeta, tnm + " nbeta wfn"), f"nbeta {wfn.properties.calcinfo_nbeta} != {ref_block['N BETA ELECTRONS']}"
# yapf: enable
|
46,576 | def invalid_cases():
rng = Random(1234)
for (name, (typ, offsets)) in PRESET_CONTAINERS.items():
# using mode_max_count, so that the extra byte cannot be picked up as normal list content
yield f'{name}_extra_byte', \
invalid_test_case(lambda: serialize(
container_case_fn(rng, RandomizationMode.mode_max_count, typ)) + b'\xff')
if len(offsets) != 0:
# Note: there are many more ways to have invalid offsets,
            # these are just examples to get clients started looking into hardening ssz.
for mode in [RandomizationMode.mode_random,
RandomizationMode.mode_nil_count,
RandomizationMode.mode_one_count,
RandomizationMode.mode_max_count]:
if len(offsets) != 0:
for index, offset_index in enumerate(offsets):
yield f'{name}_{mode.to_name()}_offset_{offset_index}_plus_one', \
invalid_test_case(lambda: mod_offset(
b=serialize(container_case_fn(rng, mode, typ)),
offset_index=offset_index,
change=lambda x: x + 1
))
yield f'{name}_{mode.to_name()}_offset_{offset_index}_zeroed', \
invalid_test_case(lambda: mod_offset(
b=serialize(container_case_fn(rng, mode, typ)),
offset_index=offset_index,
change=lambda x: 0
))
if index == 0:
yield f'{name}_{mode.to_name()}_first offset_{offset_index}_minus_one', \
invalid_test_case(lambda: mod_offset(
b=serialize(container_case_fn(rng, mode, typ)),
offset_index=offset_index,
change=lambda x: x - 1
))
if mode == RandomizationMode.mode_max_count:
serialized = serialize(container_case_fn(rng, mode, typ))
serialized = serialized + serialized[0:2]
yield f'{name}_{mode.to_name()}_last offset_{offset_index}_overflow', \
invalid_test_case(lambda: serialized)
if mode == RandomizationMode.mode_one_count:
serialized = serialize(container_case_fn(rng, mode, typ))
serialized = serialized + serialized[0:1]
yield f'{name}_{mode.to_name()}_last offset_{offset_index}_wrong_byte_length', \
invalid_test_case(lambda: serialized)
| def invalid_cases():
rng = Random(1234)
for (name, (typ, offsets)) in PRESET_CONTAINERS.items():
# using mode_max_count, so that the extra byte cannot be picked up as normal list content
yield f'{name}_extra_byte', \
invalid_test_case(lambda: serialize(
container_case_fn(rng, RandomizationMode.mode_max_count, typ)) + b'\xff')
if len(offsets) != 0:
# Note: there are many more ways to have invalid offsets,
            # these are just examples to get clients started looking into hardening ssz.
for mode in [RandomizationMode.mode_random,
RandomizationMode.mode_nil_count,
RandomizationMode.mode_one_count,
RandomizationMode.mode_max_count]:
if len(offsets) != 0:
for index, offset_index in enumerate(offsets):
yield f'{name}_{mode.to_name()}_offset_{offset_index}_plus_one', \
invalid_test_case(lambda: mod_offset(
b=serialize(container_case_fn(rng, mode, typ)),
offset_index=offset_index,
change=lambda x: x + 1
))
yield f'{name}_{mode.to_name()}_offset_{offset_index}_zeroed', \
invalid_test_case(lambda: mod_offset(
b=serialize(container_case_fn(rng, mode, typ)),
offset_index=offset_index,
change=lambda x: 0
))
if index == 0:
yield f'{name}_{mode.to_name()}_first offset_{offset_index}_minus_one', \
invalid_test_case(lambda: mod_offset(
b=serialize(container_case_fn(rng, mode, typ)),
offset_index=offset_index,
change=lambda x: x - 1
))
if mode == RandomizationMode.mode_max_count:
serialized = serialize(container_case_fn(rng, mode, typ))
serialized = serialized + serialized[0:2]
yield f'{name}_{mode.to_name()}_last_offset_{offset_index}_overflow', \
invalid_test_case(lambda: serialized)
if mode == RandomizationMode.mode_one_count:
serialized = serialize(container_case_fn(rng, mode, typ))
serialized = serialized + serialized[0:1]
yield f'{name}_{mode.to_name()}_last offset_{offset_index}_wrong_byte_length', \
invalid_test_case(lambda: serialized)
|
35,278 | def initialize_decomposition(tensor_slices, rank, init='random', svd='numpy_svd', random_state=None):
r"""Initiate a random PARAFAC2 decomposition given rank and tensor slices
Parameters
----------
tensor_slices : Iterable of ndarray
rank : int
init : {'random', 'svd', KruskalTensor, Parafac2Tensor}, optional
random_state : `np.random.RandomState`
Returns
-------
parafac2_tensor : Parafac2Tensor
List of initialized factors of the CP decomposition where element `i`
is of shape (tensor.shape[i], rank)
"""
context = tl.context(tensor_slices[0])
shapes = [m.shape for m in tensor_slices]
if init == 'random':
return random_parafac2(shapes, rank, full=False, random_state=random_state,
**context)
elif init == 'svd':
try:
svd_fun = tl.SVD_FUNS[svd]
except KeyError:
message = 'Got svd={}. However, for the current backend ({}), the possible choices are {}'.format(
svd, tl.get_backend(), tl.SVD_FUNS)
raise ValueError(message)
padded_tensor = _pad_by_zeros(tensor_slices)
A = T.ones((padded_tensor.shape[0], rank), **context)
unfolded_mode_2 = unfold(padded_tensor, 2)
if T.shape(unfolded_mode_2)[0] < rank:
raise ValueError("Cannot perform SVD init if rank ({}) is less than the number of columns in each tensor slice ({})".format(
rank, T.shape(unfolded_mode_2)[0]
))
C = svd_fun(unfold(padded_tensor, 2), n_eigenvecs=rank)[0]
B = T.eye(rank, **context)
projections = _compute_projections(tensor_slices, (A, B, C), svd_fun)
return Parafac2Tensor((None, [A, B, C], projections))
elif isinstance(init, (tuple, list, Parafac2Tensor, KruskalTensor)):
try:
decomposition = Parafac2Tensor.from_kruskaltensor(init, parafac2_tensor_ok=True)
except ValueError:
raise ValueError(
'If initialization method is a mapping, then it must '
'be possible to convert it to a Parafac2Tensor instance'
)
if decomposition.rank != rank:
raise ValueError('Cannot init with a decomposition of different rank')
return decomposition
raise ValueError('Initialization method "{}" not recognized'.format(init))
| def initialize_decomposition(tensor_slices, rank, init='random', svd='numpy_svd', random_state=None):
r"""Initiate a random PARAFAC2 decomposition given rank and tensor slices
Parameters
----------
tensor_slices : Iterable of ndarray
rank : int
init : {'random', 'svd', KruskalTensor, Parafac2Tensor}, optional
random_state : `np.random.RandomState`
Returns
-------
parafac2_tensor : Parafac2Tensor
List of initialized factors of the CP decomposition where element `i`
is of shape (tensor.shape[i], rank)
"""
context = tl.context(tensor_slices[0])
shapes = [m.shape for m in tensor_slices]
if init == 'random':
return random_parafac2(shapes, rank, full=False, random_state=random_state,
**context)
elif init == 'svd':
try:
svd_fun = tl.SVD_FUNS[svd]
except KeyError:
message = 'Got svd={}. However, for the current backend ({}), the possible choices are {}'.format(
svd, tl.get_backend(), tl.SVD_FUNS)
raise ValueError(message)
padded_tensor = _pad_by_zeros(tensor_slices)
A = T.ones((padded_tensor.shape[0], rank), **context)
unfolded_mode_2 = unfold(padded_tensor, 2)
if T.shape(unfolded_mode_2)[0] < rank:
raise ValueError("Cannot perform SVD init if rank ({}) is greater than the number of columns in each tensor slice ({})".format(
rank, T.shape(unfolded_mode_2)[0]
))
C = svd_fun(unfold(padded_tensor, 2), n_eigenvecs=rank)[0]
B = T.eye(rank, **context)
projections = _compute_projections(tensor_slices, (A, B, C), svd_fun)
return Parafac2Tensor((None, [A, B, C], projections))
elif isinstance(init, (tuple, list, Parafac2Tensor, KruskalTensor)):
try:
decomposition = Parafac2Tensor.from_kruskaltensor(init, parafac2_tensor_ok=True)
except ValueError:
raise ValueError(
'If initialization method is a mapping, then it must '
'be possible to convert it to a Parafac2Tensor instance'
)
if decomposition.rank != rank:
raise ValueError('Cannot init with a decomposition of different rank')
return decomposition
raise ValueError('Initialization method "{}" not recognized'.format(init))
|
1,685 | def _yield_transformer_checks(name, transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if not _safe_tags(transformer, "no_validation"):
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
yield check_transformer_general
# it's not important to preserve types with Clustering
if not isinstance(transformer, ClusterMixin):
yield check_estimators_preserve_dtypes
yield partial(check_transformer_general, readonly_memmap=True)
if not _safe_tags(transformer, "stateless"):
yield check_transformers_unfitted
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if name not in external_solver:
yield check_transformer_n_iter
| def _yield_transformer_checks(name, transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if not _safe_tags(transformer, "no_validation"):
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
yield check_transformer_general
# it's not possible to preserve dtypes in transform with clustering
if not isinstance(transformer, ClusterMixin):
yield check_estimators_preserve_dtypes
yield partial(check_transformer_general, readonly_memmap=True)
if not _safe_tags(transformer, "stateless"):
yield check_transformers_unfitted
# Dependent on external solvers and hence accessing the iter
# param is non-trivial.
external_solver = ['Isomap', 'KernelPCA', 'LocallyLinearEmbedding',
'RandomizedLasso', 'LogisticRegressionCV']
if name not in external_solver:
yield check_transformer_n_iter
|
57,881 | def create_member(args):
try:
client = aws_session(
region=args.get('region'),
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
accountDetails = []
account = {'AccountId': args.get('accountId'), 'Email': args.get('email')}
accountDetails.append(account)
response = client.create_members(
DetectorId=args.get('detectorId'),
AccountDetails=accountDetails
)
unprocessed_accounts = response.get('UnprocessedAccounts', [])
ec = {"AWS.GuardDuty.CreateMember.UnprocessedAccounts": unprocessed_accounts} \
if unprocessed_accounts else None
return create_entry('AWS GuardDuty Create Member', unprocessed_accounts, ec)
except Exception as e:
return raise_error(e)
| def create_member(args):
try:
client = aws_session(
region=args.get('region'),
roleArn=args.get('roleArn'),
roleSessionName=args.get('roleSessionName'),
roleSessionDuration=args.get('roleSessionDuration'),
)
accountDetails = []
account = {'AccountId': args.get('accountId'), 'Email': args.get('email')}
accountDetails.append(account)
response = client.create_members(
DetectorId=args.get('detectorId'),
AccountDetails= account_details
)
unprocessed_accounts = response.get('UnprocessedAccounts', [])
ec = {"AWS.GuardDuty.CreateMember.UnprocessedAccounts": unprocessed_accounts} \
if unprocessed_accounts else None
return create_entry('AWS GuardDuty Create Member', unprocessed_accounts, ec)
except Exception as e:
return raise_error(e)
|
6,383 | def _check_agent_availability(agent_email,scheduled_time):
appointemnts_at_scheduled_time = frappe.get_list('Appointment', filters={'scheduled_time':scheduled_time})
for appointment in appointemnts_at_scheduled_time:
if appointment._assign == agent_email:
return False
return True
| def _check_agent_availability(agent_email,scheduled_time):
appointments_at_scheduled_time = frappe.get_list('Appointment', filters={'scheduled_time': scheduled_time})
for appointment in appointemnts_at_scheduled_time:
if appointment._assign == agent_email:
return False
return True
|
57,342 | def _ffmpeg_call(infile, output, fmt='f32le', sample_rate=None, num_channels=1,
skip=None, max_len=None, cmd='ffmpeg',
rg_mode=None, rg_preamp_db=0.0):
"""
Create a sequence of strings indicating ffmpeg how to be called as well as
the parameters necessary to decode the given input (file) to the given
format, at the given offset and for the given length to the given output.
Parameters
----------
infile : str
Name of the audio sound file to decode.
output : str
Where to decode to.
fmt : {'f32le', 's16le'}, optional
Format of the samples:
- 'f32le' for float32, little-endian,
- 's16le' for signed 16-bit int, little-endian.
sample_rate : int, optional
Sample rate to re-sample the signal to (if set) [Hz].
num_channels : int, optional
Number of channels to reduce the signal to.
skip : float, optional
Number of seconds to skip at beginning of file.
max_len : float, optional
Maximum length in seconds to decode.
cmd : {'ffmpeg','avconv'}, optional
Decoding command (defaults to ffmpeg, alternatively supports avconv).
rg_mode : {'track','album', None}, optional
Specify the ReplayGain volume-levelling mode (None to disable).
rg_preamp_db : float, optional
Increase the volume by this many dB after applying ReplayGain tags.
Returns
-------
list
ffmpeg call.
Notes
-----
'avconv' rounds decoding positions and decodes in blocks of 4096 length
resulting in incorrect start and stop positions. Thus it should only be
used to decode complete files.
"""
# Note: avconv rounds decoding positions and decodes in blocks of 4096
# length resulting in incorrect start and stop positions
if cmd == 'avconv' and skip is not None and max_len is not None:
raise RuntimeError('avconv has a bug, which results in wrong audio '
'slices! Decode the audio files to .wav first or '
'use ffmpeg.')
# input type handling
if isinstance(infile, Signal):
in_fmt = _ffmpeg_fmt(infile.dtype)
in_ac = str(int(infile.num_channels))
in_ar = str(int(infile.sample_rate))
infile = str("pipe:0")
else:
infile = str(infile)
# general options
call = [cmd, "-v", "quiet", "-y"]
# input options
if skip:
# use "%f" to avoid scientific float notation
call.extend(["-ss", "%f" % float(skip)])
# if we decode from STDIN, the format must be specified
if infile == "pipe:0":
call.extend(["-f", in_fmt, "-ac", in_ac, "-ar", in_ar])
call.extend(["-i", infile])
if rg_mode:
audio_filter = ("volume=replaygain=%s:replaygain_preamp=%.1f"
% (rg_mode, rg_preamp_db))
call.extend(["-af", audio_filter])
# output options
call.extend(["-f", str(fmt)])
if max_len:
# use "%f" to avoid scientific float notation
call.extend(["-t", "%f" % float(max_len)])
# output options
if num_channels:
call.extend(["-ac", str(int(num_channels))])
if sample_rate:
call.extend(["-ar", str(int(sample_rate))])
call.append(output)
return call
| def _ffmpeg_call(infile, output, fmt='f32le', sample_rate=None, num_channels=1,
skip=None, max_len=None, cmd='ffmpeg',
rg_mode=None, rg_preamp_db=0.0):
"""
Create a sequence of strings indicating ffmpeg how to be called as well as
the parameters necessary to decode the given input (file) to the given
format, at the given offset and for the given length to the given output.
Parameters
----------
infile : str
Name of the audio sound file to decode.
output : str
Where to decode to.
fmt : {'f32le', 's16le'}, optional
Format of the samples:
- 'f32le' for float32, little-endian,
- 's16le' for signed 16-bit int, little-endian.
sample_rate : int, optional
Sample rate to re-sample the signal to (if set) [Hz].
num_channels : int, optional
Number of channels to reduce the signal to.
skip : float, optional
Number of seconds to skip at beginning of file.
max_len : float, optional
Maximum length in seconds to decode.
cmd : {'ffmpeg','avconv'}, optional
Decoding command (defaults to ffmpeg, alternatively supports avconv).
rg_mode : {'track','album', None}, optional
Specify the ReplayGain volume-levelling mode (None to disable).
rg_preamp_db : float, optional
replaygain_preamp : float, optional
ReplayGain preamp volume change level (in dB).
Returns
-------
list
ffmpeg call.
Notes
-----
'avconv' rounds decoding positions and decodes in blocks of 4096 length
resulting in incorrect start and stop positions. Thus it should only be
used to decode complete files.
"""
# Note: avconv rounds decoding positions and decodes in blocks of 4096
# length resulting in incorrect start and stop positions
if cmd == 'avconv' and skip is not None and max_len is not None:
raise RuntimeError('avconv has a bug, which results in wrong audio '
'slices! Decode the audio files to .wav first or '
'use ffmpeg.')
# input type handling
if isinstance(infile, Signal):
in_fmt = _ffmpeg_fmt(infile.dtype)
in_ac = str(int(infile.num_channels))
in_ar = str(int(infile.sample_rate))
infile = str("pipe:0")
else:
infile = str(infile)
# general options
call = [cmd, "-v", "quiet", "-y"]
# input options
if skip:
# use "%f" to avoid scientific float notation
call.extend(["-ss", "%f" % float(skip)])
# if we decode from STDIN, the format must be specified
if infile == "pipe:0":
call.extend(["-f", in_fmt, "-ac", in_ac, "-ar", in_ar])
call.extend(["-i", infile])
if rg_mode:
audio_filter = ("volume=replaygain=%s:replaygain_preamp=%.1f"
% (rg_mode, rg_preamp_db))
call.extend(["-af", audio_filter])
# output options
call.extend(["-f", str(fmt)])
if max_len:
# use "%f" to avoid scientific float notation
call.extend(["-t", "%f" % float(max_len)])
# output options
if num_channels:
call.extend(["-ac", str(int(num_channels))])
if sample_rate:
call.extend(["-ar", str(int(sample_rate))])
call.append(output)
return call
|
4,401 | def test_exclude():
"""Test exclude parameter."""
exclude = ["I1", "I2", "I3", "I4"] # list of excluded channels
raw = read_raw_edf(edf_path, exclude=["I1", "I2", "I3", "I4"])
for ch in exclude:
assert ch not in raw.ch_names
raw = read_raw_edf(edf_path, exclude = "I[1-4]")
for ch in exclude:
assert ch not in raw.ch_names
| def test_exclude():
"""Test exclude parameter."""
exclude = ["I1", "I2", "I3", "I4"] # list of excluded channels
raw = read_raw_edf(edf_path, exclude=["I1", "I2", "I3", "I4"])
for ch in exclude:
assert ch not in raw.ch_names
raw = read_raw_edf(edf_path, exclude="I[1-4]")
for ch in exclude:
assert ch not in raw.ch_names
|
23,458 | def check_kite_installers_availability():
"""Check if Kite installers are available."""
url = LINUX_URL
if os.name == 'os':
url = WINDOWS_URL
elif sys.platform == 'darwin':
url = MAC_URL
req = requests.head(url)
available = req.ok
if req.ok:
if req.is_redirect:
loc = req.headers['Location']
req = requests.head(loc)
available = req.ok
return available
| def check_kite_installers_availability():
"""Check if Kite installers are available."""
url = LINUX_URL
if os.name == 'nt':
url = WINDOWS_URL
elif sys.platform == 'darwin':
url = MAC_URL
req = requests.head(url)
available = req.ok
if req.ok:
if req.is_redirect:
loc = req.headers['Location']
req = requests.head(loc)
available = req.ok
return available
|
13,573 | def eigs(A, E=None, k=3, which='LM', b=None, l=None, maxiter=1000, tol=1e-13):
"""Approximate a few eigenvalues of an |Operator|.
Computes `k` eigenvalues `w[i]` with corresponding eigenvectors `v[i]` which solve
the eigenvalue problem
.. math::
A v[i] = w[i] v[i]
or the generalized eigenvalue problem
.. math::
A v[i] = w[i] E v[i]
if `E` is not `None`.
The implementation is based on Algorithm 4.2 in [RL95]_.
Parameters
----------
A
The real |Operator| for which the eigenvalues are to be computed.
E
The |Operator| which defines the generalized eigenvalue problem.
k
The number of eigenvalues and eigenvectors which are to be computed.
which
A string specifying which `k` eigenvalues and eigenvectors to compute:
- `'LM'`: select eigenvalues with largest |v[i]|
- `'SM'`: select eigenvalues with smallest |v[i]|
- `'LR'`: select eigenvalues with largest Re(v[i])
- `'SR'`: select eigenvalues with smallest Re(v[i])
- `'LI'`: select eigenvalues with largest Im(v[i])
- `'SI'`: select eigenvalues with smallest Im(v[i])
b
Initial vector for Arnoldi iteration. Default is a random vector.
l
The size of the Arnoldi factorization. Default is `min(n - 1, max(2*k + 1, 20))`.
maxiter
The maximum number of iterations.
tol
The relative error tolerance for the ritz estimates.
Returns
-------
w
A |NumPy array| which contains the computed eigenvalues.
v
A |VectorArray| which contains the computed eigenvectors.
"""
n = A.source.dim
if l is None:
l = np.min((n - 1, np.max((2 * k + 1, 20))))
if E is None:
E = IdentityOperator(A.source)
assert A.source == A.range
assert E.source == A.source
assert E.range == A.source
assert k < n
assert l > k
if b is None:
b = A.source.random()
V, H, f = arnoldi(A, E, k, b)
k0 = k
i = 0
while True:
i = i + 1
V, H, f = extend_arnoldi(A, E, V, H, f, l - k)
ew, ev = spla.eig(H)
# truncate small imaginary parts
ew.imag[np.abs(ew.imag) / np.abs(ew) < 1e-12] = 0
if which == 'LM':
idx = np.argsort(-np.abs(ew))
elif which == 'SM':
idx = np.argsort(np.abs(ew))
elif which == 'LR':
idx = np.argsort(-np.real(ew))
elif which == 'SR':
idx = np.argsort(np.real(ew))
elif which == 'LI':
idx = np.argsort(-np.abs(np.imag(ew)))
elif which == 'SI':
idx = np.argsort(np.abs(np.imag(ew)))
k = k0
ews = ew[idx]
evs = ev[:, idx]
rres = f.l2_norm()[0] * np.abs(evs[l - 1]) / np.abs(ews)
# increase k by one in order to keep complex conjugate pairs together
if ews[k - 1].imag != 0 and ews[k - 1].imag + ews[k].imag < 1e-12:
k = k + 1
if np.all(rres[:k] <= tol) or i >= maxiter:
break
# increase k in order to prevent stagnation
k = np.min((l - 1, k + np.min((np.count_nonzero(rres[:k] <= tol), (l - k) // 2))))
# sort shifts for QR iteration based on their residual
shifts = ews[k:l]
srres = rres[k:l]
idx = np.argsort(-srres)
srres = srres[idx]
shifts = shifts[idx]
# don't use converged unwanted ritzvalues as shifts
shifts = np.delete(shifts, np.where(srres == 0))
k = k + np.count_nonzero(srres == 0)
if shifts[0].imag != 0 and shifts[0].imag + ews[1].imag >= 1e-12:
shifts = shifts[1:]
k = k + 1
H, Qs = QR_iteration(H, shifts)
V = V.lincomb(Qs.T)
f = V[k] * H[k, k - 1] + f * Qs[l - 1, k - 1]
V = V[:k]
H = H[:k, :k]
return ews[:k0], V.lincomb(evs[:, :k0].T)
| def eigs(A, E=None, k=3, which='LM', b=None, l=None, maxiter=1000, tol=1e-13):
"""Approximate a few eigenvalues of an |Operator|.
Computes `k` eigenvalues `w[i]` with corresponding eigenvectors `v[i]` which solve
the eigenvalue problem
.. math::
A v[i] = w[i] v[i]
or the generalized eigenvalue problem
.. math::
A v[i] = w[i] E v[i]
if `E` is not `None`.
The implementation is based on Algorithm 4.2 in [RL95]_.
Parameters
----------
A
The real |Operator| for which the eigenvalues are to be computed.
E
The |Operator| which defines the generalized eigenvalue problem.
k
The number of eigenvalues and eigenvectors which are to be computed.
which
A string specifying which `k` eigenvalues and eigenvectors to compute:
- `'LM'`: select eigenvalues with largest |v[i]|
- `'SM'`: select eigenvalues with smallest |v[i]|
- `'LR'`: select eigenvalues with largest Re(v[i])
- `'SR'`: select eigenvalues with smallest Re(v[i])
- `'LI'`: select eigenvalues with largest Im(v[i])
- `'SI'`: select eigenvalues with smallest Im(v[i])
b
Initial vector for Arnoldi iteration. Default is a random vector.
l
The size of the Arnoldi factorization. Default is `min(n - 1, max(2*k + 1, 20))`.
maxiter
The maximum number of iterations.
tol
The relative error tolerance for the ritz estimates.
Returns
-------
w
A |NumPy array| which contains the computed eigenvalues.
v
A |VectorArray| which contains the computed eigenvectors.
"""
n = A.source.dim
if l is None:
l = np.min((n - 1, np.max((2 * k + 1, 20))))
if E is None:
E = IdentityOperator(A.source)
assert A.source == A.range
assert E.source == A.source
assert E.range == A.source
assert k < n
assert l > k
if b is None:
b = A.source.random()
V, H, f = arnoldi(A, E, k, b)
k0 = k
i = 0
while True:
i = i + 1
V, H, f = extend_arnoldi(A, E, V, H, f, l - k)
ew, ev = spla.eig(H)
# truncate small imaginary parts
ew.imag[np.abs(ew.imag) / np.abs(ew) < 1e-12] = 0
if which == 'LM':
idx = np.argsort(-np.abs(ew))
elif which == 'SM':
idx = np.argsort(np.abs(ew))
elif which == 'LR':
idx = np.argsort(-ew.real)
elif which == 'SR':
idx = np.argsort(np.real(ew))
elif which == 'LI':
idx = np.argsort(-np.abs(np.imag(ew)))
elif which == 'SI':
idx = np.argsort(np.abs(np.imag(ew)))
k = k0
ews = ew[idx]
evs = ev[:, idx]
rres = f.l2_norm()[0] * np.abs(evs[l - 1]) / np.abs(ews)
# increase k by one in order to keep complex conjugate pairs together
if ews[k - 1].imag != 0 and ews[k - 1].imag + ews[k].imag < 1e-12:
k = k + 1
if np.all(rres[:k] <= tol) or i >= maxiter:
break
# increase k in order to prevent stagnation
k = np.min((l - 1, k + np.min((np.count_nonzero(rres[:k] <= tol), (l - k) // 2))))
# sort shifts for QR iteration based on their residual
shifts = ews[k:l]
srres = rres[k:l]
idx = np.argsort(-srres)
srres = srres[idx]
shifts = shifts[idx]
# don't use converged unwanted ritzvalues as shifts
shifts = np.delete(shifts, np.where(srres == 0))
k = k + np.count_nonzero(srres == 0)
if shifts[0].imag != 0 and shifts[0].imag + ews[1].imag >= 1e-12:
shifts = shifts[1:]
k = k + 1
H, Qs = QR_iteration(H, shifts)
V = V.lincomb(Qs.T)
f = V[k] * H[k, k - 1] + f * Qs[l - 1, k - 1]
V = V[:k]
H = H[:k, :k]
return ews[:k0], V.lincomb(evs[:, :k0].T)
|
37,828 | def main() -> None:
platform: PlatformName
parser = argparse.ArgumentParser(
description="Build wheels for all the platforms.",
epilog="""
Most options are supplied via environment variables or in
--config-file (pyproject.toml usually). See
https://github.com/pypa/cibuildwheel#options for info.
""",
)
parser.add_argument(
"--platform",
choices=["auto", "linux", "macos", "windows"],
default=os.environ.get("CIBW_PLATFORM", "auto"),
help="""
Platform to build for. Use this option to override the
auto-detected platform or to run cibuildwheel on your development
machine. Specifying "macos" or "windows" only works on that
operating system, but "linux" works on all three, as long as
Docker is installed. Default: auto.
""",
)
arch_list_str = ", ".join(a.name for a in Architecture)
parser.add_argument(
"--archs",
default=None,
help=f"""
Comma-separated list of CPU architectures to build for.
When set to 'auto', builds the architectures natively supported
on this machine. Set this option to build an architecture
via emulation, for example, using binfmt_misc and QEMU.
Default: auto.
Choices: auto, auto64, auto32, native, all, {arch_list_str}
""",
)
parser.add_argument(
"--output-dir",
help="Destination folder for the wheels. Default: wheelhouse.",
)
parser.add_argument(
"--config-file",
default="",
help="""
TOML config file. Default: "", meaning {package}/pyproject.toml,
if it exists.
""",
)
parser.add_argument(
"package_dir",
default=".",
nargs="?",
help="""
Path to the package that you want wheels for. Must be either a
a subdirectory of the working directory or a source distribution
(sdist) archive file. Note that the working directory is always the
'project' directory and is what is copied into the Docker container
on Linux.
Default: the working directory.
""",
)
parser.add_argument(
"--print-build-identifiers",
action="store_true",
help="Print the build identifiers matched by the current invocation and exit.",
)
parser.add_argument(
"--allow-empty",
action="store_true",
help="Do not report an error code if the build does not match any wheels.",
)
parser.add_argument(
"--prerelease-pythons",
action="store_true",
help="Enable pre-release Python versions if available.",
)
args = parser.parse_args(namespace=CommandLineArguments())
if args.platform != "auto":
platform = args.platform
else:
ci_provider = detect_ci_provider()
if ci_provider is None:
print(
textwrap.dedent(
"""
cibuildwheel: Unable to detect platform. cibuildwheel should run on your CI server;
Travis CI, AppVeyor, Azure Pipelines, GitHub Actions, CircleCI, and Gitlab are
supported. You can run on your development machine or other CI providers using the
--platform argument. Check --help output for more information.
"""
),
file=sys.stderr,
)
sys.exit(2)
if sys.platform.startswith("linux"):
platform = "linux"
elif sys.platform == "darwin":
platform = "macos"
elif sys.platform == "win32":
platform = "windows"
else:
print(
'cibuildwheel: Unable to detect platform from "sys.platform" in a CI environment. You can run '
"cibuildwheel using the --platform argument. Check --help output for more information.",
file=sys.stderr,
)
sys.exit(2)
if platform not in PLATFORMS:
print(f"cibuildwheel: Unsupported platform: {platform}", file=sys.stderr)
sys.exit(2)
options = compute_options(platform=platform, command_line_arguments=args)
package_dir = options.globals.package_dir
from_sdist = package_dir.is_file()
from_dir = not from_sdist
if from_dir:
package_files = {"setup.py", "setup.cfg", "pyproject.toml"}
if not any(package_dir.joinpath(name).exists() for name in package_files):
names = ", ".join(sorted(package_files, reverse=True))
msg = f"cibuildwheel: Could not find any of {{{names}}} at root of package"
print(msg, file=sys.stderr)
sys.exit(2)
elif from_sdist:
if not str(package_dir).endswith(".tar.gz"):
msg = f"cibuildwheel: Package is not an sdist'.tar.gz' tarball"
print(msg, file=sys.stderr)
sys.exit(2)
identifiers = get_build_identifiers(
platform=platform,
build_selector=options.globals.build_selector,
architectures=options.globals.architectures,
)
if args.print_build_identifiers:
for identifier in identifiers:
print(identifier)
sys.exit(0)
# Add CIBUILDWHEEL environment variable
# This needs to be passed on to the docker container in linux.py
os.environ["CIBUILDWHEEL"] = "1"
# Python is buffering by default when running on the CI platforms, giving problems interleaving subprocess call output with unflushed calls to 'print'
sys.stdout = Unbuffered(sys.stdout) # type: ignore[assignment]
# create the cache dir before it gets printed & builds performed
CIBW_CACHE_PATH.mkdir(parents=True, exist_ok=True)
print_preamble(platform=platform, options=options, identifiers=identifiers)
try:
options.check_for_invalid_configuration(identifiers)
allowed_architectures_check(platform, options.globals.architectures)
except ValueError as err:
print("cibuildwheel:", *err.args, file=sys.stderr)
sys.exit(4)
if not identifiers:
print(
f"cibuildwheel: No build identifiers selected: {options.globals.build_selector}",
file=sys.stderr,
)
if not args.allow_empty:
sys.exit(3)
output_dir = options.globals.output_dir
if not output_dir.exists():
output_dir.mkdir(parents=True)
tmp_path = Path(mkdtemp(prefix="cibw-run-")).resolve(strict=True)
try:
with cibuildwheel.util.print_new_wheels(
"\n{n} wheels produced in {m:.0f} minutes:", output_dir
):
if platform == "linux":
cibuildwheel.linux.build(options, tmp_path)
elif platform == "windows":
cibuildwheel.windows.build(options, tmp_path)
elif platform == "macos":
cibuildwheel.macos.build(options, tmp_path)
else:
assert_never(platform)
finally:
shutil.rmtree(tmp_path, ignore_errors=sys.platform.startswith("win"))
if tmp_path.exists():
log.warning(f"Can't delete temporary folder '{str(tmp_path)}'")
| def main() -> None:
platform: PlatformName
parser = argparse.ArgumentParser(
description="Build wheels for all the platforms.",
epilog="""
Most options are supplied via environment variables or in
--config-file (pyproject.toml usually). See
https://github.com/pypa/cibuildwheel#options for info.
""",
)
parser.add_argument(
"--platform",
choices=["auto", "linux", "macos", "windows"],
default=os.environ.get("CIBW_PLATFORM", "auto"),
help="""
Platform to build for. Use this option to override the
auto-detected platform or to run cibuildwheel on your development
machine. Specifying "macos" or "windows" only works on that
operating system, but "linux" works on all three, as long as
Docker is installed. Default: auto.
""",
)
arch_list_str = ", ".join(a.name for a in Architecture)
parser.add_argument(
"--archs",
default=None,
help=f"""
Comma-separated list of CPU architectures to build for.
When set to 'auto', builds the architectures natively supported
on this machine. Set this option to build an architecture
via emulation, for example, using binfmt_misc and QEMU.
Default: auto.
Choices: auto, auto64, auto32, native, all, {arch_list_str}
""",
)
parser.add_argument(
"--output-dir",
help="Destination folder for the wheels. Default: wheelhouse.",
)
parser.add_argument(
"--config-file",
default="",
help="""
TOML config file. Default: "", meaning {package}/pyproject.toml,
if it exists.
""",
)
parser.add_argument(
"package_dir",
default=".",
nargs="?",
help="""
Path to the package that you want wheels for. Must be either a
a subdirectory of the working directory or a source distribution
(sdist) archive file. Note that the working directory is always the
'project' directory and is what is copied into the Docker container
on Linux.
Default: the working directory.
""",
)
parser.add_argument(
"--print-build-identifiers",
action="store_true",
help="Print the build identifiers matched by the current invocation and exit.",
)
parser.add_argument(
"--allow-empty",
action="store_true",
help="Do not report an error code if the build does not match any wheels.",
)
parser.add_argument(
"--prerelease-pythons",
action="store_true",
help="Enable pre-release Python versions if available.",
)
args = parser.parse_args(namespace=CommandLineArguments())
if args.platform != "auto":
platform = args.platform
else:
ci_provider = detect_ci_provider()
if ci_provider is None:
print(
textwrap.dedent(
"""
cibuildwheel: Unable to detect platform. cibuildwheel should run on your CI server;
Travis CI, AppVeyor, Azure Pipelines, GitHub Actions, CircleCI, and Gitlab are
supported. You can run on your development machine or other CI providers using the
--platform argument. Check --help output for more information.
"""
),
file=sys.stderr,
)
sys.exit(2)
if sys.platform.startswith("linux"):
platform = "linux"
elif sys.platform == "darwin":
platform = "macos"
elif sys.platform == "win32":
platform = "windows"
else:
print(
'cibuildwheel: Unable to detect platform from "sys.platform" in a CI environment. You can run '
"cibuildwheel using the --platform argument. Check --help output for more information.",
file=sys.stderr,
)
sys.exit(2)
if platform not in PLATFORMS:
print(f"cibuildwheel: Unsupported platform: {platform}", file=sys.stderr)
sys.exit(2)
options = compute_options(platform=platform, command_line_arguments=args)
package_dir = options.globals.package_dir
from_sdist = package_dir.is_file()
from_dir = not from_sdist
if from_dir:
package_files = {"setup.py", "setup.cfg", "pyproject.toml"}
if not any(package_dir.joinpath(name).exists() for name in package_files):
names = ", ".join(sorted(package_files, reverse=True))
msg = f"cibuildwheel: Could not find any of {{{names}}} at root of package"
print(msg, file=sys.stderr)
sys.exit(2)
elif from_sdist:
if not str(package_dir).endswith(".tar.gz"):
msg = f"cibuildwheel: Package {package_dir} is not an sdist '.tar.gz' tarball"
print(msg, file=sys.stderr)
sys.exit(2)
identifiers = get_build_identifiers(
platform=platform,
build_selector=options.globals.build_selector,
architectures=options.globals.architectures,
)
if args.print_build_identifiers:
for identifier in identifiers:
print(identifier)
sys.exit(0)
# Add CIBUILDWHEEL environment variable
# This needs to be passed on to the docker container in linux.py
os.environ["CIBUILDWHEEL"] = "1"
# Python is buffering by default when running on the CI platforms, giving problems interleaving subprocess call output with unflushed calls to 'print'
sys.stdout = Unbuffered(sys.stdout) # type: ignore[assignment]
# create the cache dir before it gets printed & builds performed
CIBW_CACHE_PATH.mkdir(parents=True, exist_ok=True)
print_preamble(platform=platform, options=options, identifiers=identifiers)
try:
options.check_for_invalid_configuration(identifiers)
allowed_architectures_check(platform, options.globals.architectures)
except ValueError as err:
print("cibuildwheel:", *err.args, file=sys.stderr)
sys.exit(4)
if not identifiers:
print(
f"cibuildwheel: No build identifiers selected: {options.globals.build_selector}",
file=sys.stderr,
)
if not args.allow_empty:
sys.exit(3)
output_dir = options.globals.output_dir
if not output_dir.exists():
output_dir.mkdir(parents=True)
tmp_path = Path(mkdtemp(prefix="cibw-run-")).resolve(strict=True)
try:
with cibuildwheel.util.print_new_wheels(
"\n{n} wheels produced in {m:.0f} minutes:", output_dir
):
if platform == "linux":
cibuildwheel.linux.build(options, tmp_path)
elif platform == "windows":
cibuildwheel.windows.build(options, tmp_path)
elif platform == "macos":
cibuildwheel.macos.build(options, tmp_path)
else:
assert_never(platform)
finally:
shutil.rmtree(tmp_path, ignore_errors=sys.platform.startswith("win"))
if tmp_path.exists():
log.warning(f"Can't delete temporary folder '{str(tmp_path)}'")
|