id | original | modified |
---|---|---|
30,726 | def item_to_incident(item):
dt_string = item.get('last_updated')
dz_string = dt_string.split('.')[0] + "Z"
incident = {
'Type': 'CTS Incident',
'name': '{}'.format(item.get('name')),
'occurred': dz_string,
'shortdesc': item.get('shortdesc_md'),
'shorttext': item.get('shortdesc'),
'longdescmd': item.get('longdesc_md'),
'eventid': item.get('sha'),
'rawJSON': json.dumps(item),
'CTS': {'DeviceName': item.get('devicephysical'),
'Confidence': item.get('faereconfidence'),
'Severity': item.get('faereseverity')}
}
return incident
| def item_to_incident(item):
dt_string = item.get('last_updated', '')
dz_string = dt_string.split('.')[0] + "Z"
incident = {
'Type': 'CTS Incident',
'name': '{}'.format(item.get('name')),
'occurred': dz_string,
'shortdesc': item.get('shortdesc_md'),
'shorttext': item.get('shortdesc'),
'longdescmd': item.get('longdesc_md'),
'eventid': item.get('sha'),
'rawJSON': json.dumps(item),
'CTS': {'DeviceName': item.get('devicephysical'),
'Confidence': item.get('faereconfidence'),
'Severity': item.get('faereseverity')}
}
return incident
|
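The only change in this pair is the default value passed to `item.get('last_updated', '')`. A small stand-alone sketch (plain dicts and the standard library only, not actual CTS fetch data) of why that default matters:

```python
# Minimal sketch: with no default, a missing 'last_updated' key yields None,
# and None.split('.') raises AttributeError.
item_missing = {}

# Original behaviour:
# item_missing.get('last_updated').split('.')
#   -> AttributeError: 'NoneType' object has no attribute 'split'

# Modified behaviour: the conversion no longer raises; the timestamp is just empty.
dt_string = item_missing.get('last_updated', '')
print(dt_string.split('.')[0] + "Z")  # prints "Z"
```

The resulting `occurred` value is still not a valid timestamp, but the item-to-incident conversion no longer crashes on items that lack the field.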
31,231 | def resume_scans_command():
"""Function for integration command resume_scans_command."""
scan_ids = str(demisto.getArg('scanIds')).split(",")
results = []
for scan_id in scan_ids:
scan_details = send_scan_request(scan_id)
scan_status = {
'Id': scan_id,
'Status': scan_details['info']['status']
}
if scan_status["Status"].lower() == "paused":
send_scan_request(scan_id, "resume", "POST")
resumed_scan = {
"Id": scan_id,
"Status": "Resuming"
}
results.append(get_entry_for_object("The requested scan was resumed successfully",
"TenableIO.Scan", replace_keys(resumed_scan), ["Id", "Status"]))
else:
results.append(
"Command 'tenable-io-resume-scans' cannot be "
"called while scan status is {} for scanID {}".format(scan_status["Status"], scan_id))
return results
| def resume_scans_command():
"""Function for integration command resume_scans_command."""
scan_ids = str(demisto.getArg('scanIds')).split(",")
results = []
for scan_id in scan_ids:
scan_details = send_scan_request(scan_id)
scan_status = {
'Id': scan_id,
'Status': scan_details.get('info').get('status')
}
if scan_status["Status"].lower() == "paused":
send_scan_request(scan_id, "resume", "POST")
resumed_scan = {
"Id": scan_id,
"Status": "Resuming"
}
results.append(get_entry_for_object("The requested scan was resumed successfully",
"TenableIO.Scan", replace_keys(resumed_scan), ["Id", "Status"]))
else:
results.append(
"Command 'tenable-io-resume-scans' cannot be "
"called while scan status is {} for scanID {}".format(scan_status["Status"], scan_id))
return results
|
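Here the bracket lookup `scan_details['info']['status']` is swapped for a `.get()` chain. A plain-dict illustration (not real Tenable responses) of what that does and does not guard against: a bare `.get()` chain trades a `KeyError` for an `AttributeError` when `'info'` is missing; only `.get('info', {})` avoids an exception entirely.

```python
scan_details = {"info": {"status": "paused"}}
print(scan_details["info"]["status"])          # 'paused'
print(scan_details.get("info").get("status"))  # 'paused'

empty = {}
print(empty.get("info", {}).get("status"))     # None, no exception
# empty["info"]["status"]          -> KeyError: 'info'
# empty.get("info").get("status")  -> AttributeError: 'NoneType' object has no attribute 'get'
```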
13,767 | def is_course_app_enabled(course_key: CourseKey, app_id: str) -> bool:
"""
Return if the app with the specified `app_id` is enable for the
specified course.
Args:
course_key (CourseKey): Course key for course
app_id (str): The app id for a course app
Returns:
True or False depending on if the course app is enabled or not.
"""
course_app = CourseAppsPluginManager.get_plugin(app_id)
is_enabled = course_app.default_enabled
    # For existing apps that don't support the new mechanism of enabling/disabling
# we delegate to the app to know whether it's enabled or disabled.
obj, _ = CourseAppStatus.objects.get_or_create(
course_key=course_key, app_id=app_id, defaults={"enabled": is_enabled}
)
if hasattr(course_app, "is_enabled"):
# If the app has a method to override `is_enabled` use that value and cache it in the database.
is_enabled = course_app.is_enabled(course_key)
if not obj.enabled == is_enabled:
obj.enabled = is_enabled
obj.save()
return obj.enabled
| def is_course_app_enabled(course_key: CourseKey, app_id: str) -> bool:
"""
Return if the app with the specified `app_id` is enabled for the
specified course.
Args:
course_key (CourseKey): Course key for course
app_id (str): The app id for a course app
Returns:
True or False depending on if the course app is enabled or not.
"""
course_app = CourseAppsPluginManager.get_plugin(app_id)
is_enabled = course_app.default_enabled
    # For existing apps that don't support the new mechanism of enabling/disabling
# we delegate to the app to know whether it's enabled or disabled.
obj, _ = CourseAppStatus.objects.get_or_create(
course_key=course_key, app_id=app_id, defaults={"enabled": is_enabled}
)
if hasattr(course_app, "is_enabled"):
# If the app has a method to override `is_enabled` use that value and cache it in the database.
is_enabled = course_app.is_enabled(course_key)
if not obj.enabled == is_enabled:
obj.enabled = is_enabled
obj.save()
return obj.enabled
|
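This pair differs only in the docstring ("is enable" vs "is enabled"). As a framework-free sketch of the pattern the function implements (hypothetical names, a dict standing in for the `CourseAppStatus` table): start from the plugin's `default_enabled`, persist it on first lookup, and let an optional `is_enabled()` hook override and refresh the stored value.

```python
_status_cache = {}  # stands in for CourseAppStatus rows keyed by (course_key, app_id)

def is_app_enabled(course_key, app, app_id):
    # get_or_create analogue: store the default the first time this pair is seen
    enabled = _status_cache.setdefault((course_key, app_id), app.default_enabled)
    if hasattr(app, "is_enabled"):
        enabled = app.is_enabled(course_key)            # app-provided override wins
        _status_cache[(course_key, app_id)] = enabled   # cache it, like obj.save()
    return enabled

class Discussion:
    default_enabled = True

print(is_app_enabled("course-v1:Demo+X+2024", Discussion, "discussion"))  # True
```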
43,988 | def generalized_singles(wires, delta_sz):
r"""Return generalized single excitation terms
.. math::
\hat{T_1} = \sum_{pq} t_{p}^{q} \hat{c}^{\dagger}_q \hat{c}_p
"""
sz = np.array(
[0.5 if (i % 2 == 0) else -0.5 for i in range(len(wires))]
) # alpha-beta electrons
gen_singles_wires = [
wires[r : p + 1] if r < p else wires[p : r + 1][::-1] # wires for [wire[r], wire[p]] terms
for r in range(len(wires))
for p in range(len(wires))
if sz[p] - sz[r] == delta_sz # selection rules for spin projection
and p != r # remove redundant terms
]
return gen_singles_wires
| def generalized_singles(wires, delta_sz):
r"""Return generalized single excitation terms
.. math::
\hat{T_1} = \sum_{pq} t_{p}^{q} \hat{c}^{\dagger}_{q} \hat{c}_{p}
"""
sz = np.array(
[0.5 if (i % 2 == 0) else -0.5 for i in range(len(wires))]
) # alpha-beta electrons
gen_singles_wires = [
wires[r : p + 1] if r < p else wires[p : r + 1][::-1] # wires for [wire[r], wire[p]] terms
for r in range(len(wires))
for p in range(len(wires))
if sz[p] - sz[r] == delta_sz # selection rules for spin projection
and p != r # remove redundant terms
]
return gen_singles_wires
|
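Only the LaTeX braces in the docstring change here. A worked example of the wire selection itself (plain lists, four spin orbitals with alternating spin, `delta_sz = 0`) may make the comprehension easier to follow:

```python
wires = [0, 1, 2, 3]
sz = [0.5 if i % 2 == 0 else -0.5 for i in range(len(wires))]  # [0.5, -0.5, 0.5, -0.5]
delta_sz = 0

gen_singles_wires = [
    wires[r:p + 1] if r < p else wires[p:r + 1][::-1]
    for r in range(len(wires))
    for p in range(len(wires))
    if sz[p] - sz[r] == delta_sz and p != r
]
print(gen_singles_wires)  # [[0, 1, 2], [1, 2, 3], [2, 1, 0], [3, 2, 1]]
```

With `delta_sz = 0`, excitations are only generated between like-spin orbitals (0↔2 and 1↔3), and the slice is reversed when the excitation runs from a higher wire to a lower one.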
40,855 | def training_pipeline(batch_size,
batch_type="examples",
batch_multiplier=1,
batch_size_multiple=1,
process_fn=None,
transform_fns=[],
length_bucket_width=None,
features_length_fn=None,
labels_length_fn=None,
maximum_features_length=None,
maximum_labels_length=None,
single_pass=False,
num_shards=1,
shard_index=0,
num_threads=None,
shuffle_buffer_size=None,
prefetch_buffer_size=None,
cardinality_multiple=1):
"""Transformation that applies most of the dataset operations commonly used
for training on sequence data:
* sharding
* shuffling
* processing
* filtering
* bucketization
* batching
* prefetching
Example:
>>> dataset = dataset.apply(opennmt.data.training_pipeline(...))
Args:
batch_size: The batch size to use.
      batch_type: The training batching strategy to use: can be "examples" or
        "tokens".
batch_multiplier: The batch size multiplier.
batch_size_multiple: When :obj:`batch_type` is "tokens", ensure that the
resulting batch size is a multiple of this value.
process_fn: The processing function to apply on each element.
transform_fns: list of transformation functions (map or filter) to apply on
the batch. More generic than `process_fn`.
length_bucket_width: The width of the length buckets to select batch
candidates from. ``None`` to not constrain batch formation.
features_length_fn: A function mapping features to a sequence length.
labels_length_fn: A function mapping labels to a sequence length.
maximum_features_length: The maximum length or list of maximum lengths of
the features sequence(s). ``None`` to not constrain the length.
maximum_labels_length: The maximum length of the labels sequence.
``None`` to not constrain the length.
single_pass: If ``True``, makes a single pass over the training data.
num_shards: The number of data shards (usually the number of workers in a
distributed setting).
shard_index: The shard index this data pipeline should read from.
num_threads: The number of elements processed in parallel.
shuffle_buffer_size: The number of elements from which to sample.
prefetch_buffer_size: The number of batches to prefetch asynchronously. If
``None``, use an automatically tuned value.
cardinality_multiple: Ensure that the dataset cardinality is a multiple of
this value when :obj:`single_pass` is ``True``.
Returns:
A ``tf.data.Dataset`` transformation.
See Also:
- :func:`opennmt.data.batch_sequence_dataset`
- :func:`opennmt.data.make_cardinality_multiple_of`
- :func:`opennmt.data.filter_examples_by_length`
- :func:`opennmt.data.filter_irregular_batches`
- :func:`opennmt.data.shuffle_dataset`
"""
def _make_weighted_dataset(datasets, weights):
if single_pass:
raise ValueError("single_pass parameter is not compatible with weighted datasets")
if not datasets:
raise ValueError("At least one dataset is required")
if weights is not None and len(weights) != len(datasets):
raise ValueError("%d dataset weights were provided, but %d were expected to match the "
"number of data files" % (len(weights), len(datasets)))
if num_shards > 1:
datasets = [dataset.shard(num_shards, shard_index) for dataset in datasets]
weights = normalize_weights(datasets, weights=weights)
datasets = [dataset.repeat() for dataset in datasets]
dataset = tf.data.experimental.sample_from_datasets(datasets, weights=weights)
if shuffle_buffer_size is not None and shuffle_buffer_size != 0:
if shuffle_buffer_size < 0:
raise ValueError("shuffle_buffer_size < 0 is not compatible with weighted datasets")
dataset = dataset.shuffle(shuffle_buffer_size)
return dataset
def _make_single_dataset(dataset):
if num_shards > 1:
dataset = dataset.shard(num_shards, shard_index)
if shuffle_buffer_size is not None and shuffle_buffer_size != 0:
dataset = dataset.apply(shuffle_dataset(shuffle_buffer_size))
return dataset
def _pipeline(dataset):
if isinstance(dataset, tuple):
dataset, weights = dataset
else:
weights = None
is_weighted_dataset = isinstance(dataset, list)
if is_weighted_dataset:
dataset = _make_weighted_dataset(dataset, weights)
else:
dataset = _make_single_dataset(dataset)
if process_fn is not None:
dataset = dataset.map(process_fn, num_parallel_calls=num_threads or 4)
for transform_fn in transform_fns:
dataset = dataset.apply(transform_fn)
# the following is now by default part of transform_fn
dataset = dataset.apply(filter_examples_by_length(
maximum_features_length=maximum_features_length,
maximum_labels_length=maximum_labels_length,
features_length_fn=features_length_fn,
labels_length_fn=labels_length_fn))
dataset = dataset.apply(batch_sequence_dataset(
batch_size,
batch_type=batch_type,
batch_multiplier=batch_multiplier,
batch_size_multiple=batch_size_multiple,
length_bucket_width=length_bucket_width,
length_fn=[features_length_fn, labels_length_fn]))
dataset = dataset.apply(filter_irregular_batches(batch_multiplier))
if not single_pass:
if not is_weighted_dataset: # Weighted dataset is repeated before sampling.
dataset = dataset.repeat()
else:
dataset = dataset.apply(make_cardinality_multiple_of(cardinality_multiple))
dataset = dataset.prefetch(prefetch_buffer_size)
return dataset
return _pipeline
| def training_pipeline(batch_size,
batch_type="examples",
batch_multiplier=1,
batch_size_multiple=1,
process_fn=None,
transform_fns=[],
length_bucket_width=None,
features_length_fn=None,
labels_length_fn=None,
maximum_features_length=None,
maximum_labels_length=None,
single_pass=False,
num_shards=1,
shard_index=0,
num_threads=None,
shuffle_buffer_size=None,
prefetch_buffer_size=None,
cardinality_multiple=1):
"""Transformation that applies most of the dataset operations commonly used
for training on sequence data:
* sharding
* shuffling
* processing
* filtering
* bucketization
* batching
* prefetching
Example:
>>> dataset = dataset.apply(opennmt.data.training_pipeline(...))
Args:
batch_size: The batch size to use.
      batch_type: The training batching strategy to use: can be "examples" or
        "tokens".
batch_multiplier: The batch size multiplier.
batch_size_multiple: When :obj:`batch_type` is "tokens", ensure that the
resulting batch size is a multiple of this value.
process_fn: The processing function to apply on each element.
transform_fns: list of transformation functions (map or filter) to apply on
the dataset. More generic than `process_fn`.
length_bucket_width: The width of the length buckets to select batch
candidates from. ``None`` to not constrain batch formation.
features_length_fn: A function mapping features to a sequence length.
labels_length_fn: A function mapping labels to a sequence length.
maximum_features_length: The maximum length or list of maximum lengths of
the features sequence(s). ``None`` to not constrain the length.
maximum_labels_length: The maximum length of the labels sequence.
``None`` to not constrain the length.
single_pass: If ``True``, makes a single pass over the training data.
num_shards: The number of data shards (usually the number of workers in a
distributed setting).
shard_index: The shard index this data pipeline should read from.
num_threads: The number of elements processed in parallel.
shuffle_buffer_size: The number of elements from which to sample.
prefetch_buffer_size: The number of batches to prefetch asynchronously. If
``None``, use an automatically tuned value.
cardinality_multiple: Ensure that the dataset cardinality is a multiple of
this value when :obj:`single_pass` is ``True``.
Returns:
A ``tf.data.Dataset`` transformation.
See Also:
- :func:`opennmt.data.batch_sequence_dataset`
- :func:`opennmt.data.make_cardinality_multiple_of`
- :func:`opennmt.data.filter_examples_by_length`
- :func:`opennmt.data.filter_irregular_batches`
- :func:`opennmt.data.shuffle_dataset`
"""
def _make_weighted_dataset(datasets, weights):
if single_pass:
raise ValueError("single_pass parameter is not compatible with weighted datasets")
if not datasets:
raise ValueError("At least one dataset is required")
if weights is not None and len(weights) != len(datasets):
raise ValueError("%d dataset weights were provided, but %d were expected to match the "
"number of data files" % (len(weights), len(datasets)))
if num_shards > 1:
datasets = [dataset.shard(num_shards, shard_index) for dataset in datasets]
weights = normalize_weights(datasets, weights=weights)
datasets = [dataset.repeat() for dataset in datasets]
dataset = tf.data.experimental.sample_from_datasets(datasets, weights=weights)
if shuffle_buffer_size is not None and shuffle_buffer_size != 0:
if shuffle_buffer_size < 0:
raise ValueError("shuffle_buffer_size < 0 is not compatible with weighted datasets")
dataset = dataset.shuffle(shuffle_buffer_size)
return dataset
def _make_single_dataset(dataset):
if num_shards > 1:
dataset = dataset.shard(num_shards, shard_index)
if shuffle_buffer_size is not None and shuffle_buffer_size != 0:
dataset = dataset.apply(shuffle_dataset(shuffle_buffer_size))
return dataset
def _pipeline(dataset):
if isinstance(dataset, tuple):
dataset, weights = dataset
else:
weights = None
is_weighted_dataset = isinstance(dataset, list)
if is_weighted_dataset:
dataset = _make_weighted_dataset(dataset, weights)
else:
dataset = _make_single_dataset(dataset)
if process_fn is not None:
dataset = dataset.map(process_fn, num_parallel_calls=num_threads or 4)
for transform_fn in transform_fns:
dataset = dataset.apply(transform_fn)
# the following is now by default part of transform_fn
dataset = dataset.apply(filter_examples_by_length(
maximum_features_length=maximum_features_length,
maximum_labels_length=maximum_labels_length,
features_length_fn=features_length_fn,
labels_length_fn=labels_length_fn))
dataset = dataset.apply(batch_sequence_dataset(
batch_size,
batch_type=batch_type,
batch_multiplier=batch_multiplier,
batch_size_multiple=batch_size_multiple,
length_bucket_width=length_bucket_width,
length_fn=[features_length_fn, labels_length_fn]))
dataset = dataset.apply(filter_irregular_batches(batch_multiplier))
if not single_pass:
if not is_weighted_dataset: # Weighted dataset is repeated before sampling.
dataset = dataset.repeat()
else:
dataset = dataset.apply(make_cardinality_multiple_of(cardinality_multiple))
dataset = dataset.prefetch(prefetch_buffer_size)
return dataset
return _pipeline
|
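The docstring for `transform_fns` now correctly says the functions are applied to the dataset rather than to a batch. The function itself follows the usual `tf.data` idiom of returning a transformation closure that is consumed with `dataset.apply(...)`; a toy version of that shape (illustrative names, assuming TensorFlow 2.x, not OpenNMT-tf itself):

```python
import tensorflow as tf

def toy_pipeline(batch_size, shuffle_buffer_size=None):
    # Build and return the transformation, exactly like _pipeline above.
    def _pipeline(dataset):
        if shuffle_buffer_size:
            dataset = dataset.shuffle(shuffle_buffer_size)
        dataset = dataset.batch(batch_size)
        return dataset.prefetch(tf.data.AUTOTUNE)
    return _pipeline

dataset = tf.data.Dataset.range(10).apply(toy_pipeline(batch_size=4, shuffle_buffer_size=10))
for batch in dataset:
    print(batch.numpy())
```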
22,792 | def enhance(config: configuration.NamespaceConfig,
plugins: plugins_disco.PluginsRegistry) -> Optional[str]:
"""Add security enhancements to existing configuration
:param config: Configuration object
:type config: configuration.NamespaceConfig
:param plugins: List of plugins
:type plugins: plugins_disco.PluginsRegistry
:returns: `None` or a string indicating and error
:rtype: None or str
"""
supported_enhancements = ["hsts", "redirect", "uir", "staple"]
# Check that at least one enhancement was requested on command line
oldstyle_enh = any(getattr(config, enh) for enh in supported_enhancements)
if not enhancements.are_requested(config) and not oldstyle_enh:
msg = ("Please specify one or more enhancement types to configure. To list "
"the available enhancement types, run:\n\n%s --help enhance\n")
logger.error(msg, cli.cli_command)
raise errors.MisconfigurationError("No enhancements requested, exiting.")
try:
installer, _ = plug_sel.choose_configurator_plugins(config, plugins, "enhance")
except errors.PluginSelectionError as e:
return str(e)
if not enhancements.are_supported(config, installer):
raise errors.NotSupportedError("One ore more of the requested enhancements "
"are not supported by the selected installer")
certname_question = ("Which certificate would you like to use to enhance "
"your configuration?")
config.certname = cert_manager.get_certnames(
config, "enhance", allow_multiple=False,
custom_prompt=certname_question)[0]
cert_domains = cert_manager.domains_for_certname(config, config.certname)
if cert_domains is None:
raise errors.Error("Could not find the list of domains for the given certificate name.")
if config.noninteractive_mode:
domains = cert_domains
else:
domain_question = ("Which domain names would you like to enable the "
"selected enhancements for?")
domains = display_ops.choose_values(cert_domains, domain_question)
if not domains:
raise errors.Error("User cancelled the domain selection. No domains "
"defined, exiting.")
lineage = cert_manager.lineage_for_certname(config, config.certname)
if not lineage:
raise errors.Error("Could not find the lineage for the given certificate name.")
if not config.chain_path:
config.chain_path = lineage.chain_path
if oldstyle_enh:
le_client = _init_le_client(config, authenticator=None, installer=installer)
le_client.enhance_config(domains, config.chain_path, redirect_default=False)
if enhancements.are_requested(config):
enhancements.enable(lineage, domains, installer, config)
return None
| def enhance(config: configuration.NamespaceConfig,
plugins: plugins_disco.PluginsRegistry) -> Optional[str]:
"""Add security enhancements to existing configuration
:param config: Configuration object
:type config: configuration.NamespaceConfig
:param plugins: List of plugins
:type plugins: plugins_disco.PluginsRegistry
:returns: `None` or a string indicating an error
:rtype: None or str
"""
supported_enhancements = ["hsts", "redirect", "uir", "staple"]
# Check that at least one enhancement was requested on command line
oldstyle_enh = any(getattr(config, enh) for enh in supported_enhancements)
if not enhancements.are_requested(config) and not oldstyle_enh:
msg = ("Please specify one or more enhancement types to configure. To list "
"the available enhancement types, run:\n\n%s --help enhance\n")
logger.error(msg, cli.cli_command)
raise errors.MisconfigurationError("No enhancements requested, exiting.")
try:
installer, _ = plug_sel.choose_configurator_plugins(config, plugins, "enhance")
except errors.PluginSelectionError as e:
return str(e)
if not enhancements.are_supported(config, installer):
raise errors.NotSupportedError("One ore more of the requested enhancements "
"are not supported by the selected installer")
certname_question = ("Which certificate would you like to use to enhance "
"your configuration?")
config.certname = cert_manager.get_certnames(
config, "enhance", allow_multiple=False,
custom_prompt=certname_question)[0]
cert_domains = cert_manager.domains_for_certname(config, config.certname)
if cert_domains is None:
raise errors.Error("Could not find the list of domains for the given certificate name.")
if config.noninteractive_mode:
domains = cert_domains
else:
domain_question = ("Which domain names would you like to enable the "
"selected enhancements for?")
domains = display_ops.choose_values(cert_domains, domain_question)
if not domains:
raise errors.Error("User cancelled the domain selection. No domains "
"defined, exiting.")
lineage = cert_manager.lineage_for_certname(config, config.certname)
if not lineage:
raise errors.Error("Could not find the lineage for the given certificate name.")
if not config.chain_path:
config.chain_path = lineage.chain_path
if oldstyle_enh:
le_client = _init_le_client(config, authenticator=None, installer=installer)
le_client.enhance_config(domains, config.chain_path, redirect_default=False)
if enhancements.are_requested(config):
enhancements.enable(lineage, domains, installer, config)
return None
|
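The only textual change in this pair is "indicating and error" becoming "indicating an error". For readers unfamiliar with the `oldstyle_enh` check near the top, it simply asks whether at least one of the legacy command-line switches is set (stand-alone illustration, not certbot's real config object):

```python
from types import SimpleNamespace

config = SimpleNamespace(hsts=True, redirect=False, uir=False, staple=False)
supported_enhancements = ["hsts", "redirect", "uir", "staple"]

oldstyle_enh = any(getattr(config, enh) for enh in supported_enhancements)
print(oldstyle_enh)  # True: at least one old-style enhancement was requested
```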
42,514 | def clever_t(
classifier: "CLASSIFIER_CLASS_LOSS_GRADIENTS_TYPE",
x: np.ndarray,
target_class: int,
nb_batches: int,
batch_size: int,
radius: float,
norm: int,
c_init: float = 1.0,
pool_factor: int = 10,
) -> float:
"""
Compute CLEVER score for a targeted attack.
| Paper link: https://arxiv.org/abs/1801.10578
:param classifier: A trained model.
:param x: One input sample.
:param target_class: Targeted class.
:param nb_batches: Number of repetitions of the estimate.
:param batch_size: Number of random examples to sample per batch.
:param radius: Radius of the maximum perturbation.
:param norm: Current support: 1, 2, np.inf.
:param c_init: Initialization of Weibull distribution.
:param pool_factor: The factor to create a pool of random samples with size pool_factor x n_s.
:return: CLEVER score.
"""
# Check if the targeted class is different from the predicted class
y_pred = classifier.predict(np.array([x]))
pred_class = np.argmax(y_pred, axis=1)[0]
if target_class == pred_class:
raise ValueError("The targeted class is the predicted class.")
# Check if pool_factor is smaller than 1
if pool_factor < 1:
raise ValueError("The `pool_factor` must be larger than 1.")
# Some auxiliary vars
rand_pool_grad_set = []
grad_norm_set = []
dim = reduce(lambda x_, y: x_ * y, x.shape, 1)
shape = [pool_factor * batch_size]
shape.extend(x.shape)
# Generate a pool of samples
rand_pool = np.reshape(
random_sphere(nb_points=pool_factor * batch_size, nb_dims=dim, radius=radius, norm=norm), shape,
)
rand_pool += np.repeat(np.array([x]), pool_factor * batch_size, 0)
rand_pool = rand_pool.astype(ART_NUMPY_DTYPE)
if hasattr(classifier, "clip_values") and classifier.clip_values is not None:
np.clip(rand_pool, classifier.clip_values[0], classifier.clip_values[1], out=rand_pool)
# Change norm since q = p / (p-1)
if norm == 1:
norm = np.inf
elif norm == np.inf:
norm = 1
elif norm != 2:
raise ValueError("Norm {} not supported".format(norm))
# Compute gradients for all samples in rand_pool
for i in range(batch_size):
rand_pool_batch = rand_pool[i * pool_factor:(i+1) * pool_factor]
# Compute gradients
grad_pred_class = classifier.class_gradient(rand_pool_batch, label=pred_class)
grad_target_class = classifier.class_gradient(rand_pool_batch, label=target_class)
if np.isnan(grad_pred_class).any() or np.isnan(grad_target_class).any():
raise Exception("The classifier results NaN gradients.")
grad = grad_pred_class - grad_target_class
grad = np.reshape(grad, (pool_factor, -1))
grad = np.linalg.norm(grad, ord=norm, axis=1)
rand_pool_grad_set.extend(grad)
rand_pool_grads = np.array(rand_pool_grad_set)
# Loop over the batches
for _ in range(nb_batches):
# Random selection of gradients
grad_norm = rand_pool_grads[np.random.choice(pool_factor * batch_size, batch_size)]
grad_norm = np.max(grad_norm)
grad_norm_set.append(grad_norm)
# Maximum likelihood estimation for max gradient norms
[_, loc, _] = weibull_min.fit(-np.array(grad_norm_set), c_init, optimizer=scipy_optimizer)
# Compute function value
values = classifier.predict(np.array([x]))
value = values[:, pred_class] - values[:, target_class]
# Compute scores
score = np.min([-value[0] / loc, radius])
return score
| def clever_t(
classifier: "CLASSIFIER_CLASS_LOSS_GRADIENTS_TYPE",
x: np.ndarray,
target_class: int,
nb_batches: int,
batch_size: int,
radius: float,
norm: int,
c_init: float = 1.0,
pool_factor: int = 10,
) -> float:
"""
Compute CLEVER score for a targeted attack.
| Paper link: https://arxiv.org/abs/1801.10578
:param classifier: A trained model.
:param x: One input sample.
:param target_class: Targeted class.
:param nb_batches: Number of repetitions of the estimate.
:param batch_size: Number of random examples to sample per batch.
:param radius: Radius of the maximum perturbation.
:param norm: Current support: 1, 2, np.inf.
:param c_init: Initialization of Weibull distribution.
:param pool_factor: The factor to create a pool of random samples with size pool_factor x n_s.
:return: CLEVER score.
"""
# Check if the targeted class is different from the predicted class
y_pred = classifier.predict(np.array([x]))
pred_class = np.argmax(y_pred, axis=1)[0]
if target_class == pred_class:
raise ValueError("The targeted class is the predicted class.")
# Check if pool_factor is smaller than 1
if pool_factor < 1:
raise ValueError("The `pool_factor` must be larger than 1.")
# Some auxiliary vars
rand_pool_grad_set = []
grad_norm_set = []
dim = reduce(lambda x_, y: x_ * y, x.shape, 1)
shape = [pool_factor * batch_size]
shape.extend(x.shape)
# Generate a pool of samples
rand_pool = np.reshape(
random_sphere(nb_points=pool_factor * batch_size, nb_dims=dim, radius=radius, norm=norm), shape,
)
rand_pool += np.repeat(np.array([x]), pool_factor * batch_size, 0)
rand_pool = rand_pool.astype(ART_NUMPY_DTYPE)
if hasattr(classifier, "clip_values") and classifier.clip_values is not None:
np.clip(rand_pool, classifier.clip_values[0], classifier.clip_values[1], out=rand_pool)
# Change norm since q = p / (p-1)
if norm == 1:
norm = np.inf
elif norm == np.inf:
norm = 1
elif norm != 2:
raise ValueError("Norm {} not supported".format(norm))
# Compute gradients for all samples in rand_pool
for i in range(batch_size):
rand_pool_batch = rand_pool[i * pool_factor:(i + 1) * pool_factor]
# Compute gradients
grad_pred_class = classifier.class_gradient(rand_pool_batch, label=pred_class)
grad_target_class = classifier.class_gradient(rand_pool_batch, label=target_class)
if np.isnan(grad_pred_class).any() or np.isnan(grad_target_class).any():
raise Exception("The classifier results NaN gradients.")
grad = grad_pred_class - grad_target_class
grad = np.reshape(grad, (pool_factor, -1))
grad = np.linalg.norm(grad, ord=norm, axis=1)
rand_pool_grad_set.extend(grad)
rand_pool_grads = np.array(rand_pool_grad_set)
# Loop over the batches
for _ in range(nb_batches):
# Random selection of gradients
grad_norm = rand_pool_grads[np.random.choice(pool_factor * batch_size, batch_size)]
grad_norm = np.max(grad_norm)
grad_norm_set.append(grad_norm)
# Maximum likelihood estimation for max gradient norms
[_, loc, _] = weibull_min.fit(-np.array(grad_norm_set), c_init, optimizer=scipy_optimizer)
# Compute function value
values = classifier.predict(np.array([x]))
value = values[:, pred_class] - values[:, target_class]
# Compute scores
score = np.min([-value[0] / loc, radius])
return score
|
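Only whitespace in the pool-slicing index changes here. The norm-swapping block above is the part most worth a second look: the perturbation radius is measured in L_p, so gradient norms must be taken in the dual norm L_q with 1/p + 1/q = 1, i.e. q = p / (p - 1). A stand-alone restatement of that mapping (not ART code):

```python
import numpy as np

def dual_norm(p):
    """Return q such that 1/p + 1/q = 1, for the norms supported by clever_t."""
    if p == 1:
        return np.inf
    if p == np.inf:
        return 1
    if p == 2:
        return 2
    raise ValueError(f"Norm {p} not supported")

for p in (1, 2, np.inf):
    print(p, "->", dual_norm(p))  # 1 -> inf, 2 -> 2, inf -> 1
```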
6,356 | def get_palette(may_use_fancy_formats: bool, theme: str = "classic") -> list:
"""
Load the requested theme and return a list containing all palette entries
needed to highlight the debugger UI, including syntax highlighting.
"""
inheritance_overrides = {}
if may_use_fancy_formats:
def add_setting(color, setting):
return f"{color}, {setting}"
else:
def add_setting(color, setting):
return color
def link(child: str, parent: str):
inheritance_overrides[child] = parent
# {{{ themes
if theme == "classic":
# {{{ classic theme
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": ("black", "light gray"),
"selectable": ("black", "dark cyan"),
"focused selectable": ("black", "light cyan"),
"highlighted": ("dark blue", "yellow"),
"hotkey": (add_setting("black", "underline"), "light gray"),
# }}}
# {{{ general ui
"header": ("dark blue", "light gray"),
"dialog title": (add_setting("white", "bold"), "dark blue"),
"warning": (add_setting("white", "bold"), "dark red"),
# }}}
# {{{ source view
"source": ("yellow", "dark blue"),
"current source": ("dark blue", "dark green"),
"breakpoint source": (
add_setting("yellow", "bold"), "dark red"),
"line number": ("light gray", "dark blue"),
"breakpoint marker": (
add_setting("dark red", "bold"), "dark blue"),
# }}}
# {{{ sidebar
"sidebar two": ("dark blue", "dark cyan"),
"sidebar three": ("dark gray", "dark cyan"),
"focused sidebar two": ("dark blue", "light cyan"),
"focused sidebar three": ("dark gray", "light cyan"),
# }}}
# {{{ variables view
"return label": ("white", "dark blue"),
"focused return label": ("light gray", "dark blue"),
# }}}
# {{{ stack
"current frame name": (
add_setting("white", "bold"), "dark cyan"),
"focused current frame name": (
add_setting("black", "bold"), "light cyan"),
# }}}
# {{{ shell
"command line output": ("light cyan", "dark blue"),
"command line prompt": (
add_setting("white", "bold"), "dark blue"),
"command line error": (
add_setting("light green", "bold"), "dark blue"),
"command line clear button": (
add_setting("white", "bold"), "dark blue"),
"command line focused button": ("dark blue", "dark cyan"),
# }}}
# {{{ Code syntax
"keyword": (add_setting("white", "bold"), "dark blue"),
"function": ("light cyan", "dark blue"),
"literal": (add_setting("light green", "bold"), "dark blue"),
"punctuation": ("light gray", "dark blue"),
"comment": ("dark cyan", "dark blue"),
# }}}
}
# }}}
elif theme == "vim":
# {{{ vim theme
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": ("black", "light gray"),
"selectable": ("black", "dark cyan"),
"focused selectable": ("black", "light cyan"),
"hotkey": (add_setting("black", "bold, underline"), "light gray"),
"highlighted": ("black", "yellow"),
# }}}
# {{{ general ui
"header": (add_setting("black", "bold"), "light gray"),
"group head": ("dark blue", "light gray"),
"dialog title": (add_setting("white", "bold"), "dark blue"),
"input": ("black", "dark cyan"),
"focused input": ("black", "light cyan"),
"warning": (add_setting("dark red", "bold"), "white"),
"header warning": (add_setting("dark red", "bold"), "light gray"),
# }}}
# {{{ source view
"source": ("black", "white"),
"current source": ("black", "dark cyan"),
"breakpoint source": ("dark red", "light gray"),
"line number": ("dark gray", "white"),
"current line marker": ("dark red", "white"),
"breakpoint marker": ("dark red", "white"),
# }}}
# {{{ sidebar
"sidebar one": ("black", "dark cyan"),
"sidebar two": ("dark blue", "dark cyan"),
"sidebar three": ("dark gray", "dark cyan"),
"focused sidebar one": ("black", "light cyan"),
"focused sidebar two": ("dark blue", "light cyan"),
"focused sidebar three": ("dark gray", "light cyan"),
# }}}
# {{{ variables view
"highlighted var label": ("dark blue", "yellow"),
"return label": ("white", "dark blue"),
"focused return label": ("light gray", "dark blue"),
# }}}
# {{{ stack
"current frame name": (
add_setting("white", "bold"), "dark cyan"),
"focused current frame name": (
add_setting("black", "bold"), "light cyan"),
# }}}
# {{{ shell
"command line output": (
add_setting("dark gray", "bold"), "white"),
# }}}
# {{{ Code syntax
"keyword2": ("dark magenta", "white"),
"namespace": ("dark magenta", "white"),
"literal": ("dark red", "white"),
"exception": ("dark red", "white"),
"comment": ("dark gray", "white"),
"function": ("dark blue", "white"),
"pseudo": ("dark gray", "white"),
"builtin": ("light blue", "white"),
# }}}
}
# }}}
elif theme == "dark vim":
# {{{ dark vim
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": ("black", "light gray"),
"selectable": ("white", "dark gray"),
"focused selectable": (add_setting("white", "bold"), "light blue"),
"highlighted": ("black", "dark green"),
"hotkey": (add_setting("dark blue", "underline"), "light gray"),
# }}}
# {{{ general ui
"header": ("dark blue", "light gray"),
"dialog title": (add_setting("white", "bold"), "black"),
"warning": (add_setting("light red", "bold"), "black"),
"header warning": (add_setting("light red", "bold"), "light gray"),
# }}}
# {{{ source view
"source": ("white", "black"),
"current source": (add_setting("white", "bold"), "dark gray"),
"line number": (add_setting("dark gray", "bold"), "black"),
"breakpoint marker": (add_setting("light red", "bold"), "black"),
"breakpoint source": (add_setting("white", "bold"), "dark red"),
# }}}
# {{{ sidebar
"sidebar two": ("yellow", "dark gray"),
"focused sidebar two": ("light cyan", "light blue"),
"sidebar three": ("light gray", "dark gray"),
"focused sidebar three": ("yellow", "light blue"),
# }}}
# {{{ stack
"current frame name": (
add_setting("white", "bold"), "dark gray"),
# }}}
# {{{ shell
"command line output": (add_setting("yellow", "bold"), "black"),
# }}}
# {{{ Code syntax
"keyword": ("yellow", "black"),
"literal": ("light magenta", "black"),
"function": (add_setting("light cyan", "bold"), "black"),
"punctuation": ("yellow", "black"),
"comment": ("dark cyan", "black"),
"exception": ("light red", "black"),
"builtin": ("light green", "black"),
"pseudo": ("dark green", "black"),
# }}}
}
# }}}
elif theme == "midnight":
# {{{ midnight
# Based on XCode's midnight theme
# Looks best in a console with green text against black background
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": ("black", "light gray"),
"selectable": ("black", "dark cyan"),
"focused selectable": ("black", "dark green"),
"hotkey": (add_setting("black", "underline, italics"), "light gray"),
"highlighted": ("white", "dark cyan"),
# }}}
# {{{ general ui
"input": (add_setting("yellow", "bold"), "dark blue"),
"warning": (add_setting("white", "bold"), "dark red"),
"search box": ("white", "black"),
"dialog title": (add_setting("white", "bold"), "dark cyan"),
"group head": (add_setting("dark blue", "bold"), "light gray"),
"focused sidebar": ("black", "white"),
"button": (add_setting("white", "bold"), "dark blue"),
"focused button": ("light cyan", "black"),
"value": (add_setting("yellow", "bold"), "dark blue"),
"fixed value": ("light gray", "dark blue"),
# }}}
# {{{ source view
"source": ("dark green", "black"),
"highlighted source": ("black", "dark green"),
"current source": ("black", "brown"),
"current focused source": ("black", "yellow"),
"focused source": ("white", "dark blue"),
"breakpoint source": (add_setting("yellow", "bold"), "dark red"),
"current breakpoint source": ("black", "dark red"),
"line number": ("light gray", "black"),
"current line marker": ("dark red", "black"),
"breakpoint marker": ("dark red", "black"),
# }}}
# {{{ sidebar
# }}}
# {{{ variables view
"variables": ("white", "black"),
"var label": ("light blue", "black"),
"var value": ("white", "black"),
"variable separator": ("dark cyan", "light gray"),
"focused var label": ("white", "dark blue"),
"focused var value": ("white", "dark blue"),
"highlighted var label": ("black", "dark green"),
"highlighted var value": ("black", "dark green"),
"focused highlighted var label": ("black", "light green"),
"focused highlighted var value": ("black", "light green"),
"return label": ("white", "dark blue"),
"return value": ("black", "dark cyan"),
"focused return label": ("light gray", "dark blue"),
"focused return value": ("black", "dark blue"),
# }}}
# {{{ stack
"stack": ("white", "black"),
"frame name": ("white", "black"),
"frame class": ("light blue", "black"),
"frame location": ("light cyan", "black"),
"current frame name": (add_setting("white", "bold"), "black"),
"current frame class": (add_setting("light blue", "bold"), "black"),
"current frame location": (add_setting("light cyan", "bold"), "black"),
"focused frame name": ("white", "dark blue"),
"focused frame class": ("white", "dark blue"),
"focused frame location": ("white", "dark blue"),
"focused current frame name": (
add_setting("white", "bold"), "dark blue"),
"focused current frame class": (
add_setting("white", "bold"), "dark blue"),
"focused current frame location": (
add_setting("white", "bold"), "dark blue"),
# }}}
# {{{ breakpoints view
"breakpoint": ("white", "black"),
"disabled breakpoint": ("dark gray", "black"),
"focused breakpoint": ("white", "dark blue"),
"focused disabled breakpoint": ("light gray", "dark blue"),
"current breakpoint": (add_setting("white", "bold"), "black"),
"disabled current breakpoint": (
add_setting("dark gray", "bold"), "black"),
"focused current breakpoint": (
add_setting("white", "bold"), "dark blue"),
"focused disabled current breakpoint": (
add_setting("light gray", "bold"), "dark blue"),
# }}}
# {{{ shell
"command line edit": ("white", "black"),
"command line prompt": (add_setting("white", "bold"), "black"),
"command line output": ("white", "black"),
"command line input": ("white", "black"),
"command line error": (add_setting("light red", "bold"), "black"),
"focused command line output": ("white", "dark blue"),
"focused command line input": (
"white", "dark blue"),
"focused command line error": ("black", "light red"),
"command line clear button": (add_setting("white", "bold"), "black"),
"command line focused button": ("black", "light gray"),
# }}}
# {{{ Code syntax
"keyword": ("dark magenta", "black"),
"pseudo": ("light magenta", "black"),
"function": (add_setting("light blue", "bold"), "black"),
"builtin": ("dark gray", "black"),
"literal": ("dark cyan", "black"),
"string": ("dark red", "black"),
"doublestring": ("dark red", "black"),
"docstring": ("yellow", "black"),
"backtick": ("light green", "black"),
"punctuation": ("white", "black"),
"comment": ("white", "black"),
"exception": ("light green", "black"),
# }}}
}
# }}}
elif theme == "solarized":
# {{{ solarized
palette_dict = {
# {{{ base styles
"background": ("light green", "light gray"),
"selectable": ("light green", "white"),
"focused selectable": ("white", "dark blue"),
"highlighted": ("white", "dark cyan"),
"hotkey": (add_setting("black", "underline"), "light gray"),
# }}}
# {{{ general ui
"dialog title": (add_setting("white", "bold"), "dark cyan"),
"warning": (add_setting("light red", "bold"), "white"),
"header warning": (add_setting("light red", "bold"), "light gray"),
"focused sidebar": ("dark red", "light gray"),
"group head": (add_setting("yellow", "bold"), "light gray"),
# }}}
# {{{ source view
"source": ("yellow", "white"),
"breakpoint source": ("light red", "light gray"),
"current source": ("light gray", "light blue"),
"line number": ("light blue", "white"),
"current line marker": (
add_setting("light blue", "bold"), "white"),
"breakpoint marker": (
add_setting("light red", "bold"), "white"),
# }}}
# {{{ sidebar
"sidebar two": ("dark blue", "white"),
"sidebar three": ("light cyan", "white"),
"focused sidebar three": ("light gray", "dark blue"),
# }}}
# {{{ variables view
"return label": ("white", "yellow"),
"focused return label": ("white", "yellow"),
# }}}
# {{{ stack
"current frame name": (
add_setting("light green", "bold"), "white"),
"focused current frame name": (
add_setting("white", "bold"), "dark blue"),
# }}}
# {{{ shell
"command line output": ("light green", "white"),
# }}}
# {{{ Code syntax
"namespace": ("dark red", "white"),
"exception": ("light red", "white"),
"keyword": ("brown", "white"),
"keyword2": ("dark magenta", "white"),
"function": ("dark green", "white"),
"literal": ("dark cyan", "white"),
"builtin": ("dark blue", "white"),
"comment": ("light cyan", "white"),
"pseudo": ("light cyan", "white"),
# }}}
}
# }}}
elif theme == "agr-256":
# {{{ agr-256
# Give the colors some comprehensible names
black = "h235"
blacker = "h233"
dark_cyan = "h24"
dark_gray = "h241"
dark_green = "h22"
dark_red = "h88"
dark_teal = "h23"
light_blue = "h111"
light_cyan = "h80"
light_gray = "h252"
light_green = "h113"
light_red = "h160"
medium_gray = "h246"
salmon = "h223"
orange = "h173"
white = "h255"
yellow = "h192"
link("focused breakpoint", "focused selectable")
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": (black, light_gray),
"selectable": (white, blacker),
"focused selectable": (yellow, dark_cyan),
"hotkey": (add_setting(black, "underline"), light_gray),
"highlighted": (white, dark_green),
# }}}
# {{{ general ui
"focused sidebar": (dark_cyan, light_gray),
"group head": (add_setting(dark_cyan, "bold"), light_gray),
"dialog title": (add_setting(light_gray, "bold"), black),
"warning": (add_setting(white, "bold"), dark_red),
"fixed value": (add_setting(white, "bold"), dark_gray),
"button": (add_setting(white, "bold"), black),
"focused button": (add_setting(yellow, "bold"), dark_cyan),
# }}}
# {{{ source view
"line number": (dark_gray, black),
"current line marker": (add_setting(yellow, "bold"), black),
"breakpoint marker": (add_setting(light_red, "bold"), black),
"source": (white, black),
"breakpoint source": (add_setting(white, "bold"), dark_red),
"current source": (add_setting(light_gray, "bold"), dark_teal),
# }}}
# {{{ sidebar
"sidebar two": (light_blue, blacker),
"focused sidebar two": (light_gray, dark_cyan),
"sidebar three": (medium_gray, blacker),
"focused sidebar three": (salmon, dark_cyan),
# }}}
# {{{ variables view
"highlighted var label": (light_gray, dark_green),
"return label": (light_green, blacker),
"focused return label": (
add_setting(light_gray, "bold"), dark_cyan),
# }}}
# {{{ stack
"current frame name": (yellow, blacker),
"focused current frame name": (
add_setting(yellow, "bold"), dark_cyan),
# }}}
# {{{ shell
"command line prompt": (add_setting(yellow, "bold"), black),
"command line output": (light_cyan, black),
"command line error": (light_red, black),
# }}}
# {{{ Code syntax
"comment": (medium_gray, black),
"exception": (orange, black),
"function": (yellow, black),
"keyword": (light_blue, black),
"literal": (orange, black),
"operator": (yellow, black),
"pseudo": (medium_gray, black),
"punctuation": (salmon, black),
"string": (light_green, black),
# }}}
}
# }}}
elif theme == "monokai":
# {{{ monokai
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": ("black", "light gray"),
"selectable": ("white", "black"),
"focused selectable": ("white", "dark gray"),
"highlighted": ("black", "dark green"),
"hotkey": (add_setting("black", "underline"), "light gray"),
# }}}
# {{{ general ui
"input": ("white", "black"),
"button": (add_setting("white", "bold"), "black"),
"focused button": (add_setting("white", "bold"), "dark gray"),
"focused sidebar": ("dark blue", "light gray"),
"warning": (add_setting("white", "bold"), "dark red"),
"group head": (add_setting("black", "bold"), "light gray"),
"dialog title": (add_setting("white", "bold"), "black"),
# }}}
# {{{ source view
"current source": ("black", "dark cyan"),
"breakpoint source": (add_setting("white", "bold"), "dark red"),
"line number": ("dark gray", "black"),
"current line marker": (add_setting("dark cyan", "bold"), "black"),
"breakpoint marker": (add_setting("dark red", "bold"), "black"),
# }}}
# {{{ sidebar
"sidebar two": ("light cyan", "black"),
"focused sidebar two": ("light cyan", "dark gray"),
"sidebar three": ("light magenta", "black"),
"focused sidebar three": ("light magenta", "dark gray"),
# }}}
# {{{ variables view
"return label": ("light green", "black"),
"focused return label": ("light green", "dark gray"),
# }}}
# {{{ stack
"current frame name": ("light green", "black"),
"focused current frame name": ("light green", "dark gray"),
# }}}
# {{{ shell
"command line prompt": (add_setting("yellow", "bold"), "black"),
"command line output": ("light cyan", "black"),
"command line error": ("yellow", "black"),
"focused command line output": ("light cyan", "dark gray"),
"focused command line error": (
add_setting("yellow", "bold"), "dark gray"),
# }}}
# {{{ Code syntax
"literal": ("light magenta", "black"),
"builtin": ("light cyan", "black"),
"exception": ("light cyan", "black"),
"keyword2": ("light cyan", "black"),
"function": ("light green", "black"),
"class": (add_setting("light green", "underline"), "black"),
"keyword": ("light red", "black"),
"operator": ("light red", "black"),
"comment": ("dark gray", "black"),
"docstring": ("dark gray", "black"),
"argument": ("brown", "black"),
"pseudo": ("brown", "black"),
"string": ("yellow", "black"),
# }}}
}
# }}}
elif theme == "monokai-256":
# {{{ monokai-256
# Give the colors some comprehensible names
black = "h236"
blacker = "h234"
dark_gray = "h240"
dark_green = "h28"
dark_red = "h124"
dark_teal = "h30"
dark_magenta = "h141"
light_blue = "h111"
light_cyan = "h51"
light_gray = "h252"
light_green = "h155"
light_red = "h160"
light_magenta = "h198"
medium_gray = "h243"
orange = "h208"
white = "h255"
yellow = "h228"
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": (black, light_gray),
"selectable": (white, blacker),
"focused selectable": (white, dark_gray),
"highlighted": (white, dark_green),
"hotkey": (add_setting(black, "underline"), light_gray),
# }}}
# {{{ general ui
"input": (white, black),
"button": (add_setting(white, "bold"), black),
"focused button": (add_setting(white, "bold"), dark_gray),
"focused sidebar": (dark_teal, light_gray),
"warning": (add_setting(white, "bold"), dark_red),
"group head": (add_setting(black, "bold"), light_gray),
"dialog title": (add_setting(white, "bold"), blacker),
# }}}
# {{{ source view
"source": (white, black),
"current source": (add_setting(light_gray, "bold"), dark_teal),
"breakpoint source": (add_setting(white, "bold"), dark_red),
"line number": (dark_gray, black),
"current line marker": (add_setting(light_cyan, "bold"), black),
"breakpoint marker": (add_setting(light_red, "bold"), black),
# }}}
# {{{ sidebar
"sidebar two": (light_cyan, blacker),
"focused sidebar two": (light_cyan, dark_gray),
"sidebar three": (dark_magenta, blacker),
"focused sidebar three": (dark_magenta, dark_gray),
# }}}
# {{{ variables view
"highlighted var label": (light_gray, dark_green),
"return label": (light_green, blacker),
"focused return label": (light_green, dark_gray),
# }}}
# {{{ stack
"current frame name": (light_green, blacker),
"focused current frame name": (light_green, dark_gray),
# }}}
# {{{ shell
"command line prompt": (
add_setting(yellow, "bold"), black),
"command line output": (light_cyan, black),
"command line error": (orange, black),
"focused command line output": (light_cyan, dark_gray),
"focused command line error": (
add_setting(orange, "bold"), dark_gray),
# }}}
# {{{ Code syntax
"literal": (dark_magenta, black),
"builtin": (light_cyan, black),
"exception": (light_cyan, black),
"keyword2": (light_cyan, black),
"function": (light_green, black),
"class": (add_setting(light_green, "underline"), black),
"keyword": (light_magenta, black),
"operator": (light_magenta, black),
"comment": (medium_gray, black),
"docstring": (medium_gray, black),
"argument": (orange, black),
"pseudo": (orange, black),
"string": (yellow, black),
# }}}
}
# }}}
elif theme == "mono":
# {{{ mono
palette_dict = {
"background": ("standout",),
"selectable": (),
"focused selectable": ("underline",),
"highlighted": ("bold",),
"hotkey": ("underline, standout",),
}
# }}}
else:
# {{{ custom
try:
# {{{ base styles
palette_dict = {
"background": ("black", "light gray"),
"hotkey": (add_setting("black", "underline"), "light gray"),
"selectable": ("black", "dark cyan"),
"focused selectable": ("black", "dark green"),
"input": (add_setting("yellow", "bold"), "dark blue"),
"warning": (add_setting("white", "bold"), "dark red"),
"highlighted": ("white", "dark cyan"),
"source": ("white", "dark blue"),
}
# }}}
symbols = {
"palette": palette_dict,
"add_setting": add_setting,
"link": link,
}
from os.path import expanduser, expandvars
fname = expanduser(expandvars(theme))
with open(fname) as inf:
exec(compile(inf.read(), fname, "exec"), symbols)
except FileNotFoundError:
ui_log.error("Unable to locate custom theme file {!r}"
.format(theme))
return None
except Exception:
ui_log.exception("Error when importing theme:")
return None
# }}}
# }}}
# Apply style inheritance
for style_name in set(INHERITANCE_MAP.keys()).union(BASE_STYLES.keys()):
get_style(palette_dict, style_name, inheritance_overrides)
palette_list = [
astuple(entry)
for entry in palette_dict.values()
if isinstance(entry, PaletteEntry)
]
return palette_list
| def get_palette(may_use_fancy_formats: bool, theme: str = "classic") -> list:
"""
Load the requested theme and return a list containing all palette entries
needed to highlight the debugger UI, including syntax highlighting.
"""
inheritance_overrides = {}
if may_use_fancy_formats:
def add_setting(color, setting):
return f"{color}, {setting}"
else:
def add_setting(color, setting):
return color
def link(child: str, parent: str):
inheritance_overrides[child] = parent
# {{{ themes
if theme == "classic":
# {{{ classic theme
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": ("black", "light gray"),
"selectable": ("black", "dark cyan"),
"focused selectable": ("black", "light cyan"),
"highlighted": ("dark blue", "yellow"),
"hotkey": (add_setting("black", "underline"), "light gray"),
# }}}
# {{{ general ui
"header": ("dark blue", "light gray"),
"dialog title": (add_setting("white", "bold"), "dark blue"),
"warning": (add_setting("white", "bold"), "dark red"),
# }}}
# {{{ source view
"source": ("yellow", "dark blue"),
"current source": ("dark blue", "dark green"),
"breakpoint source": (
add_setting("yellow", "bold"), "dark red"),
"line number": ("light gray", "dark blue"),
"breakpoint marker": (
add_setting("dark red", "bold"), "dark blue"),
# }}}
# {{{ sidebar
"sidebar two": ("dark blue", "dark cyan"),
"sidebar three": ("dark gray", "dark cyan"),
"focused sidebar two": ("dark blue", "light cyan"),
"focused sidebar three": ("dark gray", "light cyan"),
# }}}
# {{{ variables view
"return label": ("white", "dark blue"),
"focused return label": ("light gray", "dark blue"),
# }}}
# {{{ stack
"current frame name": (
add_setting("white", "bold"), "dark cyan"),
"focused current frame name": (
add_setting("black", "bold"), "light cyan"),
# }}}
# {{{ shell
"command line output": ("light cyan", "dark blue"),
"command line prompt": (
add_setting("white", "bold"), "dark blue"),
"command line error": (
add_setting("light green", "bold"), "dark blue"),
"command line clear button": (
add_setting("white", "bold"), "dark blue"),
"command line focused button": ("dark blue", "dark cyan"),
# }}}
# {{{ Code syntax
"keyword": (add_setting("white", "bold"), "dark blue"),
"function": ("light cyan", "dark blue"),
"literal": (add_setting("light green", "bold"), "dark blue"),
"punctuation": ("light gray", "dark blue"),
"comment": ("dark cyan", "dark blue"),
# }}}
}
# }}}
elif theme == "vim":
# {{{ vim theme
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": ("black", "light gray"),
"selectable": ("black", "dark cyan"),
"focused selectable": ("black", "light cyan"),
"hotkey": (add_setting("black", "bold, underline"), "light gray"),
"highlighted": ("black", "yellow"),
# }}}
# {{{ general ui
"header": (add_setting("black", "bold"), "light gray"),
"group head": ("dark blue", "light gray"),
"dialog title": (add_setting("white", "bold"), "dark blue"),
"input": ("black", "dark cyan"),
"focused input": ("black", "light cyan"),
"warning": (add_setting("dark red", "bold"), "white"),
"header warning": (add_setting("dark red", "bold"), "light gray"),
# }}}
# {{{ source view
"source": ("black", "white"),
"current source": ("black", "dark cyan"),
"breakpoint source": ("dark red", "light gray"),
"line number": ("dark gray", "white"),
"current line marker": ("dark red", "white"),
"breakpoint marker": ("dark red", "white"),
# }}}
# {{{ sidebar
"sidebar one": ("black", "dark cyan"),
"sidebar two": ("dark blue", "dark cyan"),
"sidebar three": ("dark gray", "dark cyan"),
"focused sidebar one": ("black", "light cyan"),
"focused sidebar two": ("dark blue", "light cyan"),
"focused sidebar three": ("dark gray", "light cyan"),
# }}}
# {{{ variables view
"highlighted var label": ("dark blue", "yellow"),
"return label": ("white", "dark blue"),
"focused return label": ("light gray", "dark blue"),
# }}}
# {{{ stack
"current frame name": (
add_setting("white", "bold"), "dark cyan"),
"focused current frame name": (
add_setting("black", "bold"), "light cyan"),
# }}}
# {{{ shell
"command line output": (
add_setting("dark gray", "bold"), "white"),
# }}}
# {{{ Code syntax
"keyword2": ("dark magenta", "white"),
"namespace": ("dark magenta", "white"),
"literal": ("dark red", "white"),
"exception": ("dark red", "white"),
"comment": ("dark gray", "white"),
"function": ("dark blue", "white"),
"pseudo": ("dark gray", "white"),
"builtin": ("light blue", "white"),
# }}}
}
# }}}
elif theme == "dark vim":
# {{{ dark vim
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": ("black", "light gray"),
"selectable": ("white", "dark gray"),
"focused selectable": (add_setting("white", "bold"), "light blue"),
"highlighted": ("black", "dark green"),
"hotkey": (add_setting("dark blue", "underline"), "light gray"),
# }}}
# {{{ general ui
"header": ("dark blue", "light gray"),
"dialog title": (add_setting("white", "bold"), "black"),
"warning": (add_setting("light red", "bold"), "black"),
"header warning": (add_setting("light red", "bold"), "light gray"),
# }}}
# {{{ source view
"source": ("white", "black"),
"current source": (add_setting("white", "bold"), "dark gray"),
"line number": (add_setting("dark gray", "bold"), "black"),
"breakpoint marker": (add_setting("light red", "bold"), "black"),
"breakpoint source": (add_setting("white", "bold"), "dark red"),
# }}}
# {{{ sidebar
"sidebar two": ("yellow", "dark gray"),
"focused sidebar two": ("light cyan", "light blue"),
"sidebar three": ("light gray", "dark gray"),
"focused sidebar three": ("yellow", "light blue"),
# }}}
# {{{ stack
"current frame name": (
add_setting("white", "bold"), "dark gray"),
# }}}
# {{{ shell
"command line output": (add_setting("yellow", "bold"), "black"),
# }}}
# {{{ Code syntax
"keyword": ("yellow", "black"),
"literal": ("light magenta", "black"),
"function": (add_setting("light cyan", "bold"), "black"),
"punctuation": ("yellow", "black"),
"comment": ("dark cyan", "black"),
"exception": ("light red", "black"),
"builtin": ("light green", "black"),
"pseudo": ("dark green", "black"),
# }}}
}
# }}}
elif theme == "midnight":
# {{{ midnight
# Based on XCode's midnight theme
# Looks best in a console with green text against black background
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": ("black", "light gray"),
"selectable": ("black", "dark cyan"),
"focused selectable": ("black", "dark green"),
"hotkey": (add_setting("black", "underline, italics"), "light gray"),
"highlighted": ("white", "dark cyan"),
# }}}
# {{{ general ui
"input": (add_setting("yellow", "bold"), "dark blue"),
"warning": (add_setting("white", "bold"), "dark red"),
"search box": ("white", "black"),
"dialog title": (add_setting("white", "bold"), "dark cyan"),
"group head": (add_setting("dark blue", "bold"), "light gray"),
"focused sidebar": ("black", "white"),
"button": (add_setting("white", "bold"), "dark blue"),
"focused button": ("light cyan", "black"),
"value": (add_setting("yellow", "bold"), "dark blue"),
"fixed value": ("light gray", "dark blue"),
# }}}
# {{{ source view
"source": ("dark green", "black"),
"highlighted source": ("black", "dark green"),
"current source": ("black", "brown"),
"current focused source": (add_setting("yellow", "bold"), "dark blue"),
"focused source": ("white", "dark blue"),
"breakpoint source": (add_setting("yellow", "bold"), "dark red"),
"current breakpoint source": ("black", "dark red"),
"line number": ("light gray", "black"),
"current line marker": ("dark red", "black"),
"breakpoint marker": ("dark red", "black"),
# }}}
# {{{ sidebar
# }}}
# {{{ variables view
"variables": ("white", "black"),
"var label": ("light blue", "black"),
"var value": ("white", "black"),
"variable separator": ("dark cyan", "light gray"),
"focused var label": ("white", "dark blue"),
"focused var value": ("white", "dark blue"),
"highlighted var label": ("black", "dark green"),
"highlighted var value": ("black", "dark green"),
"focused highlighted var label": ("black", "light green"),
"focused highlighted var value": ("black", "light green"),
"return label": ("white", "dark blue"),
"return value": ("black", "dark cyan"),
"focused return label": ("light gray", "dark blue"),
"focused return value": ("black", "dark blue"),
# }}}
# {{{ stack
"stack": ("white", "black"),
"frame name": ("white", "black"),
"frame class": ("light blue", "black"),
"frame location": ("light cyan", "black"),
"current frame name": (add_setting("white", "bold"), "black"),
"current frame class": (add_setting("light blue", "bold"), "black"),
"current frame location": (add_setting("light cyan", "bold"), "black"),
"focused frame name": ("white", "dark blue"),
"focused frame class": ("white", "dark blue"),
"focused frame location": ("white", "dark blue"),
"focused current frame name": (
add_setting("white", "bold"), "dark blue"),
"focused current frame class": (
add_setting("white", "bold"), "dark blue"),
"focused current frame location": (
add_setting("white", "bold"), "dark blue"),
# }}}
# {{{ breakpoints view
"breakpoint": ("white", "black"),
"disabled breakpoint": ("dark gray", "black"),
"focused breakpoint": ("white", "dark blue"),
"focused disabled breakpoint": ("light gray", "dark blue"),
"current breakpoint": (add_setting("white", "bold"), "black"),
"disabled current breakpoint": (
add_setting("dark gray", "bold"), "black"),
"focused current breakpoint": (
add_setting("white", "bold"), "dark blue"),
"focused disabled current breakpoint": (
add_setting("light gray", "bold"), "dark blue"),
# }}}
# {{{ shell
"command line edit": ("white", "black"),
"command line prompt": (add_setting("white", "bold"), "black"),
"command line output": ("white", "black"),
"command line input": ("white", "black"),
"command line error": (add_setting("light red", "bold"), "black"),
"focused command line output": ("white", "dark blue"),
"focused command line input": (
"white", "dark blue"),
"focused command line error": ("black", "light red"),
"command line clear button": (add_setting("white", "bold"), "black"),
"command line focused button": ("black", "light gray"),
# }}}
# {{{ Code syntax
"keyword": ("dark magenta", "black"),
"pseudo": ("light magenta", "black"),
"function": (add_setting("light blue", "bold"), "black"),
"builtin": ("dark gray", "black"),
"literal": ("dark cyan", "black"),
"string": ("dark red", "black"),
"doublestring": ("dark red", "black"),
"docstring": ("yellow", "black"),
"backtick": ("light green", "black"),
"punctuation": ("white", "black"),
"comment": ("white", "black"),
"exception": ("light green", "black"),
# }}}
}
# }}}
elif theme == "solarized":
# {{{ solarized
palette_dict = {
# {{{ base styles
"background": ("light green", "light gray"),
"selectable": ("light green", "white"),
"focused selectable": ("white", "dark blue"),
"highlighted": ("white", "dark cyan"),
"hotkey": (add_setting("black", "underline"), "light gray"),
# }}}
# {{{ general ui
"dialog title": (add_setting("white", "bold"), "dark cyan"),
"warning": (add_setting("light red", "bold"), "white"),
"header warning": (add_setting("light red", "bold"), "light gray"),
"focused sidebar": ("dark red", "light gray"),
"group head": (add_setting("yellow", "bold"), "light gray"),
# }}}
# {{{ source view
"source": ("yellow", "white"),
"breakpoint source": ("light red", "light gray"),
"current source": ("light gray", "light blue"),
"line number": ("light blue", "white"),
"current line marker": (
add_setting("light blue", "bold"), "white"),
"breakpoint marker": (
add_setting("light red", "bold"), "white"),
# }}}
# {{{ sidebar
"sidebar two": ("dark blue", "white"),
"sidebar three": ("light cyan", "white"),
"focused sidebar three": ("light gray", "dark blue"),
# }}}
# {{{ variables view
"return label": ("white", "yellow"),
"focused return label": ("white", "yellow"),
# }}}
# {{{ stack
"current frame name": (
add_setting("light green", "bold"), "white"),
"focused current frame name": (
add_setting("white", "bold"), "dark blue"),
# }}}
# {{{ shell
"command line output": ("light green", "white"),
# }}}
# {{{ Code syntax
"namespace": ("dark red", "white"),
"exception": ("light red", "white"),
"keyword": ("brown", "white"),
"keyword2": ("dark magenta", "white"),
"function": ("dark green", "white"),
"literal": ("dark cyan", "white"),
"builtin": ("dark blue", "white"),
"comment": ("light cyan", "white"),
"pseudo": ("light cyan", "white"),
# }}}
}
# }}}
elif theme == "agr-256":
# {{{ agr-256
# Give the colors some comprehensible names
black = "h235"
blacker = "h233"
dark_cyan = "h24"
dark_gray = "h241"
dark_green = "h22"
dark_red = "h88"
dark_teal = "h23"
light_blue = "h111"
light_cyan = "h80"
light_gray = "h252"
light_green = "h113"
light_red = "h160"
medium_gray = "h246"
salmon = "h223"
orange = "h173"
white = "h255"
yellow = "h192"
link("focused breakpoint", "focused selectable")
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": (black, light_gray),
"selectable": (white, blacker),
"focused selectable": (yellow, dark_cyan),
"hotkey": (add_setting(black, "underline"), light_gray),
"highlighted": (white, dark_green),
# }}}
# {{{ general ui
"focused sidebar": (dark_cyan, light_gray),
"group head": (add_setting(dark_cyan, "bold"), light_gray),
"dialog title": (add_setting(light_gray, "bold"), black),
"warning": (add_setting(white, "bold"), dark_red),
"fixed value": (add_setting(white, "bold"), dark_gray),
"button": (add_setting(white, "bold"), black),
"focused button": (add_setting(yellow, "bold"), dark_cyan),
# }}}
# {{{ source view
"line number": (dark_gray, black),
"current line marker": (add_setting(yellow, "bold"), black),
"breakpoint marker": (add_setting(light_red, "bold"), black),
"source": (white, black),
"breakpoint source": (add_setting(white, "bold"), dark_red),
"current source": (add_setting(light_gray, "bold"), dark_teal),
# }}}
# {{{ sidebar
"sidebar two": (light_blue, blacker),
"focused sidebar two": (light_gray, dark_cyan),
"sidebar three": (medium_gray, blacker),
"focused sidebar three": (salmon, dark_cyan),
# }}}
# {{{ variables view
"highlighted var label": (light_gray, dark_green),
"return label": (light_green, blacker),
"focused return label": (
add_setting(light_gray, "bold"), dark_cyan),
# }}}
# {{{ stack
"current frame name": (yellow, blacker),
"focused current frame name": (
add_setting(yellow, "bold"), dark_cyan),
# }}}
# {{{ shell
"command line prompt": (add_setting(yellow, "bold"), black),
"command line output": (light_cyan, black),
"command line error": (light_red, black),
# }}}
# {{{ Code syntax
"comment": (medium_gray, black),
"exception": (orange, black),
"function": (yellow, black),
"keyword": (light_blue, black),
"literal": (orange, black),
"operator": (yellow, black),
"pseudo": (medium_gray, black),
"punctuation": (salmon, black),
"string": (light_green, black),
# }}}
}
# }}}
elif theme == "monokai":
# {{{ monokai
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": ("black", "light gray"),
"selectable": ("white", "black"),
"focused selectable": ("white", "dark gray"),
"highlighted": ("black", "dark green"),
"hotkey": (add_setting("black", "underline"), "light gray"),
# }}}
# {{{ general ui
"input": ("white", "black"),
"button": (add_setting("white", "bold"), "black"),
"focused button": (add_setting("white", "bold"), "dark gray"),
"focused sidebar": ("dark blue", "light gray"),
"warning": (add_setting("white", "bold"), "dark red"),
"group head": (add_setting("black", "bold"), "light gray"),
"dialog title": (add_setting("white", "bold"), "black"),
# }}}
# {{{ source view
"current source": ("black", "dark cyan"),
"breakpoint source": (add_setting("white", "bold"), "dark red"),
"line number": ("dark gray", "black"),
"current line marker": (add_setting("dark cyan", "bold"), "black"),
"breakpoint marker": (add_setting("dark red", "bold"), "black"),
# }}}
# {{{ sidebar
"sidebar two": ("light cyan", "black"),
"focused sidebar two": ("light cyan", "dark gray"),
"sidebar three": ("light magenta", "black"),
"focused sidebar three": ("light magenta", "dark gray"),
# }}}
# {{{ variables view
"return label": ("light green", "black"),
"focused return label": ("light green", "dark gray"),
# }}}
# {{{ stack
"current frame name": ("light green", "black"),
"focused current frame name": ("light green", "dark gray"),
# }}}
# {{{ shell
"command line prompt": (add_setting("yellow", "bold"), "black"),
"command line output": ("light cyan", "black"),
"command line error": ("yellow", "black"),
"focused command line output": ("light cyan", "dark gray"),
"focused command line error": (
add_setting("yellow", "bold"), "dark gray"),
# }}}
# {{{ Code syntax
"literal": ("light magenta", "black"),
"builtin": ("light cyan", "black"),
"exception": ("light cyan", "black"),
"keyword2": ("light cyan", "black"),
"function": ("light green", "black"),
"class": (add_setting("light green", "underline"), "black"),
"keyword": ("light red", "black"),
"operator": ("light red", "black"),
"comment": ("dark gray", "black"),
"docstring": ("dark gray", "black"),
"argument": ("brown", "black"),
"pseudo": ("brown", "black"),
"string": ("yellow", "black"),
# }}}
}
# }}}
elif theme == "monokai-256":
# {{{ monokai-256
# Give the colors some comprehensible names
black = "h236"
blacker = "h234"
dark_gray = "h240"
dark_green = "h28"
dark_red = "h124"
dark_teal = "h30"
dark_magenta = "h141"
light_blue = "h111"
light_cyan = "h51"
light_gray = "h252"
light_green = "h155"
light_red = "h160"
light_magenta = "h198"
medium_gray = "h243"
orange = "h208"
white = "h255"
yellow = "h228"
link("current breakpoint", "current frame name")
link("focused current breakpoint", "focused current frame name")
palette_dict = {
# {{{ base styles
"background": (black, light_gray),
"selectable": (white, blacker),
"focused selectable": (white, dark_gray),
"highlighted": (white, dark_green),
"hotkey": (add_setting(black, "underline"), light_gray),
# }}}
# {{{ general ui
"input": (white, black),
"button": (add_setting(white, "bold"), black),
"focused button": (add_setting(white, "bold"), dark_gray),
"focused sidebar": (dark_teal, light_gray),
"warning": (add_setting(white, "bold"), dark_red),
"group head": (add_setting(black, "bold"), light_gray),
"dialog title": (add_setting(white, "bold"), blacker),
# }}}
# {{{ source view
"source": (white, black),
"current source": (add_setting(light_gray, "bold"), dark_teal),
"breakpoint source": (add_setting(white, "bold"), dark_red),
"line number": (dark_gray, black),
"current line marker": (add_setting(light_cyan, "bold"), black),
"breakpoint marker": (add_setting(light_red, "bold"), black),
# }}}
# {{{ sidebar
"sidebar two": (light_cyan, blacker),
"focused sidebar two": (light_cyan, dark_gray),
"sidebar three": (dark_magenta, blacker),
"focused sidebar three": (dark_magenta, dark_gray),
# }}}
# {{{ variables view
"highlighted var label": (light_gray, dark_green),
"return label": (light_green, blacker),
"focused return label": (light_green, dark_gray),
# }}}
# {{{ stack
"current frame name": (light_green, blacker),
"focused current frame name": (light_green, dark_gray),
# }}}
# {{{ shell
"command line prompt": (
add_setting(yellow, "bold"), black),
"command line output": (light_cyan, black),
"command line error": (orange, black),
"focused command line output": (light_cyan, dark_gray),
"focused command line error": (
add_setting(orange, "bold"), dark_gray),
# }}}
# {{{ Code syntax
"literal": (dark_magenta, black),
"builtin": (light_cyan, black),
"exception": (light_cyan, black),
"keyword2": (light_cyan, black),
"function": (light_green, black),
"class": (add_setting(light_green, "underline"), black),
"keyword": (light_magenta, black),
"operator": (light_magenta, black),
"comment": (medium_gray, black),
"docstring": (medium_gray, black),
"argument": (orange, black),
"pseudo": (orange, black),
"string": (yellow, black),
# }}}
}
# }}}
elif theme == "mono":
# {{{ mono
palette_dict = {
"background": ("standout",),
"selectable": (),
"focused selectable": ("underline",),
"highlighted": ("bold",),
"hotkey": ("underline, standout",),
}
# }}}
else:
# {{{ custom
try:
# {{{ base styles
palette_dict = {
"background": ("black", "light gray"),
"hotkey": (add_setting("black", "underline"), "light gray"),
"selectable": ("black", "dark cyan"),
"focused selectable": ("black", "dark green"),
"input": (add_setting("yellow", "bold"), "dark blue"),
"warning": (add_setting("white", "bold"), "dark red"),
"highlighted": ("white", "dark cyan"),
"source": ("white", "dark blue"),
}
# }}}
symbols = {
"palette": palette_dict,
"add_setting": add_setting,
"link": link,
}
from os.path import expanduser, expandvars
fname = expanduser(expandvars(theme))
with open(fname) as inf:
exec(compile(inf.read(), fname, "exec"), symbols)
except FileNotFoundError:
ui_log.error("Unable to locate custom theme file {!r}"
.format(theme))
return None
except Exception:
ui_log.exception("Error when importing theme:")
return None
# }}}
# }}}
# Apply style inheritance
for style_name in set(INHERITANCE_MAP.keys()).union(BASE_STYLES.keys()):
get_style(palette_dict, style_name, inheritance_overrides)
palette_list = [
astuple(entry)
for entry in palette_dict.values()
if isinstance(entry, PaletteEntry)
]
return palette_list
|
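For context, the palette_list produced at the end of this block is a plain list of tuples in the shape urwid expects; a minimal hedged sketch (entry names and colors here are illustrative, not taken from the code above):

# (name, foreground, background) triples; settings such as "bold" ride along
# in the foreground string, e.g. "yellow,bold".
palette_list = [
    ("source", "white", "black"),
    ("keyword", "yellow,bold", "black"),
    ("comment", "dark gray", "black"),
]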
21,121 | def symlink_to(orig, dest):
if (is_python2 or is_python3) and is_windows:
import subprocess
subprocess.call(['mklink', '/d', path2str(orig), path2str(dest)], shell=True)
else:
orig.symlink_to(dest)
| def symlink_to(orig, dest):
if is_windows:
import subprocess
subprocess.call(['mklink', '/d', path2str(orig), path2str(dest)], shell=True)
else:
orig.symlink_to(dest)
|
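A self-contained sketch of the same Windows/mklink fallback pattern shown in this row (make_dir_link is a hypothetical helper, not part of the code above):

import os
import subprocess
from pathlib import Path

def make_dir_link(link: Path, target: Path) -> None:
    # Directory symlink: mklink /d on Windows, Path.symlink_to elsewhere.
    if os.name == "nt":
        subprocess.call(["mklink", "/d", str(link), str(target)], shell=True)
    else:
        link.symlink_to(target)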
2,492 | def test_gradient_boosting_early_stopping():
X, y = make_classification(n_samples=1000, random_state=0)
gbc = GradientBoostingClassifier(
n_estimators=100,
n_iter_no_change=10,
learning_rate=0.1,
max_depth=3,
random_state=42,
)
gbr = GradientBoostingRegressor(
n_estimators=100,
n_iter_no_change=10,
learning_rate=0.1,
max_depth=3,
random_state=42,
)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# Check if early_stopping works as expected
for est, tol, early_stop_n_estimators in (
(gbc, 1e-1, 28),
(gbr, 1e-1, 13),
(gbc, 1e-3, 70),
(gbr, 1e-3, 28),
):
est.set_params(tol=tol)
est.fit(X_train, y_train)
assert est.n_estimators_ == early_stop_n_estimators
assert est.score(X_test, y_test) > 0.7
# Without early stopping
gbc = GradientBoostingClassifier(
n_estimators=5, learning_rate=0.1, max_depth=3, random_state=42
)
gbc.fit(X, y)
gbr = GradientBoostingRegressor(
n_estimators=10, learning_rate=0.1, max_depth=3, random_state=42
)
gbr.fit(X, y)
assert gbc.n_estimators_ == 5
assert gbr.n_estimators_ == 10
| def test_gradient_boosting_early_stopping():
X, y = make_classification(n_samples=1000, random_state=0)
gbc = GradientBoostingClassifier(
n_estimators=100,
n_iter_no_change=10,
learning_rate=0.1,
max_depth=3,
random_state=42,
)
gbr = GradientBoostingRegressor(
n_estimators=1000,
n_iter_no_change=10,
learning_rate=0.1,
max_depth=3,
random_state=42,
)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
# Check if early_stopping works as expected
for est, tol, early_stop_n_estimators in (
(gbc, 1e-1, 28),
(gbr, 1e-1, 13),
(gbc, 1e-3, 70),
(gbr, 1e-3, 28),
):
est.set_params(tol=tol)
est.fit(X_train, y_train)
assert est.n_estimators_ == early_stop_n_estimators
assert est.score(X_test, y_test) > 0.7
# Without early stopping
gbc = GradientBoostingClassifier(
n_estimators=5, learning_rate=0.1, max_depth=3, random_state=42
)
gbc.fit(X, y)
gbr = GradientBoostingRegressor(
n_estimators=10, learning_rate=0.1, max_depth=3, random_state=42
)
gbr.fit(X, y)
assert gbc.n_estimators_ == 5
assert gbr.n_estimators_ == 10
|
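A minimal sketch of the early-stopping behaviour the test above relies on: when n_iter_no_change is set, scikit-learn holds out validation_fraction of the training data and stops once the validation loss stops improving by more than tol, so n_estimators_ can end up far below n_estimators.

from sklearn.datasets import make_classification
from sklearn.ensemble import GradientBoostingClassifier

X, y = make_classification(n_samples=1000, random_state=0)
gbc = GradientBoostingClassifier(n_estimators=1000, n_iter_no_change=5, random_state=0)
gbc.fit(X, y)
# The fitted estimator reports how many stages were actually built.
print(gbc.n_estimators_, "<=", gbc.n_estimators)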
2,821 | def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between X and Y.
Distances are calculated between (X[0], Y[0]), (X[1], Y[1]), ... .
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
An array where each row is a sample and each column is a feature.
Y : array-like of shape (n_samples, n_features)
An array where each row is a sample and each column is a feature.
Returns
-------
distances : ndarray of shape (n_samples,)
L1 distances between the row vectors of `X` and the row vectors
of `Y`.
Examples
--------
>>> from sklearn.metrics.pairwise import paired_manhattan_distances
>>> import numpy as np
>>> X = np.array([[1, 1, 0], [0, 1, 0], [0, 0, 1]])
>>> Y = np.eye(3, k=1)
>>> paired_manhattan_distances(X, Y)
array([1., 2., 1.])
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
| def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between X and Y.
Distances are calculated between (X[0], Y[0]), (X[1], Y[1]), ... .
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
An array where each row is a sample and each column is a feature.
Y : array-like of shape (n_samples, n_features)
An array where each row is a sample and each column is a feature.
Returns
-------
distances : ndarray of shape (n_samples,)
L1 distances between the row vectors of `X` and the row vectors
of `Y`.
Examples
--------
>>> from sklearn.metrics.pairwise import paired_manhattan_distances
>>> import numpy as np
>>> X = np.array([[1, 1, 0], [0, 1, 0], [0, 0, 1]])
>>> Y = np.array([[0, 1, 0], [0, 0, 1], [0, 0, 0]])
>>> paired_manhattan_distances(X, Y)
array([1., 2., 1.])
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
|
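The doctest values in this row can be reproduced with plain NumPy, since the paired L1 distance is just the row-wise sum of absolute differences:

import numpy as np

X = np.array([[1, 1, 0], [0, 1, 0], [0, 0, 1]])
Y = np.array([[0, 1, 0], [0, 0, 1], [0, 0, 0]])
print(np.abs(X - Y).sum(axis=1))  # [1 2 1]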
10,409 | def _create_powershell_wrapper(b_module_data, module_path, module_args,
environment, async_timeout, become,
become_method, become_user, become_password,
become_flags, substyle, task_vars):
# creates the manifest/wrapper used in PowerShell/C# modules to enable
# things like become and async - this is also called in action/script.py
# FUTURE: add process_wrapper.ps1 to run module_wrapper in a new process
# if running under a persistent connection and substyle is C# so we
# don't have type conflicts
finder = PSModuleDepFinder()
if substyle != 'script':
# don't scan the module for util dependencies and other Ansible related
# flags if the substyle is 'script' which is set by action/script
finder.scan_module(b_module_data, powershell=(substyle == "powershell"))
module_wrapper = "module_%s_wrapper" % substyle
exec_manifest = dict(
module_entry=to_text(base64.b64encode(b_module_data)),
powershell_modules=dict(),
csharp_utils=dict(),
csharp_utils_module=list(), # csharp_utils only required by a module
module_args=module_args,
actions=[module_wrapper],
environment=environment,
encoded_output=False,
)
finder.scan_exec_script(module_wrapper)
if async_timeout > 0:
finder.scan_exec_script('exec_wrapper')
finder.scan_exec_script('async_watchdog')
finder.scan_exec_script('async_wrapper')
exec_manifest["actions"].insert(0, 'async_watchdog')
exec_manifest["actions"].insert(0, 'async_wrapper')
exec_manifest["async_jid"] = str(random.randint(0, 999999999999))
exec_manifest["async_timeout_sec"] = async_timeout
exec_manifest["async_named_pipe_timeout"] = C.config.get_config_value('DEFAULT_WIN_ASYNC_STARTUP_TIMEOUT')
if become and become_method == 'runas':
finder.scan_exec_script('exec_wrapper')
finder.scan_exec_script('become_wrapper')
exec_manifest["actions"].insert(0, 'become_wrapper')
exec_manifest["become_user"] = become_user
exec_manifest["become_password"] = become_password
exec_manifest['become_flags'] = become_flags
exec_manifest['min_ps_version'] = finder.ps_version
exec_manifest['min_os_version'] = finder.os_version
if finder.become and 'become_wrapper' not in exec_manifest['actions']:
finder.scan_exec_script('exec_wrapper')
finder.scan_exec_script('become_wrapper')
exec_manifest['actions'].insert(0, 'become_wrapper')
exec_manifest['become_user'] = 'SYSTEM'
exec_manifest['become_password'] = None
exec_manifest['become_flags'] = None
coverage_manifest = dict(
module_path=module_path,
module_util_paths=dict(),
output=None,
)
coverage_output = C.config.get_config_value('COVERAGE_REMOTE_OUTPUT', variables=task_vars)
if coverage_output and substyle == 'powershell':
finder.scan_exec_script('coverage_wrapper')
coverage_manifest['output'] = coverage_output
coverage_whitelist = C.config.get_config_value('COVERAGE_REMOTE_WHITELIST', variables=task_vars)
coverage_manifest['whitelist'] = coverage_whitelist
# make sure Ansible.ModuleUtils.AddType is added if any C# utils are used
if len(finder.cs_utils_wrapper) > 0 or len(finder.cs_utils_module) > 0:
finder._add_module((b"Ansible.ModuleUtils.AddType", ".psm1"),
wrapper=False)
# exec_wrapper is only required to be part of the payload if using
# become or async, to save on payload space we check if exec_wrapper has
# already been added, and remove it manually if it hasn't later
exec_required = "exec_wrapper" in finder.exec_scripts.keys()
finder.scan_exec_script("exec_wrapper")
# must contain an empty newline so it runs the begin/process/end block
finder.exec_scripts["exec_wrapper"] += b"\n\n"
exec_wrapper = finder.exec_scripts["exec_wrapper"]
if not exec_required:
finder.exec_scripts.pop("exec_wrapper")
for name, data in finder.exec_scripts.items():
b64_data = to_text(base64.b64encode(data))
exec_manifest[name] = b64_data
for name, data in finder.ps_modules.items():
b64_data = to_text(base64.b64encode(data['data']))
exec_manifest['powershell_modules'][name] = b64_data
coverage_manifest['module_util_paths'][name] = data['path']
cs_utils = {}
for cs_util in [finder.cs_utils_wrapper, finder.cs_utils_module]:
for name, data in cs_util.items():
cs_utils[name] = data['data']
for name, data in cs_utils.items():
b64_data = to_text(base64.b64encode(data))
exec_manifest['csharp_utils'][name] = b64_data
exec_manifest['csharp_utils_module'] = list(finder.cs_utils_module.keys())
# To save on the data we are sending across we only add the coverage info if coverage is being run
if 'coverage_wrapper' in exec_manifest:
exec_manifest['coverage'] = coverage_manifest
b_json = to_bytes(json.dumps(exec_manifest))
# delimit the payload JSON from the wrapper to keep sensitive contents out of scriptblocks (which can be logged)
b_data = exec_wrapper + b'\0\0\0\0' + b_json
return b_data
| def _create_powershell_wrapper(b_module_data, module_path, module_args,
environment, async_timeout, become,
become_method, become_user, become_password,
become_flags, substyle, task_vars):
# creates the manifest/wrapper used in PowerShell/C# modules to enable
# things like become and async - this is also called in action/script.py
# FUTURE: add process_wrapper.ps1 to run module_wrapper in a new process
# if running under a persistent connection and substyle is C# so we
# don't have type conflicts
finder = PSModuleDepFinder()
if substyle != 'script':
# don't scan the module for util dependencies and other Ansible related
# flags if the substyle is 'script' which is set by action/script
finder.scan_module(b_module_data, powershell=(substyle == "powershell"))
module_wrapper = "module_%s_wrapper" % substyle
exec_manifest = dict(
module_entry=to_text(base64.b64encode(b_module_data)),
powershell_modules=dict(),
csharp_utils=dict(),
csharp_utils_module=list(), # csharp_utils only required by a module
module_args=module_args,
actions=[module_wrapper],
environment=environment,
encoded_output=False,
)
finder.scan_exec_script(module_wrapper)
if async_timeout > 0:
finder.scan_exec_script('exec_wrapper')
finder.scan_exec_script('async_watchdog')
finder.scan_exec_script('async_wrapper')
exec_manifest["actions"].insert(0, 'async_watchdog')
exec_manifest["actions"].insert(0, 'async_wrapper')
exec_manifest["async_jid"] = str(random.randint(0, 999999999999))
exec_manifest["async_timeout_sec"] = async_timeout
exec_manifest["async_named_pipe_timeout"] = C.DEFAULT_WIN_ASYNC_STARTUP_TIMEOUT
if become and become_method == 'runas':
finder.scan_exec_script('exec_wrapper')
finder.scan_exec_script('become_wrapper')
exec_manifest["actions"].insert(0, 'become_wrapper')
exec_manifest["become_user"] = become_user
exec_manifest["become_password"] = become_password
exec_manifest['become_flags'] = become_flags
exec_manifest['min_ps_version'] = finder.ps_version
exec_manifest['min_os_version'] = finder.os_version
if finder.become and 'become_wrapper' not in exec_manifest['actions']:
finder.scan_exec_script('exec_wrapper')
finder.scan_exec_script('become_wrapper')
exec_manifest['actions'].insert(0, 'become_wrapper')
exec_manifest['become_user'] = 'SYSTEM'
exec_manifest['become_password'] = None
exec_manifest['become_flags'] = None
coverage_manifest = dict(
module_path=module_path,
module_util_paths=dict(),
output=None,
)
coverage_output = C.config.get_config_value('COVERAGE_REMOTE_OUTPUT', variables=task_vars)
if coverage_output and substyle == 'powershell':
finder.scan_exec_script('coverage_wrapper')
coverage_manifest['output'] = coverage_output
coverage_whitelist = C.config.get_config_value('COVERAGE_REMOTE_WHITELIST', variables=task_vars)
coverage_manifest['whitelist'] = coverage_whitelist
# make sure Ansible.ModuleUtils.AddType is added if any C# utils are used
if len(finder.cs_utils_wrapper) > 0 or len(finder.cs_utils_module) > 0:
finder._add_module((b"Ansible.ModuleUtils.AddType", ".psm1"),
wrapper=False)
# exec_wrapper is only required to be part of the payload if using
# become or async, to save on payload space we check if exec_wrapper has
# already been added, and remove it manually if it hasn't later
exec_required = "exec_wrapper" in finder.exec_scripts.keys()
finder.scan_exec_script("exec_wrapper")
# must contain an empty newline so it runs the begin/process/end block
finder.exec_scripts["exec_wrapper"] += b"\n\n"
exec_wrapper = finder.exec_scripts["exec_wrapper"]
if not exec_required:
finder.exec_scripts.pop("exec_wrapper")
for name, data in finder.exec_scripts.items():
b64_data = to_text(base64.b64encode(data))
exec_manifest[name] = b64_data
for name, data in finder.ps_modules.items():
b64_data = to_text(base64.b64encode(data['data']))
exec_manifest['powershell_modules'][name] = b64_data
coverage_manifest['module_util_paths'][name] = data['path']
cs_utils = {}
for cs_util in [finder.cs_utils_wrapper, finder.cs_utils_module]:
for name, data in cs_util.items():
cs_utils[name] = data['data']
for name, data in cs_utils.items():
b64_data = to_text(base64.b64encode(data))
exec_manifest['csharp_utils'][name] = b64_data
exec_manifest['csharp_utils_module'] = list(finder.cs_utils_module.keys())
# To save on the data we are sending across we only add the coverage info if coverage is being run
if 'coverage_wrapper' in exec_manifest:
exec_manifest['coverage'] = coverage_manifest
b_json = to_bytes(json.dumps(exec_manifest))
# delimit the payload JSON from the wrapper to keep sensitive contents out of scriptblocks (which can be logged)
b_data = exec_wrapper + b'\0\0\0\0' + b_json
return b_data
|
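The four-null-byte delimiter appended above makes it easy to split the wrapper from the JSON manifest on the receiving end; a hedged sketch of that split (payload contents here are placeholders):

payload = b"<exec_wrapper.ps1 contents>" + b"\0\0\0\0" + b'{"actions": ["module_powershell_wrapper"]}'
wrapper, _, manifest_json = payload.partition(b"\0\0\0\0")
print(wrapper)        # b'<exec_wrapper.ps1 contents>'
print(manifest_json)  # b'{"actions": ["module_powershell_wrapper"]}'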
32,362 | def set_password_not_expire(default_base_dn):
args = demisto.args()
sam_account_name = args.get('username')
pwd_n_exp = args.get('value')
# query by sAMAccountName
if sam_account_name or args.get('sAMAccountName'):
if sam_account_name:
username = escape_filter_chars(sam_account_name)
else:
username = escape_filter_chars(args['sAMAccountName'])
query = "(&(objectClass=User)(objectCategory=person)(sAMAccountName={}))".format(username)
entries = search_with_paging(query, default_base_dn, attributes='userAccountControl', size_limit=0, time_limit=0)
user = entries.get('flat')[0]
user_account_control = user.get('userAccountControl')[0]
# Check if UAC flag for "Password Never Expire" (0x10000) is set to True or False:
if pwd_n_exp == 'true':
# Sets the bit 16 to 1
user_account_control |= 1 << 16
content_output = f"AD account {username} has set \"password never expire\" attribute. Value is set to True"
else:
# Clears the bit 16 to 0
user_account_control &= ~(1 << 16)
content_output = f"AD account {username} has cleared \"password never expire\" attribute. Value is set to False"
attribute_name = 'userAccountControl'
attribute_value = user_account_control
dn = user_dn(sam_account_name, default_base_dn)
modification = {}
modification[attribute_name] = [('MODIFY_REPLACE', attribute_value)]
# modify user
modify_object(dn, modification)
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': content_output
}
demisto.results(demisto_entry)
| def set_password_not_expire(default_base_dn):
args = demisto.args()
sam_account_name = args.get('username')
pwd_n_exp = arg_to_boolean(args.get('value'))
# query by sAMAccountName
if sam_account_name or args.get('sAMAccountName'):
if sam_account_name:
username = escape_filter_chars(sam_account_name)
else:
username = escape_filter_chars(args['sAMAccountName'])
query = "(&(objectClass=User)(objectCategory=person)(sAMAccountName={}))".format(username)
entries = search_with_paging(query, default_base_dn, attributes='userAccountControl', size_limit=0, time_limit=0)
user = entries.get('flat')[0]
user_account_control = user.get('userAccountControl')[0]
# Check if UAC flag for "Password Never Expire" (0x10000) is set to True or False:
if pwd_n_exp:
# Sets the bit 16 to 1
user_account_control |= 1 << 16
content_output = f"AD account {username} has set \"password never expire\" attribute. Value is set to True"
else:
# Clears the bit 16 to 0
user_account_control &= ~(1 << 16)
content_output = f"AD account {username} has cleared \"password never expire\" attribute. Value is set to False"
attribute_name = 'userAccountControl'
attribute_value = user_account_control
dn = user_dn(sam_account_name, default_base_dn)
modification = {}
modification[attribute_name] = [('MODIFY_REPLACE', attribute_value)]
# modify user
modify_object(dn, modification)
demisto_entry = {
'ContentsFormat': formats['text'],
'Type': entryTypes['note'],
'Contents': content_output
}
demisto.results(demisto_entry)
|
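The bit arithmetic in this row is the standard ADS_UF_DONT_EXPIRE_PASSWD flag (bit 16, 0x10000) of userAccountControl; a standalone sketch with an illustrative starting value:

uac = 0x200              # NORMAL_ACCOUNT
uac |= 1 << 16           # set "password never expires" -> 0x10200
assert uac & 0x10000
uac &= ~(1 << 16)        # clear it again -> 0x200
assert not uac & 0x10000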
39,406 | def test_squeeze(pv_ndarray_1d):
reshaped_pvarr = pv_ndarray_1d.reshape((3, 1))
assert np.allclose(reshaped_pvarr.squeeze(), np.array(reshaped_pvarr.squeeze()))
| def test_squeeze(pv_ndarray_1d):
reshaped_pvarr = pv_ndarray_1d.reshape((3, 1))
assert np.array_equal(reshaped_pvarr.squeeze(), np.array(reshaped_pvarr.squeeze()))
|
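The switch from np.allclose to np.array_equal trades a tolerance-based check for exact element-wise equality; a small illustration:

import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = a + 1e-9
print(np.allclose(a, b))     # True  (within default tolerances)
print(np.array_equal(a, b))  # False (exact comparison)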
39,933 | def test_token_deployer_and_agent(testerchain, deployment_progress, test_registry):
origin = testerchain.etherbase_account
# Trying to get token from blockchain it's been published fails
with pytest.raises(BaseContractRegistry.UnknownContract):
NucypherTokenAgent(registry=test_registry)
# The big day...
deployer = NucypherTokenDeployer(registry=test_registry, deployer_address=origin)
deployment_receipts = deployer.deploy(progress=deployment_progress)
for title, receipt in deployment_receipts.items():
assert receipt['status'] == 1
# deployment steps must match expected number of steps
assert deployment_progress.num_steps == len(deployer.deployment_steps) == 1
# Create a token instance
token_agent = deployer.make_agent()
token_contract = token_agent.contract
expected_token_supply = token_contract.functions.totalSupply().call()
assert expected_token_supply == token_agent.contract.functions.totalSupply().call()
# Retrieve the token from the blockchain
same_token_agent = NucypherTokenAgent(registry=test_registry)
# Compare the contract address for equality
assert token_agent.contract_address == same_token_agent.contract_address
assert token_agent == same_token_agent # __eq__
| def test_token_deployer_and_agent(testerchain, deployment_progress, test_registry):
origin = testerchain.etherbase_account
# Trying to get token from blockchain before it's been published should fail
with pytest.raises(BaseContractRegistry.UnknownContract):
NucypherTokenAgent(registry=test_registry)
# The big day...
deployer = NucypherTokenDeployer(registry=test_registry, deployer_address=origin)
deployment_receipts = deployer.deploy(progress=deployment_progress)
for title, receipt in deployment_receipts.items():
assert receipt['status'] == 1
# deployment steps must match expected number of steps
assert deployment_progress.num_steps == len(deployer.deployment_steps) == 1
# Create a token instance
token_agent = deployer.make_agent()
token_contract = token_agent.contract
expected_token_supply = token_contract.functions.totalSupply().call()
assert expected_token_supply == token_agent.contract.functions.totalSupply().call()
# Retrieve the token from the blockchain
same_token_agent = NucypherTokenAgent(registry=test_registry)
# Compare the contract address for equality
assert token_agent.contract_address == same_token_agent.contract_address
assert token_agent == same_token_agent # __eq__
|
54,151 | def input_output_list_converter(object_list):
"""Converts a list of ProcessingInput or ProcessingOutput objects to a list of dicts
Args:
object_list (list[ProcessingInput or ProcessingOutput]):
Returns:
List of dicts
"""
if object_list is not None:
dict_list = [obj._to_request_dict() for obj in object_list]
else:
dict_list = object_list
return dict_list
| def input_output_list_converter(object_list):
"""Converts a list of ProcessingInput or ProcessingOutput objects to a list of dicts
Args:
object_list (list[ProcessingInput or ProcessingOutput]):
Returns:
List of dicts
"""
if object_list:
return [obj._to_request_dict() for obj in object_list]
return object_list
|
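The refactor replaces the "is not None" test with a truthiness check; for this converter the visible behaviour is unchanged, because an empty list maps to an empty list either way and None still passes through. A hypothetical stand-in (to_dicts is not part of the code above) makes the three cases explicit:

def to_dicts(object_list):
    if object_list:
        return [str(obj) for obj in object_list]  # stand-in for obj._to_request_dict()
    return object_list

print(to_dicts(None))     # None
print(to_dicts([]))       # []
print(to_dicts([1, 2]))   # ['1', '2']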
58,145 | def add_custom_ip_feeds(client: PrismaCloudComputeClient, args: dict) -> CommandResults:
"""
Add a list of banned IPs to be blocked by the system.
Implement the command 'prisma-cloud-compute-custom-feeds-ip-add'
Args:
client (PrismaCloudComputeClient): prisma-cloud-compute client.
args (dict): prisma-cloud-compute-custom-feeds-ip-add command arguments.
Returns:
CommandResults: command-results object.
"""
# the api overrides the blacklisted IPs, therefore it is necessary to add those who exist to the 'PUT' request.
current_ip_feeds = client.get_custom_ip_feeds()
if current_ip_feeds:
current_ip_feeds = current_ip_feeds.get("feed", [])
else:
current_ip_feeds = []
new_ip_feeds = argToList(arg=args.pop("ip"))
# remove duplicates, the api doesn't give error on duplicate IPs
combined_feeds = list(set(current_ip_feeds + new_ip_feeds))
client.add_custom_ip_feeds(feeds=combined_feeds)
return CommandResults(readable_output="Successfully updated the custom IP feeds")
| def add_custom_ip_feeds(client: PrismaCloudComputeClient, args: dict) -> CommandResults:
"""
Add a list of banned IPs to be blocked by the system.
Implement the command 'prisma-cloud-compute-custom-feeds-ip-add'
Args:
client (PrismaCloudComputeClient): prisma-cloud-compute client.
args (dict): prisma-cloud-compute-custom-feeds-ip-add command arguments.
Returns:
CommandResults: command-results object.
"""
# the api overrides the blacklisted IPs, therefore it is necessary to add those who exist to the 'PUT' request.
# if there aren't any IP feeds in the environment, the api returns None
feeds = (client.get_custom_ip_feeds() or {}).get('feed') or []
new_ip_feeds = argToList(arg=args.pop("ip"))
# remove duplicates, the api doesn't give error on duplicate IPs
combined_feeds = list(set(feeds + new_ip_feeds))
client.add_custom_ip_feeds(feeds=combined_feeds)
return CommandResults(readable_output="Successfully updated the custom IP feeds")
|
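The "(response or {}).get('feed') or []" idiom in the modified version collapses the None-response, empty-dict and missing-key cases into one expression; a hedged sketch (normalize_feed is illustrative):

def normalize_feed(response):
    return (response or {}).get("feed") or []

print(normalize_feed(None))                    # []
print(normalize_feed({}))                      # []
print(normalize_feed({"feed": ["1.2.3.4"]}))   # ['1.2.3.4']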
22,061 | def androsign_main(args_apk, args_hash, args_all, show):
from androguard.core.bytecodes.apk import APK
from androguard.util import get_certificate_name_string
import hashlib
import binascii
import traceback
from colorama import Fore, Style
from asn1crypto import x509, keys
from oscrypto import asymmetric
# Keep the list of hash functions in sync with cli/entry_points.py:sign
hashfunctions = dict(md5=hashlib.md5,
sha1=hashlib.sha1,
sha256=hashlib.sha256,
sha512=hashlib.sha512,
)
if args_hash.lower() not in hashfunctions:
print("Hash function {} not supported!"
.format(args_hash.lower()), file=sys.stderr)
print("Use one of {}"
.format(", ".join(hashfunctions.keys())), file=sys.stderr)
sys.exit(1)
for path in args_apk:
try:
a = APK(path)
print("{}, package: '{}'".format(os.path.basename(path), a.get_package()))
print("Is signed v1: {}".format(a.is_signed_v1()))
print("Is signed v2: {}".format(a.is_signed_v2()))
print("Is signed v3: {}".format(a.is_signed_v3()))
certs = set(a.get_certificates_der_v3() + a.get_certificates_der_v2() + [a.get_certificate_der(x) for x in a.get_signature_names()])
pkeys = set(a.get_public_keys_der_v3() + a.get_public_keys_der_v2())
if len(certs) > 0:
print("Found {} unique certificates".format(len(certs)))
for cert in certs:
if show:
x509_cert = x509.Certificate.load(cert)
print("Issuer:", get_certificate_name_string(x509_cert.issuer, short=True))
print("Subject:", get_certificate_name_string(x509_cert.subject, short=True))
print("Serial Number:", hex(x509_cert.serial_number))
print("Hash Algorithm:", x509_cert.hash_algo)
print("Signature Algorithm:", x509_cert.signature_algo)
print("Valid not before:", x509_cert['tbs_certificate']['validity']['not_before'].native)
print("Valid not after:", x509_cert['tbs_certificate']['validity']['not_after'].native)
if not args_all:
print("{} {}".format(args_hash.lower(), hashfunctions[args_hash.lower()](cert).hexdigest()))
else:
for k, v in hashfunctions.items():
print("{} {}".format(k, v(cert).hexdigest()))
print()
if len(certs) > 0:
print("Found {} unique public keys associated with the certs".format(len(pkeys)))
for public_key in pkeys:
if show:
x509_public_key = asymmetric.load_public_key(public_key)
print("PublicKey Algorithm:", x509_public_key.algorithm)
print("Bit Size:", x509_public_key.bit_size)
print("Fingerprint:", binascii.hexlify(x509_public_key.fingerprint))
try:
print("Hash Algorithm:", hash_algo(x509_public_key))
except ValueError as ve:
# RSA pkey does not have a hash algorithm
pass
print()
except:
print(Fore.RED + "Error in {}".format(os.path.basename(path)) + Style.RESET_ALL, file=sys.stderr)
traceback.print_exc(file=sys.stderr)
if len(args_apk) > 1:
print()
| def androsign_main(args_apk, args_hash, args_all, show):
from androguard.core.bytecodes.apk import APK
from androguard.util import get_certificate_name_string
import hashlib
import binascii
import traceback
from colorama import Fore, Style
from asn1crypto import x509, keys
from oscrypto import asymmetric
# Keep the list of hash functions in sync with cli/entry_points.py:sign
hashfunctions = dict(md5=hashlib.md5,
sha1=hashlib.sha1,
sha256=hashlib.sha256,
sha512=hashlib.sha512,
)
if args_hash.lower() not in hashfunctions:
print("Hash function {} not supported!"
.format(args_hash.lower()), file=sys.stderr)
print("Use one of {}"
.format(", ".join(hashfunctions.keys())), file=sys.stderr)
sys.exit(1)
for path in args_apk:
try:
a = APK(path)
print("{}, package: '{}'".format(os.path.basename(path), a.get_package()))
print("Is signed v1: {}".format(a.is_signed_v1()))
print("Is signed v2: {}".format(a.is_signed_v2()))
print("Is signed v3: {}".format(a.is_signed_v3()))
certs = set(a.get_certificates_der_v3() + a.get_certificates_der_v2() + [a.get_certificate_der(x) for x in a.get_signature_names()])
pkeys = set(a.get_public_keys_der_v3() + a.get_public_keys_der_v2())
if len(certs) > 0:
print("Found {} unique certificates".format(len(certs)))
for cert in certs:
if show:
x509_cert = x509.Certificate.load(cert)
print("Issuer:", get_certificate_name_string(x509_cert.issuer, short=True))
print("Subject:", get_certificate_name_string(x509_cert.subject, short=True))
print("Serial Number:", hex(x509_cert.serial_number))
print("Hash Algorithm:", x509_cert.hash_algo)
print("Signature Algorithm:", x509_cert.signature_algo)
print("Valid not before:", x509_cert['tbs_certificate']['validity']['not_before'].native)
print("Valid not after:", x509_cert['tbs_certificate']['validity']['not_after'].native)
if not args_all:
print("{} {}".format(args_hash.lower(), hashfunctions[args_hash.lower()](cert).hexdigest()))
else:
for k, v in hashfunctions.items():
print("{} {}".format(k, v(cert).hexdigest()))
print()
if len(certs) > 0:
print("Found {} unique public keys associated with the certs".format(len(pkeys)))
for public_key in pkeys:
if show:
x509_public_key = asymmetric.load_public_key(public_key)
print("PublicKey Algorithm:", x509_public_key.algorithm)
print("Bit Size:", x509_public_key.bit_size)
print("Fingerprint:", binascii.hexlify(x509_public_key.fingerprint).decode())
try:
print("Hash Algorithm:", hash_algo(x509_public_key))
except ValueError as ve:
# RSA pkey does not have a hash algorithm
pass
print()
except:
print(Fore.RED + "Error in {}".format(os.path.basename(path)) + Style.RESET_ALL, file=sys.stderr)
traceback.print_exc(file=sys.stderr)
if len(args_apk) > 1:
print()
|
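The hashfunctions mapping above is a plain name-to-constructor dispatch table; a tiny standalone sketch of how a digest is produced for one certificate blob (the bytes are a placeholder):

import hashlib

hashfunctions = dict(md5=hashlib.md5, sha1=hashlib.sha1,
                     sha256=hashlib.sha256, sha512=hashlib.sha512)
cert = b"\x30\x82\x01\x0a"  # placeholder DER bytes
print("sha256", hashfunctions["sha256"](cert).hexdigest())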
20,758 | def trim_lines(in_obj, out_obj):
for line in in_obj:
line = trim_line(line)
if line:
out_obj.write(line + "\n")
| def trim_lines(in_obj: TextIO, out_obj: TextIO) -> None:
for line in in_obj:
line = trim_line(line)
if line:
out_obj.write(line + "\n")
|
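With the TextIO annotations, trim_lines works on any text stream, not just files; a quick sketch using io.StringIO and a whitespace-stripping stand-in for trim_line (which is defined elsewhere in the real module):

import io

def trim_line(line: str) -> str:   # hypothetical stand-in
    return line.strip()

src = io.StringIO("  keep me  \n\n   \nalso keep\n")
dst = io.StringIO()
trim_lines(src, dst)               # the function from this row
print(repr(dst.getvalue()))        # 'keep me\nalso keep\n'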
53,084 | def _descriptionToDisplay(
description: Optional[str],
descriptionFrom: Optional[DescriptionFrom],
) -> Optional[str]:
""" Determines if the given description should be reported. Returns None if description should not be
reportedm otherwise returns the description to report.
"""
if description and (
config.conf["presentation"]["reportObjectDescriptions"]
or (
config.conf["annotations"]["reportAriaDescription"]
and descriptionFrom == DescriptionFrom.ARIA_DESCRIPTION
)
):
return description
| def _descriptionToDisplay(
description: Optional[str],
descriptionFrom: Optional[DescriptionFrom],
) -> Optional[str]:
""" Determines if the given description should be reported. Returns None if description should not be
reported, otherwise returns the description to report.
"""
if description and (
config.conf["presentation"]["reportObjectDescriptions"]
or (
config.conf["annotations"]["reportAriaDescription"]
and descriptionFrom == DescriptionFrom.ARIA_DESCRIPTION
)
):
return description
|
14,056 | def plot_dataframe(
df,
column=None,
cmap=None,
color=None,
ax=None,
cax=None,
categorical=False,
legend=False,
scheme=None,
k=5,
vmin=None,
vmax=None,
markersize=None,
figsize=None,
legend_kwds=None,
categories=None,
classification_kwds=None,
missing_kwds=None,
aspect="auto",
**style_kwds,
):
"""
Plot a GeoDataFrame.
Generate a plot of a GeoDataFrame with matplotlib. If a
column is specified, the plot coloring will be based on values
in that column.
Parameters
----------
column : str, np.array, pd.Series (default None)
The name of the dataframe column, np.array, or pd.Series to be plotted.
If np.array or pd.Series are used then it must have same length as
dataframe. Values are used to color the plot. Ignored if `color` is
also set.
kind: str
The kind of plots to produce:
- 'geo': Map (default)
Pandas Kinds
- 'line' : line plot
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : BoxPlot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
- 'scatter' : scatter plot
- 'hexbin' : hexbin plot.
cmap : str (default None)
The name of a colormap recognized by matplotlib.
color : str (default None)
If specified, all objects will be colored uniformly.
ax : matplotlib.pyplot.Artist (default None)
axes on which to draw the plot
cax : matplotlib.pyplot Artist (default None)
axes on which to draw the legend in case of color map.
categorical : bool (default False)
If False, cmap will reflect numerical values of the
column being plotted. For non-numerical columns, this
will be set to True.
legend : bool (default False)
Plot a legend. Ignored if no `column` is given, or if `color` is given.
scheme : str (default None)
Name of a choropleth classification scheme (requires mapclassify).
A mapclassify.MapClassifier object will be used
under the hood. Supported are all schemes provided by mapclassify (e.g.
'BoxPlot', 'EqualInterval', 'FisherJenks', 'FisherJenksSampled',
'HeadTailBreaks', 'JenksCaspall', 'JenksCaspallForced',
'JenksCaspallSampled', 'MaxP', 'MaximumBreaks',
'NaturalBreaks', 'Quantiles', 'Percentiles', 'StdMean',
'UserDefined'). Arguments can be passed in classification_kwds.
k : int (default 5)
Number of classes (ignored if scheme is None)
vmin : None or float (default None)
Minimum value of cmap. If None, the minimum data value
in the column to be plotted is used.
vmax : None or float (default None)
Maximum value of cmap. If None, the maximum data value
in the column to be plotted is used.
markersize : str or float or sequence (default None)
Only applies to point geometries within a frame.
If a str, will use the values in the column of the frame specified
by markersize to set the size of markers. Otherwise can be a value
to apply to all points, or a sequence of the same length as the
number of points.
figsize : tuple of integers (default None)
Size of the resulting matplotlib.figure.Figure. If the argument
axes is given explicitly, figsize is ignored.
legend_kwds : dict (default None)
Keyword arguments to pass to matplotlib.pyplot.legend() or
matplotlib.pyplot.colorbar().
Additional accepted keywords when `scheme` is specified:
fmt : string
A formatting specification for the bin edges of the classes in the
legend. For example, to have no decimals: ``{"fmt": "{:.0f}"}``.
labels : list-like
A list of legend labels to override the auto-generated labels.
Needs to have the same number of elements as the number of
classes (`k`).
interval : boolean (default False)
An option to control brackets from mapclassify legend.
If True, open/closed interval brackets are shown in the legend.
categories : list-like
Ordered list-like object of categories to be used for categorical plot.
classification_kwds : dict (default None)
Keyword arguments to pass to mapclassify
missing_kwds : dict (default None)
Keyword arguments specifying color options (as style_kwds)
to be passed on to geometries with missing values in addition to
or overwriting other style kwds. If None, geometries with missing
values are not plotted.
aspect : 'auto', 'equal', None or float (default 'auto')
Set aspect of axis. If 'auto', the default aspect for map plots is 'equal'; if
however data are not projected (coordinates are long/lat), the aspect is by
default set to 1/cos(df_y * pi/180) with df_y the y coordinate of the middle of
the GeoDataFrame (the mean of the y range of bounding box) so that a long/lat
square appears square in the middle of the plot. This implies an
Equirectangular projection. If None, the aspect of `ax` won't be changed. It can
also be set manually (float) as the ratio of y-unit to x-unit.
**style_kwds : dict
Style options to be passed on to the actual plot function, such
as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``,
``alpha``.
Returns
-------
ax : matplotlib axes instance
Examples
--------
>>> df = geopandas.read_file(geopandas.datasets.get_path("naturalearth_lowres"))
>>> df.head() # doctest: +SKIP
pop_est continent name iso_a3 \
gdp_md_est geometry
0 920938 Oceania Fiji FJI 8374.0 MULTIPOLY\
GON (((180.00000 -16.06713, 180.00000...
1 53950935 Africa Tanzania TZA 150600.0 POLYGON (\
(33.90371 -0.95000, 34.07262 -1.05982...
2 603253 Africa W. Sahara ESH 906.5 POLYGON (\
(-8.66559 27.65643, -8.66512 27.58948...
3 35623680 North America Canada CAN 1674000.0 MULTIPOLY\
GON (((-122.84000 49.00000, -122.9742...
4 326625791 North America United States of America USA 18560000.0 MULTIPOLY\
GON (((-122.84000 49.00000, -120.0000...
>>> df.plot("pop_est", cmap="Blues") # doctest: +SKIP
See the User Guide page :doc:`../../user_guide/mapping` for details.
"""
if "colormap" in style_kwds:
warnings.warn(
"'colormap' is deprecated, please use 'cmap' instead "
"(for consistency with matplotlib)",
FutureWarning,
)
cmap = style_kwds.pop("colormap")
if "axes" in style_kwds:
warnings.warn(
"'axes' is deprecated, please use 'ax' instead "
"(for consistency with pandas)",
FutureWarning,
)
ax = style_kwds.pop("axes")
if column is not None and color is not None:
warnings.warn(
"Only specify one of 'column' or 'color'. Using 'color'.", UserWarning
)
column = None
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError(
"The matplotlib package is required for plotting in geopandas. "
"You can install it using 'conda install -c conda-forge matplotlib' or "
"'pip install matplotlib'."
)
if ax is None:
if cax is not None:
raise ValueError("'ax' can not be None if 'cax' is not.")
fig, ax = plt.subplots(figsize=figsize)
if aspect == "auto":
if df.crs and df.crs.is_geographic:
bounds = df.total_bounds
y_coord = np.mean([bounds[1], bounds[3]])
ax.set_aspect(1 / np.cos(y_coord * np.pi / 180))
# formula ported from R package sp
# https://github.com/edzer/sp/blob/master/R/mapasp.R
else:
ax.set_aspect("equal")
elif aspect is not None:
ax.set_aspect(aspect)
# GH 1555
# if legend_kwds set, copy so we don't update it in place
if legend_kwds is not None:
legend_kwds = legend_kwds.copy()
if df.empty:
warnings.warn(
"The GeoDataFrame you are attempting to plot is "
"empty. Nothing has been displayed.",
UserWarning,
)
return ax
if isinstance(markersize, str):
markersize = df[markersize].values
if column is None:
return plot_series(
df.geometry,
cmap=cmap,
color=color,
ax=ax,
figsize=figsize,
markersize=markersize,
aspect=aspect,
**style_kwds,
)
# To accept pd.Series and np.arrays as column
if isinstance(column, (np.ndarray, pd.Series)):
if column.shape[0] != df.shape[0]:
raise ValueError(
"The dataframe and given column have different number of rows."
)
else:
values = column
# Make sure index of a Series matches index of df
if isinstance(values, pd.Series):
values = values.reindex(df.index)
else:
values = df[column]
if pd.api.types.is_categorical_dtype(values.dtype):
if categories is not None:
raise ValueError(
"Cannot specify 'categories' when column has categorical dtype"
)
categorical = True
elif values.dtype is np.dtype("O") or categories:
categorical = True
nan_idx = np.asarray(pd.isna(values), dtype="bool")
# Define `values` as a Series
if categorical:
if cmap is None:
cmap = "tab10"
cat = pd.Categorical(values, categories=categories)
categories = list(cat.categories)
# values missing in the Categorical but not in original values
missing = list(np.unique(values[~nan_idx & cat.isna()]))
if missing:
raise ValueError(
"Column contains values not listed in categories. "
"Missing categories: {}.".format(missing)
)
values = cat.codes[~nan_idx]
vmin = 0 if vmin is None else vmin
vmax = len(categories) - 1 if vmax is None else vmax
if scheme is not None:
mc_err = "The 'mapclassify' >= 2.4.0 is required to use the 'scheme' keyword."
try:
import mapclassify
except ImportError:
raise ImportError(mc_err)
if mapclassify.__version__ < LooseVersion("2.4.0"):
raise ImportError(mc_err)
if classification_kwds is None:
classification_kwds = {}
if "k" not in classification_kwds:
classification_kwds["k"] = k
binning = mapclassify.classify(
np.asarray(values[~nan_idx]), scheme, **classification_kwds
)
# set categorical to True for creating the legend
categorical = True
if legend_kwds is not None and "labels" in legend_kwds:
if len(legend_kwds["labels"]) != binning.k:
raise ValueError(
"Number of labels must match number of bins, "
"received {} labels for {} bins".format(
len(legend_kwds["labels"]), binning.k
)
)
else:
categories = list(legend_kwds.pop("labels"))
else:
fmt = "{:.2f}"
if legend_kwds is not None and "fmt" in legend_kwds:
fmt = legend_kwds.pop("fmt")
categories = binning.get_legend_classes(fmt)
if legend_kwds is not None:
show_interval = legend_kwds.pop("interval", False)
else:
show_interval = False
if not show_interval:
categories = [c[1:-1] for c in categories]
values = np.array(binning.yb)
# fill values with placeholder where were NaNs originally to map them properly
# (after removing them in categorical or scheme)
if categorical:
for n in np.where(nan_idx)[0]:
values = np.insert(values, n, values[0])
mn = values[~np.isnan(values)].min() if vmin is None else vmin
mx = values[~np.isnan(values)].max() if vmax is None else vmax
# decompose GeometryCollections
geoms, multiindex = _flatten_multi_geoms(df.geometry, prefix="Geom")
values = np.take(values, multiindex, axis=0)
nan_idx = np.take(nan_idx, multiindex, axis=0)
expl_series = geopandas.GeoSeries(geoms)
geom_types = expl_series.type
poly_idx = np.asarray((geom_types == "Polygon") | (geom_types == "MultiPolygon"))
line_idx = np.asarray(
(geom_types == "LineString")
| (geom_types == "MultiLineString")
| (geom_types == "LinearRing")
)
point_idx = np.asarray((geom_types == "Point") | (geom_types == "MultiPoint"))
# plot all Polygons and all MultiPolygon components in the same collection
polys = expl_series[poly_idx & np.invert(nan_idx)]
subset = values[poly_idx & np.invert(nan_idx)]
if not polys.empty:
_plot_polygon_collection(
ax, polys, subset, vmin=mn, vmax=mx, cmap=cmap, **style_kwds
)
# plot all LineStrings and MultiLineString components in same collection
lines = expl_series[line_idx & np.invert(nan_idx)]
subset = values[line_idx & np.invert(nan_idx)]
if not lines.empty:
_plot_linestring_collection(
ax, lines, subset, vmin=mn, vmax=mx, cmap=cmap, **style_kwds
)
# plot all Points in the same collection
points = expl_series[point_idx & np.invert(nan_idx)]
subset = values[point_idx & np.invert(nan_idx)]
if not points.empty:
if isinstance(markersize, np.ndarray):
markersize = np.take(markersize, multiindex, axis=0)
markersize = markersize[point_idx & np.invert(nan_idx)]
_plot_point_collection(
ax,
points,
subset,
vmin=mn,
vmax=mx,
markersize=markersize,
cmap=cmap,
**style_kwds,
)
if missing_kwds is not None and not expl_series[nan_idx].empty:
if color:
if "color" not in missing_kwds:
missing_kwds["color"] = color
merged_kwds = style_kwds.copy()
merged_kwds.update(missing_kwds)
plot_series(expl_series[nan_idx], ax=ax, **merged_kwds)
if legend and not color:
if legend_kwds is None:
legend_kwds = {}
if "fmt" in legend_kwds:
legend_kwds.pop("fmt")
from matplotlib.lines import Line2D
from matplotlib.colors import Normalize
from matplotlib import cm
norm = style_kwds.get("norm", None)
if not norm:
norm = Normalize(vmin=mn, vmax=mx)
n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)
if categorical:
patches = []
for value, cat in enumerate(categories):
patches.append(
Line2D(
[0],
[0],
linestyle="none",
marker="o",
alpha=style_kwds.get("alpha", 1),
markersize=10,
markerfacecolor=n_cmap.to_rgba(value),
markeredgewidth=0,
)
)
if missing_kwds is not None:
if "color" in merged_kwds:
merged_kwds["facecolor"] = merged_kwds["color"]
patches.append(
Line2D(
[0],
[0],
linestyle="none",
marker="o",
alpha=merged_kwds.get("alpha", 1),
markersize=10,
markerfacecolor=merged_kwds.get("facecolor", None),
markeredgecolor=merged_kwds.get("edgecolor", None),
markeredgewidth=merged_kwds.get(
"linewidth", 1 if merged_kwds.get("edgecolor", False) else 0
),
)
)
categories.append(merged_kwds.get("label", "NaN"))
legend_kwds.setdefault("numpoints", 1)
legend_kwds.setdefault("loc", "best")
ax.legend(patches, categories, **legend_kwds)
else:
if cax is not None:
legend_kwds.setdefault("cax", cax)
else:
legend_kwds.setdefault("ax", ax)
n_cmap.set_array([])
ax.get_figure().colorbar(n_cmap, **legend_kwds)
plt.draw()
return ax
| def plot_dataframe(
df,
column=None,
cmap=None,
color=None,
ax=None,
cax=None,
categorical=False,
legend=False,
scheme=None,
k=5,
vmin=None,
vmax=None,
markersize=None,
figsize=None,
legend_kwds=None,
categories=None,
classification_kwds=None,
missing_kwds=None,
aspect="auto",
**style_kwds,
):
"""
Plot a GeoDataFrame.
Generate a plot of a GeoDataFrame with matplotlib. If a
column is specified, the plot coloring will be based on values
in that column.
Parameters
----------
column : str, np.array, pd.Series (default None)
The name of the dataframe column, np.array, or pd.Series to be plotted.
If np.array or pd.Series are used then it must have same length as
dataframe. Values are used to color the plot. Ignored if `color` is
also set.
kind: str
The kind of plots to produce:
- 'geo': Map (default)
Pandas Kinds
- 'line' : line plot
- 'bar' : vertical bar plot
- 'barh' : horizontal bar plot
- 'hist' : histogram
- 'box' : BoxPlot
- 'kde' : Kernel Density Estimation plot
- 'density' : same as 'kde'
- 'area' : area plot
- 'pie' : pie plot
- 'scatter' : scatter plot
- 'hexbin' : hexbin plot.
cmap : str (default None)
The name of a colormap recognized by matplotlib.
color : str (default None)
If specified, all objects will be colored uniformly.
ax : matplotlib.pyplot.Artist (default None)
axes on which to draw the plot
cax : matplotlib.pyplot Artist (default None)
axes on which to draw the legend in case of color map.
categorical : bool (default False)
If False, cmap will reflect numerical values of the
column being plotted. For non-numerical columns, this
will be set to True.
legend : bool (default False)
Plot a legend. Ignored if no `column` is given, or if `color` is given.
scheme : str (default None)
Name of a choropleth classification scheme (requires mapclassify).
A mapclassify.MapClassifier object will be used
under the hood. Supported are all schemes provided by mapclassify (e.g.
'BoxPlot', 'EqualInterval', 'FisherJenks', 'FisherJenksSampled',
'HeadTailBreaks', 'JenksCaspall', 'JenksCaspallForced',
'JenksCaspallSampled', 'MaxP', 'MaximumBreaks',
'NaturalBreaks', 'Quantiles', 'Percentiles', 'StdMean',
'UserDefined'). Arguments can be passed in classification_kwds.
k : int (default 5)
Number of classes (ignored if scheme is None)
vmin : None or float (default None)
Minimum value of cmap. If None, the minimum data value
in the column to be plotted is used.
vmax : None or float (default None)
Maximum value of cmap. If None, the maximum data value
in the column to be plotted is used.
markersize : str or float or sequence (default None)
Only applies to point geometries within a frame.
If a str, will use the values in the column of the frame specified
by markersize to set the size of markers. Otherwise can be a value
to apply to all points, or a sequence of the same length as the
number of points.
figsize : tuple of integers (default None)
Size of the resulting matplotlib.figure.Figure. If the argument
axes is given explicitly, figsize is ignored.
legend_kwds : dict (default None)
Keyword arguments to pass to matplotlib.pyplot.legend() or
matplotlib.pyplot.colorbar().
Additional accepted keywords when `scheme` is specified:
fmt : string
A formatting specification for the bin edges of the classes in the
legend. For example, to have no decimals: ``{"fmt": "{:.0f}"}``.
labels : list-like
A list of legend labels to override the auto-generated labels.
Needs to have the same number of elements as the number of
classes (`k`).
interval : boolean (default False)
An option to control brackets from mapclassify legend.
If True, open/closed interval brackets are shown in the legend.
categories : list-like
Ordered list-like object of categories to be used for categorical plot.
classification_kwds : dict (default None)
Keyword arguments to pass to mapclassify
missing_kwds : dict (default None)
Keyword arguments specifying color options (as style_kwds)
to be passed on to geometries with missing values in addition to
or overwriting other style kwds. If None, geometries with missing
values are not plotted.
aspect : 'auto', 'equal', None or float (default 'auto')
Set aspect of axis. If 'auto', the default aspect for map plots is 'equal'; if
however data are not projected (coordinates are long/lat), the aspect is by
default set to 1/cos(df_y * pi/180) with df_y the y coordinate of the middle of
the GeoDataFrame (the mean of the y range of bounding box) so that a long/lat
square appears square in the middle of the plot. This implies an
Equirectangular projection. If None, the aspect of `ax` won't be changed. It can
also be set manually (float) as the ratio of y-unit to x-unit.
**style_kwds : dict
Style options to be passed on to the actual plot function, such
as ``edgecolor``, ``facecolor``, ``linewidth``, ``markersize``,
``alpha``.
Returns
-------
ax : matplotlib axes instance
Examples
--------
>>> df = geopandas.read_file(geopandas.datasets.get_path("naturalearth_lowres"))
>>> df.head() # doctest: +SKIP
pop_est continent name iso_a3 \
gdp_md_est geometry
0 920938 Oceania Fiji FJI 8374.0 MULTIPOLY\
GON (((180.00000 -16.06713, 180.00000...
1 53950935 Africa Tanzania TZA 150600.0 POLYGON (\
(33.90371 -0.95000, 34.07262 -1.05982...
2 603253 Africa W. Sahara ESH 906.5 POLYGON (\
(-8.66559 27.65643, -8.66512 27.58948...
3 35623680 North America Canada CAN 1674000.0 MULTIPOLY\
GON (((-122.84000 49.00000, -122.9742...
4 326625791 North America United States of America USA 18560000.0 MULTIPOLY\
GON (((-122.84000 49.00000, -120.0000...
>>> df.plot("pop_est", cmap="Blues") # doctest: +SKIP
See the User Guide page :doc:`../../user_guide/mapping` for details.
"""
if "colormap" in style_kwds:
warnings.warn(
"'colormap' is deprecated, please use 'cmap' instead "
"(for consistency with matplotlib)",
FutureWarning,
)
cmap = style_kwds.pop("colormap")
if "axes" in style_kwds:
warnings.warn(
"'axes' is deprecated, please use 'ax' instead "
"(for consistency with pandas)",
FutureWarning,
)
ax = style_kwds.pop("axes")
if column is not None and color is not None:
warnings.warn(
"Only specify one of 'column' or 'color'. Using 'color'.", UserWarning
)
column = None
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError(
"The matplotlib package is required for plotting in geopandas. "
"You can install it using 'conda install -c conda-forge matplotlib' or "
"'pip install matplotlib'."
)
if ax is None:
if cax is not None:
raise ValueError("'ax' can not be None if 'cax' is not.")
fig, ax = plt.subplots(figsize=figsize)
if aspect == "auto":
if df.crs and df.crs.is_geographic:
bounds = df.total_bounds
y_coord = np.mean([bounds[1], bounds[3]])
ax.set_aspect(1 / np.cos(y_coord * np.pi / 180))
# formula ported from R package sp
# https://github.com/edzer/sp/blob/master/R/mapasp.R
else:
ax.set_aspect("equal")
elif aspect is not None:
ax.set_aspect(aspect)
# GH 1555
# if legend_kwds set, copy so we don't update it in place
if legend_kwds is not None:
legend_kwds = legend_kwds.copy()
if df.empty:
warnings.warn(
"The GeoDataFrame you are attempting to plot is "
"empty. Nothing has been displayed.",
UserWarning,
)
return ax
if isinstance(markersize, str):
markersize = df[markersize].values
if column is None:
return plot_series(
df.geometry,
cmap=cmap,
color=color,
ax=ax,
figsize=figsize,
markersize=markersize,
aspect=aspect,
**style_kwds,
)
# To accept pd.Series and np.arrays as column
if isinstance(column, (np.ndarray, pd.Series)):
if column.shape[0] != df.shape[0]:
raise ValueError(
"The dataframe and given column have different number of rows."
)
else:
values = column
# Make sure index of a Series matches index of df
if isinstance(values, pd.Series):
values = values.reindex(df.index)
else:
values = df[column]
if pd.api.types.is_categorical_dtype(values.dtype):
if categories is not None:
raise ValueError(
"Cannot specify 'categories' when column has categorical dtype"
)
categorical = True
elif values.dtype is np.dtype("O") or categories:
categorical = True
nan_idx = np.asarray(pd.isna(values), dtype="bool")
# Define `values` as a Series
if categorical:
if cmap is None:
cmap = "tab10"
cat = pd.Categorical(values, categories=categories)
categories = list(cat.categories)
# values missing in the Categorical but not in original values
missing = list(np.unique(values[~nan_idx & cat.isna()]))
if missing:
raise ValueError(
"Column contains values not listed in categories. "
"Missing categories: {}.".format(missing)
)
values = cat.codes[~nan_idx]
vmin = 0 if vmin is None else vmin
vmax = len(categories) - 1 if vmax is None else vmax
if scheme is not None:
mc_err = "The 'mapclassify' package (>= 2.4.0) is required to use the 'scheme' keyword."
try:
import mapclassify
except ImportError:
raise ImportError(mc_err)
if mapclassify.__version__ < LooseVersion("2.4.0"):
raise ImportError(mc_err)
if classification_kwds is None:
classification_kwds = {}
if "k" not in classification_kwds:
classification_kwds["k"] = k
binning = mapclassify.classify(
np.asarray(values[~nan_idx]), scheme, **classification_kwds
)
# set categorical to True for creating the legend
categorical = True
if legend_kwds is not None and "labels" in legend_kwds:
if len(legend_kwds["labels"]) != binning.k:
raise ValueError(
"Number of labels must match number of bins, "
"received {} labels for {} bins".format(
len(legend_kwds["labels"]), binning.k
)
)
else:
categories = list(legend_kwds.pop("labels"))
else:
fmt = "{:.2f}"
if legend_kwds is not None and "fmt" in legend_kwds:
fmt = legend_kwds.pop("fmt")
categories = binning.get_legend_classes(fmt)
if legend_kwds is not None:
show_interval = legend_kwds.pop("interval", False)
else:
show_interval = False
if not show_interval:
categories = [c[1:-1] for c in categories]
values = np.array(binning.yb)
# fill values with placeholder where were NaNs originally to map them properly
# (after removing them in categorical or scheme)
if categorical:
for n in np.where(nan_idx)[0]:
values = np.insert(values, n, values[0])
mn = values[~np.isnan(values)].min() if vmin is None else vmin
mx = values[~np.isnan(values)].max() if vmax is None else vmax
# decompose GeometryCollections
geoms, multiindex = _flatten_multi_geoms(df.geometry, prefix="Geom")
values = np.take(values, multiindex, axis=0)
nan_idx = np.take(nan_idx, multiindex, axis=0)
expl_series = geopandas.GeoSeries(geoms)
geom_types = expl_series.type
poly_idx = np.asarray((geom_types == "Polygon") | (geom_types == "MultiPolygon"))
line_idx = np.asarray(
(geom_types == "LineString")
| (geom_types == "MultiLineString")
| (geom_types == "LinearRing")
)
point_idx = np.asarray((geom_types == "Point") | (geom_types == "MultiPoint"))
# plot all Polygons and all MultiPolygon components in the same collection
polys = expl_series[poly_idx & np.invert(nan_idx)]
subset = values[poly_idx & np.invert(nan_idx)]
if not polys.empty:
_plot_polygon_collection(
ax, polys, subset, vmin=mn, vmax=mx, cmap=cmap, **style_kwds
)
# plot all LineStrings and MultiLineString components in same collection
lines = expl_series[line_idx & np.invert(nan_idx)]
subset = values[line_idx & np.invert(nan_idx)]
if not lines.empty:
_plot_linestring_collection(
ax, lines, subset, vmin=mn, vmax=mx, cmap=cmap, **style_kwds
)
# plot all Points in the same collection
points = expl_series[point_idx & np.invert(nan_idx)]
subset = values[point_idx & np.invert(nan_idx)]
if not points.empty:
if isinstance(markersize, np.ndarray):
markersize = np.take(markersize, multiindex, axis=0)
markersize = markersize[point_idx & np.invert(nan_idx)]
_plot_point_collection(
ax,
points,
subset,
vmin=mn,
vmax=mx,
markersize=markersize,
cmap=cmap,
**style_kwds,
)
if missing_kwds is not None and not expl_series[nan_idx].empty:
if color:
if "color" not in missing_kwds:
missing_kwds["color"] = color
merged_kwds = style_kwds.copy()
merged_kwds.update(missing_kwds)
plot_series(expl_series[nan_idx], ax=ax, **merged_kwds)
if legend and not color:
if legend_kwds is None:
legend_kwds = {}
if "fmt" in legend_kwds:
legend_kwds.pop("fmt")
from matplotlib.lines import Line2D
from matplotlib.colors import Normalize
from matplotlib import cm
norm = style_kwds.get("norm", None)
if not norm:
norm = Normalize(vmin=mn, vmax=mx)
n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)
if categorical:
patches = []
for value, cat in enumerate(categories):
patches.append(
Line2D(
[0],
[0],
linestyle="none",
marker="o",
alpha=style_kwds.get("alpha", 1),
markersize=10,
markerfacecolor=n_cmap.to_rgba(value),
markeredgewidth=0,
)
)
if missing_kwds is not None:
if "color" in merged_kwds:
merged_kwds["facecolor"] = merged_kwds["color"]
patches.append(
Line2D(
[0],
[0],
linestyle="none",
marker="o",
alpha=merged_kwds.get("alpha", 1),
markersize=10,
markerfacecolor=merged_kwds.get("facecolor", None),
markeredgecolor=merged_kwds.get("edgecolor", None),
markeredgewidth=merged_kwds.get(
"linewidth", 1 if merged_kwds.get("edgecolor", False) else 0
),
)
)
categories.append(merged_kwds.get("label", "NaN"))
legend_kwds.setdefault("numpoints", 1)
legend_kwds.setdefault("loc", "best")
ax.legend(patches, categories, **legend_kwds)
else:
if cax is not None:
legend_kwds.setdefault("cax", cax)
else:
legend_kwds.setdefault("ax", ax)
n_cmap.set_array([])
ax.get_figure().colorbar(n_cmap, **legend_kwds)
plt.draw()
return ax
|
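As a quick orientation for the plot_dataframe pair above, a minimal usage sketch of the documented column/legend/scheme arguments might look like the following (it assumes geopandas, matplotlib, and the sample dataset referenced in the docstring are available; the commented scheme line additionally needs mapclassify >= 2.4.0):

import geopandas
import matplotlib.pyplot as plt

# Load the sample dataset used in the docstring example above.
world = geopandas.read_file(geopandas.datasets.get_path("naturalearth_lowres"))

# Choropleth colored by a numeric column, with a colorbar legend.
ax = world.plot(column="pop_est", cmap="Blues", legend=True)

# Classified (binned) variant; uncommenting requires mapclassify >= 2.4.0.
# ax = world.plot(column="gdp_md_est", scheme="Quantiles", k=5, legend=True)

plt.show()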
28,574 | def plot_elpd(
compare_dict,
color="C0",
xlabels=False,
figsize=None,
textsize=None,
coords=None,
legend=False,
threshold=None,
ax=None,
ic=None,
scale=None,
plot_kwargs=None,
backend=None,
backend_kwargs=None,
show=None,
):
"""
Plot pointwise elpd differences between two or more models.
Parameters
----------
compare_dict : mapping, str -> ELPDData or InferenceData
A dictionary mapping the model name to the object containing inference data or the result
of :func:`arviz.loo` or :func:`arviz.waic` functions.
Refer to :func:`arviz.convert_to_inference_data` for details on possible dict items.
color : str or array_like, optional
Colors of the scatter plot. If color is a str all dots will have the same color.
If it is the size of the observations, each dot will have the specified color.
Otherwise, it will be interpreted as a list of the dims to be used for the color code.
xlabels : bool, optional
Use coords as xticklabels. Defaults to False.
figsize : figure size tuple, optional
If None, size is (8 + numvars, 8 + numvars).
textsize: int, optional
Text size for labels. If None it will be autoscaled based on figsize.
coords : mapping, optional
Coordinates of points to plot. **All** values are used for computation, but only a
subset can be plotted for convenience.
legend : bool, optional
Include a legend to the plot. Only taken into account when color argument is a dim name.
threshold : float
If some elpd difference is larger than ``threshold * elpd.std()``, show its label. If
`None`, no observations will be highlighted.
ic : str, optional
Information Criterion ("loo" for PSIS-LOO, "waic" for WAIC) used to compare models.
Defaults to ``rcParams["stats.information_criterion"]``.
Only taken into account when input is :class:`arviz.InferenceData`.
scale : str, optional
Scale argument passed to :func:`arviz.loo` or :func:`arviz.waic`, see their docs for
details. Only taken into account when input is :class:`arviz.InferenceData`.
plot_kwargs : dicts, optional
Additional keywords passed to :meth:`matplotlib.axes.Axes.scatter`.
ax: axes, optional
:class:`matplotlib.axes.Axes` or :class:`bokeh.plotting.Figure`.
backend: str, optional
Select plotting backend {"matplotlib", "bokeh"}. Defaults to "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or
:func:`bokeh.plotting.figure`.
show : bool, optional
Call backend show function.
Returns
-------
axes : matplotlib axes or bokeh figures
See Also
--------
plot_compare : Summary plot for model comparison.
Examples
--------
Compare pointwise PSIS-LOO for centered and non centered models of the 8-schools problem
using matplotlib.
.. plot::
:context: close-figs
>>> import arviz as az
>>> idata1 = az.load_arviz_data("centered_eight")
>>> idata2 = az.load_arviz_data("non_centered_eight")
>>> az.plot_elpd(
>>> {"centered model": idata1, "non centered model": idata2},
>>> xlabels=True
>>> )
.. bokeh-plot::
:source-position: above
import arviz as az
idata1 = az.load_arviz_data("centered_eight")
idata2 = az.load_arviz_data("non_centered_eight")
az.plot_elpd(
{"centered model": idata1, "non centered model": idata2},
backend="bokeh"
)
"""
valid_ics = ["loo", "waic"]
ic = rcParams["stats.information_criterion"] if ic is None else ic.lower()
scale = rcParams["stats.ic_scale"] if scale is None else scale.lower()
if ic not in valid_ics:
raise ValueError(
("Information Criteria type {} not recognized. " "IC must be in {}").format(
ic, valid_ics
)
)
ic_fun = loo if ic == "loo" else waic
# Make sure all object are ELPDData
compare_dict = deepcopy(compare_dict)
for k, item in compare_dict.items():
if not isinstance(item, ELPDData):
compare_dict[k] = ic_fun(convert_to_inference_data(item), pointwise=True, scale=scale)
ics = [elpd_data.index[0] for elpd_data in compare_dict.values()]
if not all(x == ics[0] for x in ics):
raise SyntaxError(
"All Information Criteria must be of the same kind, but both loo and waic data present"
)
ic = ics[0]
scales = [elpd_data[f"{ic}_scale"] for elpd_data in compare_dict.values()]
if not all(x == scales[0] for x in scales):
raise SyntaxError(
"All Information Criteria must be on the same scale, but {} are present".format(
set(scales)
)
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
numvars = len(compare_dict)
models = list(compare_dict.keys())
if coords is None:
coords = {}
pointwise_data = [get_coords(compare_dict[model][f"{ic}_i"], coords) for model in models]
xdata = np.arange(pointwise_data[0].size)
coord_labels = format_coords_as_labels(pointwise_data[0]) if xlabels else None
if numvars < 2:
raise Exception("Number of models to compare must be 2 or greater.")
elpd_plot_kwargs = dict(
ax=ax,
models=models,
pointwise_data=pointwise_data,
numvars=numvars,
figsize=figsize,
textsize=textsize,
plot_kwargs=plot_kwargs,
xlabels=xlabels,
coord_labels=coord_labels,
xdata=xdata,
threshold=threshold,
legend=legend,
color=color,
backend_kwargs=backend_kwargs,
show=show,
)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_elpd", "elpdplot", backend)
ax = plot(**elpd_plot_kwargs)
return ax
| def plot_elpd(
compare_dict,
color="C0",
xlabels=False,
figsize=None,
textsize=None,
coords=None,
legend=False,
threshold=None,
ax=None,
ic=None,
scale=None,
plot_kwargs=None,
backend=None,
backend_kwargs=None,
show=None,
):
"""
Plot pointwise elpd differences between two or more models.
Parameters
----------
compare_dict : mapping, str -> ELPDData or InferenceData
A dictionary mapping the model name to the object containing inference data or the result
of :func:`arviz.loo` or :func:`arviz.waic` functions.
Refer to :func:`arviz.convert_to_inference_data` for details on possible dict items.
color : str or array_like, optional
Colors of the scatter plot. If color is a str all dots will have the same color.
If it is the size of the observations, each dot will have the specified color.
Otherwise, it will be interpreted as a list of the dims to be used for the color code.
xlabels : bool, optional
Use coords as xticklabels. Defaults to False.
figsize : figure size tuple, optional
If None, size is (8 + numvars, 8 + numvars).
textsize: int, optional
Text size for labels. If None it will be autoscaled based on ``figsize``.
coords : mapping, optional
Coordinates of points to plot. **All** values are used for computation, but only a
subset can be plotted for convenience.
legend : bool, optional
Include a legend to the plot. Only taken into account when color argument is a dim name.
threshold : float
If some elpd difference is larger than ``threshold * elpd.std()``, show its label. If
`None`, no observations will be highlighted.
ic : str, optional
Information Criterion ("loo" for PSIS-LOO, "waic" for WAIC) used to compare models.
Defaults to ``rcParams["stats.information_criterion"]``.
Only taken into account when input is :class:`arviz.InferenceData`.
scale : str, optional
Scale argument passed to :func:`arviz.loo` or :func:`arviz.waic`, see their docs for
details. Only taken into account when input is :class:`arviz.InferenceData`.
plot_kwargs : dicts, optional
Additional keywords passed to :meth:`matplotlib.axes.Axes.scatter`.
ax: axes, optional
:class:`matplotlib.axes.Axes` or :class:`bokeh.plotting.Figure`.
backend: str, optional
Select plotting backend {"matplotlib", "bokeh"}. Defaults to "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or
:func:`bokeh.plotting.figure`.
show : bool, optional
Call backend show function.
Returns
-------
axes : matplotlib axes or bokeh figures
See Also
--------
plot_compare : Summary plot for model comparison.
Examples
--------
Compare pointwise PSIS-LOO for centered and non centered models of the 8-schools problem
using matplotlib.
.. plot::
:context: close-figs
>>> import arviz as az
>>> idata1 = az.load_arviz_data("centered_eight")
>>> idata2 = az.load_arviz_data("non_centered_eight")
>>> az.plot_elpd(
>>> {"centered model": idata1, "non centered model": idata2},
>>> xlabels=True
>>> )
.. bokeh-plot::
:source-position: above
import arviz as az
idata1 = az.load_arviz_data("centered_eight")
idata2 = az.load_arviz_data("non_centered_eight")
az.plot_elpd(
{"centered model": idata1, "non centered model": idata2},
backend="bokeh"
)
"""
valid_ics = ["loo", "waic"]
ic = rcParams["stats.information_criterion"] if ic is None else ic.lower()
scale = rcParams["stats.ic_scale"] if scale is None else scale.lower()
if ic not in valid_ics:
raise ValueError(
("Information Criteria type {} not recognized. " "IC must be in {}").format(
ic, valid_ics
)
)
ic_fun = loo if ic == "loo" else waic
# Make sure all object are ELPDData
compare_dict = deepcopy(compare_dict)
for k, item in compare_dict.items():
if not isinstance(item, ELPDData):
compare_dict[k] = ic_fun(convert_to_inference_data(item), pointwise=True, scale=scale)
ics = [elpd_data.index[0] for elpd_data in compare_dict.values()]
if not all(x == ics[0] for x in ics):
raise SyntaxError(
"All Information Criteria must be of the same kind, but both loo and waic data present"
)
ic = ics[0]
scales = [elpd_data[f"{ic}_scale"] for elpd_data in compare_dict.values()]
if not all(x == scales[0] for x in scales):
raise SyntaxError(
"All Information Criteria must be on the same scale, but {} are present".format(
set(scales)
)
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
numvars = len(compare_dict)
models = list(compare_dict.keys())
if coords is None:
coords = {}
pointwise_data = [get_coords(compare_dict[model][f"{ic}_i"], coords) for model in models]
xdata = np.arange(pointwise_data[0].size)
coord_labels = format_coords_as_labels(pointwise_data[0]) if xlabels else None
if numvars < 2:
raise Exception("Number of models to compare must be 2 or greater.")
elpd_plot_kwargs = dict(
ax=ax,
models=models,
pointwise_data=pointwise_data,
numvars=numvars,
figsize=figsize,
textsize=textsize,
plot_kwargs=plot_kwargs,
xlabels=xlabels,
coord_labels=coord_labels,
xdata=xdata,
threshold=threshold,
legend=legend,
color=color,
backend_kwargs=backend_kwargs,
show=show,
)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_elpd", "elpdplot", backend)
ax = plot(**elpd_plot_kwargs)
return ax
|
53,857 | def transform_share_list_handle(result):
new_result = {}
if len(result) > 1 and isinstance(result[-1], dict):
new_result["items"] = result[:-1]
new_result["nextMarker"] = result[-1]["nextMarker"]
else:
new_result["items"] = result
new_result["nextMarker"] = None
for item in new_result["items"]:
item["handleId"] = item.id
delattr(item, "id")
return new_result
| def transform_share_list_handle(result):
for item in result["items"]:
item["handleId"] = item.id
delattr(item, "id")
return result
|
45,726 | def c_m(df):
"""This function calculates the center of total (precipitation) mass in one time step.
All nan values are replaced with 0 to calculate centroid.
Parameters
----------
df: 2-d xarray for a time step containing input precipitation data.
Returns
-------
cent:
The coordinates of center of mass.
"""
from scipy import ndimage
cent = ndimage.measurements.center_of_mass(np.nan_to_num(df))
return cent
| def c_m(df):
"""Calculate the center of total (precipitation) mass in one timestep.
All nan values are replaced with 0 to calculate centroid.
Parameters
----------
df: 2-d xarray for a time step containing input precipitation data.
Returns
-------
cent:
The coordinates of center of mass.
"""
from scipy import ndimage
cent = ndimage.measurements.center_of_mass(np.nan_to_num(df))
return cent
|
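The c_m pair above wraps a single SciPy call; a self-contained sketch of that centroid computation on a tiny made-up grid (using the top-level scipy.ndimage.center_of_mass alias of the call shown above) is:

import numpy as np
from scipy import ndimage

# Made-up 3x3 precipitation field with one NaN and all mass in the centre cell.
precip = np.array([
    [np.nan, 0.0, 0.0],
    [0.0,    4.0, 0.0],
    [0.0,    0.0, 0.0],
])

# As in c_m, NaNs are zeroed before computing the centre of mass.
cent = ndimage.center_of_mass(np.nan_to_num(precip))
print(cent)  # (1.0, 1.0): the centroid sits on the centre cell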
44,000 | def wires_to_edges(graph) -> Dict[int, Tuple]:
r"""Maps the wires of a register of qubits to corresponding edges.
**Example**
>>> g = nx.complete_graph(4).to_directed()
>>> wires_to_edges(g)
{0: (0, 1),
1: (0, 2),
2: (0, 3),
3: (1, 0),
4: (1, 2),
5: (1, 3),
6: (2, 0),
7: (2, 1),
8: (2, 3),
9: (3, 0),
10: (3, 1),
11: (3, 2)}
>>> g = rx.generators.directed_mesh_graph(4, [0,1,2,3])
>>> wires_to_edges(g)
{0: (0, 1),
1: (0, 2),
2: (0, 3),
3: (1, 0),
4: (1, 2),
5: (1, 3),
6: (2, 0),
7: (2, 1),
8: (2, 3),
9: (3, 0),
10: (3, 1),
11: (3, 2)}
Args:
graph (nx.Graph or rx.Py(Di)Graph): the graph specifying possible edges
Returns:
Dict[int, Tuple]: a mapping from wires to graph edges
"""
if isinstance(graph, nx.Graph):
return {i: edge for i, edge in enumerate(graph.edges)}
elif isinstance(graph, (rx.PyGraph, rx.PyDiGraph)):
gnodes = graph.nodes()
return {
i: (gnodes.index(e[0]), gnodes.index(e[1]))
for i, e in enumerate(sorted(graph.edge_list()))
}
raise ValueError(
f"Input graph must be a nx.Graph or rx.Py(Di)Graph, got {type(graph).__name__}"
)
| def wires_to_edges(graph) -> Dict[int, Tuple]:
r"""Maps the wires of a register of qubits to corresponding edges.
**Example**
>>> g = nx.complete_graph(4).to_directed()
>>> wires_to_edges(g)
{0: (0, 1),
1: (0, 2),
2: (0, 3),
3: (1, 0),
4: (1, 2),
5: (1, 3),
6: (2, 0),
7: (2, 1),
8: (2, 3),
9: (3, 0),
10: (3, 1),
11: (3, 2)}
>>> g = rx.generators.directed_mesh_graph(4, [0,1,2,3])
>>> wires_to_edges(g)
{0: (0, 1),
1: (0, 2),
2: (0, 3),
3: (1, 0),
4: (1, 2),
5: (1, 3),
6: (2, 0),
7: (2, 1),
8: (2, 3),
9: (3, 0),
10: (3, 1),
11: (3, 2)}
Args:
graph (nx.Graph or rx.PyGraph or rx.PyDiGraph): the graph specifying possible edges
Returns:
Dict[int, Tuple]: a mapping from wires to graph edges
"""
if isinstance(graph, nx.Graph):
return {i: edge for i, edge in enumerate(graph.edges)}
elif isinstance(graph, (rx.PyGraph, rx.PyDiGraph)):
gnodes = graph.nodes()
return {
i: (gnodes.index(e[0]), gnodes.index(e[1]))
for i, e in enumerate(sorted(graph.edge_list()))
}
raise ValueError(
f"Input graph must be a nx.Graph or rx.Py(Di)Graph, got {type(graph).__name__}"
)
|
48,608 | def df_getitem_tuple_at_codegen(self, row, col):
"""
Example of generated implementation:
def _df_getitem_tuple_at_impl(self, idx):
row, _ = idx
check_row = False
for i in prange(len(self._dataframe.index)):
if self._dataframe.index[i] == row:
check_row = True
if check_row:
data = self._dataframe._data[2]
res_data = pandas.Series(data, index=self._dataframe.index)
return res_data.at[row]
raise IndexingError('Index is out of bounds for axis')
"""
func_lines = ['def _df_getitem_tuple_at_impl(self, idx):']
check_col = False
for i in range(len(self.columns)):
if self.columns[i] == col:
check_col = True
col_idx = i
if check_col == True: # noqa
func_lines += [' row, _ = idx',
' check_row = False',
' for i in prange(len(self._dataframe.index)):',
' if self._dataframe.index[i] == row:',
' check_row = True',
' if check_row:',
f' data = self._dataframe._data[{col_idx}]',
' res_data = pandas.Series(data, index=self._dataframe.index)',
' return res_data.at[row]',
" raise IndexingError('Index is out of bounds for axis')"]
else:
raise IndexingError('Index is out of bounds for axis')
func_text = '\n'.join(func_lines)
global_vars = {'pandas': pandas,
'get_dataframe_data': get_dataframe_data,
'prange': prange,
'IndexingError': IndexingError}
return func_text, global_vars
| def df_getitem_tuple_at_codegen(self, row, col):
"""
Example of generated implementation:
def _df_getitem_tuple_at_impl(self, idx):
row, _ = idx
check_row = False
for i in prange(len(self._dataframe.index)):
if self._dataframe.index[i] == row:
check_row = True
if check_row:
data = self._dataframe._data[2]
res_data = pandas.Series(data, index=self._dataframe.index)
return res_data.at[row]
raise IndexingError('Index is out of bounds for axis')
"""
func_lines = ['def _df_getitem_tuple_at_impl(self, idx):']
for i in range(len(self.columns)):
if self.columns[i] == col:
func_lines += [
' row, _ = idx',
' check_row = False',
' for i in prange(len(self._dataframe.index)):',
' if self._dataframe.index[i] == row:',
' check_row = True',
' if check_row:',
f' data = self._dataframe._data[{i}]',
' res_data = pandas.Series(data, index=self._dataframe.index)',
' return res_data.at[row]',
' raise IndexingError("Index is out of bounds for axis")'
]
break
else:
raise IndexingError('Index is out of bounds for axis')
func_text = '\n'.join(func_lines)
global_vars = {'pandas': pandas,
'get_dataframe_data': get_dataframe_data,
'prange': prange,
'IndexingError': IndexingError}
return func_text, global_vars
|
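The df_getitem_tuple_at_codegen pair above returns generated source text plus a globals dict for the caller to compile; a stripped-down, hypothetical sketch of that build-source-then-exec pattern (the column names and accessor name here are invented for illustration, not sdc internals) is:

import pandas

columns = ('A', 'B', 'C')
col = 'B'

# Build the source of a tiny accessor, mirroring the for/else structure above.
func_lines = ['def _getitem_impl(df, row):']
for i, name in enumerate(columns):
    if name == col:
        func_lines += [
            f'    data = df["{name}"]',  # stand-in for self._dataframe._data[i]
            '    return data[row]',
        ]
        break
else:
    raise KeyError(col)

func_text = '\n'.join(func_lines)
global_vars = {}
exec(func_text, global_vars)              # compile the generated function
_getitem_impl = global_vars['_getitem_impl']

df = pandas.DataFrame({'A': [1], 'B': [2], 'C': [3]})
print(_getitem_impl(df, 0))               # -> 2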
48,606 | def _dataframe_reduce_columns_codegen_isna(func_name, func_params, columns, df):
results = []
joined = ', '.join(func_params)
func_lines = [f'def _df_{func_name}_impl({joined}):']
ind = df_index_codegen_all(df)
for i, c in enumerate(columns):
result_c = f'result_{c}'
func_lines += [f' series_{c} = pandas.Series(get_dataframe_data({func_params[0]}, {i}))',
f' {result_c} = series_{c}.{func_name}()']
results.append((columns[i], result_c))
data = ', '.join(f'"{col}": {data}' for col, data in results)
func_lines += [f' return pandas.DataFrame({{{data}}}{ind})']
func_text = '\n'.join(func_lines)
global_vars = {'pandas': pandas,
'get_dataframe_data': get_dataframe_data}
return func_text, global_vars
| def _dataframe_reduce_columns_codegen_isna(func_name, func_params, columns, df):
results = []
joined = ', '.join(func_params)
func_lines = [f'def _df_{func_name}_impl({joined}):']
ind = df_index_codegen_all(df)
for i, c in enumerate(columns):
result_c = f'result_{c}'
func_lines += [f' series_{c} = pandas.Series(get_dataframe_data({func_params[0]}, {i}))',
f' {result_c} = series_{c}.{func_name}()']
results.append((columns[i], result_c))
data = ', '.join(f'"{col}": {data}' for col, data in results)
index = 'df.index'
func_lines.append(f" return pandas.DataFrame({{{data}}}, index={index})")
func_text = '\n'.join(func_lines)
global_vars = {'pandas': pandas,
'get_dataframe_data': get_dataframe_data}
return func_text, global_vars
|
14,880 | def validate(integrations: Dict[str, Integration], config: Config):
"""Validate coverage."""
codeowners_path = config.root / ".coveragerc"
referenced = set()
with codeowners_path.open("rt") as fp:
for line in fp:
line = line.strip()
if not line.startswith("homeassistant/components/"):
continue
referenced.add(line.split("/")[2])
for domain in integrations:
referenced.discard(domain)
if referenced:
raise RuntimeError(
f".coveragerc references invalid integrations: {', '.join(referenced)}"
)
| def validate(integrations: Dict[str, Integration], config: Config):
"""Validate coverage."""
coverage_path = config.root / ".coveragerc"
referenced = set()
with coverage_path.open("rt") as fp:
for line in fp:
line = line.strip()
if not line.startswith("homeassistant/components/"):
continue
referenced.add(line.split("/")[2])
for domain in integrations:
referenced.discard(domain)
if referenced:
raise RuntimeError(
f".coveragerc references invalid integrations: {', '.join(referenced)}"
)
|
39,196 | def compute_power_spectral_density_matrix(
specgram: Tensor,
mask: Optional[Tensor] = None,
normalize: bool = True,
eps: float = 1e-10,
) -> Tensor:
"""Compute cross-channel power spectral density (PSD) matrix.
Args:
specgram (Tensor): Multi-channel complex-valued spectrum.
Tensor of dimension `(..., channel, freq, time)`
mask (Tensor or None, optional): Real-valued Time-Frequency mask
for normalization. Tensor of dimension `(..., freq, time)`
(Default: ``None``)
normalize (bool, optional): whether to normalize the mask along the time dimension.
eps (float, optional): a value added to the denominator in mask normalization. (Default: ``1e-10``)
Returns:
Tensor: The complex-valued PSD matrix of the input spectrum.
Tensor of dimension `(..., freq, channel, channel)`
"""
specgram = specgram.transpose(-3, -2) # shape (freq, channel, time)
# outer product:
# (..., ch_1, time) x (..., ch_2, time) -> (..., time, ch_1, ch_2)
psd = torch.einsum("...ct,...et->...tce", [specgram, specgram.conj()])
if mask is not None:
# Normalized mask along time dimension:
if normalize:
mask = mask / (mask.sum(dim=-1, keepdim=True) + eps)
psd = psd * mask[..., None, None]
psd = psd.sum(dim=-3)
return psd
| def compute_power_spectral_density_matrix(
specgram: Tensor,
mask: Optional[Tensor] = None,
normalize: bool = True,
eps: float = 1e-10,
) -> Tensor:
"""Compute cross-channel power spectral density (PSD) matrix.
Args:
specgram (Tensor): Multi-channel complex-valued spectrum.
Tensor of dimension `(..., channel, freq, time)`
mask (Tensor or None, optional): Real-valued time-frequency mask
for normalization. Tensor of dimension `(..., freq, time)`
(Default: ``None``)
normalize (bool, optional): whether to normalize the mask along the time dimension.
eps (float, optional): a value added to the denominator in mask normalization. (Default: ``1e-10``)
Returns:
Tensor: The complex-valued PSD matrix of the input spectrum.
Tensor of dimension `(..., freq, channel, channel)`
"""
specgram = specgram.transpose(-3, -2) # shape (freq, channel, time)
# outer product:
# (..., ch_1, time) x (..., ch_2, time) -> (..., time, ch_1, ch_2)
psd = torch.einsum("...ct,...et->...tce", [specgram, specgram.conj()])
if mask is not None:
# Normalized mask along time dimension:
if normalize:
mask = mask / (mask.sum(dim=-1, keepdim=True) + eps)
psd = psd * mask[..., None, None]
psd = psd.sum(dim=-3)
return psd
|
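A shape-only sketch of the einsum outer product and mask normalization documented in the pair above (random tensors, so the values are meaningless; only the shapes matter):

import torch

channel, freq, time = 4, 257, 100
specgram = torch.randn(channel, freq, time, dtype=torch.cfloat)
mask = torch.rand(freq, time)

spec = specgram.transpose(-3, -2)                       # (freq, channel, time)
psd = torch.einsum("...ct,...et->...tce", [spec, spec.conj()])
mask = mask / (mask.sum(dim=-1, keepdim=True) + 1e-10)  # normalize along time
psd = (psd * mask[..., None, None]).sum(dim=-3)

print(psd.shape)  # torch.Size([257, 4, 4]) -> (freq, channel, channel)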
31,955 | def fetch_incidents_command(client):
last_run = demisto.getLastRun()
start_time: Any
# pageToken fetched from demisto lastRun
pageToken = int()
response = {}
incidents = []
if 'start_time' not in last_run.keys():
pageToken = -1
response = client.get_incidents(pageSize=1, pageToken=pageToken, first_fetch=FIRST_FETCH)
if 'incidents' in response.keys():
start_time = response['incidents'][0]['date']
start_time = dateparser.parse(start_time)
message_ids = get_incident_message_ids(client, response['incidents'][0]['id'])
response['incidents'][0]['message_ids'] = message_ids
curr_incident = {'rawJSON': json.dumps(response['incidents'][0]), 'details': json.dumps(response['incidents'][0])}
incidents.append(curr_incident)
if last_run and 'pageToken' in last_run.keys():
pageToken = last_run.get('pageToken')
if last_run and 'start_time' in last_run.keys():
start_time = dateparser.parse(last_run.get('start_time'))
start_time = start_time.timestamp()
incidents_data = get_incidents_list(client, pageToken=pageToken, first_fetch=FIRST_FETCH)
pageToken = get_page_token(client, pageToken=pageToken)
last_time = start_time
for incident in incidents_data:
dt = incident['date']
dt = dateparser.parse(dt).timestamp()
# Update last run and add incident if the incident is newer than last fetch
if dt > start_time:
curr_incident = {'rawJSON': json.dumps(incident), 'details': json.dumps(incident)}
last_time = dt
incidents.append(curr_incident)
demisto.debug(str(len(incidents)))
# Save the next_run as a dict with the start_time key to be stored
demisto.setLastRun({'start_time': str(last_time), 'pageToken': pageToken})
return incidents
| def fetch_incidents_command(client):
last_run = demisto.getLastRun()
start_time: Any
# pageToken fetched from demisto lastRun
pageToken = int()
response = {}
incidents = []
if 'start_time' not in last_run.keys():
pageToken = -1
response = client.get_incidents(pageSize=1, pageToken=pageToken, first_fetch=FIRST_FETCH)
if 'incidents' in response.keys():
start_time = response['incidents'][0]['date']
start_time = dateparser.parse(start_time)
message_ids = get_incident_message_ids(client, response['incidents'][0]['id'])
response['incidents'][0]['message_ids'] = message_ids
curr_incident = {'rawJSON': json.dumps(response['incidents'][0]), 'details': json.dumps(response['incidents'][0])}
incidents.append(curr_incident)
if last_run and 'pageToken' in last_run.keys():
pageToken = last_run.get('pageToken')
if last_run and 'start_time' in last_run.keys():
start_time = dateparser.parse(last_run.get('start_time'))
start_time = start_time.timestamp()
incidents_data = get_incidents_list(client, pageToken=pageToken, first_fetch=FIRST_FETCH)
pageToken = get_page_token(client, pageToken=pageToken)
last_time = start_time
for incident in incidents_data:
dt = incident['date']
dt = dateparser.parse(dt).timestamp()
# Update last run and add incident if the incident is newer than last fetch
if dt > start_time:
curr_incident = {'rawJSON': json.dumps(incident), 'details': json.dumps(incident)}
last_time = dt
incidents.append(curr_incident)
demisto.debug(f'fetched {len(incidents)} incidents')
# Save the next_run as a dict with the start_time key to be stored
demisto.setLastRun({'start_time': str(last_time), 'pageToken': pageToken})
return incidents
|
41,365 | def test_concat_non_standard_index():
# Test that merging two IamDataFrames with identical, non-standard index dimensions
# preserves the index.
df1 = IamDataFrame(
pd.DataFrame(
[["model_a", "scenario_a", "region_a", "variable_a", "unit", 1, 1]],
columns=IAMC_IDX + ["version", 2005],
),
index=META_IDX + ["version"],
)
df2 = IamDataFrame(
pd.DataFrame(
[["model_a", "scenario_a", "region_a", "variable_a", "unit", 2, 2]],
columns=IAMC_IDX + ["version", 2005],
),
index=META_IDX + ["version"],
)
exp = IamDataFrame(
pd.DataFrame(
[
["model_a", "scenario_a", "region_a", "variable_a", "unit", 1, 1],
["model_a", "scenario_a", "region_a", "variable_a", "unit", 2, 2],
],
columns=IAMC_IDX + ["version", 2005],
),
index=META_IDX + ["version"],
)
assert_iamframe_equal(exp, concat([df1, df2]))
| def test_concat_non_default_index():
# Test that merging two IamDataFrames with identical, non-standard index dimensions
# preserves the index.
df1 = IamDataFrame(
pd.DataFrame(
[["model_a", "scenario_a", "region_a", "variable_a", "unit", 1, 1]],
columns=IAMC_IDX + ["version", 2005],
),
index=META_IDX + ["version"],
)
df2 = IamDataFrame(
pd.DataFrame(
[["model_a", "scenario_a", "region_a", "variable_a", "unit", 2, 2]],
columns=IAMC_IDX + ["version", 2005],
),
index=META_IDX + ["version"],
)
exp = IamDataFrame(
pd.DataFrame(
[
["model_a", "scenario_a", "region_a", "variable_a", "unit", 1, 1],
["model_a", "scenario_a", "region_a", "variable_a", "unit", 2, 2],
],
columns=IAMC_IDX + ["version", 2005],
),
index=META_IDX + ["version"],
)
assert_iamframe_equal(exp, concat([df1, df2]))
|
38,241 | def _create_parlai_format(dpath: str):
datatypes = ['train', 'valid', 'test']
languages = ['En_', 'Zh_', 'Fr_', 'Ko_', 'Id_', 'Jp_', 'It_']
for language in languages:
for datatype in datatypes:
datatype_full = language + datatype + '_tmp'
datatype_rename = language + datatype
load_path = os.path.join(dpath, f'{datatype_full}.json')
save_path = os.path.join(dpath, f'{datatype_rename}.txt')
with PathManager.open(load_path, 'r', encoding='utf8') as f_read:
data = json.load(f_read)
with PathManager.open(save_path, 'w', encoding='utf8') as f_write:
for content in data:
line_num = 0
personas = content['persona']
dialogs = content['dialogue']
for persona in personas:
line_num += 1
f_write.write(str(line_num) + ' your persona:' + persona + '\n')
for utterance_A, utterance_B in dialogs:
line_num += 1
f_write.write(
str(line_num)
+ ' '
+ utterance_A
+ '\t'
+ utterance_B
+ '\n'
)
os.remove(load_path)
| def _create_parlai_format(dpath: str):
datatypes = ['train', 'valid', 'test']
languages = ['En_', 'Zh_', 'Fr_', 'Ko_', 'Id_', 'Jp_', 'It_']
for language in languages:
for datatype in datatypes:
datatype_full = language + datatype + '_tmp'
datatype_rename = language + datatype
load_path = os.path.join(dpath, f'{datatype_full}.json')
save_path = os.path.join(dpath, f'{datatype_rename}.txt')
with PathManager.open(load_path, 'r', encoding='utf8') as f_read:
data = json.load(f_read)
with PathManager.open(save_path, 'w', encoding='utf8') as f_write:
for content in data:
line_num = 0
personas = content['persona']
dialogs = content['dialogue']
for persona in personas:
line_num += 1
f_write.write(str(line_num) + ' your persona:' + persona + '\n')
for utterance_A, utterance_B in dialogs:
line_num += 1
f_write.write(f"{line_num} {utterance_A}\t{utterance_B}\n")
os.remove(load_path)
|
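To make the output line format of the _create_parlai_format pair above concrete, here is a tiny in-memory illustration with a made-up two-turn dialogue (no files are read or written):

content = {
    "persona": ["i like tea."],
    "dialogue": [["hello!", "hi, how are you?"], ["good, you?", "great."]],
}

lines = []
line_num = 0
for persona in content["persona"]:
    line_num += 1
    lines.append(f"{line_num} your persona:{persona}")
for utterance_A, utterance_B in content["dialogue"]:
    line_num += 1
    lines.append(f"{line_num} {utterance_A}\t{utterance_B}")

print("\n".join(lines))
# 1 your persona:i like tea.
# followed by tab-separated utterance pairs, e.g. "2 hello!<TAB>hi, how are you?"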
30,462 | def main():
# get incident fields
res = demisto.executeCommand('demisto-api-get', {'uri': '/incidentfields'})
if is_error(res):
return_error(res[0]['Contents'])
fields = res[0]['Contents']['response']
# 'fields' contains non-incident fields, as well, so let's make a version containing only incident fields
incident_fields = [field for field in fields if field['id'].startswith('incident_')]
# get arguments
args = demisto.args()
incident_type = args['incident_type']
exclude_system = False
if 'custom' in args and argToBoolean(args['custom']) is True:
exclude_system = True
name_key = 'name'
if 'short_names' in args and argToBoolean(args['short_names']) is True:
name_key = 'cliName'
explicit_only = False
if 'explicit_only' in args and argToBoolean(args['explicit_only']) is True:
explicit_only = True
# generate results
types = []
if exclude_system is True:
# only return non-system fields
for field in incident_fields: # using multiple if statements for readability
if field['system'] is False: # exclude system fields
if field['associatedToAll'] is True and explicit_only is False:
# if explicit_only is false, include fields associated to all incident types
types.append(field[name_key])
elif field['associatedTypes'] is not None and incident_type in field['associatedTypes']:
# include fields where incident type is in associatedTypes
types.append(field[name_key])
else:
# return all fields
for field in incident_fields: # using multiple if statements for readability
if field['associatedToAll'] is True and explicit_only is False:
# if explicit_only is false, include fields associated to all incident types
types.append(field[name_key])
elif field['associatedTypes'] is not None and incident_type in field['associatedTypes']:
# include fields where incident type is in associatedTypes
types.append(field[name_key])
# output results
if 'pprint' in args and argToBoolean(args['pprint']) is True:
demisto.results(pformat(types))
else:
demisto.results(types)
| def main():
# get incident fields
res = demisto.executeCommand('demisto-api-get', {'uri': '/incidentfields'})
if is_error(res):
return_error(res[0]['Contents'])
fields = res[0]['Contents']['response']
# 'fields' contains non-incident fields, as well, so let's make a version containing only incident fields
incident_fields = [field for field in fields if field['id'].startswith('incident_')]
# get arguments
args = demisto.args()
incident_type = args['incident_type']
exclude_system = False
if 'custom' in args and argToBoolean(args['custom']):
exclude_system = True
name_key = 'name'
if 'short_names' in args and argToBoolean(args['short_names']) is True:
name_key = 'cliName'
explicit_only = False
if 'explicit_only' in args and argToBoolean(args['explicit_only']) is True:
explicit_only = True
# generate results
types = []
if exclude_system is True:
# only return non-system fields
for field in incident_fields: # using multiple if statements for readability
if field['system'] is False: # exclude system fields
if field['associatedToAll'] is True and explicit_only is False:
# if explicit_only is false, include fields associated to all incident types
types.append(field[name_key])
elif field['associatedTypes'] is not None and incident_type in field['associatedTypes']:
# include fields where incident type is in associatedTypes
types.append(field[name_key])
else:
# return all fields
for field in incident_fields: # using multiple if statements for readability
if field['associatedToAll'] is True and explicit_only is False:
# if explicit_only is false, include fields associated to all incident types
types.append(field[name_key])
elif field['associatedTypes'] is not None and incident_type in field['associatedTypes']:
# include fields where incident type is in associatedTypes
types.append(field[name_key])
# output results
if 'pprint' in args and argToBoolean(args['pprint']) is True:
demisto.results(pformat(types))
else:
demisto.results(types)
|
41,729 | def test_study_optimize_with_multiple_search_spaces():
# type: () -> None
def objective(trial):
# type: (Trial) -> float
a = trial.suggest_int('a', 0, 100)
b = trial.suggest_uniform('b', -100, 100)
return a * b
# Run 3 trials with a search space.
search_space_0 = {
'a': [0, 50],
'b': [-50, 0, 50]
} # type: Dict[str, List[GridValueType]]
sampler_0 = samplers.GridSampler(search_space_0)
study = optuna.create_study(sampler=sampler_0)
study.optimize(objective, n_trials=3)
assert len(study.trials) == 3
for t in study.trials:
sampler_0._same_search_space(t.system_attrs['search_space'])
# Run 2 trials with another space.
search_space_1 = {'a': [0, 25], 'b': [-50]} # type: Dict[str, List[GridValueType]]
sampler_1 = samplers.GridSampler(search_space_1)
study.sampler = sampler_1
study.optimize(objective, n_trials=2)
assert not sampler_0._same_search_space(sampler_1._search_space)
assert len(study.trials) == 5
for t in study.trials[:3]:
sampler_0._same_search_space(t.system_attrs['search_space'])
for t in study.trials[3: 5]:
sampler_1._same_search_space(t.system_attrs['search_space'])
# Run 3 trials with the first search space again.
study.sampler = sampler_0
study.optimize(objective, n_trials=3)
assert len(study.trials) == 8
for t in study.trials[:3]:
sampler_0._same_search_space(t.system_attrs['search_space'])
for t in study.trials[3: 5]:
sampler_1._same_search_space(t.system_attrs['search_space'])
for t in study.trials[5:]:
sampler_0._same_search_space(t.system_attrs['search_space'])
| def test_study_optimize_with_multiple_search_spaces():
# type: () -> None
def objective(trial):
# type: (Trial) -> float
a = trial.suggest_int('a', 0, 100)
b = trial.suggest_uniform('b', -100, 100)
return a * b
# Run 3 trials with a search space.
search_space_0 = {
'a': [0, 50],
'b': [-50, 0, 50]
} # type: Dict[str, List[GridValueType]]
sampler_0 = samplers.GridSampler(search_space_0)
study = optuna.create_study(sampler=sampler_0)
study.optimize(objective, n_trials=3)
assert len(study.trials) == 3
for t in study.trials:
sampler_0._same_search_space(t.system_attrs['search_space'])
# Run 2 trials with another space.
search_space_1 = {'a': [0, 25], 'b': [-50]} # type: Dict[str, List[GridValueType]]
sampler_1 = samplers.GridSampler(search_space_1)
study.sampler = sampler_1
study.optimize(objective, n_trials=2)
assert not sampler_0._same_search_space(sampler_1._search_space)
assert len(study.trials) == 5
for t in study.trials[:3]:
sampler_0._same_search_space(t.system_attrs['search_space'])
for t in study.trials[3:5]:
sampler_1._same_search_space(t.system_attrs['search_space'])
# Run 3 trials with the first search space again.
study.sampler = sampler_0
study.optimize(objective, n_trials=3)
assert len(study.trials) == 8
for t in study.trials[:3]:
sampler_0._same_search_space(t.system_attrs['search_space'])
for t in study.trials[3: 5]:
sampler_1._same_search_space(t.system_attrs['search_space'])
for t in study.trials[5:]:
sampler_0._same_search_space(t.system_attrs['search_space'])
|
30,123 | def summarize_gather_at(rank, tax_assign, gather_results, *, skip_idents = [],
keep_full_identifiers=False,
keep_identifier_versions=False, best_only=False,
seen_perfect=set(),
estimate_query_ani=False):
"""
Summarize gather results at specified taxonomic rank
"""
# init dictionaries
sum_uniq_weighted = defaultdict(lambda: defaultdict(float))
# store together w/ ^ instead?
sum_uniq_to_query = defaultdict(lambda: defaultdict(float))
sum_uniq_bp = defaultdict(lambda: defaultdict(float))
query_info = {}
ksize,scaled,query_nhashes=None,None,None
for row in gather_results:
# get essential gather info
query_name = row['query_name']
f_unique_to_query = float(row['f_unique_to_query'])
f_uniq_weighted = float(row['f_unique_weighted'])
unique_intersect_bp = int(row['unique_intersect_bp'])
query_md5 = row['query_md5']
query_filename = row['query_filename']
# get query_bp
if query_name not in query_info.keys(): #REMOVING THIS AFFECTS GATHER RESULTS!!! BUT query bp should always be same for same query? bug?
if "query_nhashes" in row.keys():
query_nhashes = int(row["query_nhashes"])
if "query_bp" in row.keys():
query_bp = int(row["query_bp"])
else:
query_bp = unique_intersect_bp + int(row['remaining_bp'])
# store query info
query_info[query_name] = QueryInfo(query_md5=query_md5, query_filename=query_filename, query_bp=query_bp, query_hashes = query_nhashes)
if estimate_query_ani and (not ksize or not scaled): # just need to set these once. BUT, if we have these, should we check for compatibility when loading the gather file?
if "ksize" in row.keys():
ksize = int(row['ksize'])
scaled = int(row['scaled'])
else:
estimate_query_ani=False
notify("WARNING: Please run gather with sourmash >= 4.4 to estimate query ANI at rank. Continuing without ANI...")
match_ident = row['name']
# 100% match? are we looking at something in the database?
if f_unique_to_query >= 1.0 and query_name not in seen_perfect: # only want to notify once, not for each rank
ident = get_ident(match_ident,
keep_full_identifiers=keep_full_identifiers,
keep_identifier_versions=keep_identifier_versions)
seen_perfect.add(query_name)
notify(f'WARNING: 100% match! Is query "{query_name}" identical to its database match, {ident}?')
# get lineage for match
lineage = find_match_lineage(match_ident, tax_assign,
skip_idents=skip_idents,
keep_full_identifiers=keep_full_identifiers,
keep_identifier_versions=keep_identifier_versions)
# ident was in skip_idents
if not lineage:
continue
# summarize at rank!
lineage = pop_to_rank(lineage, rank)
assert lineage[-1].rank == rank, lineage[-1]
# record info
sum_uniq_to_query[query_name][lineage] += f_unique_to_query
sum_uniq_weighted[query_name][lineage] += f_uniq_weighted
sum_uniq_bp[query_name][lineage] += unique_intersect_bp
# sort and store each as SummarizedGatherResult
sum_uniq_to_query_sorted = []
for query_name, lineage_weights in sum_uniq_to_query.items():
qInfo = query_info[query_name]
sumgather_items = list(lineage_weights.items())
sumgather_items.sort(key = lambda x: -x[1])
query_ani = None
if best_only:
lineage, fraction = sumgather_items[0]
if fraction > 1:
raise ValueError(f"The tax summary of query '{query_name}' is {fraction}, which is > 100% of the query!! This should not be possible. Please check that your input files come directly from a single gather run per query.")
elif fraction == 0:
continue
f_weighted_at_rank = sum_uniq_weighted[query_name][lineage]
bp_intersect_at_rank = sum_uniq_bp[query_name][lineage]
if estimate_query_ani:
query_ani = containment_to_distance(fraction, ksize, scaled,
n_unique_kmers= qInfo.query_hashes, sequence_len_bp= qInfo.query_bp).ani
sres = SummarizedGatherResult(query_name, rank, fraction, lineage, qInfo.query_md5,
qInfo.query_filename, f_weighted_at_rank, bp_intersect_at_rank, query_ani)
sum_uniq_to_query_sorted.append(sres)
else:
total_f_weighted= 0.0
total_f_classified = 0.0
total_bp_classified = 0
for lineage, fraction in sumgather_items:
query_ani=None
if fraction > 1:
raise ValueError(f"The tax summary of query '{query_name}' is {fraction}, which is > 100% of the query!! This should not be possible. Please check that your input files come directly from a single gather run per query.")
elif fraction == 0:
continue
total_f_classified += fraction
f_weighted_at_rank = sum_uniq_weighted[query_name][lineage]
total_f_weighted += f_weighted_at_rank
bp_intersect_at_rank = int(sum_uniq_bp[query_name][lineage])
total_bp_classified += bp_intersect_at_rank
if estimate_query_ani:
query_ani = containment_to_distance(fraction, ksize, scaled,
n_unique_kmers=qInfo.query_hashes, sequence_len_bp=qInfo.query_bp).ani
sres = SummarizedGatherResult(query_name, rank, fraction, lineage, query_md5,
query_filename, f_weighted_at_rank, bp_intersect_at_rank, query_ani)
sum_uniq_to_query_sorted.append(sres)
# record unclassified
lineage = ()
query_ani = None
fraction = 1.0 - total_f_classified
if fraction > 0:
f_weighted_at_rank = 1.0 - total_f_weighted
bp_intersect_at_rank = qInfo.query_bp - total_bp_classified
sres = SummarizedGatherResult(query_name, rank, fraction, lineage, query_md5,
query_filename, f_weighted_at_rank, bp_intersect_at_rank, query_ani)
sum_uniq_to_query_sorted.append(sres)
return sum_uniq_to_query_sorted, seen_perfect, estimate_query_ani
| def summarize_gather_at(rank, tax_assign, gather_results, *, skip_idents = [],
keep_full_identifiers=False,
keep_identifier_versions=False, best_only=False,
seen_perfect=set(),
estimate_query_ani=False):
"""
Summarize gather results at specified taxonomic rank
"""
# init dictionaries
sum_uniq_weighted = defaultdict(lambda: defaultdict(float))
# store together w/ ^ instead?
sum_uniq_to_query = defaultdict(lambda: defaultdict(float))
sum_uniq_bp = defaultdict(lambda: defaultdict(float))
query_info = {}
ksize,scaled,query_nhashes=None,None,None
for row in gather_results:
# get essential gather info
query_name = row['query_name']
f_unique_to_query = float(row['f_unique_to_query'])
f_uniq_weighted = float(row['f_unique_weighted'])
unique_intersect_bp = int(row['unique_intersect_bp'])
query_md5 = row['query_md5']
query_filename = row['query_filename']
# get query_bp
if query_name not in query_info.keys(): #REMOVING THIS AFFECTS GATHER RESULTS!!! BUT query bp should always be same for same query? bug?
if "query_nhashes" in row.keys():
query_nhashes = int(row["query_nhashes"])
if "query_bp" in row.keys():
query_bp = int(row["query_bp"])
else:
query_bp = unique_intersect_bp + int(row['remaining_bp'])
# store query info
query_info[query_name] = QueryInfo(query_md5=query_md5, query_filename=query_filename, query_bp=query_bp, query_hashes = query_nhashes)
if estimate_query_ani and (not ksize or not scaled): # just need to set these once. BUT, if we have these, should we check for compatibility when loading the gather file?
if "ksize" in row.keys():
ksize = int(row['ksize'])
scaled = int(row['scaled'])
else:
estimate_query_ani=False
notify("WARNING: Please run gather with sourmash >= 4.4 to estimate query ANI at rank. Continuing without ANI...")
match_ident = row['name']
# 100% match? are we looking at something in the database?
if f_unique_to_query >= 1.0 and query_name not in seen_perfect: # only want to notify once, not for each rank
ident = get_ident(match_ident,
keep_full_identifiers=keep_full_identifiers,
keep_identifier_versions=keep_identifier_versions)
seen_perfect.add(query_name)
notify(f'WARNING: 100% match! Is query "{query_name}" identical to its database match, {ident}?')
# get lineage for match
lineage = find_match_lineage(match_ident, tax_assign,
skip_idents=skip_idents,
keep_full_identifiers=keep_full_identifiers,
keep_identifier_versions=keep_identifier_versions)
# ident was in skip_idents
if not lineage:
continue
# summarize at rank!
lineage = pop_to_rank(lineage, rank)
assert lineage[-1].rank == rank, lineage[-1]
# record info
sum_uniq_to_query[query_name][lineage] += f_unique_to_query
sum_uniq_weighted[query_name][lineage] += f_uniq_weighted
sum_uniq_bp[query_name][lineage] += unique_intersect_bp
# sort and store each as SummarizedGatherResult
sum_uniq_to_query_sorted = []
for query_name, lineage_weights in sum_uniq_to_query.items():
qInfo = query_info[query_name]
sumgather_items = list(lineage_weights.items())
sumgather_items.sort(key = lambda x: -x[1])
query_ani = None
if best_only:
lineage, fraction = sumgather_items[0]
if fraction > 1:
raise ValueError(f"The tax summary of query '{query_name}' is {fraction}, which is > 100% of the query!! This should not be possible. Please check that your input files come directly from a single gather run per query.")
elif fraction == 0:
continue
f_weighted_at_rank = sum_uniq_weighted[query_name][lineage]
bp_intersect_at_rank = sum_uniq_bp[query_name][lineage]
if estimate_query_ani:
query_ani = containment_to_distance(fraction, ksize, scaled,
n_unique_kmers= qInfo.query_hashes, sequence_len_bp= qInfo.query_bp).ani
sres = SummarizedGatherResult(query_name, rank, fraction, lineage, qInfo.query_md5,
qInfo.query_filename, f_weighted_at_rank, bp_intersect_at_rank, query_ani)
sum_uniq_to_query_sorted.append(sres)
else:
total_f_weighted= 0.0
total_f_classified = 0.0
total_bp_classified = 0
for lineage, fraction in sumgather_items:
query_ani = None
if fraction > 1:
raise ValueError(f"The tax summary of query '{query_name}' is {fraction}, which is > 100% of the query!! This should not be possible. Please check that your input files come directly from a single gather run per query.")
elif fraction == 0:
continue
total_f_classified += fraction
f_weighted_at_rank = sum_uniq_weighted[query_name][lineage]
total_f_weighted += f_weighted_at_rank
bp_intersect_at_rank = int(sum_uniq_bp[query_name][lineage])
total_bp_classified += bp_intersect_at_rank
if estimate_query_ani:
query_ani = containment_to_distance(fraction, ksize, scaled,
n_unique_kmers=qInfo.query_hashes, sequence_len_bp=qInfo.query_bp).ani
sres = SummarizedGatherResult(query_name, rank, fraction, lineage, query_md5,
query_filename, f_weighted_at_rank, bp_intersect_at_rank, query_ani)
sum_uniq_to_query_sorted.append(sres)
# record unclassified
lineage = ()
query_ani = None
fraction = 1.0 - total_f_classified
if fraction > 0:
f_weighted_at_rank = 1.0 - total_f_weighted
bp_intersect_at_rank = qInfo.query_bp - total_bp_classified
sres = SummarizedGatherResult(query_name, rank, fraction, lineage, query_md5,
query_filename, f_weighted_at_rank, bp_intersect_at_rank, query_ani)
sum_uniq_to_query_sorted.append(sres)
return sum_uniq_to_query_sorted, seen_perfect, estimate_query_ani
|
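A minimal, illustrative sketch (not the sourmash implementation above) of the per-query bookkeeping that the summarization loop performs: accumulate per-lineage fractions, sort them descending, and derive the unclassified remainder that gets recorded with an empty lineage. The lineage tuples and fractions here are made up.

from collections import defaultdict

sum_fracs = defaultdict(float)
sum_fracs[("d__Bacteria", "p__Proteobacteria")] += 0.55
sum_fracs[("d__Bacteria", "p__Firmicutes")] += 0.30

items = sorted(sum_fracs.items(), key=lambda kv: -kv[1])          # largest fraction first
total_classified = sum(frac for _, frac in items if frac > 0)
unclassified = 1.0 - total_classified                              # stored with lineage = ()

print(items)
print(round(unclassified, 2))  # 0.15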
37,108 | def pass_manager_drawer(pass_manager, filename, style=None, raw=False):
"""
Draws the pass manager.
This function needs `pydot <https://github.com/erocarrera/pydot>`, which in turn needs
`Graphviz <https://www.graphviz.org/>` to be installed.
Args:
pass_manager (PassManager): the pass manager to be drawn
filename (str): file path to save image to
style (dict or OrderedDict): keys are the pass classes and the values are
the colors to make them. An example can be seen in the DEFAULT_STYLE. An ordered
dict can be used to ensure a priority coloring when pass falls into multiple
categories. Any values not included in the provided dict will be filled in from
the default dict
raw (Bool) : True if you want to save the raw Dot output not an image. The
default is False.
Returns:
PIL.Image or None: an in-memory representation of the pass manager. Or None if
no image was generated or PIL is not installed.
Raises:
ImportError: when nxpd or pydot not installed.
VisualizationError: If raw=True and filename=None.
"""
try:
import subprocess
_PROC = subprocess.Popen(['dot', '-V'], # pylint: disable=invalid-name
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_PROC.communicate()
if _PROC.returncode != 0:
has_graphviz = False
else:
has_graphviz = True
except Exception: # pylint: disable=broad-except
# this is raised when the dot command cannot be found, which means GraphViz
# isn't installed
has_graphviz = False
HAS_GRAPHVIZ = has_graphviz # pylint: disable=invalid-name
try:
import pydot
if not HAS_GRAPHVIZ:
raise ImportError
except ImportError:
raise ImportError("pass_manager_drawer requires pydot and graphviz. "
"Run 'pip install pydot'. "
"Graphviz can be installed using 'brew install graphviz' on Mac"
" or by downloading it from the website.")
passes = pass_manager.passes()
if not style:
style = DEFAULT_STYLE
# create the overall graph
graph = pydot.Dot()
# identifiers for nodes need to be unique, so assign an id
# can't just use python's id in case the exact same pass was
# appended more than once
component_id = 0
prev_node = None
for index, controller_group in enumerate(passes):
# label is the name the flow controller parameter
label = "[%s] %s" % (index, ', '.join(controller_group['flow_controllers']))
# create the subgraph for this controller
subgraph = pydot.Cluster(str(component_id), label=label, fontname='helvetica')
component_id += 1
for pass_ in controller_group['passes']:
# label is the name of the pass
node = pydot.Node(str(component_id),
label=str(type(pass_).__name__),
color=_get_node_color(pass_, style),
shape="rectangle",
fontname='helvetica')
subgraph.add_node(node)
component_id += 1
# the arguments that were provided to the pass when it was created
arg_spec = inspect.getfullargspec(pass_.__init__)
# 0 is the args, 1: to remove the self arg
args = arg_spec[0][1:]
num_optional = len(arg_spec[3]) if arg_spec[3] else 0
# add in the inputs to the pass
for arg_index, arg in enumerate(args):
nd_style = 'solid'
# any optional args are dashed
# the num of optional counts from the end towards the start of the list
if arg_index >= (len(args) - num_optional):
nd_style = 'dashed'
input_node = pydot.Node(component_id, label=arg,
color="black",
shape="ellipse",
fontsize=10,
style=nd_style,
fontname='helvetica')
subgraph.add_node(input_node)
component_id += 1
subgraph.add_edge(pydot.Edge(input_node, node))
# if there is a previous node, add an edge between them
if prev_node:
subgraph.add_edge(pydot.Edge(prev_node, node))
prev_node = node
graph.add_subgraph(subgraph)
if raw:
if filename:
graph.write(filename, format='raw')
return None
else:
raise VisualizationError("if format=raw, then a filename is required.")
if not HAS_PIL and filename:
# linter says this isn't a method - it is
graph.write_png(filename) # pylint: disable=no-member
return None
with tempfile.TemporaryDirectory() as tmpdirname:
tmppath = os.path.join(tmpdirname, 'pass_manager.png')
# linter says this isn't a method - it is
graph.write_png(tmppath) # pylint: disable=no-member
image = Image.open(tmppath)
image = utils._trim(image)
os.remove(tmppath)
if filename:
image.save(filename, 'PNG')
return image
| def pass_manager_drawer(pass_manager, filename, style=None, raw=False):
"""
Draws the pass manager.
This function needs `pydot <https://github.com/erocarrera/pydot>`, which in turn needs
`Graphviz <https://www.graphviz.org/>` to be installed.
Args:
pass_manager (PassManager): the pass manager to be drawn
filename (str): file path to save image to
style (dict or OrderedDict): keys are the pass classes and the values are
the colors to make them. An example can be seen in the DEFAULT_STYLE. An ordered
dict can be used to ensure a priority coloring when pass falls into multiple
categories. Any values not included in the provided dict will be filled in from
the default dict
raw (Bool) : True if you want to save the raw Dot output not an image. The
default is False.
Returns:
PIL.Image or None: an in-memory representation of the pass manager. Or None if
no image was generated or PIL is not installed.
Raises:
ImportError: when nxpd or pydot not installed.
VisualizationError: If raw=True and filename=None.
"""
try:
import subprocess
_PROC = subprocess.Popen(['dot', '-V'], # pylint: disable=invalid-name
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
_PROC.communicate()
if _PROC.returncode != 0:
has_graphviz = False
else:
has_graphviz = True
except Exception: # pylint: disable=broad-except
# this is raised when the dot command cannot be found, which means GraphViz
# isn't installed
has_graphviz = False
HAS_GRAPHVIZ = has_graphviz # pylint: disable=invalid-name
try:
import pydot
if not HAS_GRAPHVIZ:
raise ImportError
except ImportError:
raise ImportError("pass_manager_drawer requires pydot and graphviz. "
"Run 'pip install pydot'. "
"Graphviz can be installed using 'brew install graphviz' on Mac"
" or by downloading it from the website.")
passes = pass_manager.passes()
if not style:
style = DEFAULT_STYLE
# create the overall graph
graph = pydot.Dot()
# identifiers for nodes need to be unique, so assign an id
# can't just use python's id in case the exact same pass was
# appended more than once
component_id = 0
prev_node = None
for index, controller_group in enumerate(passes):
# label is the name of the flow controller parameter
label = "[%s] %s" % (index, ', '.join(controller_group['flow_controllers']))
# create the subgraph for this controller
subgraph = pydot.Cluster(str(component_id), label=label, fontname='helvetica')
component_id += 1
for pass_ in controller_group['passes']:
# label is the name of the pass
node = pydot.Node(str(component_id),
label=str(type(pass_).__name__),
color=_get_node_color(pass_, style),
shape="rectangle",
fontname='helvetica')
subgraph.add_node(node)
component_id += 1
# the arguments that were provided to the pass when it was created
arg_spec = inspect.getfullargspec(pass_.__init__)
# 0 is the args, 1: to remove the self arg
args = arg_spec[0][1:]
num_optional = len(arg_spec[3]) if arg_spec[3] else 0
# add in the inputs to the pass
for arg_index, arg in enumerate(args):
nd_style = 'solid'
# any optional args are dashed
# the num of optional counts from the end towards the start of the list
if arg_index >= (len(args) - num_optional):
nd_style = 'dashed'
input_node = pydot.Node(component_id, label=arg,
color="black",
shape="ellipse",
fontsize=10,
style=nd_style,
fontname='helvetica')
subgraph.add_node(input_node)
component_id += 1
subgraph.add_edge(pydot.Edge(input_node, node))
# if there is a previous node, add an edge between them
if prev_node:
subgraph.add_edge(pydot.Edge(prev_node, node))
prev_node = node
graph.add_subgraph(subgraph)
if raw:
if filename:
graph.write(filename, format='raw')
return None
else:
raise VisualizationError("if format=raw, then a filename is required.")
if not HAS_PIL and filename:
# linter says this isn't a method - it is
graph.write_png(filename) # pylint: disable=no-member
return None
with tempfile.TemporaryDirectory() as tmpdirname:
tmppath = os.path.join(tmpdirname, 'pass_manager.png')
# linter says this isn't a method - it is
graph.write_png(tmppath) # pylint: disable=no-member
image = Image.open(tmppath)
image = utils._trim(image)
os.remove(tmppath)
if filename:
image.save(filename, 'PNG')
return image
|
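A hedged, toy-sized sketch of the pydot pattern used in the drawer above: a cluster per stage, rectangle nodes for passes, an edge between them, and a PNG written at the end. It assumes pydot is installed and the Graphviz `dot` binary is on PATH; the node labels are invented for illustration.

import pydot

graph = pydot.Dot()
cluster = pydot.Cluster("0", label="[0] FlowLinear", fontname="helvetica")
node_a = pydot.Node("1", label="Unroller", shape="rectangle", fontname="helvetica")
node_b = pydot.Node("2", label="Optimize1qGates", shape="rectangle", fontname="helvetica")
cluster.add_node(node_a)
cluster.add_node(node_b)
cluster.add_edge(pydot.Edge(node_a, node_b))
graph.add_subgraph(cluster)
graph.write_png("toy_pass_manager.png")  # fails if Graphviz is not installed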
22,304 | def condor_submit(submit_file):
"""
Submit a condor job described by the given file. Parse an external id for
the submission or return None and a reason for the failure.
"""
external_id = None
try:
submit = Popen(('condor_submit', submit_file), stdout=PIPE, stderr=STDOUT)
message, _ = submit.communicate()
message=unicodify(message)
if submit.returncode == 0:
external_id = parse_external_id(message, type='condor')
else:
message = PROBLEM_PARSING_EXTERNAL_ID
except Exception as e:
message = unicodify(e)
return external_id, message
| def condor_submit(submit_file):
"""
Submit a condor job described by the given file. Parse an external id for
the submission or return None and a reason for the failure.
"""
external_id = None
try:
submit = Popen(('condor_submit', submit_file), stdout=PIPE, stderr=STDOUT)
message, _ = submit.communicate()
message = unicodify(message)
if submit.returncode == 0:
external_id = parse_external_id(message, type='condor')
else:
message = PROBLEM_PARSING_EXTERNAL_ID
except Exception as e:
message = unicodify(e)
return external_id, message
|
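A small stand-alone sketch of the Popen pattern above: capture stdout and stderr together, decode the combined output, and branch on the return code. `echo` stands in for `condor_submit` here, so this assumes a Unix-like system.

from subprocess import Popen, PIPE, STDOUT

proc = Popen(("echo", "1 job(s) submitted to cluster 42."), stdout=PIPE, stderr=STDOUT)
message, _ = proc.communicate()
message = message.decode("utf-8", errors="replace")
if proc.returncode == 0:
    print("submission output:", message.strip())
else:
    print("submission failed:", message.strip())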
51,479 | def _get_broadcast_dims_map_common_coords(args, exclude):
common_coords = {}
dims_map = {}
for arg in args:
for dim in arg.dims:
if dim not in common_coords and dim not in exclude:
dims_map[dim] = arg.sizes[dim]
if dim in arg.xindexes:
common_coords.update(arg.xindexes[dim].create_variables())
return dims_map, common_coords
| def _get_broadcast_dims_map_common_coords(args, exclude):
common_coords = {}
dims_map = {}
for arg in args:
for dim in arg.dims:
if dim not in common_coords and dim not in exclude:
dims_map[dim] = arg.sizes[dim]
if dim in arg._indexes:
common_coords.update(arg.xindexes.get_all_coords(dim))
return dims_map, common_coords
|
43,836 | def track(dev, version="default", **kwargs):
r"""Creates a tracking context and applies it to a device.
Args:
dev (~.Device): a PennyLane-compatible device
version (str): name of tracker to use. The current options are
`default` and `timing`.
Keyword Args:
reset_on_enter=True (bool): whether or not to reset information
entering the context
**Usage Information**
Note that with backpropagation, this function should take ``qnode.device``
instead of the device used to create the QNode.
.. code-block:: python
dev = qml.device('default.qubit', wires=1)
@qml.qnode(dev, diff_method="parameter-shift")
def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
With the default version, total execution information is printed on
each device execution. The printed data depends on the device and tracker version,
but for standard PennyLane devices, the object will track executions and shots.
>>> with qml.track(circuit.device) as tracker:
... qml.grad(circuit)(0.1, shots=10)
Total: executions = 1 shots = 10
Total: executions = 2 shots = 20
Total: executions = 3 shots = 30
With the ``'timing'`` implementation, the instance also tracks the time
between entering the context and the completion of an execution.
>>> with qml.track(circuit.device, version='timing') as timing_tracker:
... circuit(0.1)
... circuit(0.2)
Total: executions = 1 time = 0.0011134147644042969
Total: executions = 2 time = 0.0027322769165039062
After completion, one can also access the recorded information:
>>> timing_tracker.totals
defaultdict(int, {'executions': 2, 'shots': 30, 'time': 0.00311279296875})
>>> timing_tracker.history
defaultdict(list,
{'executions': [1, 1],
'shots': [None, None],
'time': [0.0012764930725097656, 0.0018362998962402344]})
By specifying ``reset_on_enter=False``, you can reuse the same tracker across
multiple runtime contexts.
>>> with qml.track(circuit.device, reset_on_enter=False) as tracker:
... circuit(0.1)
Total: executions = 1
>>> with tracker:
... circuit(0.2)
Total: executions = 2
"""
if version == "timing":
return TimingTracker(dev, **kwargs)
elif version == "default":
return DefaultTracker(dev, **kwargs)
else:
raise qml.QuantumFunctionError(
f"version {version} supplied to track. " f"Current options are `timing` and `default`."
)
| def track(dev, version="default", **kwargs):
r"""Creates a tracking context and applies it to a device.
Args:
dev (~.Device): a PennyLane-compatible device
version (str): name of tracker to use. The current options are
`default` and `timing`.
Keyword Args:
reset_on_enter=True (bool): whether or not to reset information
entering the context
**Usage Information**
Note that with backpropagation, this function should take ``qnode.device``
instead of the device used to create the QNode.
.. code-block:: python
dev = qml.device('default.qubit', wires=1)
@qml.qnode(dev, diff_method="parameter-shift")
def circuit(x):
qml.RX(x, wires=0)
return qml.expval(qml.PauliZ(0))
With ``version="default"``, execution information is printed on
each device execution. The printed data depends on the device and tracker version,
but for standard PennyLane devices, the object will track executions and shots.
>>> with qml.track(circuit.device) as tracker:
... qml.grad(circuit)(0.1, shots=10)
Total: executions = 1 shots = 10
Total: executions = 2 shots = 20
Total: executions = 3 shots = 30
With the ``'timing'`` implementation, the instance also tracks the time
between entering the context and the completion of an execution.
>>> with qml.track(circuit.device, version='timing') as timing_tracker:
... circuit(0.1)
... circuit(0.2)
Total: executions = 1 time = 0.0011134147644042969
Total: executions = 2 time = 0.0027322769165039062
After completion, one can also access the recorded information:
>>> timing_tracker.totals
defaultdict(int, {'executions': 2, 'shots': 30, 'time': 0.00311279296875})
>>> timing_tracker.history
defaultdict(list,
{'executions': [1, 1],
'shots': [None, None],
'time': [0.0012764930725097656, 0.0018362998962402344]})
By specifying ``reset_on_enter=False``, you can reuse the same tracker across
multiple runtime contexts.
>>> with qml.track(circuit.device, reset_on_enter=False) as tracker:
... circuit(0.1)
Total: executions = 1
>>> with tracker:
... circuit(0.2)
Total: executions = 2
"""
if version == "timing":
return TimingTracker(dev, **kwargs)
elif version == "default":
return DefaultTracker(dev, **kwargs)
else:
raise qml.QuantumFunctionError(
f"version {version} supplied to track. " f"Current options are `timing` and `default`."
)
|
11,622 | def install_client(master, client, extra_args=(), user=None,
password=None, unattended=True, stdin_text=None):
client.collect_log(paths.IPACLIENT_INSTALL_LOG)
apply_common_fixes(client)
allow_sync_ptr(master)
# Now, for the situations where a client resides in a different subnet from
# master, we need to explicitly tell master to create a reverse zone for
# the client and enable dynamic updates for this zone.
zone, error = prepare_reverse_zone(master, client.ip)
if not error:
master.run_command(["ipa", "dnszone-mod", zone,
"--dynamic-update=TRUE"])
if user is None:
user = client.config.admin_name
if password is None:
password = client.config.admin_password
args = [
'ipa-client-install',
'--domain', client.domain.name,
'--realm', client.domain.realm,
'-p', user,
'-w', password,
'--server', master.hostname
]
if unattended:
args.append('-U')
result = client.run_command(args + list(extra_args), stdin_text=stdin_text)
setup_sssd_debugging(client)
kinit_admin(client)
return result
| def install_client(master, client, extra_args=(), user=None,
password=None, unattended=True, stdin_text=None):
client.collect_log(paths.IPACLIENT_INSTALL_LOG)
apply_common_fixes(client)
allow_sync_ptr(master)
# Now, for the situations where a client resides in a different subnet from
# master, we need to explicitly tell master to create a reverse zone for
# the client and enable dynamic updates for this zone.
zone, error = prepare_reverse_zone(master, client.ip)
if not error:
master.run_command(["ipa", "dnszone-mod", zone,
"--dynamic-update=TRUE"])
if user is None:
user = client.config.admin_name
if password is None:
password = client.config.admin_password
args = [
'ipa-client-install',
'--domain', client.domain.name,
'--realm', client.domain.realm,
'-p', user,
'-w', password,
'--server', master.hostname
]
if unattended:
args.append('-U')
result = client.run_command(args + list(extra_args), stdin_text=stdin_text)
setup_sssd_debugging(client)
kinit_admin(client)
return result
|
23,033 | def test_check_meta_flag():
from pandas import DataFrame, Series
from dask.delayed import delayed
from dask.dataframe import from_delayed
a = Series(['a', 'b', 'a'], dtype='category')
b = Series(['a', 'c', 'a'], dtype='category')
da = delayed(lambda x: x)(a)
db = delayed(lambda x: x)(b)
c = from_delayed([da, db])
c.compute()
| def test_check_meta_flag():
from pandas import DataFrame, Series
from dask.delayed import delayed
from dask.dataframe import from_delayed
a = Series(['a', 'b', 'a'], dtype='category')
b = Series(['a', 'c', 'a'], dtype='category')
da = delayed(lambda x: x)(a)
db = delayed(lambda x: x)(b)
c = from_delayed([da, db])
assert_eq(c, c)
|
31,158 | def get_markdown(object_type: str, objects=None):
"""
Getting markdown for object type to display the results in human readable format.
:type object_type: ``str``
:param object_type: Type of IdentityIQ object.
:type objects: ``JSON``
:param objects: Single or list of Identity resources object/s.
:return: Markdown for each object type.
"""
markdown = ''
if object_type == 'IdentityIQ.Identity':
headers = ['id', 'userName', 'displayName', 'name', 'emails', 'sailpointUser', 'extendedUser', 'entitlements',
'roles', 'capabilities', 'active']
markdown = tableToMarkdown('Identity', objects, headers=headers)
elif object_type == 'IdentityIQ.PolicyViolation':
headers = ['id', 'policyName', 'constraintName', 'status', 'description', 'identity', 'owner']
markdown = tableToMarkdown('PolicyViolation', objects, headers=headers)
elif object_type == 'IdentityIQ.TaskResult':
headers = ['id', 'name', 'type', 'host', 'progress', 'completionStatus', 'launched', 'taskDefinition',
'pendingSignoffs', 'launcher', 'completed', 'taskSchedule', 'partitioned', 'terminated', 'messages',
'attributes']
markdown = tableToMarkdown('TaskResult', objects, headers=headers)
elif object_type == 'IdentityIQ.Account':
headers = ['id', 'displayName', 'identity', 'hasEntitlements', 'application', 'nativeIdentity', 'active',
'lastRefresh', 'manuallyCorrelated', 'application', 'locked']
markdown = tableToMarkdown('Account', objects, headers=headers)
elif object_type == 'IdentityIQ.Workflow':
headers = ['id', 'name', 'workflowName', 'identityRequestId', 'workflowCaseId', 'launched', 'targetClass',
'targetName', 'type', 'completionStatus', 'launcher', 'terminated', 'attributes', 'partitioned',
'completed', 'pendingSignoffs', 'taskDefinition', 'launchedWorkflow']
markdown = tableToMarkdown('Workflow', objects, headers=headers)
elif object_type == 'IdentityIQ.Role':
headers = ['id', 'name', 'owner', 'active', 'displayableName', 'permits', 'type', 'descriptions',
'requirements']
markdown = tableToMarkdown('Role', objects, headers=headers)
elif object_type == 'IdentityIQ.Entitlement':
headers = ['id', 'displayableName', 'type', 'attribute', 'value', 'owner', 'application', 'descriptions',
'requestable', 'aggregated', 'created']
markdown = tableToMarkdown('Entitlement', objects, headers=headers)
elif object_type == 'IdentityIQ.Alert':
headers = ['id', 'name', 'displayName', 'type', 'targetId', 'targetDisplayName', 'targetType', 'alertInput',
'actions', 'application', 'attributes', 'lastProcessed']
markdown = tableToMarkdown('Alert', objects, headers=headers)
return markdown
| def get_markdown(object_type: str, objects=None):
"""
Getting markdown for object type to display the results in human readable format.
:type object_type: ``str``
:param object_type: Type of IdentityIQ object.
:type objects: ``dict`` or ``list``
:param objects: Single or list of Identity resources object/s.
:return: Markdown for each object type.
"""
markdown = ''
if object_type == 'IdentityIQ.Identity':
headers = ['id', 'userName', 'displayName', 'name', 'emails', 'sailpointUser', 'extendedUser', 'entitlements',
'roles', 'capabilities', 'active']
markdown = tableToMarkdown('Identity', objects, headers=headers)
elif object_type == 'IdentityIQ.PolicyViolation':
headers = ['id', 'policyName', 'constraintName', 'status', 'description', 'identity', 'owner']
markdown = tableToMarkdown('PolicyViolation', objects, headers=headers)
elif object_type == 'IdentityIQ.TaskResult':
headers = ['id', 'name', 'type', 'host', 'progress', 'completionStatus', 'launched', 'taskDefinition',
'pendingSignoffs', 'launcher', 'completed', 'taskSchedule', 'partitioned', 'terminated', 'messages',
'attributes']
markdown = tableToMarkdown('TaskResult', objects, headers=headers)
elif object_type == 'IdentityIQ.Account':
headers = ['id', 'displayName', 'identity', 'hasEntitlements', 'application', 'nativeIdentity', 'active',
'lastRefresh', 'manuallyCorrelated', 'application', 'locked']
markdown = tableToMarkdown('Account', objects, headers=headers)
elif object_type == 'IdentityIQ.Workflow':
headers = ['id', 'name', 'workflowName', 'identityRequestId', 'workflowCaseId', 'launched', 'targetClass',
'targetName', 'type', 'completionStatus', 'launcher', 'terminated', 'attributes', 'partitioned',
'completed', 'pendingSignoffs', 'taskDefinition', 'launchedWorkflow']
markdown = tableToMarkdown('Workflow', objects, headers=headers)
elif object_type == 'IdentityIQ.Role':
headers = ['id', 'name', 'owner', 'active', 'displayableName', 'permits', 'type', 'descriptions',
'requirements']
markdown = tableToMarkdown('Role', objects, headers=headers)
elif object_type == 'IdentityIQ.Entitlement':
headers = ['id', 'displayableName', 'type', 'attribute', 'value', 'owner', 'application', 'descriptions',
'requestable', 'aggregated', 'created']
markdown = tableToMarkdown('Entitlement', objects, headers=headers)
elif object_type == 'IdentityIQ.Alert':
headers = ['id', 'name', 'displayName', 'type', 'targetId', 'targetDisplayName', 'targetType', 'alertInput',
'actions', 'application', 'attributes', 'lastProcessed']
markdown = tableToMarkdown('Alert', objects, headers=headers)
return markdown
|
34,430 | def is_conversation_test_file(file_path: Text) -> bool:
"""Checks if a file is a Rasa conversation test file.
Args:
file_path: Path of the file which should be checked.
Returns:
`True` if it's a conversation test file, otherwise `False`.
"""
if not file_path.endswith(".md"):
return False
try:
dirname = os.path.dirname(file_path)
return is_story_file(file_path) and DEFAULT_E2E_TESTS_PATH in dirname
except Exception as e:
# catch-all because we might be loading files we are not expecting to load
logger.error(
f"Tried to check if '{file_path}' is a conversation test file, but failed "
f"to read it. If this file contains conversation test data, you should "
f"investigate this error, otherwise it is probably best to "
f"move the file to a different location. "
f"Error: {e}"
)
return False
| def is_conversation_test_file(file_path: Text) -> bool:
"""Checks if a file is an end-to-end conversation test file.
Args:
file_path: Path of the file which should be checked.
Returns:
`True` if it's a conversation test file, otherwise `False`.
"""
if not file_path.endswith(".md"):
return False
try:
dirname = os.path.dirname(file_path)
return is_story_file(file_path) and DEFAULT_E2E_TESTS_PATH in dirname
except Exception as e:
# catch-all because we might be loading files we are not expecting to load
logger.error(
f"Tried to check if '{file_path}' is a conversation test file, but failed "
f"to read it. If this file contains conversation test data, you should "
f"investigate this error, otherwise it is probably best to "
f"move the file to a different location. "
f"Error: {e}"
)
return False
|
12,357 | def setup_swapfile(fname, size=None, maxsize=None):
"""
fname: full path string of filename to setup
size: the size to create. set to "auto" for recommended
maxsize: the maximum size
"""
tdir = os.path.dirname(fname)
if str(size).lower() == "auto":
try:
memsize = util.read_meminfo()['total']
except IOError:
LOG.debug("Not creating swap: failed to read meminfo")
return
util.ensure_dir(tdir)
size = suggested_swapsize(fsys=tdir, maxsize=maxsize,
memsize=memsize)
if not size:
LOG.debug("Not creating swap: suggested size was 0")
return
util.log_time(LOG.debug, msg="Setting up swap file", func=setup_swapfile,
args=[fname, int(size / (2 ** 20))])
return fname
| def setup_swapfile(fname, size=None, maxsize=None):
"""
fname: full path string of filename to setup
size: the size to create. set to "auto" for recommended
maxsize: the maximum size
"""
tdir = os.path.dirname(fname)
if str(size).lower() == "auto":
try:
memsize = util.read_meminfo()['total']
except IOError:
LOG.debug("Not creating swap: failed to read meminfo")
return
util.ensure_dir(tdir)
size = suggested_swapsize(fsys=tdir, maxsize=maxsize,
memsize=memsize)
if not size:
LOG.debug("Not creating swap: suggested size was 0")
return
util.log_time(LOG.debug, msg="Setting up swap file", func=create_swapfile,
args=[fname, int(size / (2 ** 20))])
return fname
|
4,259 | def _compute_tfr(epoch_data, freqs, sfreq=1.0, method='morlet',
n_cycles=7.0, zero_mean=None, time_bandwidth=None,
use_fft=True, decim=1, output='complex', n_jobs=1,
verbose=None):
"""Compute time-frequency transforms.
Parameters
----------
epoch_data : array of shape (n_epochs, n_channels, n_times)
The epochs.
freqs : array-like of floats, shape (n_freqs)
The frequencies.
sfreq : float | int, default 1.0
Sampling frequency of the data.
method : 'multitaper' | 'morlet', default 'morlet'
The time-frequency method. 'morlet' convolves a Morlet wavelet.
'multitaper' uses complex exponentials windowed with multiple DPSS
tapers.
n_cycles : float | array of float, default 7.0
Number of cycles in the wavelet. Fixed number
or one per frequency.
zero_mean : bool | None, default None
None means True for method='multitaper' and False for method='morlet'.
If True, make sure the wavelets have a mean of zero.
time_bandwidth : float, default None
If None and method=multitaper, will be set to 4.0 (3 tapers).
Time x (Full) Bandwidth product. Only applies if
method == 'multitaper'. The number of good tapers (low-bias) is
chosen automatically based on this to equal floor(time_bandwidth - 1).
use_fft : bool, default True
Use the FFT for convolutions or not.
decim : int | slice, default 1
To reduce memory usage, decimation factor after time-frequency
decomposition.
If `int`, returns tfr[..., ::decim].
If `slice`, returns tfr[..., decim].
.. note::
Decimation may create aliasing artifacts, yet decimation
is done after the convolutions.
output : str, default 'complex'
* 'complex' : single trial complex.
* 'power' : single trial power.
* 'phase' : single trial phase.
* 'avg_power' : average of single trial power.
* 'itc' : inter-trial coherence.
* 'avg_power_itc' : average of single trial power and inter-trial
coherence across trials.
%(n_jobs)s
The number of epochs to process at the same time. The parallelization
is implemented across channels.
%(verbose)s
Returns
-------
out : array
Time frequency transform of epoch_data. If output is in ['complex',
'phase', 'power'], then shape of out is (n_epochs, n_chans, n_freqs,
n_times), else it is (n_chans, n_freqs, n_times). If output is
'avg_power_itc', the real values code for 'avg_power' and the
imaginary values code for the 'itc': out = avg_power + i * itc
"""
# Check data
epoch_data = np.asarray(epoch_data)
if epoch_data.ndim != 3:
raise ValueError('epoch_data must be of shape (n_epochs, n_chans, '
'n_times), got %s' % (epoch_data.shape,))
# Check params
freqs, sfreq, zero_mean, n_cycles, time_bandwidth, decim = \
_check_tfr_param(freqs, sfreq, method, zero_mean, n_cycles,
time_bandwidth, use_fft, decim, output)
decim = _check_decim(decim)
if (freqs > sfreq / 2.).any():
raise ValueError('Cannot compute freq above Nyquist freq of the data '
'(%0.1f Hz), got %0.1f Hz'
% (sfreq / 2., freqs.max()))
# We decimate *after* decomposition, so we need to create our kernels
# for the original sfreq
if method == 'morlet':
W = morlet(sfreq, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
Ws = [W] # to have same dimensionality as the 'multitaper' case
elif method == 'multitaper':
Ws = _make_dpss(sfreq, freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, zero_mean=zero_mean)
# Check wavelets
if len(Ws[0][0]) > epoch_data.shape[2]:
raise ValueError('At least one of the wavelets is longer than the '
'signal. Use a longer signal or shorter wavelets.')
# Initialize output
n_freqs = len(freqs)
n_epochs, n_chans, n_times = epoch_data[:, :, decim].shape
if output in ('power', 'phase', 'avg_power', 'itc'):
dtype = np.float64
elif output in ('complex', 'avg_power_itc'):
# avg_power_itc is stored as power + 1i * itc to keep a
# simple dimensionality
dtype = np.complex128
if ('avg_' in output) or ('itc' in output):
out = np.empty((n_chans, n_freqs, n_times), dtype)
else:
out = np.empty((n_chans, n_epochs, n_freqs, n_times), dtype)
# Parallel computation
parallel, my_cwt, _ = parallel_func(_time_frequency_loop, n_jobs)
# Parallelization is applied across channels.
tfrs = parallel(
my_cwt(channel, Ws, output, use_fft, 'same', decim)
for channel in epoch_data.transpose(1, 0, 2))
# FIXME: to avoid overheads we should use np.array_split()
for channel_idx, tfr in enumerate(tfrs):
out[channel_idx] = tfr
if ('avg_' not in output) and ('itc' not in output):
# This is to enforce that the first dimension is for epochs
out = out.transpose(1, 0, 2, 3)
return out
| def _compute_tfr(epoch_data, freqs, sfreq=1.0, method='morlet',
n_cycles=7.0, zero_mean=None, time_bandwidth=None,
use_fft=True, decim=1, output='complex', n_jobs=1,
verbose=None):
"""Compute time-frequency transforms.
Parameters
----------
epoch_data : array of shape (n_epochs, n_channels, n_times)
The epochs.
freqs : array-like of floats, shape (n_freqs)
The frequencies.
sfreq : float | int, default 1.0
Sampling frequency of the data.
method : 'multitaper' | 'morlet', default 'morlet'
The time-frequency method. 'morlet' convolves a Morlet wavelet.
'multitaper' uses complex exponentials windowed with multiple DPSS
tapers.
n_cycles : float | array of float, default 7.0
Number of cycles in the wavelet. Fixed number
or one per frequency.
zero_mean : bool | None, default None
None means True for method='multitaper' and False for method='morlet'.
If True, make sure the wavelets have a mean of zero.
time_bandwidth : float, default None
If None and method=multitaper, will be set to 4.0 (3 tapers).
Time x (Full) Bandwidth product. Only applies if
method == 'multitaper'. The number of good tapers (low-bias) is
chosen automatically based on this to equal floor(time_bandwidth - 1).
use_fft : bool, default True
Use the FFT for convolutions or not.
decim : int | slice, default 1
To reduce memory usage, decimation factor after time-frequency
decomposition.
If `int`, returns tfr[..., ::decim].
If `slice`, returns tfr[..., decim].
.. note::
Decimation may create aliasing artifacts, yet decimation
is done after the convolutions.
output : str, default 'complex'
* 'complex' : single trial complex.
* 'power' : single trial power.
* 'phase' : single trial phase.
* 'avg_power' : average of single trial power.
* 'itc' : inter-trial coherence.
* 'avg_power_itc' : average of single trial power and inter-trial
coherence across trials.
%(n_jobs)s
The number of epochs to process at the same time. The parallelization
is implemented across channels.
%(verbose)s
Returns
-------
out : array
Time frequency transform of epoch_data. If output is in ['complex',
'phase', 'power'], then shape of out is (n_epochs, n_chans, n_freqs,
n_times), else it is (n_chans, n_freqs, n_times). If output is
'avg_power_itc', the real values code for 'avg_power' and the
imaginary values code for the 'itc': out = avg_power + i * itc
"""
# Check data
epoch_data = np.asarray(epoch_data)
if epoch_data.ndim != 3:
raise ValueError('epoch_data must be of shape (n_epochs, n_chans, '
'n_times), got %s' % (epoch_data.shape,))
# Check params
freqs, sfreq, zero_mean, n_cycles, time_bandwidth, decim = \
_check_tfr_param(freqs, sfreq, method, zero_mean, n_cycles,
time_bandwidth, use_fft, decim, output)
decim = _check_decim(decim)
if (freqs > sfreq / 2.).any():
raise ValueError('Cannot compute freq above Nyquist freq of the data '
'(%0.1f Hz), got %0.1f Hz'
% (sfreq / 2., freqs.max()))
# We decimate *after* decomposition, so we need to create our kernels
# for the original sfreq
if method == 'morlet':
W = morlet(sfreq, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
Ws = [W] # to have same dimensionality as the 'multitaper' case
elif method == 'multitaper':
Ws = _make_dpss(sfreq, freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, zero_mean=zero_mean)
# Check wavelets
if len(Ws[0][0]) > epoch_data.shape[2]:
raise ValueError('At least one of the wavelets is longer than the '
'signal. Use a longer signal or shorter wavelets.')
# Initialize output
n_freqs = len(freqs)
n_epochs, n_chans, n_times = epoch_data[:, :, decim].shape
if output in ('power', 'phase', 'avg_power', 'itc'):
dtype = np.float64
elif output in ('complex', 'avg_power_itc'):
# avg_power_itc is stored as power + 1i * itc to keep a
# simple dimensionality
dtype = np.complex128
if ('avg_' in output) or ('itc' in output):
out = np.empty((n_chans, n_freqs, n_times), dtype)
else:
out = np.empty((n_chans, n_epochs, n_freqs, n_times), dtype)
# Parallel computation
parallel, my_cwt, _ = parallel_func(_time_frequency_loop, n_jobs)
# Parallelization is applied across channels.
tfrs = parallel(
my_cwt(channel, Ws, output, use_fft, 'same', decim)
for channel in epoch_data.transpose(1, 0, 2))
# FIXME: to avoid overheads we should use np.array_split()
for channel_idx, tfr in enumerate(tfrs):
out[channel_idx] = tfr
if ('avg_' not in output) and ('itc' not in output):
# This is to enforce that the first dimension is for epochs
out = out.transpose(1, 0, 2, 3)
return out
|
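A tiny NumPy illustration of the ``decim`` semantics described in the docstring above: an integer keeps every decim-th time sample, while a slice is applied directly to the last axis. The array shape here is arbitrary.

import numpy as np

tfr = np.arange(2 * 3 * 10).reshape(2, 3, 10)  # (n_chans, n_freqs, n_times)
print(tfr[..., ::4].shape)          # int decim=4  -> (2, 3, 3)
print(tfr[..., slice(2, 8)].shape)  # slice decim  -> (2, 3, 6)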
34,550 | def run_core_test(args: argparse.Namespace) -> None:
"""Run core tests."""
from rasa.test import test_core_models_in_directory, test_core, test_core_models
stories = rasa.cli.utils.get_validated_path(
args.stories, "stories", DEFAULT_DATA_PATH
)
if args.e2e:
stories = rasa.shared.data.get_test_directory(stories)
else:
stories = rasa.shared.data.get_core_directory(stories)
output = args.out or DEFAULT_RESULTS_PATH
args.errors = not args.no_errors
rasa.shared.utils.io.create_directory(output)
if isinstance(args.model, list) and len(args.model) == 1:
args.model = args.model[0]
if isinstance(args.model, str):
model_path = rasa.cli.utils.get_validated_path(
args.model, "model", DEFAULT_MODELS_PATH
)
if args.evaluate_model_directory:
test_core_models_in_directory(args.model, stories, output)
else:
test_core(
model=model_path,
stories=stories,
output=output,
additional_arguments=vars(args),
)
else:
test_core_models(args.model, stories, output)
print_info("Failed stories written to {}/{}".format(output, FAILED_STORIES_FILE))
| def run_core_test(args: argparse.Namespace) -> None:
"""Run core tests."""
from rasa.test import test_core_models_in_directory, test_core, test_core_models
stories = rasa.cli.utils.get_validated_path(
args.stories, "stories", DEFAULT_DATA_PATH
)
if args.e2e:
stories = rasa.shared.data.get_test_directory(stories)
else:
stories = rasa.shared.data.get_core_directory(stories)
output = args.out or DEFAULT_RESULTS_PATH
args.errors = not args.no_errors
rasa.shared.utils.io.create_directory(output)
if isinstance(args.model, list) and len(args.model) == 1:
args.model = args.model[0]
if isinstance(args.model, str):
model_path = rasa.cli.utils.get_validated_path(
args.model, "model", DEFAULT_MODELS_PATH
)
if args.evaluate_model_directory:
test_core_models_in_directory(args.model, stories, output)
else:
test_core(
model=model_path,
stories=stories,
output=output,
additional_arguments=vars(args),
)
else:
test_core_models(args.model, stories, output)
rasa.shared.utils.cli.print_info(f"Failed stories written to '{output}/{FAILED_STORIES_FILE}'")
|
52,742 | def _extract_substr(ntlm_msg, offset, ln):
"""
Extract the string at the given offset and of the given length from an
NTLM message
"""
s, = struct.unpack('{}s'.format(ln), ntlm_msg[offset:offset + ln])
try:
res = s.decode('utf-16').encode()
if len(res) == ln // 2:
return res
LOGGER.warning("Size too small %r", s)
return None
except UnicodeDecodeError:
LOGGER.warning("Cannot decode %r", s)
return None
| def _extract_substr(ntlm_msg, offset, ln):
"""
Extract the string at the given offset and of the given length from an
NTLM message
"""
s = ntlm_msg[offset:offset + ln]
if len(s) < ln:
LOGGER.warning("Data too small at offset %d [%r, size %d]",
offset, ntlm_msg, ln)
return None
try:
return s.decode('utf-16').encode()
except UnicodeDecodeError:
LOGGER.warning("Cannot decode %r", s)
return None
|
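A minimal illustration of the slicing and decoding done above: pull `ln` bytes at `offset` out of a byte string, check the length, and decode them as UTF-16 (little-endian here). The message bytes are fabricated for the example.

msg = b"\x00\x00" + "DOMAIN".encode("utf-16-le") + b"\x00\x00"
offset, ln = 2, len("DOMAIN") * 2

chunk = msg[offset:offset + ln]
if len(chunk) < ln:
    print("data too small")
else:
    print(chunk.decode("utf-16-le"))  # -> DOMAIN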
45,139 | def list_feature_flags(
batch_size: int = 10, client: FeatureFlagClient = None
) -> List[FeatureFlag]:
"""
List all feature flags.
This function always returns an empty list if the setting
PREFECT_CLOUD_ENABLE_FEATURE_FLAGGING is false.
Args:
batch_size: batch size of flags to retrieve at a time
client: The FeatureFlagClient instance to use. Defaults to a client
configured to look at an in-memory feature store.
Returns:
List[FeatureFlag]: list of all feature flags in the store
"""
if not settings.PREFECT_FEATURE_FLAGGING_ENABLED.value():
return []
if not client:
client = get_features_client()
flags = []
offset = 0
while True:
batch = list(client.list(limit=batch_size, offset=offset))
if not batch:
break
flags.extend(batch)
offset += batch_size
return flags
| def list_feature_flags(
batch_size: int = 10, client: FeatureFlagClient = None
) -> List[FeatureFlag]:
"""
List all feature flags.
This function always returns an empty list if the setting
PREFECT_CLOUD_ENABLE_FEATURE_FLAGGING is false.
Args:
batch_size: batch size of flags to retrieve at a time
client: The `FeatureFlagClient` instance to use. Defaults to a client
configured to look at an in-memory feature store.
Returns:
List[FeatureFlag]: list of all feature flags in the store
"""
if not settings.PREFECT_FEATURE_FLAGGING_ENABLED.value():
return []
if not client:
client = get_features_client()
flags = []
offset = 0
while True:
batch = list(client.list(limit=batch_size, offset=offset))
if not batch:
break
flags.extend(batch)
offset += batch_size
return flags
|
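A generic sketch of the offset/limit pagination loop used above. The client here is a stand-in with the same `list(limit=..., offset=...)` shape, not the real FeatureFlagClient.

class FakeClient:
    def __init__(self, items):
        self._items = items

    def list(self, limit, offset):
        return self._items[offset:offset + limit]

client = FakeClient([f"flag-{i}" for i in range(23)])
flags, offset, batch_size = [], 0, 10
while True:
    batch = list(client.list(limit=batch_size, offset=offset))
    if not batch:
        break
    flags.extend(batch)
    offset += batch_size
print(len(flags))  # 23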
22,833 | def _get_leap_year(year, future):
"""
Iterate through previous or next years until it gets a valid leap year
This is performed to avoid missing or including centurial leap years
"""
difference = 1 if future else -1
leap_year = year + difference
while not calendar.isleap(leap_year):
leap_year = leap_year + difference
return leap_year
| def _get_leap_year(year, future):
"""
Iterate through previous or next years until it gets a valid leap year
This is performed to avoid missing or including centurial leap years
"""
step = 1 if future else -1
leap_year = year + step
while not calendar.isleap(leap_year):
leap_year = leap_year + step
return leap_year
|
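A stdlib-only illustration of the search above: step year by year in either direction until calendar.isleap is satisfied, which also skips centurial non-leap years such as 1900.

import calendar

def nearest_leap_year(year, future):
    step = 1 if future else -1
    candidate = year + step
    while not calendar.isleap(candidate):
        candidate += step
    return candidate

print(nearest_leap_year(1900, future=False))  # 1896 (1900 is not a leap year)
print(nearest_leap_year(2023, future=True))   # 2024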
40,370 | def get_planetoid_dataset(name, normalize_features=False, transform=None):
path = Path.joinpath(Path(__file__).resolve().parent.parent, 'data', name)
dataset = Planetoid(path, name)
if transform is not None and normalize_features:
dataset.transform = T.Compose([T.NormalizeFeatures(), transform])
elif normalize_features:
dataset.transform = T.NormalizeFeatures()
elif transform is not None:
dataset.transform = transform
return dataset
| def get_planetoid_dataset(name, normalize_features=False, transform=None):
path = Path(__file__).parent / '..' / 'data'
dataset = Planetoid(path, name)
if transform is not None and normalize_features:
dataset.transform = T.Compose([T.NormalizeFeatures(), transform])
elif normalize_features:
dataset.transform = T.NormalizeFeatures()
elif transform is not None:
dataset.transform = transform
return dataset
|
6,385 | def create_appointments(number):
for i in range(1, number):
frappe.get_doc({
'doctype': 'Appointment',
'scheduled_time': datetime.datetime.min,
'customer_name': 'Test Customer'+str(i),
'customer_phone_number': '8088',
'customer_skype': 'test'+str(i),
})
| def create_appointments(number):
for count in range(1, number):
frappe.get_doc({
'doctype': 'Appointment',
'scheduled_time': datetime.datetime.min,
'customer_name': 'Test Customer'+str(count),
'customer_phone_number': '8088',
'customer_skype': 'test'+str(count),
})
|
34,492 | def add_subparser(
subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
):
convert_parser = subparsers.add_parser(
"convert",
parents=parents,
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help=(
"Converts NLU and Core training data files from Markdown to YAML format."
),
)
convert_parser.set_defaults(func=convert)
convert_arguments.set_convert_arguments(convert_parser)
| def add_subparser(
subparsers: argparse._SubParsersAction, parents: List[argparse.ArgumentParser]
) -> None:
convert_parser = subparsers.add_parser(
"convert",
parents=parents,
conflict_handler="resolve",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
help=(
"Converts NLU and Core training data files from Markdown to YAML format."
),
)
convert_parser.set_defaults(func=convert)
convert_arguments.set_convert_arguments(convert_parser)
|
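A hedged, self-contained sketch of the subparser wiring above: register a "convert" command, attach a default handler with set_defaults, then dispatch on the parsed arguments. The handler and the `path` argument are invented for the example.

import argparse

def convert(args):
    print(f"converting {args.path}")

parser = argparse.ArgumentParser(prog="tool")
subparsers = parser.add_subparsers(dest="command")
convert_parser = subparsers.add_parser(
    "convert",
    conflict_handler="resolve",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    help="Converts training data files.",
)
convert_parser.add_argument("path")
convert_parser.set_defaults(func=convert)

args = parser.parse_args(["convert", "data/nlu.md"])
args.func(args)  # -> converting data/nlu.md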
44,213 | def unitary_cost(n, rank_r, rank_m, br=7, aleph=10, beth=20):
r"""Return the number of Toffoli gates needed to implement the qubitization unitary operator.
The expression for computing the cost is taken from
[`arXiv:2011.03494 <https://arxiv.org/abs/2011.03494>`_].
Args:
n (int): number of molecular orbitals
rank_r (int): the rank of the first factorization step
rank_m (int): the average rank of the second factorization step
br (int): number of bits for ancilla qubit rotation
aleph (int): number of bits for the keep register
beth (int): number of bits for the rotation angles
Returns:
int: the number of Toffoli gates to implement the qubitization unitary
**Example**
>>> n = 14
>>> rank_r = 26
>>> rank_m = 5.5
>>> br = 7
>>> aleph = 10
>>> beth = 20
>>> unitary_cost(n, norm, error, rank_r, rank_m, br, aleph, beth)
2007
"""
eta = np.array([np.log2(n) for n in range(1, rank_r + 1) if rank_r % n == 0])
eta = int(np.max([n for n in eta if n % 1 == 0]))
nxi = np.ceil(np.log2(rank_m))
nlxi = np.ceil(np.log2(rank_r * rank_m + n / 2))
nl = np.ceil(np.log2(rank_r + 1))
bp1 = nl + aleph
bp2 = nxi + aleph + 2
bo = nxi + nlxi + br + 1
rank_rm = rank_r * rank_m
cost = 9 * nl - 6 * eta + 12 * br + 34 * nxi + 8 * nlxi + 9 * aleph + 3 * n * beth - 6 * n - 43
cost += qrom_cost((rank_r, 1, 0, bp1, -1))[0]
cost += qrom_cost((rank_r, 1, 0, bo, -1))[0]
cost += qrom_cost((rank_r, 1, 0, 1, 0))[0] * 2
cost += qrom_cost((rank_rm, n / 2, rank_rm, n * beth, 0))[0]
cost += qrom_cost((rank_rm, n / 2, rank_rm, 2, 0))[0] * 2
cost += qrom_cost((rank_rm, n / 2, rank_rm, 2 * bp2, -1))[0]
return int(cost)
| def unitary_cost(n, rank_r, rank_m, br=7, aleph=10, beth=20):
r"""Return the number of Toffoli gates needed to implement the qubitization unitary operator.
The expression for computing the cost is taken from
[`arXiv:2011.03494 <https://arxiv.org/abs/2011.03494>`_].
Args:
n (int): number of molecular orbitals
rank_r (int): the rank of the first factorization step
rank_m (int): average rank of the second factorization step
br (int): number of bits for ancilla qubit rotation
aleph (int): number of bits for the keep register
beth (int): number of bits for the rotation angles
Returns:
int: the number of Toffoli gates to implement the qubitization unitary
**Example**
>>> n = 14
>>> rank_r = 26
>>> rank_m = 5.5
>>> br = 7
>>> aleph = 10
>>> beth = 20
>>> unitary_cost(n, norm, error, rank_r, rank_m, br, aleph, beth)
2007
"""
eta = np.array([np.log2(n) for n in range(1, rank_r + 1) if rank_r % n == 0])
eta = int(np.max([n for n in eta if n % 1 == 0]))
nxi = np.ceil(np.log2(rank_m))
nlxi = np.ceil(np.log2(rank_r * rank_m + n / 2))
nl = np.ceil(np.log2(rank_r + 1))
bp1 = nl + aleph
bp2 = nxi + aleph + 2
bo = nxi + nlxi + br + 1
rank_rm = rank_r * rank_m
cost = 9 * nl - 6 * eta + 12 * br + 34 * nxi + 8 * nlxi + 9 * aleph + 3 * n * beth - 6 * n - 43
cost += qrom_cost((rank_r, 1, 0, bp1, -1))[0]
cost += qrom_cost((rank_r, 1, 0, bo, -1))[0]
cost += qrom_cost((rank_r, 1, 0, 1, 0))[0] * 2
cost += qrom_cost((rank_rm, n / 2, rank_rm, n * beth, 0))[0]
cost += qrom_cost((rank_rm, n / 2, rank_rm, 2, 0))[0] * 2
cost += qrom_cost((rank_rm, n / 2, rank_rm, 2 * bp2, -1))[0]
return int(cost)
|
48,415 | def main():
argument_spec = dict(
cluster_name=dict(required=True),
resource=dict(required=False),
tags=dict(type='dict'),
purge_tags=dict(type='bool', default=False),
state=dict(default='present', choices=['present', 'absent', 'list']),
resource_type=dict(default='cluster', choices=['cluster', 'task', 'service', 'task_definition', 'container'])
)
required_if = [('state', 'present', ['tags']), ('state', 'absent', ['tags'])]
module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True)
resource_type = module.params['resource_type']
cluster_name = module.params['cluster_name']
if resource_type == 'cluster':
resource = cluster_name
else:
resource = module.params['resource']
tags = module.params['tags']
state = module.params['state']
purge_tags = module.params['purge_tags']
result = {'changed': False}
ecs = module.client('ecs')
try:
if resource_type == 'cluster':
description = ecs.describe_clusters(clusters=[resource])
if len(description['clusters']) == 0:
module.fail_json(msg='Failed to find {0} {1}'.format(resource_type, resource))
resource_arn = description['clusters'][0]['clusterArn']
elif resource_type == 'task':
description = ecs.describe_tasks(cluster=cluster_name, tasks=[resource])
if len(description['tasks']) == 0:
module.fail_json(msg='Failed to find {0} {1}'.format(resource_type, resource))
resource_arn = description['tasks'][0]['taskArn']
elif resource_type == 'service':
description = ecs.describe_services(cluster=cluster_name, services=[resource])
if len(description['services']) == 0:
module.fail_json(msg='Failed to find {0} {1}'.format(resource_type, resource))
resource_arn = description['services'][0]['serviceArn']
elif resource_type == 'task_definition':
description = ecs.describe_task_definition(taskDefinition=resource)
if 'taskDefinition' not in description or 'taskDefinitionArn' not in description['taskDefinition']:
module.fail_json(msg='Failed to find {0} {1}'.format(resource_type, resource))
resource_arn = description['taskDefinition']['taskDefinitionArn']
elif resource_type == 'container':
description = ecs.describe_container_instances(clusters=[resource])
if len(description['containerInstances']) == 0:
module.fail_json(msg='Failed to find {0} {1}'.format(resource_type, resource))
resource_arn = description['containerInstances'][0]['containerInstanceArn']
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to find {0} {1}'.format(resource_type, resource))
current_tags = get_tags(ecs, module, resource_arn)
if state == 'list':
module.exit_json(changed=False, tags=current_tags)
add_tags, remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags)
remove_tags = {}
if state == 'absent':
for key in tags:
if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]):
remove_tags[key] = current_tags[key]
for key in remove:
remove_tags[key] = current_tags[key]
if remove_tags:
result['changed'] = True
result['removed_tags'] = remove_tags
if not module.check_mode:
try:
ecs.untag_resource(resourceArn=resource_arn, tagKeys=list(remove_tags.keys()))
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to remove tags {0} from resource {1}'.format(remove_tags, resource))
if state == 'present' and add_tags:
result['changed'] = True
result['added_tags'] = add_tags
current_tags.update(add_tags)
if not module.check_mode:
try:
tags = ansible_dict_to_boto3_tag_list(add_tags, tag_name_key_name='key', tag_value_key_name='value')
ecs.tag_resource(resourceArn=resource_arn, tags=tags)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to set tags {0} on resource {1}'.format(add_tags, resource))
result['tags'] = get_tags(ecs, module, resource_arn)
module.exit_json(**result)
| def main():
argument_spec = dict(
cluster_name=dict(required=True),
resource=dict(required=False),
tags=dict(type='dict'),
purge_tags=dict(type='bool', default=False),
state=dict(default='present', choices=['present', 'absent', 'list']),
resource_type=dict(default='cluster', choices=['cluster', 'task', 'service', 'task_definition', 'container'])
)
required_if = [
('state', 'present', ['tags']),
('state', 'absent', ['tags']),
('resource_type', 'task', ['resource']),
('resource_type', 'service', ['resource']),
('resource_type', 'task_definition', ['resource']),
('resource_type', 'container', ['resource']),
]
module = AnsibleAWSModule(argument_spec=argument_spec, required_if=required_if, supports_check_mode=True)
resource_type = module.params['resource_type']
cluster_name = module.params['cluster_name']
if resource_type == 'cluster':
resource = cluster_name
else:
resource = module.params['resource']
tags = module.params['tags']
state = module.params['state']
purge_tags = module.params['purge_tags']
result = {'changed': False}
ecs = module.client('ecs')
try:
if resource_type == 'cluster':
description = ecs.describe_clusters(clusters=[resource])
if len(description['clusters']) == 0:
module.fail_json(msg='Failed to find {0} {1}'.format(resource_type, resource))
resource_arn = description['clusters'][0]['clusterArn']
elif resource_type == 'task':
description = ecs.describe_tasks(cluster=cluster_name, tasks=[resource])
if len(description['tasks']) == 0:
module.fail_json(msg='Failed to find {0} {1}'.format(resource_type, resource))
resource_arn = description['tasks'][0]['taskArn']
elif resource_type == 'service':
description = ecs.describe_services(cluster=cluster_name, services=[resource])
if len(description['services']) == 0:
module.fail_json(msg='Failed to find {0} {1}'.format(resource_type, resource))
resource_arn = description['services'][0]['serviceArn']
elif resource_type == 'task_definition':
description = ecs.describe_task_definition(taskDefinition=resource)
if 'taskDefinition' not in description or 'taskDefinitionArn' not in description['taskDefinition']:
module.fail_json(msg='Failed to find {0} {1}'.format(resource_type, resource))
resource_arn = description['taskDefinition']['taskDefinitionArn']
elif resource_type == 'container':
description = ecs.describe_container_instances(clusters=[resource])
if len(description['containerInstances']) == 0:
module.fail_json(msg='Failed to find {0} {1}'.format(resource_type, resource))
resource_arn = description['containerInstances'][0]['containerInstanceArn']
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to find {0} {1}'.format(resource_type, resource))
current_tags = get_tags(ecs, module, resource_arn)
if state == 'list':
module.exit_json(changed=False, tags=current_tags)
add_tags, remove = compare_aws_tags(current_tags, tags, purge_tags=purge_tags)
remove_tags = {}
if state == 'absent':
for key in tags:
if key in current_tags and (tags[key] is None or current_tags[key] == tags[key]):
remove_tags[key] = current_tags[key]
for key in remove:
remove_tags[key] = current_tags[key]
if remove_tags:
result['changed'] = True
result['removed_tags'] = remove_tags
if not module.check_mode:
try:
ecs.untag_resource(resourceArn=resource_arn, tagKeys=list(remove_tags.keys()))
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to remove tags {0} from resource {1}'.format(remove_tags, resource))
if state == 'present' and add_tags:
result['changed'] = True
result['added_tags'] = add_tags
current_tags.update(add_tags)
if not module.check_mode:
try:
tags = ansible_dict_to_boto3_tag_list(add_tags, tag_name_key_name='key', tag_value_key_name='value')
ecs.tag_resource(resourceArn=resource_arn, tags=tags)
except (BotoCoreError, ClientError) as e:
module.fail_json_aws(e, msg='Failed to set tags {0} on resource {1}'.format(add_tags, resource))
result['tags'] = get_tags(ecs, module, resource_arn)
module.exit_json(**result)
|
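A pure-Python sketch of the add/remove split that compare_aws_tags performs above (this is not the Ansible/boto3 helper itself): new or changed keys are added, and existing keys absent from the desired set are removed only when purging.

def diff_tags(current, desired, purge=False):
    to_add = {k: v for k, v in desired.items() if current.get(k) != v}
    to_remove = [k for k in current if purge and k not in desired]
    return to_add, to_remove

current = {"env": "prod", "team": "core"}
desired = {"env": "prod", "owner": "ops"}
print(diff_tags(current, desired, purge=True))   # ({'owner': 'ops'}, ['team'])
print(diff_tags(current, desired, purge=False))  # ({'owner': 'ops'}, [])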
2,719 | def _pandas_arff_parser(
gzip_file,
output_type,
openml_columns_info,
feature_names_to_select,
target_names_to_select,
):
"""ARFF parser using `pandas.read_csv`.
This parser uses the metadata fetched directly from OpenML and skips the metadata
headers of ARFF file itself. The data is loaded as a CSV file.
Parameters
----------
gzip_file : GzipFile instance
The GZip compressed file with the ARFF formatted payload.
output_type : {"numpy", "sparse", "pandas"}
The type of the arrays that will be returned. The possibilities are:
- `"numpy"`: both `X` and `y` will be NumPy arrays;
- `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array;
- `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a
pandas Series or DataFrame.
openml_columns_info : dict
The information provided by OpenML regarding the columns of the ARFF
file.
feature_names_to_select : list of str
A list of the feature names to be selected to build `X`.
target_names_to_select : list of str
A list of the target names to be selected to build `y`.
Returns
-------
X : {ndarray, sparse matrix, dataframe}
The data matrix.
y : {ndarray, dataframe, series}
The target.
frame : dataframe or None
A dataframe containing both `X` and `y`. `None` if
`output_array_type != "pandas"`.
categories : list of str or None
The names of the features that are categorical. `None` if
`output_array_type == "pandas"`.
"""
import pandas as pd
# read the file until the data section to skip the ARFF metadata headers
for line in gzip_file:
if line.decode("utf-8").lower().startswith("@data"):
break
dtypes = {}
for name in openml_columns_info:
column_dtype = openml_columns_info[name]["data_type"]
if column_dtype.lower() == "integer":
# Use Int64 to infer missing values from data
# XXX: this line is not covered by our tests. Is this really needed?
dtypes[name] = "Int64"
elif column_dtype.lower() == "nominal":
dtypes[name] = "category"
# ARFF represents missing values with "?"
frame = pd.read_csv(
gzip_file,
header=None,
na_values=["?"], # missing values are represented by `?`
comment="%", # skip line starting by `%` since they are comments
names=[name for name in openml_columns_info],
dtype=dtypes,
)
columns_to_select = feature_names_to_select + target_names_to_select
columns_to_keep = [col for col in frame.columns if col in columns_to_select]
frame = frame[columns_to_keep]
# strip quotes to be consistent with LIAC ARFF
re_start_end_quotes = "^[\"'](.*[\"'])"
def strip_quotes(s):
return s.group(0)[1:-1]
for col in make_column_selector(dtype_exclude="number")(frame):
if pd.api.types.is_categorical_dtype(frame[col].dtype):
# modify the categories instead of each dataframe row
frame[col].cat.categories = frame[col].cat.categories.str.replace(
re_start_end_quotes, strip_quotes, regex=True
)
else:
# we deal with an object column
frame[col] = frame[col].replace(
re_start_end_quotes, strip_quotes, regex=True
)
X, y = _post_process_frame(frame, feature_names_to_select, target_names_to_select)
if output_type == "pandas":
return X, y, frame, None
else:
X, y = X.to_numpy(), y.to_numpy()
categories = {
name: dtype.categories.tolist()
for name, dtype in frame.dtypes.items()
if pd.api.types.is_categorical_dtype(dtype)
}
return X, y, None, categories
| def _pandas_arff_parser(
gzip_file,
output_type,
openml_columns_info,
feature_names_to_select,
target_names_to_select,
):
"""ARFF parser using `pandas.read_csv`.
This parser uses the metadata fetched directly from OpenML and skips the metadata
headers of ARFF file itself. The data is loaded as a CSV file.
Parameters
----------
gzip_file : GzipFile instance
The GZip compressed file with the ARFF formatted payload.
output_type : {"numpy", "sparse", "pandas"}
The type of the arrays that will be returned. The possibilities are:
- `"numpy"`: both `X` and `y` will be NumPy arrays;
- `"sparse"`: `X` will be sparse matrix and `y` will be a NumPy array;
- `"pandas"`: `X` will be a pandas DataFrame and `y` will be either a
pandas Series or DataFrame.
openml_columns_info : dict
The information provided by OpenML regarding the columns of the ARFF
file.
feature_names_to_select : list of str
A list of the feature names to be selected to build `X`.
target_names_to_select : list of str
A list of the target names to be selected to build `y`.
Returns
-------
X : {ndarray, sparse matrix, dataframe}
The data matrix.
y : {ndarray, dataframe, series}
The target.
frame : dataframe or None
A dataframe containing both `X` and `y`. `None` if
`output_array_type != "pandas"`.
categories : list of str or None
The names of the features that are categorical. `None` if
`output_array_type == "pandas"`.
"""
import pandas as pd
# read the file until the data section to skip the ARFF metadata headers
for line in gzip_file:
if line.decode("utf-8").lower().startswith("@data"):
break
dtypes = {}
for name in openml_columns_info:
column_dtype = openml_columns_info[name]["data_type"]
if column_dtype.lower() == "integer":
# Use Int64 to infer missing values from data
# XXX: this line is not covered by our tests. Is this really needed?
dtypes[name] = "Int64"
elif column_dtype.lower() == "nominal":
dtypes[name] = "category"
# ARFF represents missing values with "?"
frame = pd.read_csv(
gzip_file,
header=None,
na_values=["?"], # missing values are represented by `?`
comment="%", # skip line starting by `%` since they are comments
names=[name for name in openml_columns_info],
dtype=dtypes,
)
columns_to_select = feature_names_to_select + target_names_to_select
columns_to_keep = [col for col in frame.columns if col in columns_to_select]
frame = frame[columns_to_keep]
# strip quotes to be consistent with LIAC ARFF
re_start_end_quotes = "^[\"'](.*[\"'])$"
def strip_quotes(s):
return s.group(0)[1:-1]
for col in make_column_selector(dtype_exclude="number")(frame):
if pd.api.types.is_categorical_dtype(frame[col].dtype):
# modify the categories instead of each dataframe row
frame[col].cat.categories = frame[col].cat.categories.str.replace(
re_start_end_quotes, strip_quotes, regex=True
)
else:
# we deal with an object column
frame[col] = frame[col].replace(
re_start_end_quotes, strip_quotes, regex=True
)
X, y = _post_process_frame(frame, feature_names_to_select, target_names_to_select)
if output_type == "pandas":
return X, y, frame, None
else:
X, y = X.to_numpy(), y.to_numpy()
categories = {
name: dtype.categories.tolist()
for name, dtype in frame.dtypes.items()
if pd.api.types.is_categorical_dtype(dtype)
}
return X, y, None, categories
|
31,427 | def main():
params = demisto.params()
base_url = urljoin(params.get('url'), 'idapi/v1')
verify_ssl = not params.get('insecure', False)
reliability = params.get('integrationReliability')
if DBotScoreReliability.is_valid_type(reliability):
reliability = DBotScoreReliability.get_dbot_score_reliability_from_str(reliability)
else:
return_error("Please provide a valid value for the Source Reliability parameter.")
proxy = params.get('proxy')
client = Client(
base_url=base_url,
verify=verify_ssl,
proxy=proxy,
auth=(params.get('credentials', {}).get('identifier'),
params.get('credentials', {}).get('password')),
reliability=reliability
)
command = demisto.command()
demisto.debug(f'Command being called is {command}')
commands = {
'test-module': test_module_command,
f'{INTEGRATION_COMMAND_NAME}-get-incidents': get_incidents_command,
f'{INTEGRATION_COMMAND_NAME}-get-incident-by-id': get_incident_by_id_command
}
try:
if command == 'fetch-incidents':
incidents, new_last_run = fetch_incidents_command(client,
fetch_time=params.get('fetchTime'),
last_run=demisto.getLastRun().get('lastRun'),
limit=params.get('fetchLimit'))
demisto.incidents(incidents)
demisto.setLastRun(new_last_run)
else:
readable_output, outputs, raw_response = commands[command](client=client, **demisto.args())
return_outputs(readable_output, outputs, raw_response)
# Log exceptions
except Exception as e:
err_msg = f'Error in {INTEGRATION_NAME} Integration [{e}]'
return_error(err_msg, error=e)
| def main():
params = demisto.params()
base_url = urljoin(params.get('url'), 'idapi/v1')
verify_ssl = not params.get('insecure', False)
reliability = params.get('integrationReliability', DBotScoreReliability.B)
if DBotScoreReliability.is_valid_type(reliability):
reliability = DBotScoreReliability.get_dbot_score_reliability_from_str(reliability)
else:
return_error("Please provide a valid value for the Source Reliability parameter.")
proxy = params.get('proxy')
client = Client(
base_url=base_url,
verify=verify_ssl,
proxy=proxy,
auth=(params.get('credentials', {}).get('identifier'),
params.get('credentials', {}).get('password')),
reliability=reliability
)
command = demisto.command()
demisto.debug(f'Command being called is {command}')
commands = {
'test-module': test_module_command,
f'{INTEGRATION_COMMAND_NAME}-get-incidents': get_incidents_command,
f'{INTEGRATION_COMMAND_NAME}-get-incident-by-id': get_incident_by_id_command
}
try:
if command == 'fetch-incidents':
incidents, new_last_run = fetch_incidents_command(client,
fetch_time=params.get('fetchTime'),
last_run=demisto.getLastRun().get('lastRun'),
limit=params.get('fetchLimit'))
demisto.incidents(incidents)
demisto.setLastRun(new_last_run)
else:
readable_output, outputs, raw_response = commands[command](client=client, **demisto.args())
return_outputs(readable_output, outputs, raw_response)
# Log exceptions
except Exception as e:
err_msg = f'Error in {INTEGRATION_NAME} Integration [{e}]'
return_error(err_msg, error=e)
|
30,927 | def parse_response(response: Runner, human_readable_name: str, installed_software: str,
additional_vars=None) -> DemistoResult:
""" Parse anible-runner Runner object to demisto
Args:
response: anible-runner Runner object.
human_readable_name: Table header.
installed_software: SW installed in hostname
additional_vars:
Returns:
DemistoResult: Demisto structured response.
"""
stdout = f'\n\n### Stdout:\n```\n{"".join(response.stdout.readlines())}\n```'
if response.status == 'failed' or response.rc != 0:
demisto.results({
'Type': EntryType.NOTE,
'ContentsFormat': EntryFormat.JSON,
'Contents': {},
'ReadableContentsFormat': EntryFormat.MARKDOWN,
'HumanReadable': stdout,
})
raise DemistoException(f'Installing {installed_software} has failed with return code {response.rc}, See stdout.')
result = {
'Status': response.status,
'ReturnCode': response.rc,
'Canceled': response.canceled,
'Errored': response.errored,
'TimedOut': response.timed_out,
'Stats': response.stats,
'InstalledSoftware': installed_software,
'AdditionalInfo': additional_vars
}
human_readable = tableToMarkdown(human_readable_name, result, removeNull=True) + stdout
return {
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': result,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': human_readable,
'EntryContext': {'Ansible.Install': result}
}
| def parse_response(response: Runner, human_readable_name: str, installed_software: str,
additional_vars=None) -> DemistoResult:
""" Parse ansible-runner Runner object to demisto
Args:
response: anible-runner Runner object.
human_readable_name: Table header.
installed_software: SW installed in hostname
additional_vars:
Returns:
DemistoResult: Demisto structured response.
"""
stdout = f'\n\n### Stdout:\n```\n{"".join(response.stdout.readlines())}\n```'
if response.status == 'failed' or response.rc != 0:
demisto.results({
'Type': EntryType.NOTE,
'ContentsFormat': EntryFormat.JSON,
'Contents': {},
'ReadableContentsFormat': EntryFormat.MARKDOWN,
'HumanReadable': stdout,
})
raise DemistoException(f'Installing {installed_software} has failed with return code {response.rc}, See stdout.')
result = {
'Status': response.status,
'ReturnCode': response.rc,
'Canceled': response.canceled,
'Errored': response.errored,
'TimedOut': response.timed_out,
'Stats': response.stats,
'InstalledSoftware': installed_software,
'AdditionalInfo': additional_vars
}
human_readable = tableToMarkdown(human_readable_name, result, removeNull=True) + stdout
return {
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': result,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': human_readable,
'EntryContext': {'Ansible.Install': result}
}
|
2,822 | def make_union(*transformers, n_jobs=None, verbose=False):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Parameters
----------
*transformers : list of estimators
One or more estimators.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionchanged:: v0.20
`n_jobs` default changed from 1 to None.
verbose : bool, default=False
If True, the time elapsed while fitting each transformer will be
printed as it is completed.
Returns
-------
f : FeatureUnion
A FeatureUnion object for concatenating the results of multiple transformer
objects.
See Also
--------
FeatureUnion : Class for concatenating the results of multiple transformer
objects.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> from sklearn.pipeline import make_union
>>> make_union(PCA(), TruncatedSVD())
FeatureUnion(transformer_list=[('pca', PCA()),
('truncatedsvd', TruncatedSVD())])
"""
return FeatureUnion(_name_estimators(transformers), n_jobs=n_jobs, verbose=verbose)
| def make_union(*transformers, n_jobs=None, verbose=False):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Parameters
----------
*transformers : list of estimators
One or more estimators.
n_jobs : int, default=None
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
.. versionchanged:: v0.20
`n_jobs` default changed from 1 to None.
verbose : bool, default=False
If True, the time elapsed while fitting each transformer will be
printed as it is completed.
Returns
-------
f : FeatureUnion
A ``FeatureUnion`` object for concatenating the results of multiple transformer
objects.
See Also
--------
FeatureUnion : Class for concatenating the results of multiple transformer
objects.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> from sklearn.pipeline import make_union
>>> make_union(PCA(), TruncatedSVD())
FeatureUnion(transformer_list=[('pca', PCA()),
('truncatedsvd', TruncatedSVD())])
"""
return FeatureUnion(_name_estimators(transformers), n_jobs=n_jobs, verbose=verbose)
|
35,291 | def tensor_train_OI(data_tensor, rank, n_iter = 1, trajectory = False, return_errors = True, **context):
""" Perform tensor-train orthogonal iteration (TTOI) for tensor train decomposition
Reference paper: Zhou Y, Zhang AR, Zheng L, Wang Y. "Optimal high-order tensor svd via tensor-train orthogonal iteration."
Parameters
----------
data_tensor: tl.tensor
observed tensor data
rank : tuple
rank of the TT decomposition
must verify rank[0] == rank[-1] == 1 (boundary conditions)
and len(rank) == len(tl.shape(data_tensor))+1
n_iter : int
half the number of iterations
trajectory : bool, optional, default is False
if True, the output of each iteration of TTOI is returned: 2*n_iter outputs
otherwise, only the output of the last iteration is returned
return_errors : bool, optional, default is True
if True, the approximation/reconstruction error of each iteration of TTOI is returned: 2*n_iter outputs
Returns
-------
factors : list of n_iter tensors or one tensor
* n_iter tensors (if `trajectory` is True) : each list contains the output of each iteration, one full tensor and list of tensor factors
* one tensor (otherwise): output of the last iteration, one full tensor and list of tensor factors
full_tensor : list of n_iter tensors or one tensor
* n_iter tensors (if `trajectory` is True) : each list contains the output of each iteration, one full tensor and list of tensor factoros
* one tensor (otherwise): output of the last iteration, one full tensor and list of tensor factors
"""
context = tl.context(data_tensor)
shape = tl.shape(data_tensor)
n_dim = len(shape)
rank = validate_tt_rank(shape, rank)
# Make sure it's not a tuple but a list
rank = list(rank)
# Add two one-dimensional mode to data_tensor
data_tensor_extended = tl.reshape(data_tensor,(1, ) + shape + (1, ))
if trajectory:
factors = list()
full_tensor = list()
if return_errors:
error_list = list()
# perform TTOI for n_iter iterations
for n in range(n_iter):
# first perform forward update
# U_arr will be a list including estimated left singular spaces at the current iteration
U_arr = list()
# initialize R_tilde_arr (sequential unfolding of data_tensor multiplied by U_arr sequentially on the left, useful for backward update to obtain V_arr)
R_tilde_arr = list()
# estimate the first left singular spaces
# Here, R_tmp is the first sequential unfolding compressed on the right by previous updated V_arr (if exists)
R_tmp_l = data_tensor_extended
if n == 0:
R_tmp = R_tmp_l
else:
R_tmp = sequential_prod(R_tmp_l,V_arr,"right")
U_tmp = tl.partial_svd(tl.reshape(R_tmp,(shape[0],-1)),rank[1])[0]
U_arr.append(tl.reshape(U_tmp,(rank[0],shape[0],rank[1])))
# estimate the 2nd to (d-1)th left singular spaces
for k in range(n_dim-2):
# compress the (k+2)th sequential unfolding of data_tensor from the left
R_tmp_l = sequential_prod(R_tmp_l,[U_arr[k]],"left")
# R_tmp_l will be useful for backward update
R_tilde_arr.append(R_tmp_l)
# compress the (k+2)th sequential unfolding of data_tensor from the right (if n>0)
if n == 0:
R_tmp = R_tmp_l
else:
R_tmp = sequential_prod(R_tmp_l,V_arr[0:(n_dim-k-2)],"right")
U_tmp = tl.partial_svd(tl.reshape(R_tmp,(rank[k+1]*shape[k+1],-1)),rank[k+2])[0]
U_arr.append(tl.reshape(U_tmp,(rank[k+1],shape[k+1],rank[k+2])))
# forward update is done; output the final residual
R_tilde_arr.append(sequential_prod(R_tmp_l,[U_arr[n_dim-2]],"left"))
if trajectory or return_errors:
factors_list_tmp = list()
for k in range(n_dim-1):
factors_list_tmp.append(tl.tensor(U_arr[k],**context))
factors_list_tmp.append(tl.tensor(R_tilde_arr[n_dim-2],**context))
full_tensor_tmp = tl.tensor(tt_to_tensor(factors_list_tmp),**context)
if return_errors:
error_list.append(tl.norm(full_tensor_tmp-data_tensor,2))
if trajectory:
factors.append(factors_list_tmp)
full_tensor.append(full_tensor_tmp)
# perform backward update
# initialize V_arr: V_arr will be a list of estimated right singular spaces at the current or previous iteration
V_arr = list()
V_tmp = tl.transpose(tl.partial_svd(tl.reshape(R_tilde_arr[n_dim-2],(rank[n_dim-1],shape[n_dim-1])),rank[n_dim-1])[2])
V_arr.append(tl.reshape(V_tmp,(rank[n_dim],shape[n_dim-1],rank[n_dim-1])))
# estimate the 2nd to (d-1)th right singular spaces
for k in range(n_dim-2):
# compress R_tilde_arr from the right
R_tmp_r = sequential_prod(R_tilde_arr[n_dim-k-3],V_arr[0:(k+1)],"right")
V_tmp = tl.transpose(tl.partial_svd(tl.reshape(R_tmp_r,(rank[n_dim-k-2],shape[n_dim-k-2]*rank[n_dim-k-1])),rank[n_dim-k-2])[2])
V_arr.append(tl.reshape(V_tmp,(rank[n_dim-k-1],shape[n_dim-k-2],rank[n_dim-k-2])))
Residual_right = sequential_prod(data_tensor_extended,V_arr,"right")
if trajectory or return_errors or n==n_iter-1:
factors_list_tmp = list()
factors_list_tmp.append(tl.tensor(Residual_right,**context))
for k in range(n_dim-1):
factors_list_tmp.append(tl.tensor(tl.transpose(V_arr[n_dim-k-2]),**context))
full_tensor_tmp = tl.tensor(tt_to_tensor(factors_list_tmp),**context)
if return_errors:
error_list.append(tl.norm(full_tensor_tmp-data_tensor,2))
if trajectory:
factors.append(factors_list_tmp)
full_tensor.append(full_tensor_tmp)
if n == n_iter-1:
factors = factors_list_tmp
full_tensor = full_tensor_tmp
# return final results
if return_errors:
return factors, full_tensor, error_list
else:
return factors, full_tensor
| def tensor_train_OI(data_tensor, rank, n_iter = 1, trajectory = False, return_errors = True, **context):
""" Perform tensor-train orthogonal iteration (TTOI) for tensor train decomposition
Reference paper: Zhou Y, Zhang AR, Zheng L, Wang Y. "Optimal high-order tensor svd via tensor-train orthogonal iteration."
Parameters
----------
data_tensor: tl.tensor
observed tensor data
rank : tuple
rank of the TT decomposition
must verify rank[0] == rank[-1] == 1 (boundary conditions)
and len(rank) == len(tl.shape(data_tensor))+1
n_iter : int
half the number of iterations
trajectory : bool, optional, default is False
if True, the output of each iteration of TTOI is returned: 2*n_iter outputs
otherwise, only the output of the last iteration is returned
return_errors : bool, optional, default is True
if True, the approximation/reconstruction error of each iteration of TTOI is returned: 2*n_iter outputs
Returns
-------
factors : list of n_iter tensors or one tensor
* n_iter tensors (if `trajectory` is True) : each list contains the output of each iteration, one full tensor and list of tensor factors
* one tensor (otherwise): output of the last iteration, one full tensor and list of tensor factors
full_tensor : list of n_iter tensors or one tensor
* n_iter tensors (if `trajectory` is True) : each list contains the output of each iteration, one full tensor and list of tensor factoros
* one tensor (otherwise): output of the last iteration, one full tensor and list of tensor factors
"""
context = tl.context(data_tensor)
shape = tl.shape(data_tensor)
n_dim = len(shape)
rank = validate_tt_rank(shape, rank)
# Make sure it's not a tuple but a list
rank = list(rank)
# Add two one-dimensional mode to data_tensor
data_tensor_extended = tl.reshape(data_tensor,(1, ) + shape + (1, ))
if trajectory:
factors = list()
full_tensor = list()
if return_errors:
error_list = list()
# perform TTOI for n_iter iterations
for iteration in range(n_iter):
# first perform forward update
# U_arr will be a list including estimated left singular spaces at the current iteration
U_arr = list()
# initialize R_tilde_arr (sequential unfolding of data_tensor multiplied by U_arr sequentially on the left, useful for backward update to obtain V_arr)
R_tilde_arr = list()
# estimate the first left singular spaces
# Here, R_tmp is the first sequential unfolding compressed on the right by previous updated V_arr (if exists)
R_tmp_l = data_tensor_extended
        if iteration == 0:
R_tmp = R_tmp_l
else:
R_tmp = sequential_prod(R_tmp_l,V_arr,"right")
U_tmp = tl.partial_svd(tl.reshape(R_tmp,(shape[0],-1)),rank[1])[0]
U_arr.append(tl.reshape(U_tmp,(rank[0],shape[0],rank[1])))
# estimate the 2nd to (d-1)th left singular spaces
for k in range(n_dim-2):
# compress the (k+2)th sequential unfolding of data_tensor from the left
R_tmp_l = sequential_prod(R_tmp_l,[U_arr[k]],"left")
# R_tmp_l will be useful for backward update
R_tilde_arr.append(R_tmp_l)
            # compress the (k+2)th sequential unfolding of data_tensor from the right (if iteration>0)
            if iteration == 0:
R_tmp = R_tmp_l
else:
R_tmp = sequential_prod(R_tmp_l,V_arr[0:(n_dim-k-2)],"right")
U_tmp = tl.partial_svd(tl.reshape(R_tmp,(rank[k+1]*shape[k+1],-1)),rank[k+2])[0]
U_arr.append(tl.reshape(U_tmp,(rank[k+1],shape[k+1],rank[k+2])))
# forward update is done; output the final residual
R_tilde_arr.append(sequential_prod(R_tmp_l,[U_arr[n_dim-2]],"left"))
if trajectory or return_errors:
factors_list_tmp = list()
for k in range(n_dim-1):
factors_list_tmp.append(tl.tensor(U_arr[k],**context))
factors_list_tmp.append(tl.tensor(R_tilde_arr[n_dim-2],**context))
full_tensor_tmp = tl.tensor(tt_to_tensor(factors_list_tmp),**context)
if return_errors:
error_list.append(tl.norm(full_tensor_tmp-data_tensor,2))
if trajectory:
factors.append(factors_list_tmp)
full_tensor.append(full_tensor_tmp)
# perform backward update
# initialize V_arr: V_arr will be a list of estimated right singular spaces at the current or previous iteration
V_arr = list()
V_tmp = tl.transpose(tl.partial_svd(tl.reshape(R_tilde_arr[n_dim-2],(rank[n_dim-1],shape[n_dim-1])),rank[n_dim-1])[2])
V_arr.append(tl.reshape(V_tmp,(rank[n_dim],shape[n_dim-1],rank[n_dim-1])))
# estimate the 2nd to (d-1)th right singular spaces
for k in range(n_dim-2):
# compress R_tilde_arr from the right
R_tmp_r = sequential_prod(R_tilde_arr[n_dim-k-3],V_arr[0:(k+1)],"right")
V_tmp = tl.transpose(tl.partial_svd(tl.reshape(R_tmp_r,(rank[n_dim-k-2],shape[n_dim-k-2]*rank[n_dim-k-1])),rank[n_dim-k-2])[2])
V_arr.append(tl.reshape(V_tmp,(rank[n_dim-k-1],shape[n_dim-k-2],rank[n_dim-k-2])))
Residual_right = sequential_prod(data_tensor_extended,V_arr,"right")
        if trajectory or return_errors or iteration == n_iter-1:
factors_list_tmp = list()
factors_list_tmp.append(tl.tensor(Residual_right,**context))
for k in range(n_dim-1):
factors_list_tmp.append(tl.tensor(tl.transpose(V_arr[n_dim-k-2]),**context))
full_tensor_tmp = tl.tensor(tt_to_tensor(factors_list_tmp),**context)
if return_errors:
error_list.append(tl.norm(full_tensor_tmp-data_tensor,2))
if trajectory:
factors.append(factors_list_tmp)
full_tensor.append(full_tensor_tmp)
        if iteration == n_iter-1:
factors = factors_list_tmp
full_tensor = full_tensor_tmp
# return final results
if return_errors:
return factors, full_tensor, error_list
else:
return factors, full_tensor
|
29,910 | def main(argsl=None, # type: Optional[List[str]]
args=None, # type: Optional[argparse.Namespace]
job_order_object=None, # type: Optional[MutableMapping[Text, Any]]
stdin=sys.stdin, # type: IO[Any]
stdout=None, # type: Optional[Union[TextIO, StreamWriter]]
stderr=sys.stderr, # type: IO[Any]
versionfunc=versionstring, # type: Callable[[], Text]
logger_handler=None, # type: Optional[logging.Handler]
custom_schema_callback=None, # type: Optional[Callable[[], None]]
executor=None, # type: Optional[JobExecutor]
loadingContext=None, # type: Optional[LoadingContext]
runtimeContext=None, # type: Optional[RuntimeContext]
input_required=True # type: bool
): # type: (...) -> int
if not stdout: # force UTF-8 even if the console is configured differently
if (hasattr(sys.stdout, "encoding")
and sys.stdout.encoding != 'UTF-8'): # type: ignore
if PY3 and hasattr(sys.stdout, "detach"):
stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
else:
stdout = getwriter('utf-8')(sys.stdout) # type: ignore
else:
stdout = cast(TextIO, sys.stdout) # type: ignore
_logger.removeHandler(defaultStreamHandler)
stderr_handler = logger_handler
if stderr_handler is not None:
_logger.addHandler(stderr_handler)
else:
coloredlogs.install(logger=_logger, stream=stderr)
stderr_handler = _logger.handlers[-1]
workflowobj = None
prov_log_handler = None # type: Optional[logging.StreamHandler]
try:
if args is None:
if argsl is None:
argsl = sys.argv[1:]
addl = []
if "CWLTOOL_OPTIONS" in os.environ:
addl = os.environ["CWLTOOL_OPTIONS"].split(" ")
args = arg_parser().parse_args(addl+argsl)
if args.record_container_id:
if not args.cidfile_dir:
args.cidfile_dir = os.getcwd()
del args.record_container_id
if runtimeContext is None:
runtimeContext = RuntimeContext(vars(args))
else:
runtimeContext = runtimeContext.copy()
# If on Windows platform, a default Docker Container is used if not
# explicitely provided by user
if onWindows() and not runtimeContext.default_container:
# This docker image is a minimal alpine image with bash installed
# (size 6 mb). source: https://github.com/frol/docker-alpine-bash
runtimeContext.default_container = windows_default_container_id
# If caller parsed its own arguments, it may not include every
# cwltool option, so fill in defaults to avoid crashing when
# dereferencing them in args.
for key, val in iteritems(get_default_args()):
if not hasattr(args, key):
setattr(args, key, val)
# Configure logging
rdflib_logger = logging.getLogger("rdflib.term")
rdflib_logger.addHandler(stderr_handler)
rdflib_logger.setLevel(logging.ERROR)
if args.quiet:
# Silence STDERR, not an eventual provenance log file
stderr_handler.setLevel(logging.WARN)
if runtimeContext.debug:
# Increase to debug for both stderr and provenance log file
_logger.setLevel(logging.DEBUG)
stderr_handler.setLevel(logging.DEBUG)
rdflib_logger.setLevel(logging.DEBUG)
fmtclass = coloredlogs.ColoredFormatter if args.enable_color else logging.Formatter
formatter = fmtclass("%(levelname)s %(message)s")
if args.timestamps:
formatter = fmtclass(
"[%(asctime)s] %(levelname)s %(message)s",
"%Y-%m-%d %H:%M:%S")
stderr_handler.setFormatter(formatter)
##
if args.version:
print(versionfunc())
return 0
_logger.info(versionfunc())
if args.print_supported_versions:
print("\n".join(supported_cwl_versions(args.enable_dev)))
return 0
if not args.workflow:
if os.path.isfile("CWLFile"):
setattr(args, "workflow", "CWLFile")
else:
_logger.error("CWL document required, no input file was provided")
arg_parser().print_help()
return 1
if args.relax_path_checks:
command_line_tool.ACCEPTLIST_RE = command_line_tool.ACCEPTLIST_EN_RELAXED_RE
if args.ga4gh_tool_registries:
ga4gh_tool_registries[:] = args.ga4gh_tool_registries
if not args.enable_ga4gh_tool_registry:
del ga4gh_tool_registries[:]
if custom_schema_callback is not None:
custom_schema_callback()
elif args.enable_ext:
res = pkg_resources.resource_stream(__name__, 'extensions.yml')
use_custom_schema("v1.0", "http://commonwl.org/cwltool", res.read())
res.close()
else:
use_standard_schema("v1.0")
if args.provenance:
if not args.compute_checksum:
_logger.error("--provenance incompatible with --no-compute-checksum")
return 1
ro = ResearchObject(
getdefault(runtimeContext.make_fs_access, StdFsAccess),
temp_prefix_ro=args.tmpdir_prefix, orcid=args.orcid,
full_name=args.cwl_full_name)
runtimeContext.research_obj = ro
log_file_io = ro.open_log_file_for_activity(ro.engine_uuid)
prov_log_handler = logging.StreamHandler(cast(IO[str], log_file_io))
class ProvLogFormatter(logging.Formatter):
"""Enforce ISO8601 with both T and Z."""
def __init__(self): # type: () -> None
super(ProvLogFormatter, self).__init__(
"[%(asctime)sZ] %(message)s")
def formatTime(self, record, datefmt=None):
# type: (logging.LogRecord, Optional[str]) -> str
record_time = time.gmtime(record.created)
formatted_time = time.strftime("%Y-%m-%dT%H:%M:%S", record_time)
with_msecs = "%s,%03d" % (formatted_time, record.msecs)
return with_msecs
prov_log_handler.setFormatter(ProvLogFormatter())
_logger.addHandler(prov_log_handler)
_logger.debug(u"[provenance] Logging to %s", log_file_io)
if argsl is not None:
# Log cwltool command line options to provenance file
_logger.info("[cwltool] %s %s", sys.argv[0], u" ".join(argsl))
_logger.debug(u"[cwltool] Arguments: %s", args)
if loadingContext is None:
loadingContext = LoadingContext(vars(args))
else:
loadingContext = loadingContext.copy()
loadingContext.loader = default_loader(loadingContext.fetcher_constructor)
loadingContext.research_obj = runtimeContext.research_obj
loadingContext.disable_js_validation = \
args.disable_js_validation or (not args.do_validate)
loadingContext.construct_tool_object = getdefault(
loadingContext.construct_tool_object, workflow.default_make_tool)
loadingContext.resolver = getdefault(loadingContext.resolver, tool_resolver)
if loadingContext.do_update is None:
loadingContext.do_update = not (args.pack or args.print_subgraph)
uri, tool_file_uri = resolve_tool_uri(
args.workflow, resolver=loadingContext.resolver,
fetcher_constructor=loadingContext.fetcher_constructor)
try_again_msg = "" if args.debug else ", try again with --debug for more information"
try:
job_order_object, input_basedir, jobloader = load_job_order(
args, stdin, loadingContext.fetcher_constructor,
loadingContext.overrides_list, tool_file_uri)
if args.overrides:
loadingContext.overrides_list.extend(load_overrides(
file_uri(os.path.abspath(args.overrides)), tool_file_uri))
loadingContext, workflowobj, uri = fetch_document(
uri, loadingContext)
if args.print_deps and loadingContext.loader:
printdeps(workflowobj, loadingContext.loader, stdout,
args.relative_deps, uri)
return 0
loadingContext, uri \
= resolve_and_validate_document(loadingContext, workflowobj, uri,
preprocess_only=(args.print_pre or args.pack),
skip_schemas=args.skip_schemas)
if loadingContext.loader is None:
raise Exception("Impossible code path.")
processobj, metadata = loadingContext.loader.resolve_ref(uri)
processobj = cast(CommentedMap, processobj)
if args.pack:
stdout.write(print_pack(loadingContext.loader, processobj, uri, metadata))
return 0
if args.provenance and runtimeContext.research_obj:
# Can't really be combined with args.pack at same time
runtimeContext.research_obj.packed_workflow(
print_pack(loadingContext.loader, processobj, uri, metadata))
if args.print_pre:
stdout.write(json_dumps(processobj, indent=4, sort_keys=True, separators=(',', ': ')))
return 0
tool = make_tool(uri, loadingContext)
if args.make_template:
def my_represent_none(self, data): # pylint: disable=unused-argument
# type: (Any, Any) -> Any
"""Force clean representation of 'null'."""
return self.represent_scalar(u'tag:yaml.org,2002:null', u'null')
yaml.RoundTripRepresenter.add_representer(type(None), my_represent_none)
yaml.round_trip_dump(
generate_input_template(tool), sys.stdout,
default_flow_style=False, indent=4, block_seq_indent=2)
return 0
if args.validate:
print("{} is valid CWL.".format(args.workflow))
return 0
if args.print_rdf:
stdout.write(printrdf(tool, loadingContext.loader.ctx, args.rdf_serializer))
return 0
if args.print_dot:
printdot(tool, loadingContext.loader.ctx, stdout)
return 0
if args.print_targets:
for f in ("outputs", "steps", "inputs"):
if tool.tool[f]:
_logger.info("%s%s targets:", f[0].upper(), f[1:-1])
stdout.write(" "+"\n ".join([shortname(t["id"]) for t in tool.tool[f]])+"\n")
return 0
if args.target:
if isinstance(tool, Workflow):
url = urllib.parse.urlparse(tool.tool["id"])
if url.fragment:
extracted = get_subgraph([tool.tool["id"] + "/" + r for r in args.target], tool)
else:
extracted = get_subgraph([loadingContext.loader.fetcher.urljoin(tool.tool["id"], "#" + r)
for r in args.target],
tool)
else:
_logger.error("Can only use --target on Workflows")
return 1
if isinstance(loadingContext.loader.idx, CommentedMap):
loadingContext.loader.idx[extracted["id"]] = extracted
tool = make_tool(extracted["id"],
loadingContext)
else:
raise Exception("Missing loadingContext.loader.idx!")
if args.print_subgraph:
if "name" in tool.tool:
del tool.tool["name"]
stdout.write(json_dumps(tool.tool, indent=4, sort_keys=True, separators=(',', ': ')))
return 0
except (validate.ValidationException) as exc:
_logger.error(u"Tool definition failed validation:\n%s", Text(exc),
exc_info=args.debug)
return 1
except (RuntimeError, WorkflowException) as exc:
_logger.error(u"Tool definition failed initialization:\n%s", Text(exc),
exc_info=args.debug)
return 1
except Exception as exc:
_logger.error(
u"I'm sorry, I couldn't load this CWL file%s.\nThe error was: %s",
try_again_msg,
Text(exc) if not args.debug else "",
exc_info=args.debug)
return 1
if isinstance(tool, int):
return tool
# If on MacOS platform, TMPDIR must be set to be under one of the
# shared volumes in Docker for Mac
# More info: https://dockstore.org/docs/faq
if sys.platform == "darwin":
default_mac_path = "/private/tmp/docker_tmp"
if runtimeContext.tmp_outdir_prefix == DEFAULT_TMP_PREFIX:
runtimeContext.tmp_outdir_prefix = default_mac_path
if runtimeContext.tmpdir_prefix == DEFAULT_TMP_PREFIX:
runtimeContext.tmpdir_prefix = default_mac_path
for dirprefix in ("tmpdir_prefix", "tmp_outdir_prefix", "cachedir"):
if getattr(runtimeContext, dirprefix) and getattr(runtimeContext, dirprefix) != DEFAULT_TMP_PREFIX:
sl = "/" if getattr(runtimeContext, dirprefix).endswith("/") or dirprefix == "cachedir" \
else ""
setattr(runtimeContext, dirprefix,
os.path.abspath(getattr(runtimeContext, dirprefix)) + sl)
if not os.path.exists(os.path.dirname(getattr(runtimeContext, dirprefix))):
try:
os.makedirs(os.path.dirname(getattr(runtimeContext, dirprefix)))
except Exception as e:
_logger.error("Failed to create directory: %s", Text(e))
return 1
if args.cachedir:
if args.move_outputs == "move":
runtimeContext.move_outputs = "copy"
runtimeContext.tmp_outdir_prefix = args.cachedir
runtimeContext.secret_store = getdefault(runtimeContext.secret_store, SecretStore())
runtimeContext.make_fs_access = getdefault(runtimeContext.make_fs_access, StdFsAccess)
try:
initialized_job_order_object = init_job_order(
job_order_object, args, tool, jobloader, stdout,
print_input_deps=args.print_input_deps,
relative_deps=args.relative_deps,
make_fs_access=runtimeContext.make_fs_access,
input_basedir=input_basedir,
secret_store=runtimeContext.secret_store,
input_required=input_required)
except SystemExit as err:
return err.code
if not executor:
if args.parallel:
temp_executor = MultithreadedJobExecutor()
runtimeContext.select_resources = temp_executor.select_resources
real_executor = temp_executor # type: JobExecutor
else:
real_executor = SingleJobExecutor()
else:
real_executor = executor
try:
runtimeContext.basedir = input_basedir
del args.workflow
del args.job_order
conf_file = getattr(args, "beta_dependency_resolvers_configuration", None) # Text
use_conda_dependencies = getattr(args, "beta_conda_dependencies", None) # Text
if conf_file or use_conda_dependencies:
runtimeContext.job_script_provider = DependenciesConfiguration(args)
else:
runtimeContext.find_default_container = functools.partial(
find_default_container,
default_container=runtimeContext.default_container,
use_biocontainers=args.beta_use_biocontainers)
(out, status) = real_executor(
tool, initialized_job_order_object, runtimeContext,
logger=_logger)
if out is not None:
if runtimeContext.research_obj is not None:
runtimeContext.research_obj.create_job(
out, None, True)
def remove_at_id(doc): # type: (MutableMapping[Text, Any]) -> None
for key in list(doc.keys()):
if key == '@id':
del doc[key]
else:
value = doc[key]
if isinstance(value, MutableMapping):
remove_at_id(value)
elif isinstance(value, MutableSequence):
for entry in value:
if isinstance(entry, MutableMapping):
remove_at_id(entry)
remove_at_id(out)
visit_class(out, ("File",), functools.partial(
add_sizes, runtimeContext.make_fs_access('')))
def loc_to_path(obj): # type: (Dict[Text, Any]) -> None
for field in ("path", "nameext", "nameroot", "dirname"):
if field in obj:
del obj[field]
if obj["location"].startswith("file://"):
obj["path"] = uri_file_path(obj["location"])
visit_class(out, ("File", "Directory"), loc_to_path)
# Unsetting the Generation from final output object
visit_class(out, ("File", ), MutationManager().unset_generation)
if isinstance(out, string_types):
stdout.write(out)
else:
stdout.write(json_dumps(out, indent=4, ensure_ascii=False))
stdout.write("\n")
if hasattr(stdout, "flush"):
stdout.flush()
if status != "success":
_logger.warning(u"Final process status is %s", status)
return 1
_logger.info(u"Final process status is %s", status)
return 0
except (validate.ValidationException) as exc:
_logger.error(u"Input object failed validation:\n%s", Text(exc),
exc_info=args.debug)
return 1
except UnsupportedRequirement as exc:
_logger.error(
u"Workflow or tool uses unsupported feature:\n%s", Text(exc),
exc_info=args.debug)
return 33
except WorkflowException as exc:
_logger.error(
u"Workflow error%s:\n%s", try_again_msg, strip_dup_lineno(Text(exc)),
exc_info=args.debug)
return 1
except Exception as exc: # pylint: disable=broad-except
_logger.error(
u"Unhandled error%s:\n %s", try_again_msg, Text(exc), exc_info=args.debug)
return 1
finally:
if args and runtimeContext and runtimeContext.research_obj \
and workflowobj and loadingContext:
research_obj = runtimeContext.research_obj
if loadingContext.loader is not None:
research_obj.generate_snapshot(prov_deps(
workflowobj, loadingContext.loader, uri))
else:
_logger.warning("Unable to generate provenance snapshot "
" due to missing loadingContext.loader.")
if prov_log_handler is not None:
# Stop logging so we won't half-log adding ourself to RO
_logger.debug(u"[provenance] Closing provenance log file %s",
prov_log_handler)
_logger.removeHandler(prov_log_handler)
# Ensure last log lines are written out
prov_log_handler.flush()
# Underlying WritableBagFile will add the tagfile to the manifest
prov_log_handler.stream.close()
prov_log_handler.close()
research_obj.close(args.provenance)
_logger.removeHandler(stderr_handler)
_logger.addHandler(defaultStreamHandler)
| def main(argsl=None, # type: Optional[List[str]]
args=None, # type: Optional[argparse.Namespace]
job_order_object=None, # type: Optional[MutableMapping[Text, Any]]
stdin=sys.stdin, # type: IO[Any]
stdout=None, # type: Optional[Union[TextIO, StreamWriter]]
stderr=sys.stderr, # type: IO[Any]
versionfunc=versionstring, # type: Callable[[], Text]
logger_handler=None, # type: Optional[logging.Handler]
custom_schema_callback=None, # type: Optional[Callable[[], None]]
executor=None, # type: Optional[JobExecutor]
loadingContext=None, # type: Optional[LoadingContext]
runtimeContext=None, # type: Optional[RuntimeContext]
input_required=True # type: bool
): # type: (...) -> int
if not stdout: # force UTF-8 even if the console is configured differently
if (hasattr(sys.stdout, "encoding")
and sys.stdout.encoding != 'UTF-8'): # type: ignore
if PY3 and hasattr(sys.stdout, "detach"):
stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
else:
stdout = getwriter('utf-8')(sys.stdout) # type: ignore
else:
stdout = cast(TextIO, sys.stdout) # type: ignore
_logger.removeHandler(defaultStreamHandler)
stderr_handler = logger_handler
if stderr_handler is not None:
_logger.addHandler(stderr_handler)
else:
coloredlogs.install(logger=_logger, stream=stderr)
stderr_handler = _logger.handlers[-1]
workflowobj = None
prov_log_handler = None # type: Optional[logging.StreamHandler]
try:
if args is None:
if argsl is None:
argsl = sys.argv[1:]
addl = [] # type: List[Text]
if "CWLTOOL_OPTIONS" in os.environ:
addl = os.environ["CWLTOOL_OPTIONS"].split(" ")
args = arg_parser().parse_args(addl+argsl)
if args.record_container_id:
if not args.cidfile_dir:
args.cidfile_dir = os.getcwd()
del args.record_container_id
if runtimeContext is None:
runtimeContext = RuntimeContext(vars(args))
else:
runtimeContext = runtimeContext.copy()
# If on Windows platform, a default Docker Container is used if not
# explicitely provided by user
if onWindows() and not runtimeContext.default_container:
# This docker image is a minimal alpine image with bash installed
# (size 6 mb). source: https://github.com/frol/docker-alpine-bash
runtimeContext.default_container = windows_default_container_id
# If caller parsed its own arguments, it may not include every
# cwltool option, so fill in defaults to avoid crashing when
# dereferencing them in args.
for key, val in iteritems(get_default_args()):
if not hasattr(args, key):
setattr(args, key, val)
# Configure logging
rdflib_logger = logging.getLogger("rdflib.term")
rdflib_logger.addHandler(stderr_handler)
rdflib_logger.setLevel(logging.ERROR)
if args.quiet:
# Silence STDERR, not an eventual provenance log file
stderr_handler.setLevel(logging.WARN)
if runtimeContext.debug:
# Increase to debug for both stderr and provenance log file
_logger.setLevel(logging.DEBUG)
stderr_handler.setLevel(logging.DEBUG)
rdflib_logger.setLevel(logging.DEBUG)
fmtclass = coloredlogs.ColoredFormatter if args.enable_color else logging.Formatter
formatter = fmtclass("%(levelname)s %(message)s")
if args.timestamps:
formatter = fmtclass(
"[%(asctime)s] %(levelname)s %(message)s",
"%Y-%m-%d %H:%M:%S")
stderr_handler.setFormatter(formatter)
##
if args.version:
print(versionfunc())
return 0
_logger.info(versionfunc())
if args.print_supported_versions:
print("\n".join(supported_cwl_versions(args.enable_dev)))
return 0
if not args.workflow:
if os.path.isfile("CWLFile"):
setattr(args, "workflow", "CWLFile")
else:
_logger.error("CWL document required, no input file was provided")
arg_parser().print_help()
return 1
if args.relax_path_checks:
command_line_tool.ACCEPTLIST_RE = command_line_tool.ACCEPTLIST_EN_RELAXED_RE
if args.ga4gh_tool_registries:
ga4gh_tool_registries[:] = args.ga4gh_tool_registries
if not args.enable_ga4gh_tool_registry:
del ga4gh_tool_registries[:]
if custom_schema_callback is not None:
custom_schema_callback()
elif args.enable_ext:
res = pkg_resources.resource_stream(__name__, 'extensions.yml')
use_custom_schema("v1.0", "http://commonwl.org/cwltool", res.read())
res.close()
else:
use_standard_schema("v1.0")
if args.provenance:
if not args.compute_checksum:
_logger.error("--provenance incompatible with --no-compute-checksum")
return 1
ro = ResearchObject(
getdefault(runtimeContext.make_fs_access, StdFsAccess),
temp_prefix_ro=args.tmpdir_prefix, orcid=args.orcid,
full_name=args.cwl_full_name)
runtimeContext.research_obj = ro
log_file_io = ro.open_log_file_for_activity(ro.engine_uuid)
prov_log_handler = logging.StreamHandler(cast(IO[str], log_file_io))
class ProvLogFormatter(logging.Formatter):
"""Enforce ISO8601 with both T and Z."""
def __init__(self): # type: () -> None
super(ProvLogFormatter, self).__init__(
"[%(asctime)sZ] %(message)s")
def formatTime(self, record, datefmt=None):
# type: (logging.LogRecord, Optional[str]) -> str
record_time = time.gmtime(record.created)
formatted_time = time.strftime("%Y-%m-%dT%H:%M:%S", record_time)
with_msecs = "%s,%03d" % (formatted_time, record.msecs)
return with_msecs
prov_log_handler.setFormatter(ProvLogFormatter())
_logger.addHandler(prov_log_handler)
_logger.debug(u"[provenance] Logging to %s", log_file_io)
if argsl is not None:
# Log cwltool command line options to provenance file
_logger.info("[cwltool] %s %s", sys.argv[0], u" ".join(argsl))
_logger.debug(u"[cwltool] Arguments: %s", args)
if loadingContext is None:
loadingContext = LoadingContext(vars(args))
else:
loadingContext = loadingContext.copy()
loadingContext.loader = default_loader(loadingContext.fetcher_constructor)
loadingContext.research_obj = runtimeContext.research_obj
loadingContext.disable_js_validation = \
args.disable_js_validation or (not args.do_validate)
loadingContext.construct_tool_object = getdefault(
loadingContext.construct_tool_object, workflow.default_make_tool)
loadingContext.resolver = getdefault(loadingContext.resolver, tool_resolver)
if loadingContext.do_update is None:
loadingContext.do_update = not (args.pack or args.print_subgraph)
uri, tool_file_uri = resolve_tool_uri(
args.workflow, resolver=loadingContext.resolver,
fetcher_constructor=loadingContext.fetcher_constructor)
try_again_msg = "" if args.debug else ", try again with --debug for more information"
try:
job_order_object, input_basedir, jobloader = load_job_order(
args, stdin, loadingContext.fetcher_constructor,
loadingContext.overrides_list, tool_file_uri)
if args.overrides:
loadingContext.overrides_list.extend(load_overrides(
file_uri(os.path.abspath(args.overrides)), tool_file_uri))
loadingContext, workflowobj, uri = fetch_document(
uri, loadingContext)
if args.print_deps and loadingContext.loader:
printdeps(workflowobj, loadingContext.loader, stdout,
args.relative_deps, uri)
return 0
loadingContext, uri \
= resolve_and_validate_document(loadingContext, workflowobj, uri,
preprocess_only=(args.print_pre or args.pack),
skip_schemas=args.skip_schemas)
if loadingContext.loader is None:
raise Exception("Impossible code path.")
processobj, metadata = loadingContext.loader.resolve_ref(uri)
processobj = cast(CommentedMap, processobj)
if args.pack:
stdout.write(print_pack(loadingContext.loader, processobj, uri, metadata))
return 0
if args.provenance and runtimeContext.research_obj:
# Can't really be combined with args.pack at same time
runtimeContext.research_obj.packed_workflow(
print_pack(loadingContext.loader, processobj, uri, metadata))
if args.print_pre:
stdout.write(json_dumps(processobj, indent=4, sort_keys=True, separators=(',', ': ')))
return 0
tool = make_tool(uri, loadingContext)
if args.make_template:
def my_represent_none(self, data): # pylint: disable=unused-argument
# type: (Any, Any) -> Any
"""Force clean representation of 'null'."""
return self.represent_scalar(u'tag:yaml.org,2002:null', u'null')
yaml.RoundTripRepresenter.add_representer(type(None), my_represent_none)
yaml.round_trip_dump(
generate_input_template(tool), sys.stdout,
default_flow_style=False, indent=4, block_seq_indent=2)
return 0
if args.validate:
print("{} is valid CWL.".format(args.workflow))
return 0
if args.print_rdf:
stdout.write(printrdf(tool, loadingContext.loader.ctx, args.rdf_serializer))
return 0
if args.print_dot:
printdot(tool, loadingContext.loader.ctx, stdout)
return 0
if args.print_targets:
for f in ("outputs", "steps", "inputs"):
if tool.tool[f]:
_logger.info("%s%s targets:", f[0].upper(), f[1:-1])
stdout.write(" "+"\n ".join([shortname(t["id"]) for t in tool.tool[f]])+"\n")
return 0
if args.target:
if isinstance(tool, Workflow):
url = urllib.parse.urlparse(tool.tool["id"])
if url.fragment:
extracted = get_subgraph([tool.tool["id"] + "/" + r for r in args.target], tool)
else:
extracted = get_subgraph([loadingContext.loader.fetcher.urljoin(tool.tool["id"], "#" + r)
for r in args.target],
tool)
else:
_logger.error("Can only use --target on Workflows")
return 1
if isinstance(loadingContext.loader.idx, CommentedMap):
loadingContext.loader.idx[extracted["id"]] = extracted
tool = make_tool(extracted["id"],
loadingContext)
else:
raise Exception("Missing loadingContext.loader.idx!")
if args.print_subgraph:
if "name" in tool.tool:
del tool.tool["name"]
stdout.write(json_dumps(tool.tool, indent=4, sort_keys=True, separators=(',', ': ')))
return 0
except (validate.ValidationException) as exc:
_logger.error(u"Tool definition failed validation:\n%s", Text(exc),
exc_info=args.debug)
return 1
except (RuntimeError, WorkflowException) as exc:
_logger.error(u"Tool definition failed initialization:\n%s", Text(exc),
exc_info=args.debug)
return 1
except Exception as exc:
_logger.error(
u"I'm sorry, I couldn't load this CWL file%s.\nThe error was: %s",
try_again_msg,
Text(exc) if not args.debug else "",
exc_info=args.debug)
return 1
if isinstance(tool, int):
return tool
# If on MacOS platform, TMPDIR must be set to be under one of the
# shared volumes in Docker for Mac
# More info: https://dockstore.org/docs/faq
if sys.platform == "darwin":
default_mac_path = "/private/tmp/docker_tmp"
if runtimeContext.tmp_outdir_prefix == DEFAULT_TMP_PREFIX:
runtimeContext.tmp_outdir_prefix = default_mac_path
if runtimeContext.tmpdir_prefix == DEFAULT_TMP_PREFIX:
runtimeContext.tmpdir_prefix = default_mac_path
for dirprefix in ("tmpdir_prefix", "tmp_outdir_prefix", "cachedir"):
if getattr(runtimeContext, dirprefix) and getattr(runtimeContext, dirprefix) != DEFAULT_TMP_PREFIX:
sl = "/" if getattr(runtimeContext, dirprefix).endswith("/") or dirprefix == "cachedir" \
else ""
setattr(runtimeContext, dirprefix,
os.path.abspath(getattr(runtimeContext, dirprefix)) + sl)
if not os.path.exists(os.path.dirname(getattr(runtimeContext, dirprefix))):
try:
os.makedirs(os.path.dirname(getattr(runtimeContext, dirprefix)))
except Exception as e:
_logger.error("Failed to create directory: %s", Text(e))
return 1
if args.cachedir:
if args.move_outputs == "move":
runtimeContext.move_outputs = "copy"
runtimeContext.tmp_outdir_prefix = args.cachedir
runtimeContext.secret_store = getdefault(runtimeContext.secret_store, SecretStore())
runtimeContext.make_fs_access = getdefault(runtimeContext.make_fs_access, StdFsAccess)
try:
initialized_job_order_object = init_job_order(
job_order_object, args, tool, jobloader, stdout,
print_input_deps=args.print_input_deps,
relative_deps=args.relative_deps,
make_fs_access=runtimeContext.make_fs_access,
input_basedir=input_basedir,
secret_store=runtimeContext.secret_store,
input_required=input_required)
except SystemExit as err:
return err.code
if not executor:
if args.parallel:
temp_executor = MultithreadedJobExecutor()
runtimeContext.select_resources = temp_executor.select_resources
real_executor = temp_executor # type: JobExecutor
else:
real_executor = SingleJobExecutor()
else:
real_executor = executor
try:
runtimeContext.basedir = input_basedir
del args.workflow
del args.job_order
conf_file = getattr(args, "beta_dependency_resolvers_configuration", None) # Text
use_conda_dependencies = getattr(args, "beta_conda_dependencies", None) # Text
if conf_file or use_conda_dependencies:
runtimeContext.job_script_provider = DependenciesConfiguration(args)
else:
runtimeContext.find_default_container = functools.partial(
find_default_container,
default_container=runtimeContext.default_container,
use_biocontainers=args.beta_use_biocontainers)
(out, status) = real_executor(
tool, initialized_job_order_object, runtimeContext,
logger=_logger)
if out is not None:
if runtimeContext.research_obj is not None:
runtimeContext.research_obj.create_job(
out, None, True)
def remove_at_id(doc): # type: (MutableMapping[Text, Any]) -> None
for key in list(doc.keys()):
if key == '@id':
del doc[key]
else:
value = doc[key]
if isinstance(value, MutableMapping):
remove_at_id(value)
elif isinstance(value, MutableSequence):
for entry in value:
if isinstance(entry, MutableMapping):
remove_at_id(entry)
remove_at_id(out)
visit_class(out, ("File",), functools.partial(
add_sizes, runtimeContext.make_fs_access('')))
def loc_to_path(obj): # type: (Dict[Text, Any]) -> None
for field in ("path", "nameext", "nameroot", "dirname"):
if field in obj:
del obj[field]
if obj["location"].startswith("file://"):
obj["path"] = uri_file_path(obj["location"])
visit_class(out, ("File", "Directory"), loc_to_path)
# Unsetting the Generation from final output object
visit_class(out, ("File", ), MutationManager().unset_generation)
if isinstance(out, string_types):
stdout.write(out)
else:
stdout.write(json_dumps(out, indent=4, ensure_ascii=False))
stdout.write("\n")
if hasattr(stdout, "flush"):
stdout.flush()
if status != "success":
_logger.warning(u"Final process status is %s", status)
return 1
_logger.info(u"Final process status is %s", status)
return 0
except (validate.ValidationException) as exc:
_logger.error(u"Input object failed validation:\n%s", Text(exc),
exc_info=args.debug)
return 1
except UnsupportedRequirement as exc:
_logger.error(
u"Workflow or tool uses unsupported feature:\n%s", Text(exc),
exc_info=args.debug)
return 33
except WorkflowException as exc:
_logger.error(
u"Workflow error%s:\n%s", try_again_msg, strip_dup_lineno(Text(exc)),
exc_info=args.debug)
return 1
except Exception as exc: # pylint: disable=broad-except
_logger.error(
u"Unhandled error%s:\n %s", try_again_msg, Text(exc), exc_info=args.debug)
return 1
finally:
if args and runtimeContext and runtimeContext.research_obj \
and workflowobj and loadingContext:
research_obj = runtimeContext.research_obj
if loadingContext.loader is not None:
research_obj.generate_snapshot(prov_deps(
workflowobj, loadingContext.loader, uri))
else:
_logger.warning("Unable to generate provenance snapshot "
" due to missing loadingContext.loader.")
if prov_log_handler is not None:
# Stop logging so we won't half-log adding ourself to RO
_logger.debug(u"[provenance] Closing provenance log file %s",
prov_log_handler)
_logger.removeHandler(prov_log_handler)
# Ensure last log lines are written out
prov_log_handler.flush()
# Underlying WritableBagFile will add the tagfile to the manifest
prov_log_handler.stream.close()
prov_log_handler.close()
research_obj.close(args.provenance)
_logger.removeHandler(stderr_handler)
_logger.addHandler(defaultStreamHandler)
|
29,509 | def fix_stream_name(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Stream = apps.get_model("zerver", "Stream")
streams = Stream.objects.all()
for stream in streams:
fixed_stream_name = "".join(
character for character in stream.name if character_is_printable(character)
)
if fixed_stream_name == stream.name:
continue
if fixed_stream_name == "":
stream.name = "Unknown stream {}".format(stream.id)
stream.save()
continue
similar_stream_name_count = Stream.objects.filter(name=fixed_stream_name).count()
if similar_stream_name_count > 1:
stream.name = fixed_stream_name + "(#{})".format(stream.id)
stream.save()
| def fix_stream_name(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
Stream = apps.get_model("zerver", "Stream")
streams = Stream.objects.all()
for stream in streams:
fixed_stream_name = "".join(
character for character in stream.name if character_is_printable(character)
)
if fixed_stream_name == stream.name:
continue
if fixed_stream_name == "":
stream.name = "Unknown stream {}".format(stream.id)
stream.save()
continue
similar_stream_name_count = Stream.objects.filter(name=fixed_stream_name).count()
if similar_stream_name_count > 1:
stream.name = fixed_stream_name + " (#{})".format(stream.id)
stream.save()
|
11,733 | def get_suitable_output_file_name_for_current_output_format(output_file, output_format):
""" renames the name given for the output_file if the results for current_output format are returned compressed by default
and the name selected by the user does not contain the correct extension.
output_file : str, optional, default None
file name selected by the user
output_format : str, optional, default 'votable'
results format. Available formats in TAP are: 'votable', 'votable_plain',
'fits', 'csv', 'ecsv' and 'json'. Default is 'votable'.
Returned results for formats 'votable' 'ecsv' and 'fits' are compressed
gzip files.
Returns
-------
A string with the new name for the file.
"""
compressed_extension = ".gz"
format_with_results_compressed = ['votable', 'fits', 'ecsv']
output_file_with_extension = output_file
if output_file is not None:
if output_format in format_with_results_compressed:
# In this case we will have to take also into account the .fits format
if not output_file.endswith(compressed_extension):
warnings.warn('WARNING!!! By default, results in "votable" and "fits" format are returned in '
f'compressed format therefore your file {output_file} '
f'will be renamed to {output_file}.gz')
if output_format == 'votable':
if output_file.endswith('.vot'):
output_file_with_extension = output_file + '.gz'
else:
output_file_with_extension = output_file + '.vot.gz'
elif output_format == 'fits':
if output_file.endswith('.fits'):
output_file_with_extension = output_file + '.gz'
else:
output_file_with_extension = output_file + '.fits.gz'
elif output_format == 'ecsv':
if output_file.endswith('.ecsv'):
output_file_with_extension = output_file + '.gz'
else:
output_file_with_extension = output_file + '.ecsv.gz'
# the output type is not compressed by default by the TAP SERVER but the users gives a .gz extension
elif output_file.endswith(compressed_extension):
output_file_renamed = output_file.removesuffix('.gz')
warnings.warn(f'WARNING!!! The output format selected is not compatible with compression. {output_file}'
f' will be renamed to {output_file}')
return output_file_with_extension
| def get_suitable_output_file_name_for_current_output_format(output_file, output_format):
""" renames the name given for the output_file if the results for current_output format are returned compressed by default
and the name selected by the user does not contain the correct extension.
output_file : str, optional, default None
file name selected by the user
output_format : str, optional, default 'votable'
results format. Available formats in TAP are: 'votable', 'votable_plain',
'fits', 'csv', 'ecsv' and 'json'. Default is 'votable'.
Returned results for formats 'votable' 'ecsv' and 'fits' are compressed
gzip files.
Returns
-------
A string with the new name for the file.
"""
compressed_extension = ".gz"
format_with_results_compressed = ['votable', 'fits', 'ecsv']
output_file_with_extension = output_file
if output_file is not None:
if output_format in format_with_results_compressed:
# In this case we will have to take also into account the .fits format
if not output_file.endswith(compressed_extension):
warnings.warn('WARNING!!! By default, results in "votable" and "fits" format are returned in '
f'compressed format therefore your file {output_file} '
f'will be renamed to {output_file}.gz')
if output_format == 'votable':
if output_file.endswith('.vot'):
output_file_with_extension = output_file + '.gz'
else:
output_file_with_extension = output_file + '.vot.gz'
elif output_format == 'fits':
if output_file.endswith('.fits'):
output_file_with_extension = output_file + '.gz'
else:
output_file_with_extension = output_file + '.fits.gz'
elif output_format == 'ecsv':
if output_file.endswith('.ecsv'):
output_file_with_extension = output_file + '.gz'
else:
output_file_with_extension = output_file + '.ecsv.gz'
# the output type is not compressed by default by the TAP server, but the user gives a .gz extension
elif output_file.endswith(compressed_extension):
output_file_renamed = output_file.removesuffix('.gz')
warnings.warn(f'WARNING!!! The output format selected is not compatible with compression. {output_file}'
f' will be renamed to {output_file_renamed}')
return output_file_with_extension
|
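A quick usage sketch for the renaming helper above, assuming the function (and the warnings module it relies on) is importable in the current scope; the file names are made up:
import warnings
with warnings.catch_warnings():
    warnings.simplefilter("ignore")  # silence the renaming warning for this demo
    print(get_suitable_output_file_name_for_current_output_format("results.vot", "votable"))  # results.vot.gz
    print(get_suitable_output_file_name_for_current_output_format("results.csv", "csv"))      # results.csv, unchanged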
57,715 | def get_reports_command(client: Client, args: Dict[str, Any]) -> Tuple[str, dict, Any]:
report_id_list = argToList(args.get('report_ids', []))
extended = args.get('extended_report', "False")
screenshot = args.get('get_screenshot', "false")
artifact = args.get('get_artifact', "")
if len(report_id_list) == 0:
raise ValueError('report_id(s) not specified')
report_list: List[Dict[str, Any]] = []
for report_id in report_id_list:
report = client.report_status(report_id=report_id, extended=extended)
if screenshot.lower() == "true":
screenshot = client.report_artifact(report_id=report_id, artifact_type="screenshot")
stored_img = fileResult('screenshot.gif', screenshot)
demisto.results({'Type': entryTypes['image'], 'ContentsFormat': formats['text'],
'File': stored_img['File'], 'FileID': stored_img['FileID'], 'Contents': ''})
if artifact != "":
artifacts = client.report_artifact(report_id=report_id, artifact_type=artifact)
stored_artifacts = fileResult('artifacts.zip', artifacts)
demisto.results({'Type': entryTypes['file'], 'ContentsFormat': formats['text'],
'File': stored_artifacts['File'], 'FileID': stored_artifacts['FileID'], 'Contents': ''})
report_list.append(report)
readable_output = tableToMarkdown('Scan status', report_list)
outputs = {
'FireEyeDoD.Scan(val.report_id == obj.report_id)': report_list
}
return (
readable_output,
outputs,
report_list
)
| def get_reports_command(client: Client, args: Dict[str, Any]) -> Tuple[str, dict, Any]:
report_id_list = argToList(args.get('report_ids', []))
extended = args.get('extended_report', "False")
screenshot = args.get('get_screenshot', "false")
artifact = args.get('get_artifact', "")
if len(report_id_list) == 0:
raise ValueError('report_id(s) not specified')
report_list: List[Dict[str, Any]] = []
for report_id in report_id_list:
report = client.report_status(report_id=report_id, extended=extended)
if screenshot.lower() == "true":
screenshot = client.report_artifact(report_id=report_id, artifact_type="screenshot")
stored_artifacts = fileResult('artifacts.zip', artifacts, entryTypes['image'])
demisto.results(stored_artifacts)
demisto.results({'Type': entryTypes['image'], 'ContentsFormat': formats['text'],
'File': stored_img['File'], 'FileID': stored_img['FileID'], 'Contents': ''})
if artifact != "":
artifacts = client.report_artifact(report_id=report_id, artifact_type=artifact)
stored_artifacts = fileResult('artifacts.zip', artifacts)
demisto.results({'Type': entryTypes['file'], 'ContentsFormat': formats['text'],
'File': stored_artifacts['File'], 'FileID': stored_artifacts['FileID'], 'Contents': ''})
report_list.append(report)
readable_output = tableToMarkdown('Scan status', report_list)
outputs = {
'FireEyeDoD.Scan(val.report_id == obj.report_id)': report_list
}
return (
readable_output,
outputs,
report_list
)
|
31,482 | def reply_email_command(client: MsGraphClient, args):
"""
Reply to an email from user's mailbox, the sent message will appear in Sent Items folder
"""
email_to = argToList(args.get('to'))
email_from = args.get('from', client._mailbox_to_fetch)
message_id = args.get('inReplyTo')
email_body = args.get('body', "")
email_subject = 'Re: ' + args.get('subject', "")
attach_ids = argToList(args.get('attachIDs'))
email_cc = argToList(args.get('cc'))
email_bcc = argToList(args.get('bcc'))
html_body = args.get('htmlBody')
attach_names = argToList(args.get('attachNames'))
attach_cids = argToList(args.get('attachCIDs'))
message_body = html_body if html_body else email_body
suffix_endpoint = f'/users/{email_from}/messages/{message_id}/reply'
reply = client.build_message_to_reply(email_to, email_cc, email_bcc, email_subject, message_body, attach_ids,
attach_names, attach_cids)
client.ms_client.http_request('POST', suffix_endpoint, json_data={'message': reply}, resp_type="text")
return prepare_outputs_for_reply_mail_command(reply, email_to, message_id)
| def reply_email_command(client: MsGraphClient, args):
"""
Reply to an email from user's mailbox, the sent message will appear in Sent Items folder
"""
email_to = argToList(args.get('to'))
email_from = args.get('from', client._mailbox_to_fetch)
message_id = args.get('inReplyTo')
email_body = args.get('body', "")
email_subject = 'Re: ' + args.get('subject', "")
attach_ids = argToList(args.get('attachIDs'))
email_cc = argToList(args.get('cc'))
email_bcc = argToList(args.get('bcc'))
html_body = args.get('htmlBody')
attach_names = argToList(args.get('attachNames'))
attach_cids = argToList(args.get('attachCIDs'))
message_body = html_body or email_body
suffix_endpoint = f'/users/{email_from}/messages/{message_id}/reply'
reply = client.build_message_to_reply(email_to, email_cc, email_bcc, email_subject, message_body, attach_ids,
attach_names, attach_cids)
client.ms_client.http_request('POST', suffix_endpoint, json_data={'message': reply}, resp_type="text")
return prepare_outputs_for_reply_mail_command(reply, email_to, message_id)
|
33,637 | def start_reaper():
"""Start the reaper process.
This is a lightweight process that simply
waits for its parent process to die and then terminates its own
process group. This allows us to ensure that ray processes are always
terminated properly so long as that process itself isn't SIGKILLed.
Returns:
ProcessInfo for the process that was started.
"""
# Make ourselves a process group leader so that the reaper can clean
# up other ray processes without killing the process group of the
# process that started us.
try:
os.setpgrp()
except OSError as e:
if e.errno == errno.EPERM and os.getpgrp() == os.getpid():
# Nothing to do; we're already a session leader
pass
else:
logger.warning("setpgrp failed, processes may not be "
"cleaned up properly: {}.".format(e))
# Don't start the reaper in this case as it could result in killing
# other user processes.
return None
reaper_filepath = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "ray_process_reaper.py")
command = [sys.executable, "-u", reaper_filepath]
process_info = start_ray_process(
command, ray_constants.PROCESS_TYPE_REAPER, pipe_stdin=True)
return process_info
| def start_reaper():
"""Start the reaper process.
This is a lightweight process that simply
waits for its parent process to die and then terminates its own
process group. This allows us to ensure that ray processes are always
terminated properly so long as that process itself isn't SIGKILLed.
Returns:
ProcessInfo for the process that was started.
"""
# Make ourselves a process group leader so that the reaper can clean
# up other ray processes without killing the process group of the
# process that started us.
try:
os.setpgrp()
except OSError as e:
if e.errno == errno.EPERM and os.getpgrp() == os.getpid():
# Nothing to do; we're already a session leader.
pass
else:
logger.warning("setpgrp failed, processes may not be "
"cleaned up properly: {}.".format(e))
# Don't start the reaper in this case as it could result in killing
# other user processes.
return None
reaper_filepath = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "ray_process_reaper.py")
command = [sys.executable, "-u", reaper_filepath]
process_info = start_ray_process(
command, ray_constants.PROCESS_TYPE_REAPER, pipe_stdin=True)
return process_info
|
31,918 | def list_attached_group_policies(args, aws_client):
client = aws_client.aws_session(
service=SERVICE,
role_arn=args.get('roleArn'),
role_session_name=args.get('roleSessionName'),
role_session_duration=args.get('roleSessionDuration'),
)
group_name = args.get('groupName', "")
marker = args.get('marker', None)
limit, is_manual, page_size = get_limit(args)
kwargs = {
'GroupName': group_name,
'MaxItems': limit
}
if marker:
kwargs.update({'Marker': marker})
response = client.list_attached_group_policies(**kwargs)
data = response.get('AttachedPolicies', [])
marker = response.get('Marker', None)
if is_manual and page_size and len(data) > page_size:
data = data[-1 * args.get('page_size'):]
policy_data = []
for policy in data:
policy_data.append({
'GroupName': group_name,
'PolicyArn': policy.get('PolicyArn', ''),
'PolicyName': policy.get('PolicyName', '')
})
ec = {'AWS.IAM.AttachedGroupPolicies(val.PolicyArn && val.GroupName && val.PolicyArn === obj.PolicyArn && '
'val.GroupName === obj.GroupName)': policy_data,
'AWS.IAM.Groups(val.GroupName === \'{}\').AttachedPoliciesMarker'.format(group_name): marker}
human_readable = tableToMarkdown('AWS IAM Attached Policies for group {}'.format(group_name),
headers=['PolicyName', 'PolicyArn'],
headerTransform=pascalToSpace,
t=data)
return_outputs(human_readable, ec)
| def list_attached_group_policies(args, aws_client):
client = aws_client.aws_session(
service=SERVICE,
role_arn=args.get('roleArn'),
role_session_name=args.get('roleSessionName'),
role_session_duration=args.get('roleSessionDuration'),
)
group_name = args.get('groupName', "")
marker = args.get('marker', None)
limit, is_manual, page_size = get_limit(args)
kwargs = {
'GroupName': group_name,
'MaxItems': limit
}
if marker:
kwargs.update({'Marker': marker})
response = client.list_attached_group_policies(**kwargs)
data = response.get('AttachedPolicies', [])
marker = response.get('Marker')
if is_manual and page_size and len(data) > page_size:
data = data[-1 * args.get('page_size'):]
policy_data = []
for policy in data:
policy_data.append({
'GroupName': group_name,
'PolicyArn': policy.get('PolicyArn', ''),
'PolicyName': policy.get('PolicyName', '')
})
ec = {'AWS.IAM.AttachedGroupPolicies(val.PolicyArn && val.GroupName && val.PolicyArn === obj.PolicyArn && '
'val.GroupName === obj.GroupName)': policy_data,
'AWS.IAM.Groups(val.GroupName === \'{}\').AttachedPoliciesMarker'.format(group_name): marker}
human_readable = tableToMarkdown('AWS IAM Attached Policies for group {}'.format(group_name),
headers=['PolicyName', 'PolicyArn'],
headerTransform=pascalToSpace,
t=data)
return_outputs(human_readable, ec)
|
31,037 | def parse_data_pattern_rule(report_json, verdict_field, results_field):
"""Parse data pattern matches for a given rule"""
if report_json.get(verdict_field) != "MATCHED":
return []
data_patterns = []
for dp in report_json.get("scanContentRawReport", {}).get(results_field, []):
if (dp.get("state") == "EVALUATED") and (dp.get("unique_detection_frequency", 0) >= 1):
data_patterns.append({
'DataPatternName': dp["name"],
'LowConfidenceFrequency': dp["low_confidence_frequency"],
'HighConfidenceFrequency': dp["high_confidence_frequency"],
'MediumConfidenceFrequency': dp["medium_confidence_frequency"],
'Detections': dp.get("detections")
})
return data_patterns
| def parse_data_pattern_rule(report_json, verdict_field, results_field):
"""Parse data pattern matches for a given rule"""
if report_json.get(verdict_field) != "MATCHED":
return []
data_patterns = []
for dp in report_json.get("scanContentRawReport", {}).get(results_field, []):
if (dp.get("state") == "EVALUATED") and (dp.get("unique_detection_frequency", 0) >= 1):
data_patterns.append({
'DataPatternName': dp["name"],
'LowConfidenceFrequency': dp["low_confidence_frequency"],
'HighConfidenceFrequency': dp.get('high_confidence_frequency'),
'MediumConfidenceFrequency': dp["medium_confidence_frequency"],
'Detections': dp.get("detections")
})
return data_patterns
|
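A minimal call of the rule parser above. The report below is a stripped-down assumption carrying only the keys the function reads; the verdict/results field names are arbitrary because they are passed in as parameters:
sample_report = {
    "data_pattern_verdict": "MATCHED",
    "scanContentRawReport": {
        "data_pattern_results": [
            {"state": "EVALUATED", "unique_detection_frequency": 2, "name": "SSN",
             "low_confidence_frequency": 0, "medium_confidence_frequency": 1,
             "high_confidence_frequency": 1, "detections": ["xxx-xx-1234"]},
            {"state": "SKIPPED", "unique_detection_frequency": 0, "name": "CCN"},
        ],
    },
}
print(parse_data_pattern_rule(sample_report, "data_pattern_verdict", "data_pattern_results"))
# one entry for "SSN"; the "CCN" item is dropped by the state/frequency checks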
23,883 | def test_api_molecule_drivers_as_attributes():
results = api.drivers()
assert hasattr(results, 'delegated')
assert isinstance(results.delegated, api.Driver)
| def test_api_drivers_as_attributes():
results = api.drivers()
assert hasattr(results, 'delegated')
assert isinstance(results.delegated, api.Driver)
|
30,081 | def geohash_to_polygon(geo):
"""
:param geo: String that represents the geohash.
:return: Returns a Shapely's Polygon instance that represents the geohash.
"""
import geohash
lat_centroid, lng_centroid, lat_offset, lng_offset = geohash.decode_exactly(geo)
corner_1 = (lat_centroid - lat_offset, lng_centroid - lng_offset)[::-1]
corner_2 = (lat_centroid - lat_offset, lng_centroid + lng_offset)[::-1]
corner_3 = (lat_centroid + lat_offset, lng_centroid + lng_offset)[::-1]
corner_4 = (lat_centroid + lat_offset, lng_centroid - lng_offset)[::-1]
return sgeom.Polygon([corner_1, corner_2, corner_3, corner_4, corner_1])
| def geohash_to_polygon(geo):
"""
:param geo: String that represents the geohash.
:return: Returns a Shapely Polygon instance that represents the geohash.
"""
import geohash
lat_centroid, lng_centroid, lat_offset, lng_offset = geohash.decode_exactly(geo)
corner_1 = (lat_centroid - lat_offset, lng_centroid - lng_offset)[::-1]
corner_2 = (lat_centroid - lat_offset, lng_centroid + lng_offset)[::-1]
corner_3 = (lat_centroid + lat_offset, lng_centroid + lng_offset)[::-1]
corner_4 = (lat_centroid + lat_offset, lng_centroid - lng_offset)[::-1]
return sgeom.Polygon([corner_1, corner_2, corner_3, corner_4, corner_1])
|
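A sketch of the corner construction above using made-up numbers, so only shapely is needed; the centroid and half-width offsets stand in for what geohash.decode_exactly would return:
from shapely.geometry import Polygon  # the sgeom alias in the snippet above
lat_c, lng_c, lat_off, lng_off = 37.77, -122.42, 0.022, 0.022  # illustrative values only
corners = [
    (lat_c - lat_off, lng_c - lng_off)[::-1],  # [::-1] flips (lat, lng) into the (x=lng, y=lat) order Shapely expects
    (lat_c - lat_off, lng_c + lng_off)[::-1],
    (lat_c + lat_off, lng_c + lng_off)[::-1],
    (lat_c + lat_off, lng_c - lng_off)[::-1],
]
cell = Polygon(corners + corners[:1])
print(cell.bounds)  # (minx, miny, maxx, maxy) ~ (-122.442, 37.748, -122.398, 37.792)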
59,848 | def _parse_byte_size(s):
"""
Convert a string size specification to integer byte size.
This function should be insensitive to case and whitespace.
Examples
--------
>>> parse_byte_size("11B")
11
>>> parse_byte_size("10 kB")
10000
>>> parse_byte_size("67.4GB")
67400000000
"""
try:
s = s.upper()
except AttributeError:
# input is not a string (likely a np.nan)
return pd.NA
val = float(re.search(num_exp, s).group())
unit = re.search(byte_unit_exp, s).group()
exponent = {"B": 0, "KB": 3, "MB": 6, "GB": 9}[unit]
return int(val * 10 ** exponent)
| def _parse_byte_size(s):
"""
Convert a string size specification to integer byte size.
This function should be insensitive to case and whitespace.
Examples
--------
>>> parse_byte_size("11B")
11
>>> parse_byte_size("10 kB")
10000
>>> parse_byte_size("67.4GB")
67400000000
"""
try:
s = s.upper()
except AttributeError:
# input is not a string (likely a np.nan)
return pd.NA
val = float(re.search(num_exp, s).group())
unit = re.search(byte_unit_exp, s).group()
exponent = {"B": 0, "KB": 10, "MB": 20, "GB": 30}[unit]
return int(val * 2 ** exponent)
|
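The two variants above disagree on what a kilobyte means (decimal 10**3 versus binary 2**10), so the same string parses to different byte counts. A self-contained sketch with stand-in definitions for the num_exp / byte_unit_exp patterns the snippet expects at module level:
import re
num_exp = re.compile(r"[0-9.]+")        # assumed pattern, for illustration only
byte_unit_exp = re.compile(r"[KMG]?B")  # assumed pattern, for illustration only
def parse_byte_size(s, decimal=True):
    s = s.upper()
    val = float(num_exp.search(s).group())
    unit = byte_unit_exp.search(s).group()
    table = {"B": 0, "KB": 3, "MB": 6, "GB": 9} if decimal else {"B": 0, "KB": 10, "MB": 20, "GB": 30}
    return int(val * (10 if decimal else 2) ** table[unit])
print(parse_byte_size("10 kB"), parse_byte_size("10 kB", decimal=False))  # 10000 10240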
22,082 | def _ntlm_authenticate_info(request):
"""
Extract host information in an NTLM_AUTH message
"""
if (len(request) < 52):
LOGGER.warning("NTLM message is too short (%d) but should be at least "
"52 char long", len(request))
return None
value = []
offset, ln = struct.unpack('IH', request[32:36] + request[28:30])
if ln > 0:
value.append("domain:" + \
encode_b64(_extract_substr(request, offset, ln)).decode())
has_version = False
# Flags are not present in a NTLM_AUTH message when the data block starts
# before index 64
if offset >= 64 and len(request) > 64:
flags, = struct.unpack('I', request[60:64])
has_version = flags & flag_version
off, ln = struct.unpack('IH', request[40:44] + request[36:38])
if ln > 0:
value.append("user-name:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
off, ln = struct.unpack('IH', request[48:52] + request[44:46])
if ln > 0:
value.append("workstation:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
# Get OS Version if the `Negotiate Version` is set
# (NTLM_AUTH messages with a data block starting before index 72 do not
# contain information on the version)
if offset >= 72 and len(request) > 72 and has_version:
maj, minor, bld, ntlm_ver = struct.unpack('BBHB', request[64:65] +
request[65:66] +
request[66:68] +
request[71:72])
version = "{}.{}.{}".format(maj, minor, bld).encode()
value.append("ntlm-os:{}".format(encode_b64(version).decode()))
value.append("ntlm-version:{}".format(ntlm_ver))
return 'NTLM ' + ','.join(value)
| def _ntlm_authenticate_info(request):
"""
Extract host information in an NTLM_AUTH message
"""
if (len(request) < 52):
LOGGER.warning("NTLM message is too short (%d) but should be at least "
"52 char long", len(request))
return None
value = []
offset, ln = struct.unpack('IH', request[32:36] + request[28:30])
if ln > 0:
value.append("domain:" + \
encode_b64(_extract_substr(request, offset, ln)).decode())
has_version = False
# Flags are not present in an NTLM_AUTH message when the data block starts
# before index 64
if offset >= 64 and len(request) > 64:
flags, = struct.unpack('I', request[60:64])
has_version = flags & flag_version
off, ln = struct.unpack('IH', request[40:44] + request[36:38])
if ln > 0:
value.append("user-name:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
off, ln = struct.unpack('IH', request[48:52] + request[44:46])
if ln > 0:
value.append("workstation:" + \
encode_b64(_extract_substr(request, off, ln)).decode())
# Get OS Version if the `Negotiate Version` is set
# (NTLM_AUTH messages with a data block starting before index 72 do not
# contain information on the version)
if offset >= 72 and len(request) > 72 and has_version:
maj, minor, bld, ntlm_ver = struct.unpack('BBHB', request[64:65] +
request[65:66] +
request[66:68] +
request[71:72])
version = "{}.{}.{}".format(maj, minor, bld).encode()
value.append("ntlm-os:{}".format(encode_b64(version).decode()))
value.append("ntlm-version:{}".format(ntlm_ver))
return 'NTLM ' + ','.join(value)
|
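The extraction above is driven by (offset, length) pairs that point into the message body. A toy illustration of that bookkeeping with struct and base64; the buffer is hand-built and is not a real NTLM message:
import base64
import struct
# 28 padding bytes, a 2-byte length (6), 2 spare bytes, a 4-byte offset (40),
# 4 more padding bytes, then the payload itself starting at offset 40.
buf = (b"\x00" * 28 + struct.pack("H", 6) + b"\x00\x00"
       + struct.pack("I", 40) + b"\x00" * 4 + b"DOMAIN")
offset, length = struct.unpack("IH", buf[32:36] + buf[28:30])
print(offset, length)                                 # 40 6
print(base64.b64encode(buf[offset:offset + length]))  # b'RE9NQUlO', i.e. "DOMAIN"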
3,931 | def is_path(G, path):
"""Returns whether or not the specified path exists.
For it to return True, every node on the path must exist and
each consecutive pair must be connected via one or more edges.
Parameters
----------
G : graph
A NetworkX graph.
path : list
A list of node which defines the path to traverse
Returns
-------
bool
True if `path` is a valid path in `G`
"""
for node, nbr in nx.utils.pairwise(path):
if (node not in G) or (nbr not in G[node]):
return False
return True
| def is_path(G, path):
"""Returns whether or not the specified path exists.
For it to return True, every node on the path must exist and
each consecutive pair must be connected via one or more edges.
Parameters
----------
G : graph
A NetworkX graph.
path : list
A list of nodes which defines the path to traverse
Returns
-------
bool
True if `path` is a valid path in `G`
"""
for node, nbr in nx.utils.pairwise(path):
if (node not in G) or (nbr not in G[node]):
return False
return True
|
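The check above leans on nx.utils.pairwise plus adjacency lookups; the same test spelled out on a small path graph (networkx assumed installed):
import networkx as nx
G = nx.path_graph(4)  # edges 0-1, 1-2, 2-3
print(list(nx.utils.pairwise([0, 1, 2, 3])))                                  # [(0, 1), (1, 2), (2, 3)]
print(all(u in G and v in G[u] for u, v in nx.utils.pairwise([0, 1, 2, 3])))  # True
print(all(u in G and v in G[u] for u, v in nx.utils.pairwise([0, 2])))        # False: 0 and 2 are not adjacent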
173 | def publish(topic, msg, force=False):
"""
Send a message via AMQP or fedmsg.
This use used to send a message to fedmsg if "fedmsg_enabled" is true in the
application configuration, otherwise it will use AMQP.
Args:
topic (str): The topic suffix. The "bodhi" prefix is applied, along with
the "topic_prefix" and "environment" settings from fedmsg.
msg (dict): The message body to send.
force (bool): If False (the default), the message is only sent after the
currently active database transaction successfully commits. If true,
the message is sent immediately.
"""
if bodhi.server.config.config.get('fedmsg_enabled'):
_fedmsg_publish(topic, msg, force)
return
# Dirty, nasty hack that I feel shame for: fedmsg modifies messages quietly
# if they have objects with __json__ methods on them. For now, copy that
# behavior. In the future, callers should pass fedora_messaging.api.Message
# sub-classes or this whole API should go away.
body = fedmsg.encoding.loads(fedmsg.encoding.dumps(msg))
# Dirty, nasty hack #2: fedmsg mangles the topic in various ways. Duplicate
# that behavior here for now. In the future callers should pass proper topics.
fedmsg_conf = fedmsg.config.load_config()
topic = "{prefix}.{env}.bodhi.{t}".format(
prefix=fedmsg_conf["topic_prefix"], env=fedmsg_conf["environment"], t=topic)
message = api.Message(topic=topic, body=body)
if force:
api.publish(message)
return
session = Session()
if 'messages' not in session.info:
session.info['messages'] = []
session.info['messages'].append(message)
_log.debug('Queuing message %r for delivery on session commit', message.id)
| def publish(topic, msg, force=False):
"""
Send a message via AMQP or fedmsg.
This is used to send a message to fedmsg if "fedmsg_enabled" is true in the
application configuration, otherwise it will use AMQP.
Args:
topic (str): The topic suffix. The "bodhi" prefix is applied, along with
the "topic_prefix" and "environment" settings from fedmsg.
msg (dict): The message body to send.
force (bool): If False (the default), the message is only sent after the
currently active database transaction successfully commits. If true,
the message is sent immediately.
"""
if bodhi.server.config.config.get('fedmsg_enabled'):
_fedmsg_publish(topic, msg, force)
return
# Dirty, nasty hack that I feel shame for: fedmsg modifies messages quietly
# if they have objects with __json__ methods on them. For now, copy that
# behavior. In the future, callers should pass fedora_messaging.api.Message
# sub-classes or this whole API should go away.
body = fedmsg.encoding.loads(fedmsg.encoding.dumps(msg))
# Dirty, nasty hack #2: fedmsg mangles the topic in various ways. Duplicate
# that behavior here for now. In the future callers should pass proper topics.
fedmsg_conf = fedmsg.config.load_config()
topic = "{prefix}.{env}.bodhi.{t}".format(
prefix=fedmsg_conf["topic_prefix"], env=fedmsg_conf["environment"], t=topic)
message = api.Message(topic=topic, body=body)
if force:
api.publish(message)
return
session = Session()
if 'messages' not in session.info:
session.info['messages'] = []
session.info['messages'].append(message)
_log.debug('Queuing message %r for delivery on session commit', message.id)
|
55,818 | def prepare_training_ncf(df_train, df_test):
df_train.sort_values(["userID"], axis=0, ascending=[True], inplace=True)
df_test.sort_values(["userID"], axis=0, ascending=[True], inplace=True)
train = "./df_train.csv"
test = "./df_test.csv"
df_train.to_csv(train, index=False)
df_test.to_csv(test, index=False)
return NCFDataset(
train_file=train,
col_user=DEFAULT_USER_COL,
col_item=DEFAULT_ITEM_COL,
col_rating=DEFAULT_RATING_COL,
seed=SEED,
)
| def prepare_training_ncf(df_train, df_test):
df_train.sort_values(["userID"], axis=0, ascending=[True], inplace=True)
df_test.sort_values(["userID"], axis=0, ascending=[True], inplace=True)
tmp_dir = TemporaryDirectory()
train = os.path.join(tmp_dir.name, "df_train.csv")
test = os.path.join(tmp_dir.name, "df_test.csv")
df_train.to_csv(train, index=False)
df_test.to_csv(test, index=False)
return NCFDataset(
train_file=train,
col_user=DEFAULT_USER_COL,
col_item=DEFAULT_ITEM_COL,
col_rating=DEFAULT_RATING_COL,
seed=SEED,
)
|
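The temporary-directory variant above ties the CSV files' lifetime to the TemporaryDirectory object: once that object is garbage-collected, the directory and everything in it are removed. A small sketch of that behaviour:
import os
from tempfile import TemporaryDirectory
def make_csv_path():
    tmp_dir = TemporaryDirectory()
    path = os.path.join(tmp_dir.name, "df_train.csv")
    open(path, "w").close()   # the file exists while tmp_dir is alive
    return path               # tmp_dir goes out of scope here and gets finalized
p = make_csv_path()
print(os.path.exists(p))      # typically False on CPython: the directory was already cleaned up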
41,710 | def calculate_exports(line: list[str], export_all: bool) -> Iterable[str]:
"""
Collect up all the object files and archive files being linked and list out
symbols in them that are marked as public. If ``export_all`` is ``True``,
then return all public symbols. If not, return only the public symbols that
begin with `PyInit`.
"""
objects = [arg for arg in line if arg.endswith(".a") or arg.endswith(".o")]
exports = None
# Using emnm is simpler but it cannot handle bitcode. If we're only
# exporting the PyInit symbols, save effort by using nm.
if export_all:
exports = calculate_object_exports_readobj(objects)
if exports is None:
# Either export_all is false or we are linking at least one bitcode
# object. Fall back to a more conservative estimate of the symbols
# exported. This can export things with `__visibility__("hidden")`
exports = calculate_object_exports_nm(objects)
if export_all:
return exports
return (x for x in exports if x.startswith("PyInit"))
| def calculate_exports(line: list[str], export_all: bool) -> Iterable[str]:
"""
Collect up all the object files and archive files being linked and list out
symbols in them that are marked as public. If ``export_all`` is ``True``,
then return all public symbols. If not, return only the public symbols that
begin with `PyInit`.
"""
objects = [arg for arg in line if arg.endswith((".a", ".o"))]
exports = None
# Using emnm is simpler but it cannot handle bitcode. If we're only
# exporting the PyInit symbols, save effort by using nm.
if export_all:
exports = calculate_object_exports_readobj(objects)
if exports is None:
# Either export_all is false or we are linking at least one bitcode
# object. Fall back to a more conservative estimate of the symbols
# exported. This can export things with `__visibility__("hidden")`
exports = calculate_object_exports_nm(objects)
if export_all:
return exports
return (x for x in exports if x.startswith("PyInit"))
|
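The object/archive filter above, i.e. the tuple form of str.endswith, in isolation:
line = ["-O2", "main.o", "libfoo.a", "module.bc", "-o", "out.wasm"]
print([arg for arg in line if arg.endswith((".a", ".o"))])  # ['main.o', 'libfoo.a']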
42,676 | def test_events_filter_params(rotkehlchen_api_server, ethereum_accounts):
"""Tests filtering by transaction's events' properties
Test cases:
- Filtering by asset
- Filtering by protocol (counterparty)
- Filtering by both asset and a protocol
- Transaction has multiple related events
- Transaction has no related events
- Multiple transactions are queried
"""
logging.getLogger('rotkehlchen.externalapis.etherscan').disabled = True
rotki = rotkehlchen_api_server.rest_api.rotkehlchen
db = rotki.data.db
tx1 = create_tx(tx_hash=b'1')
tx2 = create_tx(tx_hash=b'2')
tx3 = create_tx(tx_hash=b'3')
event1 = create_tx_event(tx_hash=b'1', index=1, asset=A_ETH)
event2 = create_tx_event(tx_hash=b'1', index=2, asset=A_ETH, counterparty='EXAMPLE_PROTOCOL')
event3 = create_tx_event(tx_hash=b'1', index=3, asset=A_BTC, counterparty='EXAMPLE_PROTOCOL')
event4 = create_tx_event(tx_hash=b'2', index=4, asset=A_BTC)
dbethtx = DBEthTx(db)
dbethtx.add_ethereum_transactions([tx1, tx2, tx3], relevant_address=ethereum_accounts[0])
dbevents = DBHistoryEvents(db)
dbevents.add_history_events([event1, event2, event3, event4])
response = requests.get(
api_url_for(
rotkehlchen_api_server,
'ethereumtransactionsresource',
),
json={
'asset': A_ETH.serialize(),
},
)
result = assert_proper_response_with_result(response)
expected = generate_tx_entries_response([(tx1, [event1, event2])])
assert result['entries'] == expected
response = requests.get(
api_url_for(
rotkehlchen_api_server,
'ethereumtransactionsresource',
),
json={
'asset': A_BTC.serialize(),
},
)
result = assert_proper_response_with_result(response)
expected = generate_tx_entries_response([(tx1, [event3]), (tx2, [event4])])
# For some reason this data can come back reversed,
# so this ugly check keeps the assertion from failing.
# Dicts are not hashable, so there is no better or simpler way to compare.
assert result['entries'] == expected or result['entries'] == list(reversed(expected))
response = requests.get(
api_url_for(
rotkehlchen_api_server,
'ethereumtransactionsresource',
),
json={
'protocol': 'EXAMPLE_PROTOCOL',
},
)
result = assert_proper_response_with_result(response)
expected = generate_tx_entries_response([(tx1, [event2, event3])])
assert result['entries'] == expected
response = requests.get(
api_url_for(
rotkehlchen_api_server,
'ethereumtransactionsresource',
),
json={
'asset': A_BTC.serialize(),
'protocol': 'EXAMPLE_PROTOCOL',
},
)
result = assert_proper_response_with_result(response)
expected = generate_tx_entries_response([(tx1, [event3])])
assert result['entries'] == expected
| def test_events_filter_params(rotkehlchen_api_server, ethereum_accounts):
"""Tests filtering by transaction's events' properties
Test cases:
- Filtering by asset
- Filtering by protocol (counterparty)
- Filtering by both asset and a protocol
- Transaction has multiple related events
- Transaction has no related events
- Multiple transactions are queried
"""
logging.getLogger('rotkehlchen.externalapis.etherscan').disabled = True
rotki = rotkehlchen_api_server.rest_api.rotkehlchen
db = rotki.data.db
tx1 = create_tx(tx_hash=b'1')
tx2 = create_tx(tx_hash=b'2')
tx3 = create_tx(tx_hash=b'3')
event1 = create_tx_event(tx_hash=b'1', index=1, asset=A_ETH)
event2 = create_tx_event(tx_hash=b'1', index=2, asset=A_ETH, counterparty='EXAMPLE_PROTOCOL')
event3 = create_tx_event(tx_hash=b'1', index=3, asset=A_BTC, counterparty='EXAMPLE_PROTOCOL')
event4 = create_tx_event(tx_hash=b'2', index=4, asset=A_BTC)
dbethtx = DBEthTx(db)
dbethtx.add_ethereum_transactions([tx1, tx2, tx3], relevant_address=ethereum_accounts[0])
dbevents = DBHistoryEvents(db)
dbevents.add_history_events([event1, event2, event3, event4])
response = requests.get(
api_url_for(
rotkehlchen_api_server,
'ethereumtransactionsresource',
),
json={
'asset': A_ETH.serialize(),
},
)
result = assert_proper_response_with_result(response)
expected = generate_tx_entries_response([(tx1, [event1, event2])])
assert result['entries'] == expected
response = requests.get(
api_url_for(
rotkehlchen_api_server,
'ethereumtransactionsresource',
),
json={
'asset': A_BTC.serialize(),
},
)
result = assert_proper_response_with_result(response)
expected = generate_tx_entries_response([(tx1, [event3]), (tx2, [event4])])
# For some reason this data can come back reversed,
# so this ugly check keeps the assertion from failing.
# Dicts are not hashable, so there is no better or simpler way to compare.
assert result['entries'] == expected or result['entries'] == list(reversed(expected))
response = requests.get(
api_url_for(
rotkehlchen_api_server,
'ethereumtransactionsresource',
),
json={'protocol': 'EXAMPLE_PROTOCOL'},
)
result = assert_proper_response_with_result(response)
expected = generate_tx_entries_response([(tx1, [event2, event3])])
assert result['entries'] == expected
response = requests.get(
api_url_for(
rotkehlchen_api_server,
'ethereumtransactionsresource',
),
json={
'asset': A_BTC.serialize(),
'protocol': 'EXAMPLE_PROTOCOL',
},
)
result = assert_proper_response_with_result(response)
expected = generate_tx_entries_response([(tx1, [event3])])
assert result['entries'] == expected
|
31,860 | def validate_common_args(args: Dict[str, str]) -> Dict[str, Any]:
"""
Validate the page_size and page_number arguments; raise ValueError on invalid values.
:type args: ``Dict[str, str]``
:param args: The command arguments provided by the user.
:return: Parameters to send in request
:rtype: ``Dict[str, Any]``
"""
params: Dict[str, Any] = {}
page_size = arg_to_number(args.get("page_size"))
params["page[size]"] = 50
if page_size is not None:
if page_size <= 0 or page_size > 100:
raise ValueError(MESSAGES['PAGE_SIZE'].format(page_size))
params["page[size]"] = page_size
page_number = arg_to_number(args.get("page_number"))
if page_number is not None:
if page_number < 0 or page_number >= 2147483648:
raise ValueError(MESSAGES['PAGE_NUMBER'].format(page_number))
params["page[number]"] = page_number
return params
| def validate_common_args(args: Dict[str, str]) -> Dict[str, Any]:
"""
Validate the page_size and page_number arguments; raise ValueError on invalid values.
:type args: ``Dict[str, str]``
:param args: The command arguments provided by the user.
:return: Parameters to send in request
:rtype: ``Dict[str, Any]``
"""
params: Dict[str, Any] = {}
page_size = arg_to_number(args.get("page_size", 50))
params["page[size]"] = 50
if page_size is not None:
if page_size <= 0 or page_size > 100:
raise ValueError(MESSAGES['PAGE_SIZE'].format(page_size))
params["page[size]"] = page_size
page_number = arg_to_number(args.get("page_number"))
if page_number is not None:
if page_number < 0 or page_number >= 2147483648:
raise ValueError(MESSAGES['PAGE_NUMBER'].format(page_number))
params["page[number]"] = page_number
return params
|
43,766 | def indices_up_to(n_max, has_tuple=False):
"""Returns an iterator over the number of qubits and output dimension, up to value n_max.
The output dimension never exceeds the number of qubits."""
if has_tuple:
# If the output_dim is to be used as a tuple. First element is for n_qubits and
# the second is for output_dim. For example, for n_max = 3 it will return,
# [(1, (2, 2)), (2, (2, 2)), (3, (2, 2)), (3, (3, 3))]
a, b = np.tril_indices(n_max)
return zip(*[a + 1], zip(*[2 ** (b + 1), 2 ** (b + 1)]))
else:
a, b = np.tril_indices(n_max)
return zip(*[a + 1, b + 1])
| def indices_up_to(n_max, has_tuple=False):
"""Returns an iterator over the number of qubits and output dimension, up to value n_max.
The output dimension never exceeds the number of qubits."""
if has_tuple:
# If the output_dim is to be used as a tuple. First element is for n_qubits and
# the second is for output_dim. For example, for n_max = 3 it will return,
# [(1, (2, 2)), (2, (2, 2)), (2, (4, 4)), (3, (2, 2)), (3, (4, 4)), (3, (8, 8))]
a, b = np.tril_indices(n_max)
return zip(*[a + 1], zip(*[2 ** (b + 1), 2 ** (b + 1)]))
else:
a, b = np.tril_indices(n_max)
return zip(*[a + 1, b + 1])
|
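What the tril_indices construction above yields for n_max = 3; a standalone check, with ints cast so the printout does not depend on the numpy version:
import numpy as np
a, b = np.tril_indices(3)
print([(int(n), int(m)) for n, m in zip(a + 1, b + 1)])
# [(1, 1), (2, 1), (2, 2), (3, 1), (3, 2), (3, 3)]
print([(int(n), (int(d), int(d))) for n, d in zip(a + 1, 2 ** (b + 1))])
# [(1, (2, 2)), (2, (2, 2)), (2, (4, 4)), (3, (2, 2)), (3, (4, 4)), (3, (8, 8))]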
12,281 | def _gate_sequence_product(U_list, ind_list, expand_N=None):
"""
Calculate the overall unitary matrix for a given list of unitary operations
that are still of original dimension.
Parameters
----------
U_list : list of Qobj
List of gates(unitaries) implementing the quantum circuit.
ind_list : list of list of int
List of qubit indices corresponding to each gate in tensor_list.
expand_N : int, optional
Total number of qubits.
Returns
-------
U_overall : qobj
Unitary matrix corresponding to U_list.
overall_inds : list of int
List of qubit indices on which U_overall applies.
"""
if not expand_N:
expand_N = len(set(chain(*ind_list)))
sorted_inds = sorted(set(_flatten(ind_list)))
ind_list = [[sorted_inds.index(ind) for ind in inds] for inds in ind_list]
U_overall = 1
overall_inds = []
for i, (U, inds) in enumerate(zip(U_list, ind_list)):
if len(overall_inds) == 1 and len(overall_inds[0]) == expand_N:
U_overall = tensor(tensor_list)
overall_inds = _flatten(overall_inds)
U_left, rem_inds = _gate_sequence_product(U_list[i:],
ind_list[i:])
U_left = expand_operator(U_left, expand_N, rem_inds)
return U_left * U_overall, [sorted_inds[ind] for ind in overall_inds]
if U_overall == 1:
U_overall = U_overall * U
overall_inds = [ind_list[0]]
tensor_list = [U_overall]
continue
elif len(set(_flatten(overall_inds)).intersection(set(inds))) > 0:
tensor_list, overall_inds = mult_sublists(tensor_list,
overall_inds,
U, inds)
else:
# only need to expand stuff !
overall_inds.append(inds)
tensor_list.append(U)
U_overall = tensor(tensor_list)
return U_overall, [sorted_inds[ind] for ind in _flatten(overall_inds)]
| def _gate_sequence_product(U_list, ind_list, tot_num_qubits=None):
"""
Calculate the overall unitary matrix for a given list of unitary operations
that are still of original dimension.
Parameters
----------
U_list : list of Qobj
List of gates(unitaries) implementing the quantum circuit.
ind_list : list of list of int
List of qubit indices corresponding to each gate in tensor_list.
expand_N : int, optional
Total number of qubits.
Returns
-------
U_overall : qobj
Unitary matrix corresponding to U_list.
overall_inds : list of int
List of qubit indices on which U_overall applies.
"""
if not expand_N:
expand_N = len(set(chain(*ind_list)))
sorted_inds = sorted(set(_flatten(ind_list)))
ind_list = [[sorted_inds.index(ind) for ind in inds] for inds in ind_list]
U_overall = 1
overall_inds = []
for i, (U, inds) in enumerate(zip(U_list, ind_list)):
if len(overall_inds) == 1 and len(overall_inds[0]) == expand_N:
U_overall = tensor(tensor_list)
overall_inds = _flatten(overall_inds)
U_left, rem_inds = _gate_sequence_product(U_list[i:],
ind_list[i:])
U_left = expand_operator(U_left, expand_N, rem_inds)
return U_left * U_overall, [sorted_inds[ind] for ind in overall_inds]
if U_overall == 1:
U_overall = U_overall * U
overall_inds = [ind_list[0]]
tensor_list = [U_overall]
continue
elif len(set(_flatten(overall_inds)).intersection(set(inds))) > 0:
tensor_list, overall_inds = mult_sublists(tensor_list,
overall_inds,
U, inds)
else:
# only need to expand stuff !
overall_inds.append(inds)
tensor_list.append(U)
U_overall = tensor(tensor_list)
return U_overall, [sorted_inds[ind] for ind in _flatten(overall_inds)]
|
3,905 | def shortest_path(G, source=None, target=None, weight=None, method="dijkstra"):
"""Compute shortest paths in the graph.
Parameters
----------
G : NetworkX graph
source : node, optional
Starting node for path. If not specified, compute shortest
paths for each possible starting node.
target : node, optional
Ending node for path. If not specified, compute shortest
paths to all possible nodes.
weight : None, string or function, optional (default = None)
If None, every edge has weight/distance/cost 1.
If a string, use this edge attribute as the edge weight.
Any edge attribute not present defaults to 1.
If this is a function, the weight of an edge is the value
returned by the function. The function must accept exactly
three positional arguments: the two endpoints of an edge and
the dictionary of edge attributes for that edge.
The function must return a number.
method : string, optional (default = 'dijkstra')
The algorithm to use to compute the path.
Supported options: 'dijkstra', 'bellman-ford'.
Other inputs produce a ValueError.
If `weight` is None, unweighted graph methods are used, and this
suggestion is ignored.
Returns
-------
path: list or dictionary
All returned paths include both the source and target in the path.
If the source and target are both specified, return a single list
of nodes in a shortest path from the source to the target.
If only the source is specified, return a dictionary keyed by
targets with a list of nodes in a shortest path from the source
to one of the targets.
If only the target is specified, return a dictionary keyed by
sources with a list of nodes in a shortest path from one of the
sources to the target.
If neither the source nor target are specified return a dictionary
of dictionaries with path[source][target]=[list of nodes in path].
Raises
------
NodeNotFound
If `source` is not in `G`.
ValueError
If `method` is not among the supported options.
Examples
--------
>>> G = nx.path_graph(5)
>>> print(nx.shortest_path(G, source=0, target=4)) # shortest path from source=0 to target=4
[0, 1, 2, 3, 4]
>>> p = nx.shortest_path(G, source=0) # target not specified
>>> p[3] # shortest path from source=0 to source=3
[0, 1, 2, 3]
>>> p = nx.shortest_path(G, target=4) # source not specified
>>> p[1] # shortest path from source=1 to target=4
[1, 2, 3, 4]
>>> p = nx.shortest_path(G) # source, target not specified
>>> p[2][4] # shortest path from source=2 to target=4
[2, 3, 4]
Notes
-----
There may be more than one shortest path between a source and target.
This returns only one of them.
See Also
--------
all_pairs_shortest_path
all_pairs_dijkstra_path
all_pairs_bellman_ford_path
single_source_shortest_path
single_source_dijkstra_path
single_source_bellman_ford_path
"""
if method not in ("dijkstra", "bellman-ford"):
# so we don't need to check in each branch later
raise ValueError(f"method not supported: {method}")
method = "unweighted" if weight is None else method
if source is None:
if target is None:
# Find paths between all pairs.
if method == "unweighted":
paths = dict(nx.all_pairs_shortest_path(G))
elif method == "dijkstra":
paths = dict(nx.all_pairs_dijkstra_path(G, weight=weight))
else: # method == 'bellman-ford':
paths = dict(nx.all_pairs_bellman_ford_path(G, weight=weight))
else:
# Find paths from all nodes co-accessible to the target.
if G.is_directed():
G = G.reverse(copy=False)
if method == "unweighted":
paths = nx.single_source_shortest_path(G, target)
elif method == "dijkstra":
paths = nx.single_source_dijkstra_path(G, target, weight=weight)
else: # method == 'bellman-ford':
paths = nx.single_source_bellman_ford_path(G, target, weight=weight)
# Now flip the paths so they go from a source to the target.
for target in paths:
paths[target] = list(reversed(paths[target]))
else:
if target is None:
# Find paths to all nodes accessible from the source.
if method == "unweighted":
paths = nx.single_source_shortest_path(G, source)
elif method == "dijkstra":
paths = nx.single_source_dijkstra_path(G, source, weight=weight)
else: # method == 'bellman-ford':
paths = nx.single_source_bellman_ford_path(G, source, weight=weight)
else:
# Find shortest source-target path.
if method == "unweighted":
paths = nx.bidirectional_shortest_path(G, source, target)
elif method == "dijkstra":
_, paths = nx.bidirectional_dijkstra(G, source, target, weight)
else: # method == 'bellman-ford':
paths = nx.bellman_ford_path(G, source, target, weight)
return paths
| def shortest_path(G, source=None, target=None, weight=None, method="dijkstra"):
"""Compute shortest paths in the graph.
Parameters
----------
G : NetworkX graph
source : node, optional
Starting node for path. If not specified, compute shortest
paths for each possible starting node.
target : node, optional
Ending node for path. If not specified, compute shortest
paths to all possible nodes.
weight : None, string or function, optional (default = None)
If None, every edge has weight/distance/cost 1.
If a string, use this edge attribute as the edge weight.
Any edge attribute not present defaults to 1.
If this is a function, the weight of an edge is the value
returned by the function. The function must accept exactly
three positional arguments: the two endpoints of an edge and
the dictionary of edge attributes for that edge.
The function must return a number.
method : string, optional (default = 'dijkstra')
The algorithm to use to compute the path.
Supported options: 'dijkstra', 'bellman-ford'.
Other inputs produce a ValueError.
If `weight` is None, unweighted graph methods are used, and this
suggestion is ignored.
Returns
-------
path: list or dictionary
All returned paths include both the source and target in the path.
If the source and target are both specified, return a single list
of nodes in a shortest path from the source to the target.
If only the source is specified, return a dictionary keyed by
targets with a list of nodes in a shortest path from the source
to one of the targets.
If only the target is specified, return a dictionary keyed by
sources with a list of nodes in a shortest path from one of the
sources to the target.
If neither the source nor target are specified return a dictionary
of dictionaries with path[source][target]=[list of nodes in path].
Raises
------
NodeNotFound
If `source` is not in `G`.
ValueError
If `method` is not among the supported options.
Examples
--------
>>> G = nx.path_graph(5)
>>> print(nx.shortest_path(G, source=0, target=4)) # shortest path from source=0 to target=4
[0, 1, 2, 3, 4]
>>> p = nx.shortest_path(G, source=0) # target not specified
>>> p[3] # shortest path from source=0 to target=3
[0, 1, 2, 3]
>>> p = nx.shortest_path(G, target=4) # source not specified
>>> p[1] # shortest path from source=1 to target=4
[1, 2, 3, 4]
>>> p = nx.shortest_path(G) # source, target not specified
>>> p[2][4] # shortest path from source=2 to target=4
[2, 3, 4]
Notes
-----
There may be more than one shortest path between a source and target.
This returns only one of them.
See Also
--------
all_pairs_shortest_path
all_pairs_dijkstra_path
all_pairs_bellman_ford_path
single_source_shortest_path
single_source_dijkstra_path
single_source_bellman_ford_path
"""
if method not in ("dijkstra", "bellman-ford"):
# so we don't need to check in each branch later
raise ValueError(f"method not supported: {method}")
method = "unweighted" if weight is None else method
if source is None:
if target is None:
# Find paths between all pairs.
if method == "unweighted":
paths = dict(nx.all_pairs_shortest_path(G))
elif method == "dijkstra":
paths = dict(nx.all_pairs_dijkstra_path(G, weight=weight))
else: # method == 'bellman-ford':
paths = dict(nx.all_pairs_bellman_ford_path(G, weight=weight))
else:
# Find paths from all nodes co-accessible to the target.
if G.is_directed():
G = G.reverse(copy=False)
if method == "unweighted":
paths = nx.single_source_shortest_path(G, target)
elif method == "dijkstra":
paths = nx.single_source_dijkstra_path(G, target, weight=weight)
else: # method == 'bellman-ford':
paths = nx.single_source_bellman_ford_path(G, target, weight=weight)
# Now flip the paths so they go from a source to the target.
for target in paths:
paths[target] = list(reversed(paths[target]))
else:
if target is None:
# Find paths to all nodes accessible from the source.
if method == "unweighted":
paths = nx.single_source_shortest_path(G, source)
elif method == "dijkstra":
paths = nx.single_source_dijkstra_path(G, source, weight=weight)
else: # method == 'bellman-ford':
paths = nx.single_source_bellman_ford_path(G, source, weight=weight)
else:
# Find shortest source-target path.
if method == "unweighted":
paths = nx.bidirectional_shortest_path(G, source, target)
elif method == "dijkstra":
_, paths = nx.bidirectional_dijkstra(G, source, target, weight)
else: # method == 'bellman-ford':
paths = nx.bellman_ford_path(G, source, target, weight)
return paths
|
38,967 | def run_tests(classes, cases, repeats, json=False):
if json:
classes = [c for c in classes if hasattr(c, 'to_json')]
lpad = max([len(t.package) for t in classes]) + 4
print(f'testing {", ".join([t.package for t in classes])}, {repeats} times each')
results = []
csv_results = []
for test_class in classes:
times = []
p = test_class.package
for i in range(repeats):
count, pass_count = 0, 0
start = datetime.now()
test = test_class(True)
for j in range(3):
for case in cases:
if json:
passed, result = test.to_json(case)
else:
passed, result = test.validate(case)
count += 1
pass_count += passed
time = (datetime.now() - start).total_seconds()
success = pass_count / count * 100
print(f'{p:>{lpad}} ({i+1:>{len(str(repeats))}}/{repeats}) time={time:0.3f}s, success={success:0.2f}%')
times.append(time)
print(f'{p:>{lpad}} best={min(times):0.3f}s, avg={mean(times):0.3f}s, stdev={stdev(times):0.3f}s')
model_count = 3 * len(cases)
avg = mean(times) / model_count * 1e6
sd = stdev(times) / model_count * 1e6
results.append(f'{p:>{lpad}} best={min(times) / model_count * 1e6:0.3f}μs/iter '
f'avg={avg:0.3f}μs/iter stdev={sd:0.3f}μs/iter version={test_class.version}')
csv_results.append([p, test_class.version, avg])
print()
return results, csv_results
| def run_tests(classes, cases, repeats, json=False):
if json:
classes = [c for c in classes if hasattr(c, 'to_json')]
lpad = max(len(t.package) for t in classes) + 4
print(f'testing {", ".join([t.package for t in classes])}, {repeats} times each')
results = []
csv_results = []
for test_class in classes:
times = []
p = test_class.package
for i in range(repeats):
count, pass_count = 0, 0
start = datetime.now()
test = test_class(True)
for j in range(3):
for case in cases:
if json:
passed, result = test.to_json(case)
else:
passed, result = test.validate(case)
count += 1
pass_count += passed
time = (datetime.now() - start).total_seconds()
success = pass_count / count * 100
print(f'{p:>{lpad}} ({i+1:>{len(str(repeats))}}/{repeats}) time={time:0.3f}s, success={success:0.2f}%')
times.append(time)
print(f'{p:>{lpad}} best={min(times):0.3f}s, avg={mean(times):0.3f}s, stdev={stdev(times):0.3f}s')
model_count = 3 * len(cases)
avg = mean(times) / model_count * 1e6
sd = stdev(times) / model_count * 1e6
results.append(f'{p:>{lpad}} best={min(times) / model_count * 1e6:0.3f}μs/iter '
f'avg={avg:0.3f}μs/iter stdev={sd:0.3f}μs/iter version={test_class.version}')
csv_results.append([p, test_class.version, avg])
print()
return results, csv_results
|
21,124 | def read_iob(raw_sents):
sentences = []
for line in raw_sents:
if not line.strip():
continue
tokens = [re.split('[^\w\-]', line.strip())]
if len(tokens[0]) == 3:
words, pos, iob = zip(*tokens)
elif len(tokens[0]) == 2:
words, iob = zip(*tokens)
pos = ['-'] * len(words)
else:
raise Exception('The iob/iob2 file is not formatted correctly. Try checking whitespace and delimiters.')
biluo = iob_to_biluo(iob)
sentences.append([
{'orth': w, 'tag': p, 'ner': ent}
for (w, p, ent) in zip(words, pos, biluo)
])
sentences = [{'tokens': sent} for sent in sentences]
paragraphs = [{'sentences': [sent]} for sent in sentences]
docs = [{'id': 0, 'paragraphs': [para]} for para in paragraphs]
return docs
| def read_iob(raw_sents):
sentences = []
for line in raw_sents:
if not line.strip():
continue
tokens = [re.split('[^\w\-]', line.strip())]
if len(tokens[0]) == 3:
words, pos, iob = zip(*tokens)
elif len(tokens[0]) == 2:
words, iob = zip(*tokens)
pos = ['-'] * len(words)
else:
raise ValueError('The iob/iob2 file is not formatted correctly. Try checking whitespace and delimiters.')
biluo = iob_to_biluo(iob)
sentences.append([
{'orth': w, 'tag': p, 'ner': ent}
for (w, p, ent) in zip(words, pos, biluo)
])
sentences = [{'tokens': sent} for sent in sentences]
paragraphs = [{'sentences': [sent]} for sent in sentences]
docs = [{'id': 0, 'paragraphs': [para]} for para in paragraphs]
return docs
|
41,509 | def qmu(mu, data, pdf, init_pars, par_bounds, fixed_vals):
r"""
The test statistic, :math:`q_{\mu}`, for establishing an upper
limit on the strength parameter, :math:`\mu`, as defined in
Equation (14) in :xref:`arXiv:1007.1727`
.. math::
:nowrap:
\begin{equation}
q_{\mu} = \left\{\begin{array}{ll}
-2\ln\lambda\left(\mu\right), &\hat{\mu} < \mu,\\
0, & \hat{\mu} > \mu
\end{array}\right.
\end{equation}
where :math:`\lambda\left(\mu\right)` is the profile likelihood ratio as defined in Equation (7)
.. math::
\lambda\left(\mu\right) = \frac{L\left(\mu, \hat{\hat{\boldsymbol{\theta}}}\right)}{L\left(\hat{\mu}, \hat{\boldsymbol{\theta}}\right)}\,.
Example:
>>> import pyhf
>>> pyhf.set_backend("numpy")
>>> model = pyhf.simplemodels.hepdata_like(
... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
... )
>>> observations = [51, 48]
>>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)
>>> test_mu = 1.0
>>> init_pars = model.config.suggested_init()
>>> par_bounds = model.config.suggested_bounds()
>>> par_bounds[model.config.poi_index] = [-10.0, 10.0]
>>> fixed_vals = []
>>> pyhf.infer.test_statistics.qmu(test_mu, data, model, init_pars, par_bounds, [])
array(3.9549891)
Args:
mu (Number or Tensor): The signal strength parameter
data (Tensor): The data to be considered
pdf (~pyhf.pdf.Model): The HistFactory statistical model used in the likelihood ratio calculation
init_pars (`list`): Values to initialize the model parameters at for the fit
par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit
fixed_vals (`list`): Parameters held constant in the fit
Returns:
Float: The calculated test statistic, :math:`q_{\mu}`
"""
if pdf.config.poi_index is None:
raise UnspecifiedPOI(
'No POI is defined. A POI is required for profile likelihood based test statistics.'
)
if par_bounds[pdf.config.poi_index][0] == 0:
log.warning(
'qmu test statistic used for fit configuration with POI bounded at zero.\n'
+ 'Use the qmu_tilde test statistic (pyhf.infer.test_statistics.qmu_tilde) instead.'
)
return _qmu_like(mu, data, pdf, init_pars, par_bounds, fixed_vals)
| def qmu(mu, data, pdf, init_pars, par_bounds, fixed_vals):
r"""
The test statistic, :math:`q_{\mu}`, for establishing an upper
limit on the strength parameter, :math:`\mu`, as defined in
Equation (14) in :xref:`arXiv:1007.1727`
.. math::
:nowrap:
\begin{equation}
q_{\mu} = \left\{\begin{array}{ll}
-2\ln\lambda\left(\mu\right), &\hat{\mu} < \mu,\\
0, & \hat{\mu} > \mu
\end{array}\right.
\end{equation}
where :math:`\lambda\left(\mu\right)` is the profile likelihood ratio as defined in Equation (7)
.. math::
\lambda\left(\mu\right) = \frac{L\left(\mu, \hat{\hat{\boldsymbol{\theta}}}\right)}{L\left(\hat{\mu}, \hat{\boldsymbol{\theta}}\right)}\,.
Example:
>>> import pyhf
>>> pyhf.set_backend("numpy")
>>> model = pyhf.simplemodels.hepdata_like(
... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]
... )
>>> observations = [51, 48]
>>> data = pyhf.tensorlib.astensor(observations + model.config.auxdata)
>>> test_mu = 1.0
>>> init_pars = model.config.suggested_init()
>>> par_bounds = model.config.suggested_bounds()
>>> par_bounds[model.config.poi_index] = [-10.0, 10.0]
>>> fixed_vals = []
>>> pyhf.infer.test_statistics.qmu(test_mu, data, model, init_pars, par_bounds, fixed_vals)
array(3.9549891)
Args:
mu (Number or Tensor): The signal strength parameter
data (Tensor): The data to be considered
pdf (~pyhf.pdf.Model): The HistFactory statistical model used in the likelihood ratio calculation
init_pars (`list`): Values to initialize the model parameters at for the fit
par_bounds (`list` of `list`\s or `tuple`\s): The extrema of values the model parameters are allowed to reach in the fit
fixed_vals (`list`): Parameters held constant in the fit
Returns:
Float: The calculated test statistic, :math:`q_{\mu}`
"""
if pdf.config.poi_index is None:
raise UnspecifiedPOI(
'No POI is defined. A POI is required for profile likelihood based test statistics.'
)
if par_bounds[pdf.config.poi_index][0] == 0:
log.warning(
'qmu test statistic used for fit configuration with POI bounded at zero.\n'
+ 'Use the qmu_tilde test statistic (pyhf.infer.test_statistics.qmu_tilde) instead.'
)
return _qmu_like(mu, data, pdf, init_pars, par_bounds, fixed_vals)
|
53,466 | def add_cost():
global price # [global-statement]
price = price + 10
return price
| def add_vat():
global price # [global-statement]
return price * 0.20
|
58,972 | def _read_from_stream(
container, start_offset, end_offset, pts_unit, stream, stream_name
):
global _CALLED_TIMES, _GC_COLLECTION_INTERVAL
_CALLED_TIMES += 1
if _CALLED_TIMES % _GC_COLLECTION_INTERVAL == _GC_COLLECTION_INTERVAL - 1:
gc.collect()
if pts_unit == "sec":
start_offset = int(math.floor(start_offset * (1 / stream.time_base)))
if end_offset != float("inf"):
end_offset = int(math.ceil(end_offset * (1 / stream.time_base)))
else:
warnings.warn(
"The pts_unit 'pts' gives wrong results and will be removed in a "
+ "follow-up version. Please use pts_unit 'sec'."
)
frames = {}
should_buffer = True
max_buffer_size = 5
if stream.type == "video":
# DivX-style packed B-frames can have out-of-order pts (2 frames in a single pkt)
# so need to buffer some extra frames to sort everything
# properly
extradata = stream.codec_context.extradata
# overly complicated way of finding if `divx_packed` is set, following
# https://github.com/FFmpeg/FFmpeg/commit/d5a21172283572af587b3d939eba0091484d3263
if extradata and b"DivX" in extradata:
# can't use regex directly because of some weird characters sometimes...
pos = extradata.find(b"DivX")
d = extradata[pos:]
o = re.search(br"DivX(\d+)Build(\d+)(\w)", d)
if o is None:
o = re.search(br"DivX(\d+)b(\d+)(\w)", d)
if o is not None:
should_buffer = o.group(3) == b"p"
seek_offset = start_offset
# some files don't seek to the right location, so better be safe here
seek_offset = max(seek_offset - 1, 0)
if should_buffer:
# FIXME this is kind of a hack, but we will jump to the previous keyframe
# so this will be safe
seek_offset = max(seek_offset - max_buffer_size, 0)
try:
# TODO check if stream needs to always be the video stream here or not
container.seek(seek_offset, any_frame=False, backward=True, stream=stream)
except av.AVError:
# TODO add some warnings in this case
# print("Corrupted file?", container.name)
return []
buffer_count = 0
try:
max_pts = float('-inf')
for _idx, frame in enumerate(container.decode(**stream_name)):
max_pts = max(max_pts, frame.pts)
frames[frame.pts] = frame
if frame.pts >= end_offset:
if should_buffer and buffer_count < max_buffer_size:
buffer_count += 1
continue
break
if max_pts < seek_offset:
raise BufferError(
' %s-stream decoder is seeking for a pts out of the video.' % stream.type
+ ' Have you chosen proper start_pts?')
except av.AVError:
# TODO add a warning
pass
# ensure that the results are sorted wrt the pts
result = [
frames[i] for i in sorted(frames) if start_offset <= frames[i].pts <= end_offset
]
if len(frames) > 0 and start_offset > 0 and start_offset not in frames:
# if there is no frame that exactly matches the pts of start_offset
# add the last frame smaller than start_offset, to guarantee that
# we will have all the necessary data. This is most useful for audio
preceding_frames = [i for i in frames if i < start_offset]
if len(preceding_frames) > 0:
first_frame_pts = max(preceding_frames)
result.insert(0, frames[first_frame_pts])
return result
| def _read_from_stream(
container, start_offset, end_offset, pts_unit, stream, stream_name
):
global _CALLED_TIMES, _GC_COLLECTION_INTERVAL
_CALLED_TIMES += 1
if _CALLED_TIMES % _GC_COLLECTION_INTERVAL == _GC_COLLECTION_INTERVAL - 1:
gc.collect()
if pts_unit == "sec":
start_offset = int(math.floor(start_offset * (1 / stream.time_base)))
if end_offset != float("inf"):
end_offset = int(math.ceil(end_offset * (1 / stream.time_base)))
else:
warnings.warn(
"The pts_unit 'pts' gives wrong results and will be removed in a "
+ "follow-up version. Please use pts_unit 'sec'."
)
frames = {}
should_buffer = True
max_buffer_size = 5
if stream.type == "video":
# DivX-style packed B-frames can have out-of-order pts (2 frames in a single pkt)
# so need to buffer some extra frames to sort everything
# properly
extradata = stream.codec_context.extradata
# overly complicated way of finding if `divx_packed` is set, following
# https://github.com/FFmpeg/FFmpeg/commit/d5a21172283572af587b3d939eba0091484d3263
if extradata and b"DivX" in extradata:
# can't use regex directly because of some weird characters sometimes...
pos = extradata.find(b"DivX")
d = extradata[pos:]
o = re.search(br"DivX(\d+)Build(\d+)(\w)", d)
if o is None:
o = re.search(br"DivX(\d+)b(\d+)(\w)", d)
if o is not None:
should_buffer = o.group(3) == b"p"
seek_offset = start_offset
# some files don't seek to the right location, so better be safe here
seek_offset = max(seek_offset - 1, 0)
if should_buffer:
# FIXME this is kind of a hack, but we will jump to the previous keyframe
# so this will be safe
seek_offset = max(seek_offset - max_buffer_size, 0)
try:
# TODO check if stream needs to always be the video stream here or not
container.seek(seek_offset, any_frame=False, backward=True, stream=stream)
except av.AVError:
# TODO add some warnings in this case
# print("Corrupted file?", container.name)
return []
buffer_count = 0
try:
max_pts = float('-inf')
for _idx, frame in enumerate(container.decode(**stream_name)):
max_pts = max(max_pts, frame.pts)
frames[frame.pts] = frame
if frame.pts >= end_offset:
if should_buffer and buffer_count < max_buffer_size:
buffer_count += 1
continue
break
if max_pts < seek_offset:
raise BufferError(
f'{stream.type}-stream decoder is seeking for a pts out of the video. '
f'Have you chosen proper start_pts?')
except av.AVError:
# TODO add a warning
pass
# ensure that the results are sorted wrt the pts
result = [
frames[i] for i in sorted(frames) if start_offset <= frames[i].pts <= end_offset
]
if len(frames) > 0 and start_offset > 0 and start_offset not in frames:
# if there is no frame that exactly matches the pts of start_offset
# add the last frame smaller than start_offset, to guarantee that
# we will have all the necessary data. This is most useful for audio
preceding_frames = [i for i in frames if i < start_offset]
if len(preceding_frames) > 0:
first_frame_pts = max(preceding_frames)
result.insert(0, frames[first_frame_pts])
return result
|
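The entry above converts a seek window given in seconds into pts by dividing by the stream's time base. A minimal standalone sketch of that conversion is below; the 1/90000 time base is an assumed example value (typical of MPEG-TS streams), not taken from the entry.

```python
# Standalone sketch of the seconds -> pts conversion used in the entry above.
# The 1/90000 time base is an assumed example value, typical of MPEG-TS streams.
import math
from fractions import Fraction

time_base = Fraction(1, 90000)      # assumed example for stream.time_base
start_sec, end_sec = 1.25, float("inf")

start_pts = int(math.floor(start_sec * (1 / time_base)))
end_pts = int(math.ceil(end_sec * (1 / time_base))) if end_sec != float("inf") else end_sec

print(start_pts, end_pts)           # 112500 inf
```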
52,914 | def get_ops(name: str, **kwargs) -> Ops:
"""Get a backend object.
The special name "cpu" returns the best available CPU backend."""
ops_by_name = {ops_cls.name: ops_cls for ops_cls in registry.ops.get_all().values()} # type: ignore
cls: Optional[Callable[..., Ops]] = None
if name == "cpu":
_import_extra_cpu_backends()
cls = ops_by_name.get("apple", ops_by_name.get("numpy"))
if "bigendian" in ops_by_name:
cls = ops_by_name.get("bigendian", ops_by_name.get("numpy"))
else:
cls = ops_by_name.get(name)
if cls is None:
raise ValueError(f"Invalid backend: {name}")
return cls(**kwargs)
| def get_ops(name: str, **kwargs) -> Ops:
"""Get a backend object.
The special name "cpu" returns the best available CPU backend."""
ops_by_name = {ops_cls.name: ops_cls for ops_cls in registry.ops.get_all().values()} # type: ignore
cls: Optional[Callable[..., Ops]] = None
if name == "cpu":
_import_extra_cpu_backends()
cls = ops_by_name.get("numpy")
cls = ops_by_name.get("apple", cls)
cls = ops_by_name.get("bigendian", cls)
else:
cls = ops_by_name.get(name)
if cls is None:
raise ValueError(f"Invalid backend: {name}")
return cls(**kwargs)
|
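The two versions above differ only in how the "cpu" alias falls back across optionally registered backends. A minimal sketch of the chained `dict.get` fallback pattern follows; the registry contents are hypothetical stand-ins, not the library's real backend classes.

```python
# Minimal sketch of the chained-fallback lookup pattern; classes are hypothetical.
from typing import Callable, Dict, Optional

class NumpyOps: ...
class AppleOps: ...

ops_by_name: Dict[str, Callable] = {"numpy": NumpyOps, "apple": AppleOps}

cls: Optional[Callable] = ops_by_name.get("numpy")
cls = ops_by_name.get("apple", cls)       # prefer "apple" when it is registered
cls = ops_by_name.get("bigendian", cls)   # prefer "bigendian" when it is registered

assert cls is AppleOps                    # "bigendian" is absent, so "apple" wins
```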
13,815 | def _load_csv(csv_file):
"""
Reads CSV of large cookie data and returns a dict of details.
Arguments:
csv_file (string): File name for the csv
Returns a list of dicts containing parsed details for each cookie header log entry.
"""
with open(csv_file) as file:
csv_data = file.read()
reader = csv.DictReader(csv_data.splitlines())
# Regex to match against log messages like the following:
# BEGIN-COOKIE-SIZES(total=3773) user-info: 903, csrftoken: 64, ... END-COOKIE-SIZES
cookie_log_regex = re.compile(r"BEGIN-COOKIE-SIZES\(total=(?P<total>\d+)\)(?P<cookie_sizes>.*)END-COOKIE-SIZES")
cookie_sizes_strings_processed = set()
# Regex to match against just a single size, like the following:
# csrftoken: 64
cookie_size_regex = re.compile(r"(?P<name>.*): (?P<size>\d+)")
cookie_headers = []
for row in reader:
cookie_header_sizes = {}
raw_cookie_log = row.get("_raw")
cookie_begin_count = raw_cookie_log.count("BEGIN-COOKIE-SIZES")
if cookie_begin_count == 0:
logging.info("No BEGIN-COOKIE-SIZES delimeter found. Skipping row.")
elif cookie_begin_count > 1:
# Note: this wouldn't parse correctly right now, and it isn't worth coding for.
logging.warning("Multiple cookie entries found in same row. Skipping row.")
continue
match = cookie_log_regex.search(raw_cookie_log)
if not match:
logging.error("Multiple cookie entries found in same row. Skipping row.")
continue
cookie_header_size = int(match.group("total"))
if cookie_header_size == 0:
continue
cookie_sizes_str = match.group("cookie_sizes").strip()
if cookie_sizes_str in cookie_sizes_strings_processed:
logging.debug("Skipping already processed cookies.")
continue
cookie_sizes_strings_processed.add(cookie_sizes_str)
cookie_sizes = cookie_sizes_str.split(", ")
for cookie_size in cookie_sizes:
match = cookie_size_regex.search(cookie_size)
if not match:
logging.error(f"Could not parse cookie size from: {cookie_size}")
continue
cookie_header_sizes[match.group("name")] = int(match.group("size"))
cookie_header_size_computed = max(
0, sum(len(name) + size + 3 for (name, size) in cookie_header_sizes.items()) - 2
)
cookie_headers.append({
"datetime": parser.parse(row.get("_time")),
"env": row.get("index"),
"cookie_header_size": cookie_header_size,
"cookie_header_size_computed": cookie_header_size_computed,
"cookie_sizes": cookie_header_sizes,
})
return cookie_headers
| def _load_csv(csv_file):
"""
Reads CSV of large cookie data and returns a dict of details.
Arguments:
csv_file (string): File name for the csv
Returns a list of dicts containing parsed details for each cookie header log entry.
"""
with open(csv_file) as file:
csv_data = file.read()
reader = csv.DictReader(csv_data.splitlines())
# Regex to match against log messages like the following:
# BEGIN-COOKIE-SIZES(total=3773) user-info: 903, csrftoken: 64, ... END-COOKIE-SIZES
cookie_log_regex = re.compile(r"BEGIN-COOKIE-SIZES\(total=(?P<total>\d+)\)(?P<cookie_sizes>.*)END-COOKIE-SIZES")
cookie_sizes_strings_processed = set()
# Regex to match against just a single size, like the following:
# csrftoken: 64
cookie_size_regex = re.compile(r"(?P<name>.*): (?P<size>\d+)")
cookie_headers = []
for row in reader:
cookie_header_sizes = {}
raw_cookie_log = row.get("_raw")
cookie_begin_count = raw_cookie_log.count("BEGIN-COOKIE-SIZES")
if cookie_begin_count == 0:
logging.info("No BEGIN-COOKIE-SIZES delimiter found. Skipping row.")
elif cookie_begin_count > 1:
# Note: this wouldn't parse correctly right now, and it isn't worth coding for.
logging.warning("Multiple cookie entries found in same row. Skipping row.")
continue
match = cookie_log_regex.search(raw_cookie_log)
if not match:
logging.error("Multiple cookie entries found in same row. Skipping row.")
continue
cookie_header_size = int(match.group("total"))
if cookie_header_size == 0:
continue
cookie_sizes_str = match.group("cookie_sizes").strip()
if cookie_sizes_str in cookie_sizes_strings_processed:
logging.debug("Skipping already processed cookies.")
continue
cookie_sizes_strings_processed.add(cookie_sizes_str)
cookie_sizes = cookie_sizes_str.split(", ")
for cookie_size in cookie_sizes:
match = cookie_size_regex.search(cookie_size)
if not match:
logging.error(f"Could not parse cookie size from: {cookie_size}")
continue
cookie_header_sizes[match.group("name")] = int(match.group("size"))
cookie_header_size_computed = max(
0, sum(len(name) + size + 3 for (name, size) in cookie_header_sizes.items()) - 2
)
cookie_headers.append({
"datetime": parser.parse(row.get("_time")),
"env": row.get("index"),
"cookie_header_size": cookie_header_size,
"cookie_header_size_computed": cookie_header_size_computed,
"cookie_sizes": cookie_header_sizes,
})
return cookie_headers
|
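A worked example of the two regexes used above on a made-up log line (the cookie names and sizes are invented for illustration); it also reproduces the computed header size of `len(name) + size + 3` per cookie minus the trailing separator.

```python
# Worked example of the cookie-size regexes from the entry above, on a made-up log line.
import re

raw = "BEGIN-COOKIE-SIZES(total=989) user-info: 903, csrftoken: 64 END-COOKIE-SIZES"
log_re = re.compile(r"BEGIN-COOKIE-SIZES\(total=(?P<total>\d+)\)(?P<cookie_sizes>.*)END-COOKIE-SIZES")
size_re = re.compile(r"(?P<name>.*): (?P<size>\d+)")

m = log_re.search(raw)
sizes = {}
for part in m.group("cookie_sizes").strip().split(", "):
    s = size_re.search(part)
    sizes[s.group("name")] = int(s.group("size"))

# each cookie contributes len(name) + len(value) + len("=") + len("; "),
# and the trailing "; " of the last cookie is dropped
computed = max(0, sum(len(name) + size + 3 for name, size in sizes.items()) - 2)
print(int(m.group("total")), sizes, computed)
# 989 {'user-info': 903, 'csrftoken': 64} 989
```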
512 | def log_user_change(domain, couch_user, changed_by_user, changed_via=None,
message=None, fields_changed=None, action=ModelAction.UPDATE):
"""
Log changes done to a user.
For a new user or a deleted user, log only specific fields.
:param domain: domain where the update was initiated
:param couch_user: user being changed
:param changed_by_user: user making the change
    :param changed_via: changed via medium, i.e. API/Web
:param message: Optional Message text
:param fields_changed: dict of user fields that have changed with their current value
:param action: action on the user
"""
from corehq.apps.users.models import UserHistory
# domain is essential to filter changes done in a domain
if not domain:
raise ValueError("Please pass domain")
# for an update, there should always be fields that have changed
if action == ModelAction.UPDATE and not fields_changed:
raise ValueError("'fields_changed' is required for update.")
return UserHistory.objects.create(
domain=domain,
user_type=couch_user.doc_type,
user_id=couch_user.get_id,
by_user_id=changed_by_user.get_id,
details={
'changes': _get_changed_details(couch_user, action, fields_changed),
'changed_via': changed_via,
},
message=message,
action=action.value,
)
| def log_user_change(domain, couch_user, changed_by_user, changed_via=None,
message=None, fields_changed=None, action=ModelAction.UPDATE):
"""
Log changes done to a user.
For a new user or a deleted user, log only specific fields.
:param domain: domain where the update was initiated
:param couch_user: user being changed
:param changed_by_user: user making the change
    :param changed_via: changed via medium, i.e. API/Web
:param message: Optional Message text
:param fields_changed: dict of user fields that have changed with their current value
:param action: action on the user
"""
from corehq.apps.users.models import UserHistory
# domain is essential to filter changes done in a domain
if not domain:
raise ValueError("'domain' required")
# for an update, there should always be fields that have changed
if action == ModelAction.UPDATE and not fields_changed:
raise ValueError("'fields_changed' is required for update.")
return UserHistory.objects.create(
domain=domain,
user_type=couch_user.doc_type,
user_id=couch_user.get_id,
by_user_id=changed_by_user.get_id,
details={
'changes': _get_changed_details(couch_user, action, fields_changed),
'changed_via': changed_via,
},
message=message,
action=action.value,
)
|
41,072 | def train(args):
"""Train with the given args.
Args:
args (namespace): The program arguments.
"""
set_deterministic_pytorch(args)
if args.num_encs > 1:
args = format_mulenc_args(args)
# check cuda availability
if not torch.cuda.is_available():
logging.warning('cuda is not available')
# get input and output dimension info
with open(args.valid_json, 'rb') as f:
valid_json = json.load(f)['utts']
utts = list(valid_json.keys())
idim_list = [int(valid_json[utts[0]]['input'][i]['shape'][-1]) for i in range(args.num_encs)]
odim = int(valid_json[utts[0]]['output'][0]['shape'][-1])
for i in range(args.num_encs):
logging.info('stream{}: input dims : {}'.format(i + 1, idim_list[i]))
logging.info('#output dims: ' + str(odim))
# specify attention, CTC, hybrid mode
if args.mtlalpha == 1.0:
mtl_mode = 'ctc'
logging.info('Pure CTC mode')
elif args.mtlalpha == 0.0:
mtl_mode = 'att'
logging.info('Pure attention mode')
else:
mtl_mode = 'mtl'
logging.info('Multitask learning mode')
if (args.enc_init is not None or args.dec_init is not None) and args.num_encs == 1:
model = load_trained_modules(idim_list[0], odim, args)
else:
model_class = dynamic_import(args.model_module)
model = model_class(idim_list[0] if args.num_encs == 1 else idim_list, odim, args)
assert isinstance(model, ASRInterface)
logging.warning(' Total parameter of the model = ' + str(sum(p.numel() for p in model.parameters())) )
if args.rnnlm is not None:
rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
rnnlm = lm_pytorch.ClassifierWithState(
lm_pytorch.RNNLM(
len(args.char_list), rnnlm_args.layer, rnnlm_args.unit))
torch_load(args.rnnlm, rnnlm)
model.rnnlm = rnnlm
# write model config
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
model_conf = args.outdir + '/model.json'
with open(model_conf, 'wb') as f:
logging.info('writing a model config file to ' + model_conf)
f.write(json.dumps((idim_list[0] if args.num_encs == 1 else idim_list, odim, vars(args)),
indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))
for key in sorted(vars(args).keys()):
logging.info('ARGS: ' + key + ': ' + str(vars(args)[key]))
reporter = model.reporter
# check the use of multi-gpu
if args.ngpu > 1:
if args.batch_size != 0:
logging.warning('batch size is automatically increased (%d -> %d)' % (
args.batch_size, args.batch_size * args.ngpu))
args.batch_size *= args.ngpu
if args.num_encs > 1:
# TODO(ruizhili): implement data parallel for multi-encoder setup.
raise NotImplementedError("Data parallel is not supported for multi-encoder setup.")
# set torch device
device = torch.device("cuda" if args.ngpu > 0 else "cpu")
if args.train_dtype in ("float16", "float32", "float64"):
dtype = getattr(torch, args.train_dtype)
else:
dtype = torch.float32
model = model.to(device=device, dtype=dtype)
# Setup an optimizer
if args.opt == 'adadelta':
optimizer = torch.optim.Adadelta(
model.parameters(), rho=0.95, eps=args.eps,
weight_decay=args.weight_decay)
elif args.opt == 'adam':
optimizer = torch.optim.Adam(model.parameters(),
weight_decay=args.weight_decay)
elif args.opt == 'noam':
from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt
optimizer = get_std_opt(model, args.adim, args.transformer_warmup_steps, args.transformer_lr)
else:
raise NotImplementedError("unknown optimizer: " + args.opt)
# setup apex.amp
if args.train_dtype in ("O0", "O1", "O2", "O3"):
try:
from apex import amp
except ImportError as e:
logging.error(f"You need to install apex for --train-dtype {args.train_dtype}. "
"See https://github.com/NVIDIA/apex#linux")
raise e
if args.opt == 'noam':
model, optimizer.optimizer = amp.initialize(model, optimizer.optimizer, opt_level=args.train_dtype)
else:
model, optimizer = amp.initialize(model, optimizer, opt_level=args.train_dtype)
use_apex = True
else:
use_apex = False
# FIXME: TOO DIRTY HACK
setattr(optimizer, "target", reporter)
setattr(optimizer, "serialize", lambda s: reporter.serialize(s))
# Setup a converter
if args.num_encs == 1:
converter = CustomConverter(subsampling_factor=model.subsample[0], dtype=dtype)
else:
converter = CustomConverterMulEnc([i[0] for i in model.subsample_list], dtype=dtype)
# read json data
with open(args.train_json, 'rb') as f:
train_json = json.load(f)['utts']
with open(args.valid_json, 'rb') as f:
valid_json = json.load(f)['utts']
use_sortagrad = args.sortagrad == -1 or args.sortagrad > 0
# make minibatch list (variable length)
train = make_batchset(train_json, args.batch_size,
args.maxlen_in, args.maxlen_out, args.minibatches,
min_batch_size=args.ngpu if args.ngpu > 1 else 1,
shortest_first=use_sortagrad,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout,
iaxis=0, oaxis=0)
valid = make_batchset(valid_json, args.batch_size,
args.maxlen_in, args.maxlen_out, args.minibatches,
min_batch_size=args.ngpu if args.ngpu > 1 else 1,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout,
iaxis=0, oaxis=0)
load_tr = LoadInputsAndTargets(
mode='asr', load_output=True, preprocess_conf=args.preprocess_conf,
preprocess_args={'train': True} # Switch the mode of preprocessing
)
load_cv = LoadInputsAndTargets(
mode='asr', load_output=True, preprocess_conf=args.preprocess_conf,
preprocess_args={'train': False} # Switch the mode of preprocessing
)
# hack to make batchsize argument as 1
    # actual batchsize is included in a list
# default collate function converts numpy array to pytorch tensor
# we used an empty collate function instead which returns list
train_iter = {'main': ChainerDataLoader(
dataset=TransformDataset(train, lambda data: converter([load_tr(data)])),
batch_size=1, num_workers=args.n_iter_processes,
shuffle=not use_sortagrad, collate_fn=lambda x: x[0])}
valid_iter = {'main': ChainerDataLoader(
dataset=TransformDataset(valid, lambda data: converter([load_cv(data)])),
batch_size=1, shuffle=False, collate_fn=lambda x: x[0],
num_workers=args.n_iter_processes)}
# Set up a trainer
updater = CustomUpdater(
model, args.grad_clip, train_iter, optimizer,
device, args.ngpu, args.grad_noise, args.accum_grad, use_apex=use_apex)
trainer = training.Trainer(
updater, (args.epochs, 'epoch'), out=args.outdir)
if use_sortagrad:
trainer.extend(ShufflingEnabler([train_iter]),
trigger=(args.sortagrad if args.sortagrad != -1 else args.epochs, 'epoch'))
# Resume from a snapshot
if args.resume:
logging.info('resumed from %s' % args.resume)
torch_resume(args.resume, trainer)
# Evaluate the model with the test dataset for each epoch
if args.save_interval_iters > 0:
trainer.extend(CustomEvaluator(model, valid_iter, reporter, device, args.ngpu),
trigger=(args.save_interval_iters, 'iteration'))
else:
trainer.extend(CustomEvaluator(model, valid_iter, reporter, device, args.ngpu))
# Save attention weight each epoch
if args.num_save_attention > 0 and args.mtlalpha != 1.0:
data = sorted(list(valid_json.items())[:args.num_save_attention],
key=lambda x: int(x[1]['input'][0]['shape'][1]), reverse=True)
if hasattr(model, "module"):
att_vis_fn = model.module.calculate_all_attentions
plot_class = model.module.attention_plot_class
else:
att_vis_fn = model.calculate_all_attentions
plot_class = model.attention_plot_class
att_reporter = plot_class(
att_vis_fn, data, args.outdir + "/att_ws",
converter=converter, transform=load_cv, device=device)
trainer.extend(att_reporter, trigger=(1, 'epoch'))
else:
att_reporter = None
# Make a plot for training and validation values
if args.num_encs > 1:
report_keys_loss_ctc = ['main/loss_ctc{}'.format(i + 1) for i in range(model.num_encs)] + [
'validation/main/loss_ctc{}'.format(i + 1) for i in range(model.num_encs)]
report_keys_cer_ctc = ['main/cer_ctc{}'.format(i + 1) for i in range(model.num_encs)] + [
'validation/main/cer_ctc{}'.format(i + 1) for i in range(model.num_encs)]
trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss',
'main/loss_ctc', 'validation/main/loss_ctc',
'main/loss_att',
'validation/main/loss_att'] +
([] if args.num_encs == 1 else report_keys_loss_ctc),
'epoch', file_name='loss.png'))
trainer.extend(extensions.PlotReport(['main/acc', 'validation/main/acc'],
'epoch', file_name='acc.png'))
trainer.extend(extensions.PlotReport(
['main/cer_ctc', 'validation/main/cer_ctc'] + ([] if args.num_encs == 1 else report_keys_loss_ctc),
'epoch', file_name='cer.png'))
# Save best models
trainer.extend(snapshot_object(model, 'model.loss.best'),
trigger=training.triggers.MinValueTrigger('validation/main/loss'))
if mtl_mode != 'ctc':
trainer.extend(snapshot_object(model, 'model.acc.best'),
trigger=training.triggers.MaxValueTrigger('validation/main/acc'))
# save snapshot which contains model and optimizer states
if args.save_interval_iters > 0:
trainer.extend(torch_snapshot(filename='snapshot.iter.{.updater.iteration}'),
trigger=(args.save_interval_iters, 'iteration'))
else:
trainer.extend(torch_snapshot(), trigger=(1, 'epoch'))
# epsilon decay in the optimizer
if args.opt == 'adadelta':
if args.criterion == 'acc' and mtl_mode != 'ctc':
trainer.extend(restore_snapshot(model, args.outdir + '/model.acc.best', load_fn=torch_load),
trigger=CompareValueTrigger(
'validation/main/acc',
lambda best_value, current_value: best_value > current_value))
trainer.extend(adadelta_eps_decay(args.eps_decay),
trigger=CompareValueTrigger(
'validation/main/acc',
lambda best_value, current_value: best_value > current_value))
elif args.criterion == 'loss':
trainer.extend(restore_snapshot(model, args.outdir + '/model.loss.best', load_fn=torch_load),
trigger=CompareValueTrigger(
'validation/main/loss',
lambda best_value, current_value: best_value < current_value))
trainer.extend(adadelta_eps_decay(args.eps_decay),
trigger=CompareValueTrigger(
'validation/main/loss',
lambda best_value, current_value: best_value < current_value))
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport(trigger=(args.report_interval_iters, 'iteration')))
report_keys = ['epoch', 'iteration', 'main/loss', 'main/loss_ctc', 'main/loss_att',
'validation/main/loss', 'validation/main/loss_ctc', 'validation/main/loss_att',
'main/acc', 'validation/main/acc', 'main/cer_ctc', 'validation/main/cer_ctc',
'elapsed_time'] + ([] if args.num_encs == 1 else report_keys_cer_ctc + report_keys_loss_ctc)
if args.opt == 'adadelta':
trainer.extend(extensions.observe_value(
'eps', lambda trainer: trainer.updater.get_optimizer('main').param_groups[0]["eps"]),
trigger=(args.report_interval_iters, 'iteration'))
report_keys.append('eps')
if args.report_cer:
report_keys.append('validation/main/cer')
if args.report_wer:
report_keys.append('validation/main/wer')
trainer.extend(extensions.PrintReport(
report_keys), trigger=(args.report_interval_iters, 'iteration'))
trainer.extend(extensions.ProgressBar(update_interval=args.report_interval_iters))
set_early_stop(trainer, args)
if args.tensorboard_dir is not None and args.tensorboard_dir != "":
trainer.extend(TensorboardLogger(SummaryWriter(args.tensorboard_dir), att_reporter),
trigger=(args.report_interval_iters, "iteration"))
# Run the training
trainer.run()
check_early_stop(trainer, args.epochs)
| def train(args):
"""Train with the given args.
Args:
args (namespace): The program arguments.
"""
set_deterministic_pytorch(args)
if args.num_encs > 1:
args = format_mulenc_args(args)
# check cuda availability
if not torch.cuda.is_available():
logging.warning('cuda is not available')
# get input and output dimension info
with open(args.valid_json, 'rb') as f:
valid_json = json.load(f)['utts']
utts = list(valid_json.keys())
idim_list = [int(valid_json[utts[0]]['input'][i]['shape'][-1]) for i in range(args.num_encs)]
odim = int(valid_json[utts[0]]['output'][0]['shape'][-1])
for i in range(args.num_encs):
logging.info('stream{}: input dims : {}'.format(i + 1, idim_list[i]))
logging.info('#output dims: ' + str(odim))
# specify attention, CTC, hybrid mode
if args.mtlalpha == 1.0:
mtl_mode = 'ctc'
logging.info('Pure CTC mode')
elif args.mtlalpha == 0.0:
mtl_mode = 'att'
logging.info('Pure attention mode')
else:
mtl_mode = 'mtl'
logging.info('Multitask learning mode')
if (args.enc_init is not None or args.dec_init is not None) and args.num_encs == 1:
model = load_trained_modules(idim_list[0], odim, args)
else:
model_class = dynamic_import(args.model_module)
model = model_class(idim_list[0] if args.num_encs == 1 else idim_list, odim, args)
assert isinstance(model, ASRInterface)
logging.info(' Total parameter of the model = ' + str(sum(p.numel() for p in model.parameters())) )
if args.rnnlm is not None:
rnnlm_args = get_model_conf(args.rnnlm, args.rnnlm_conf)
rnnlm = lm_pytorch.ClassifierWithState(
lm_pytorch.RNNLM(
len(args.char_list), rnnlm_args.layer, rnnlm_args.unit))
torch_load(args.rnnlm, rnnlm)
model.rnnlm = rnnlm
# write model config
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
model_conf = args.outdir + '/model.json'
with open(model_conf, 'wb') as f:
logging.info('writing a model config file to ' + model_conf)
f.write(json.dumps((idim_list[0] if args.num_encs == 1 else idim_list, odim, vars(args)),
indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8'))
for key in sorted(vars(args).keys()):
logging.info('ARGS: ' + key + ': ' + str(vars(args)[key]))
reporter = model.reporter
# check the use of multi-gpu
if args.ngpu > 1:
if args.batch_size != 0:
logging.warning('batch size is automatically increased (%d -> %d)' % (
args.batch_size, args.batch_size * args.ngpu))
args.batch_size *= args.ngpu
if args.num_encs > 1:
# TODO(ruizhili): implement data parallel for multi-encoder setup.
raise NotImplementedError("Data parallel is not supported for multi-encoder setup.")
# set torch device
device = torch.device("cuda" if args.ngpu > 0 else "cpu")
if args.train_dtype in ("float16", "float32", "float64"):
dtype = getattr(torch, args.train_dtype)
else:
dtype = torch.float32
model = model.to(device=device, dtype=dtype)
# Setup an optimizer
if args.opt == 'adadelta':
optimizer = torch.optim.Adadelta(
model.parameters(), rho=0.95, eps=args.eps,
weight_decay=args.weight_decay)
elif args.opt == 'adam':
optimizer = torch.optim.Adam(model.parameters(),
weight_decay=args.weight_decay)
elif args.opt == 'noam':
from espnet.nets.pytorch_backend.transformer.optimizer import get_std_opt
optimizer = get_std_opt(model, args.adim, args.transformer_warmup_steps, args.transformer_lr)
else:
raise NotImplementedError("unknown optimizer: " + args.opt)
# setup apex.amp
if args.train_dtype in ("O0", "O1", "O2", "O3"):
try:
from apex import amp
except ImportError as e:
logging.error(f"You need to install apex for --train-dtype {args.train_dtype}. "
"See https://github.com/NVIDIA/apex#linux")
raise e
if args.opt == 'noam':
model, optimizer.optimizer = amp.initialize(model, optimizer.optimizer, opt_level=args.train_dtype)
else:
model, optimizer = amp.initialize(model, optimizer, opt_level=args.train_dtype)
use_apex = True
else:
use_apex = False
# FIXME: TOO DIRTY HACK
setattr(optimizer, "target", reporter)
setattr(optimizer, "serialize", lambda s: reporter.serialize(s))
# Setup a converter
if args.num_encs == 1:
converter = CustomConverter(subsampling_factor=model.subsample[0], dtype=dtype)
else:
converter = CustomConverterMulEnc([i[0] for i in model.subsample_list], dtype=dtype)
# read json data
with open(args.train_json, 'rb') as f:
train_json = json.load(f)['utts']
with open(args.valid_json, 'rb') as f:
valid_json = json.load(f)['utts']
use_sortagrad = args.sortagrad == -1 or args.sortagrad > 0
# make minibatch list (variable length)
train = make_batchset(train_json, args.batch_size,
args.maxlen_in, args.maxlen_out, args.minibatches,
min_batch_size=args.ngpu if args.ngpu > 1 else 1,
shortest_first=use_sortagrad,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout,
iaxis=0, oaxis=0)
valid = make_batchset(valid_json, args.batch_size,
args.maxlen_in, args.maxlen_out, args.minibatches,
min_batch_size=args.ngpu if args.ngpu > 1 else 1,
count=args.batch_count,
batch_bins=args.batch_bins,
batch_frames_in=args.batch_frames_in,
batch_frames_out=args.batch_frames_out,
batch_frames_inout=args.batch_frames_inout,
iaxis=0, oaxis=0)
load_tr = LoadInputsAndTargets(
mode='asr', load_output=True, preprocess_conf=args.preprocess_conf,
preprocess_args={'train': True} # Switch the mode of preprocessing
)
load_cv = LoadInputsAndTargets(
mode='asr', load_output=True, preprocess_conf=args.preprocess_conf,
preprocess_args={'train': False} # Switch the mode of preprocessing
)
# hack to make batchsize argument as 1
    # actual batchsize is included in a list
# default collate function converts numpy array to pytorch tensor
# we used an empty collate function instead which returns list
train_iter = {'main': ChainerDataLoader(
dataset=TransformDataset(train, lambda data: converter([load_tr(data)])),
batch_size=1, num_workers=args.n_iter_processes,
shuffle=not use_sortagrad, collate_fn=lambda x: x[0])}
valid_iter = {'main': ChainerDataLoader(
dataset=TransformDataset(valid, lambda data: converter([load_cv(data)])),
batch_size=1, shuffle=False, collate_fn=lambda x: x[0],
num_workers=args.n_iter_processes)}
# Set up a trainer
updater = CustomUpdater(
model, args.grad_clip, train_iter, optimizer,
device, args.ngpu, args.grad_noise, args.accum_grad, use_apex=use_apex)
trainer = training.Trainer(
updater, (args.epochs, 'epoch'), out=args.outdir)
if use_sortagrad:
trainer.extend(ShufflingEnabler([train_iter]),
trigger=(args.sortagrad if args.sortagrad != -1 else args.epochs, 'epoch'))
# Resume from a snapshot
if args.resume:
logging.info('resumed from %s' % args.resume)
torch_resume(args.resume, trainer)
# Evaluate the model with the test dataset for each epoch
if args.save_interval_iters > 0:
trainer.extend(CustomEvaluator(model, valid_iter, reporter, device, args.ngpu),
trigger=(args.save_interval_iters, 'iteration'))
else:
trainer.extend(CustomEvaluator(model, valid_iter, reporter, device, args.ngpu))
# Save attention weight each epoch
if args.num_save_attention > 0 and args.mtlalpha != 1.0:
data = sorted(list(valid_json.items())[:args.num_save_attention],
key=lambda x: int(x[1]['input'][0]['shape'][1]), reverse=True)
if hasattr(model, "module"):
att_vis_fn = model.module.calculate_all_attentions
plot_class = model.module.attention_plot_class
else:
att_vis_fn = model.calculate_all_attentions
plot_class = model.attention_plot_class
att_reporter = plot_class(
att_vis_fn, data, args.outdir + "/att_ws",
converter=converter, transform=load_cv, device=device)
trainer.extend(att_reporter, trigger=(1, 'epoch'))
else:
att_reporter = None
# Make a plot for training and validation values
if args.num_encs > 1:
report_keys_loss_ctc = ['main/loss_ctc{}'.format(i + 1) for i in range(model.num_encs)] + [
'validation/main/loss_ctc{}'.format(i + 1) for i in range(model.num_encs)]
report_keys_cer_ctc = ['main/cer_ctc{}'.format(i + 1) for i in range(model.num_encs)] + [
'validation/main/cer_ctc{}'.format(i + 1) for i in range(model.num_encs)]
trainer.extend(extensions.PlotReport(['main/loss', 'validation/main/loss',
'main/loss_ctc', 'validation/main/loss_ctc',
'main/loss_att',
'validation/main/loss_att'] +
([] if args.num_encs == 1 else report_keys_loss_ctc),
'epoch', file_name='loss.png'))
trainer.extend(extensions.PlotReport(['main/acc', 'validation/main/acc'],
'epoch', file_name='acc.png'))
trainer.extend(extensions.PlotReport(
['main/cer_ctc', 'validation/main/cer_ctc'] + ([] if args.num_encs == 1 else report_keys_loss_ctc),
'epoch', file_name='cer.png'))
# Save best models
trainer.extend(snapshot_object(model, 'model.loss.best'),
trigger=training.triggers.MinValueTrigger('validation/main/loss'))
if mtl_mode != 'ctc':
trainer.extend(snapshot_object(model, 'model.acc.best'),
trigger=training.triggers.MaxValueTrigger('validation/main/acc'))
# save snapshot which contains model and optimizer states
if args.save_interval_iters > 0:
trainer.extend(torch_snapshot(filename='snapshot.iter.{.updater.iteration}'),
trigger=(args.save_interval_iters, 'iteration'))
else:
trainer.extend(torch_snapshot(), trigger=(1, 'epoch'))
# epsilon decay in the optimizer
if args.opt == 'adadelta':
if args.criterion == 'acc' and mtl_mode != 'ctc':
trainer.extend(restore_snapshot(model, args.outdir + '/model.acc.best', load_fn=torch_load),
trigger=CompareValueTrigger(
'validation/main/acc',
lambda best_value, current_value: best_value > current_value))
trainer.extend(adadelta_eps_decay(args.eps_decay),
trigger=CompareValueTrigger(
'validation/main/acc',
lambda best_value, current_value: best_value > current_value))
elif args.criterion == 'loss':
trainer.extend(restore_snapshot(model, args.outdir + '/model.loss.best', load_fn=torch_load),
trigger=CompareValueTrigger(
'validation/main/loss',
lambda best_value, current_value: best_value < current_value))
trainer.extend(adadelta_eps_decay(args.eps_decay),
trigger=CompareValueTrigger(
'validation/main/loss',
lambda best_value, current_value: best_value < current_value))
# Write a log of evaluation statistics for each epoch
trainer.extend(extensions.LogReport(trigger=(args.report_interval_iters, 'iteration')))
report_keys = ['epoch', 'iteration', 'main/loss', 'main/loss_ctc', 'main/loss_att',
'validation/main/loss', 'validation/main/loss_ctc', 'validation/main/loss_att',
'main/acc', 'validation/main/acc', 'main/cer_ctc', 'validation/main/cer_ctc',
'elapsed_time'] + ([] if args.num_encs == 1 else report_keys_cer_ctc + report_keys_loss_ctc)
if args.opt == 'adadelta':
trainer.extend(extensions.observe_value(
'eps', lambda trainer: trainer.updater.get_optimizer('main').param_groups[0]["eps"]),
trigger=(args.report_interval_iters, 'iteration'))
report_keys.append('eps')
if args.report_cer:
report_keys.append('validation/main/cer')
if args.report_wer:
report_keys.append('validation/main/wer')
trainer.extend(extensions.PrintReport(
report_keys), trigger=(args.report_interval_iters, 'iteration'))
trainer.extend(extensions.ProgressBar(update_interval=args.report_interval_iters))
set_early_stop(trainer, args)
if args.tensorboard_dir is not None and args.tensorboard_dir != "":
trainer.extend(TensorboardLogger(SummaryWriter(args.tensorboard_dir), att_reporter),
trigger=(args.report_interval_iters, "iteration"))
# Run the training
trainer.run()
check_early_stop(trainer, args.epochs)
|
36,286 | def post_json(session, url, json):
"""
Post JSON to the Forest endpoint.
"""
logger.debug(f"Sending request to {url}: {json}")
res = session.post(url, json=json)
if res.status_code >= 400:
raise parse_error(res)
return res
| def post_json(session, url, json):
"""
Post JSON to the Forest endpoint.
"""
logger.debug(f"Sending POST request to {url}: {json}")
res = session.post(url, json=json)
if res.status_code >= 400:
raise parse_error(res)
return res
|
27,862 | def _backprop(outputs, inputs, grad_required, retain_grad, grads, loss_scale):
candidate_funcs, push_candidate, pop_candidate = _get_ordered_func_heap()
for y in outputs:
creator = y.creator_node
if creator is not None:
push_candidate(creator)
input_nodes = set(x.node for x in inputs)
ret_dict = {}
base_hooks = chainer.get_function_hooks().values()
while candidate_funcs:
func = pop_candidate()
# Collect the gradients w.r.t. the outputs
ys = [y() for y in func.outputs] # access via weak ref
gys = tuple([grads.pop(y) for y in ys])
for node, gy in six.moves.zip(ys, gys):
if node is not None:
if node in input_nodes:
ret_dict[node] = gy
if retain_grad:
y = node.get_variable_or_none()
if y is not None:
y.grad_var = gy
y._loss_scale = loss_scale
# Collect the gradients w.r.t. the inputs
input_indexes = []
x_grads = collections.OrderedDict()
for i, x in enumerate(func.inputs):
if x not in grad_required:
continue
input_indexes.append(i)
if x not in x_grads:
x_grads[x] = grads.get_as_list(x)
if not input_indexes:
continue
input_indexes = tuple(input_indexes)
# Do backward
# Call pre-backward hooks
if func._n_local_function_hooks != 0:
local_hooks = collections.OrderedDict(chainer.get_function_hooks())
local_hooks.update(func.local_function_hooks)
hooks = local_hooks.values() # avoid six for performance
else:
hooks = base_hooks
in_data = [x.data for x in func.inputs]
out_grad_data = [None if g is None else g.data for g in gys]
with cuda.get_device_from_array(*in_data):
for hook in hooks:
hook.backward_preprocess(
func, tuple(in_data), tuple(out_grad_data))
_backprop_utils.backprop_step(func, input_indexes, gys, x_grads)
# Call post-backward hooks
for hook in hooks:
hook.backward_postprocess(
func, tuple(in_data), tuple(out_grad_data))
# Update grads
for node, g in x_grads.items():
if not g: # gradient == None
continue
creator = node.creator_node
if creator is not None:
push_candidate(creator)
for x in input_nodes:
if x not in ret_dict:
ret_dict[x] = grads.pop(x)
return ret_dict
| def _backprop(outputs, inputs, grad_required, retain_grad, grads, loss_scale):
candidate_funcs, push_candidate, pop_candidate = _get_ordered_func_heap()
for y in outputs:
creator = y.creator_node
if creator is not None:
push_candidate(creator)
input_nodes = set(x.node for x in inputs)
ret_dict = {}
base_hooks = chainer.get_function_hooks().values()
while candidate_funcs:
func = pop_candidate()
# Collect the gradients w.r.t. the outputs
ys = [y() for y in func.outputs] # access via weak ref
gys = tuple([grads.pop(y) for y in ys])
for node, gy in six.moves.zip(ys, gys):
if node is not None:
if node in input_nodes:
ret_dict[node] = gy
if retain_grad:
y = node.get_variable_or_none()
if y is not None:
y.grad_var = gy
y._loss_scale = loss_scale
# Collect the gradients w.r.t. the inputs
input_indexes = []
x_grads = collections.OrderedDict()
for i, x in enumerate(func.inputs):
if x not in grad_required:
continue
input_indexes.append(i)
if x not in x_grads:
x_grads[x] = grads.get_as_list(x)
if not input_indexes:
continue
input_indexes = tuple(input_indexes)
# Do backward
# Call pre-backward hooks
if func._n_local_function_hooks != 0:
local_hooks = collections.OrderedDict(chainer.get_function_hooks())
local_hooks.update(func.local_function_hooks)
hooks = local_hooks.values() # avoid six for performance
else:
hooks = base_hooks
in_data = [x.data for x in func.inputs]
in_data = [x.array for x in func.inputs]
with cuda.get_device_from_array(*in_data):
for hook in hooks:
hook.backward_preprocess(
func, tuple(in_data), tuple(out_grad_data))
_backprop_utils.backprop_step(func, input_indexes, gys, x_grads)
# Call post-backward hooks
for hook in hooks:
hook.backward_postprocess(
func, tuple(in_data), tuple(out_grad_data))
# Update grads
for node, g in x_grads.items():
if not g: # gradient == None
continue
creator = node.creator_node
if creator is not None:
push_candidate(creator)
for x in input_nodes:
if x not in ret_dict:
ret_dict[x] = grads.pop(x)
return ret_dict
|
2,880 | def log_loss(
y_true, y_pred, *, eps="auto", normalize=True, sample_weight=None, labels=None
):
r"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of a logistic model that returns ``y_pred`` probabilities
for its training data ``y_true``.
The log loss is only defined for two or more labels.
For a single sample with true label :math:`y \in \{0,1\}` and
a probability estimate :math:`p = \operatorname{Pr}(y = 1)`, the log
loss is:
.. math::
L_{\log}(y, p) = -(y \log (p) + (1 - y) \log (1 - p))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes) or (n_samples,)
Predicted probabilities, as returned by a classifier's
predict_proba method. If ``y_pred.shape = (n_samples,)``
the probabilities provided are assumed to be that of the
positive class. The labels in ``y_pred`` are assumed to be
ordered alphabetically, as done by
:class:`preprocessing.LabelBinarizer`.
eps : float or "auto", default="auto" or 1e-15
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
Default value will be the epsilon of the dtype of y_pred if it is
a numpy array or 1e-15 if a non-numpy input is passed.
normalize : bool, default=True
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
labels : array-like, default=None
If not provided, labels will be inferred from y_true. If ``labels``
is ``None`` and ``y_pred`` has shape (n_samples,) the labels are
assumed to be binary and are inferred from ``y_true``.
.. versionadded:: 0.18
Returns
-------
loss : float
Log loss, aka logistic loss or cross-entropy loss.
Notes
-----
The logarithm used is the natural logarithm (base-e).
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Examples
--------
>>> from sklearn.metrics import log_loss
>>> log_loss(["spam", "ham", "ham", "spam"],
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
"""
if eps == "auto":
if "numpy" in str(type(y_pred)) and "float" in str(y_pred.dtype):
eps = np.finfo(y_pred.dtype).eps
else:
warnings.warn(
"eps set to 1e-15 as y_pred is not a numpy float array",
DataConversionWarning,
)
eps = 1e-15
y_pred = check_array(y_pred, ensure_2d=False)
check_consistent_length(y_pred, y_true, sample_weight)
lb = LabelBinarizer()
if labels is not None:
lb.fit(labels)
else:
lb.fit(y_true)
if len(lb.classes_) == 1:
if labels is None:
raise ValueError(
"y_true contains only one label ({0}). Please "
"provide the true labels explicitly through the "
"labels argument.".format(lb.classes_[0])
)
else:
raise ValueError(
"The labels array needs to contain at least two "
"labels for log_loss, "
"got {0}.".format(lb.classes_)
)
transformed_labels = lb.transform(y_true)
if transformed_labels.shape[1] == 1:
transformed_labels = np.append(
1 - transformed_labels, transformed_labels, axis=1
)
# Clipping
y_pred = np.clip(y_pred, eps, 1 - eps)
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if y_pred.ndim == 1:
y_pred = y_pred[:, np.newaxis]
if y_pred.shape[1] == 1:
y_pred = np.append(1 - y_pred, y_pred, axis=1)
# Check if dimensions are consistent.
transformed_labels = check_array(transformed_labels)
if len(lb.classes_) != y_pred.shape[1]:
if labels is None:
raise ValueError(
"y_true and y_pred contain different number of "
"classes {0}, {1}. Please provide the true "
"labels explicitly through the labels argument. "
"Classes found in "
"y_true: {2}".format(
transformed_labels.shape[1], y_pred.shape[1], lb.classes_
)
)
else:
raise ValueError(
"The number of classes in labels is different "
"from that in y_pred. Classes found in "
"labels: {0}".format(lb.classes_)
)
# Renormalize
y_pred /= y_pred.sum(axis=1)[:, np.newaxis]
loss = -(transformed_labels * np.log(y_pred)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
| def log_loss(
y_true, y_pred, *, eps="auto", normalize=True, sample_weight=None, labels=None
):
r"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of a logistic model that returns ``y_pred`` probabilities
for its training data ``y_true``.
The log loss is only defined for two or more labels.
For a single sample with true label :math:`y \in \{0,1\}` and
a probability estimate :math:`p = \operatorname{Pr}(y = 1)`, the log
loss is:
.. math::
L_{\log}(y, p) = -(y \log (p) + (1 - y) \log (1 - p))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes) or (n_samples,)
Predicted probabilities, as returned by a classifier's
predict_proba method. If ``y_pred.shape = (n_samples,)``
the probabilities provided are assumed to be that of the
positive class. The labels in ``y_pred`` are assumed to be
ordered alphabetically, as done by
:class:`preprocessing.LabelBinarizer`.
eps : float or "auto", default="auto"
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
Default value will be the epsilon of the dtype of y_pred if it is
a numpy array or 1e-15 if a non-numpy input is passed.
normalize : bool, default=True
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
labels : array-like, default=None
If not provided, labels will be inferred from y_true. If ``labels``
is ``None`` and ``y_pred`` has shape (n_samples,) the labels are
assumed to be binary and are inferred from ``y_true``.
.. versionadded:: 0.18
Returns
-------
loss : float
Log loss, aka logistic loss or cross-entropy loss.
Notes
-----
The logarithm used is the natural logarithm (base-e).
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Examples
--------
>>> from sklearn.metrics import log_loss
>>> log_loss(["spam", "ham", "ham", "spam"],
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
"""
if eps == "auto":
if "numpy" in str(type(y_pred)) and "float" in str(y_pred.dtype):
eps = np.finfo(y_pred.dtype).eps
else:
warnings.warn(
"eps set to 1e-15 as y_pred is not a numpy float array",
DataConversionWarning,
)
eps = 1e-15
y_pred = check_array(y_pred, ensure_2d=False)
check_consistent_length(y_pred, y_true, sample_weight)
lb = LabelBinarizer()
if labels is not None:
lb.fit(labels)
else:
lb.fit(y_true)
if len(lb.classes_) == 1:
if labels is None:
raise ValueError(
"y_true contains only one label ({0}). Please "
"provide the true labels explicitly through the "
"labels argument.".format(lb.classes_[0])
)
else:
raise ValueError(
"The labels array needs to contain at least two "
"labels for log_loss, "
"got {0}.".format(lb.classes_)
)
transformed_labels = lb.transform(y_true)
if transformed_labels.shape[1] == 1:
transformed_labels = np.append(
1 - transformed_labels, transformed_labels, axis=1
)
# Clipping
y_pred = np.clip(y_pred, eps, 1 - eps)
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if y_pred.ndim == 1:
y_pred = y_pred[:, np.newaxis]
if y_pred.shape[1] == 1:
y_pred = np.append(1 - y_pred, y_pred, axis=1)
# Check if dimensions are consistent.
transformed_labels = check_array(transformed_labels)
if len(lb.classes_) != y_pred.shape[1]:
if labels is None:
raise ValueError(
"y_true and y_pred contain different number of "
"classes {0}, {1}. Please provide the true "
"labels explicitly through the labels argument. "
"Classes found in "
"y_true: {2}".format(
transformed_labels.shape[1], y_pred.shape[1], lb.classes_
)
)
else:
raise ValueError(
"The number of classes in labels is different "
"from that in y_pred. Classes found in "
"labels: {0}".format(lb.classes_)
)
# Renormalize
y_pred /= y_pred.sum(axis=1)[:, np.newaxis]
loss = -(transformed_labels * np.log(y_pred)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
|
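The docstring example above reports 0.21616...; a hand computation with plain NumPy, using the alphabetical label order the docstring describes, confirms that the per-sample term is -log of the probability assigned to the true class.

```python
# Hand computation of the docstring example: labels sorted alphabetically give
# columns [P(ham), P(spam)], and the loss is the mean of -log(p_true_class).
import numpy as np

y_true = ["spam", "ham", "ham", "spam"]
y_pred = np.array([[.1, .9], [.9, .1], [.8, .2], [.35, .65]])

classes = sorted(set(y_true))                      # ['ham', 'spam']
col = {c: i for i, c in enumerate(classes)}
p_true = y_pred[np.arange(len(y_true)), [col[c] for c in y_true]]

loss = -np.log(p_true).mean()
print(round(loss, 5))                              # 0.21616
```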
26,659 | def _get_message_attribute(o):
if isinstance(o, bytes):
return {'DataType': 'Binary', 'BinaryValue': o}
elif isinstance(o, str):
return {'DataType': 'String', 'StringValue': o}
elif isinstance(o, (int, float)):
return {'DataType': 'Number', 'StringValue': str(o)}
elif hasattr(o, '__iter__'):
return {'DataType': 'String.Array', 'StringValue': json.dumps(o)}
else:
raise TypeError('Values in MessageAttributes must be one of bytes, str, int, float, or iterable; '
f'got {type(o)}')
| def _get_message_attribute(o):
if isinstance(o, bytes):
return {'DataType': 'Binary', 'BinaryValue': o}
if isinstance(o, str):
return {'DataType': 'String', 'StringValue': o}
elif isinstance(o, (int, float)):
return {'DataType': 'Number', 'StringValue': str(o)}
elif hasattr(o, '__iter__'):
return {'DataType': 'String.Array', 'StringValue': json.dumps(o)}
else:
raise TypeError('Values in MessageAttributes must be one of bytes, str, int, float, or iterable; '
f'got {type(o)}')
|
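The mapping above produces AWS-style MessageAttributes entries (String, String.Array, Number, Binary). A hedged illustration of the resulting shapes, written out by hand for a hypothetical message (the attribute names are made up):

```python
# Hand-written illustration of the four attribute shapes produced above.
import json

attributes = {
    "payload": {"DataType": "Binary", "BinaryValue": b"\x00\x01"},
    "subject": {"DataType": "String", "StringValue": "hello"},
    "retries": {"DataType": "Number", "StringValue": str(3)},
    "tags": {"DataType": "String.Array", "StringValue": json.dumps(["a", "b"])},
}
print(attributes["tags"]["StringValue"])   # ["a", "b"]
```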
20,740 | def test_getMoreProjects_hasNewProjects(api_client):
api_client.hasMoreProjectsToLoad = MagicMock(return_value=True)
http_manager = MagicMock()
api_client._http = http_manager
finished_callback = MagicMock()
failed_callback = MagicMock()
api_client.getMoreProjects(finished_callback, failed_callback)
http_manager.get.assert_called_once()
| def test_getMoreProjects_hasNewProjects(api_client):
api_client.hasMoreProjectsToLoad = MagicMock(return_value = True)
http_manager = MagicMock()
api_client._http = http_manager
finished_callback = MagicMock()
failed_callback = MagicMock()
api_client.getMoreProjects(finished_callback, failed_callback)
http_manager.get.assert_called_once()
|
31,905 | def get_first_available_iam_user_attr(user_profile: IAMUserProfile, iam_attrs: List[str], mapper_out: str,
use_old_user_data: bool = True):
mapped_old_user_data: Dict = demisto.mapObject(user_profile.old_user_data, mapper_out,
IAMUserProfile.UPDATE_INCIDENT_TYPE)
user_profile_mapped_data = user_profile.map_object(mapper_out, IAMUserProfile.UPDATE_INCIDENT_TYPE)
for iam_attr in iam_attrs:
if use_old_user_data and mapped_old_user_data.get(iam_attr):
return iam_attr, mapped_old_user_data.get(iam_attr)
if user_profile_mapped_data.get(iam_attr):
return iam_attr, user_profile_mapped_data.get(iam_attr)
raise DemistoException('Could not find any of the needed attributes. Please make sure you send one of the '
f'following attributes: {iam_attrs}, or have the outgoing mapper configured to map to'
'one of the listed attributes.')
| def get_first_available_iam_user_attr(user_profile: IAMUserProfile, iam_attrs: List[str], mapper_out: str,
use_old_user_data: bool = True):
mapped_old_user_data: Dict = demisto.mapObject(user_profile.old_user_data, mapper_out,
IAMUserProfile.UPDATE_INCIDENT_TYPE)
user_profile_mapped_data = user_profile.map_object(mapper_out, IAMUserProfile.UPDATE_INCIDENT_TYPE)
for iam_attr in iam_attrs:
if use_old_user_data and mapped_old_user_data.get(iam_attr):
return iam_attr, mapped_old_user_data.get(iam_attr)
if user_profile_mapped_data.get(iam_attr):
return iam_attr, user_profile_mapped_data.get(iam_attr)
raise DemistoException(f'Your user profile argument must contain at least one attribute that is mapped into one of the following attributes in the {mapper_out} mapper: {iam_attrs}')
|
44,002 | def dot(tensor1, tensor2):
"""Returns the matrix or dot product of two tensors.
* If both tensors are 0-dimensional, elementwise multiplication
is performed and a 0-dimensional scalar returned.
* If both tensors are 1-dimensional, the dot product is returned.
* If the first array is 2-dimensional and the second array 1-dimensional,
the matrix-vector product is returned.
* If both tensors are 2-dimensional, the matrix product is returned.
    * Finally, if the first array is N-dimensional and the second array
M-dimensional, a sum product over the last dimension of the first array,
and the second-to-last dimension of the second array is returned.
Args:
tensor1 (tensor_like): input tensor
tensor2 (tensor_like): input tensor
Returns:
tensor_like: the matrix or dot product of two tensors
"""
interface = _multi_dispatch([tensor1, tensor2])
x, y = np.coerce([tensor1, tensor2], like=interface)
if interface == "torch":
if x.ndim == 0 and y.ndim == 0:
return x * y
if x.ndim <= 2 and y.ndim <= 2:
return x @ y
return np.tensordot(x, y, dims=[[-1], [-2]], like=interface)
if interface == "tensorflow":
if x.ndim == 0 and y.ndim == 0:
return x * y
if y.ndim == 1:
return np.tensordot(x, y, axes=[[-1], [0]], like=interface)
if x.ndim == 2 and y.ndim == 2:
return x @ y
return np.tensordot(x, y, axes=[[-1], [-2]], like=interface)
return np.dot(x, y, like=interface)
| def dot(tensor1, tensor2):
"""Returns the matrix or dot product of two tensors.
* If both tensors are 0-dimensional, elementwise multiplication
is performed and a 0-dimensional scalar returned.
* If both tensors are 1-dimensional, the dot product is returned.
* If the first array is 2-dimensional and the second array 1-dimensional,
the matrix-vector product is returned.
* If both tensors are 2-dimensional, the matrix product is returned.
    * Finally, if the first array is N-dimensional and the second array
M-dimensional, a sum product over the last dimension of the first array,
and the second-to-last dimension of the second array is returned.
Args:
tensor1 (tensor_like): input tensor
tensor2 (tensor_like): input tensor
Returns:
tensor_like: the matrix or dot product of two tensors
``k``-th tuple is interpreted to contain the ``k``-th entry of all indices:
"""
interface = _multi_dispatch([tensor1, tensor2])
x, y = np.coerce([tensor1, tensor2], like=interface)
if interface == "torch":
if x.ndim == 0 and y.ndim == 0:
return x * y
if x.ndim <= 2 and y.ndim <= 2:
return x @ y
return np.tensordot(x, y, dims=[[-1], [-2]], like=interface)
if interface == "tensorflow":
if x.ndim == 0 and y.ndim == 0:
return x * y
if y.ndim == 1:
return np.tensordot(x, y, axes=[[-1], [0]], like=interface)
if x.ndim == 2 and y.ndim == 2:
return x @ y
return np.tensordot(x, y, axes=[[-1], [-2]], like=interface)
return np.dot(x, y, like=interface)
|
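The fallback branch above contracts the last axis of the first tensor with the second-to-last axis of the second via `tensordot`. A plain-NumPy check, with arbitrary example shapes, that this matches the `@` operator for a stacked matrix times a matrix:

```python
# NumPy check of the tensordot convention used above: contracting the last axis of
# the first tensor with the second-to-last axis of the second reproduces `@` here.
# The (2, 3, 4) x (4, 5) shapes are arbitrary example choices.
import numpy as np

rng = np.random.default_rng(0)
a = rng.standard_normal((2, 3, 4))
b = rng.standard_normal((4, 5))

out = np.tensordot(a, b, axes=[[-1], [-2]])
assert out.shape == (2, 3, 5)
assert np.allclose(out, a @ b)
```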
55,419 | def _get_experiment_id_from_env():
experiment_name = env.get_env(_EXPERIMENT_NAME_ENV_VAR)
experiment_id = env.get_env(_EXPERIMENT_ID_ENV_VAR)
if experiment_name is not None:
exp = MlflowClient().get_experiment_by_name(experiment_name)
if exp:
if experiment_id and experiment_id != exp.experiment_id:
raise MlflowException(
message=f"The provided environment variable {_EXPERIMENT_ID_ENV_VAR} "
f"`{experiment_id}` does not match the experiment id "
f"`{exp.experiment_id}` for experiment name `{experiment_name}`",
error_code=INVALID_PARAMETER_VALUE,
)
else:
return exp.experiment_id
else:
raise MlflowException(
message=f"The provided environment variable {_EXPERIMENT_NAME_ENV_VAR} "
f"`{experiment_name}` does not exist. Create an experiment with this name "
f"by using `mlflow.create_experiment(name='{experiment_name}')`",
error_code=INVALID_PARAMETER_VALUE,
)
if experiment_id is not None:
try:
exp = MlflowClient().get_experiment(experiment_id)
return exp.experiment_id
except MlflowException as exc:
raise MlflowException(
message=f"The provided environment variable {_EXPERIMENT_ID_ENV_VAR} "
f"`{experiment_id}` does not exist in the tracking server. Provide a valid "
f"experiment_id.\nTracker exception: {exc.message}",
error_code=INVALID_PARAMETER_VALUE,
)
| def _get_experiment_id_from_env():
experiment_name = env.get_env(_EXPERIMENT_NAME_ENV_VAR)
experiment_id = env.get_env(_EXPERIMENT_ID_ENV_VAR)
if experiment_name is not None:
exp = MlflowClient().get_experiment_by_name(experiment_name)
if exp:
if experiment_id and experiment_id != exp.experiment_id:
raise MlflowException(
message=f"The provided environment variable {_EXPERIMENT_ID_ENV_VAR} "
f"`{experiment_id}` does not match the experiment id "
f"`{exp.experiment_id}` for experiment name `{experiment_name}`",
error_code=INVALID_PARAMETER_VALUE,
)
else:
return exp.experiment_id
else:
raise MlflowException(
message=f"The provided environment variable {_EXPERIMENT_NAME_ENV_VAR} "
f"`{experiment_name}` does not exist. Create an experiment with this name "
f"by using `mlflow.create_experiment(name='{experiment_name}')`",
error_code=INVALID_PARAMETER_VALUE,
)
if experiment_id is not None:
try:
exp = MlflowClient().get_experiment(experiment_id)
return exp.experiment_id
except MlflowException as exc:
raise MlflowException(
message=f"The provided environment variable {_EXPERIMENT_ID_ENV_VAR} "
f"`{experiment_id}` does not exist in the tracking server. Provide a valid "
f"experiment_id.",
error_code=INVALID_PARAMETER_VALUE,
) from exc
|
50,483 | def collect_coverage_from_gcov(covdata, options, logger):
datafiles = set()
find_files = find_datafiles
process_file = process_datafile
if options.gcov_files:
find_files = find_existing_gcov_files
process_file = process_existing_gcov_file
# Get data files
if not options.search_paths:
options.search_paths = [options.root]
if options.objdir is not None:
options.search_paths.append(options.objdir)
if options.use_canonical_paths:
normalized_search_paths = [os.path.realpath(search_path) for search_path in options.search_paths]
for normalized_search_path, search_path in zip(normalized_search_paths, options.search_paths):
if normalized_search_path != search_path:
logger.msg(f"search_path {search_path} normalized to {normalized_search_path}.")
options.search_paths = normalized_search_paths
for search_path in options.search_paths:
datafiles.update(find_files(search_path, logger, options.exclude_dirs))
# Get coverage data
with Workers(options.gcov_parallel, lambda: {
'covdata': dict(),
'toerase': set(),
'options': options}) as pool:
logger.verbose_msg("Pool started with {} threads", pool.size())
for file_ in datafiles:
pool.add(process_file, file_)
contexts = pool.wait()
toerase = set()
for context in contexts:
for fname, cov in context['covdata'].items():
if fname not in covdata:
covdata[fname] = cov
else:
covdata[fname].update(cov)
toerase.update(context['toerase'])
for filepath in toerase:
if os.path.exists(filepath):
os.remove(filepath)
| def collect_coverage_from_gcov(covdata, options, logger):
datafiles = set()
find_files = find_datafiles
process_file = process_datafile
if options.gcov_files:
find_files = find_existing_gcov_files
process_file = process_existing_gcov_file
# Get data files
if not options.search_paths:
options.search_paths = [options.root]
if options.objdir is not None:
options.search_paths.append(options.objdir)
normalized_search_paths = []
for search_path in options.search_paths:
normalized_search_path = os.path.realpath(search_path)
if normalized_search_path != search_path:
logger.msg(f"search_path {search_path} normalized to {normalized_search_path}.")
normalized_search_paths.push(normalized_search_path)
options.search_paths = normalized_search_paths
for search_path in options.search_paths:
datafiles.update(find_files(search_path, logger, options.exclude_dirs))
# Get coverage data
with Workers(options.gcov_parallel, lambda: {
'covdata': dict(),
'toerase': set(),
'options': options}) as pool:
logger.verbose_msg("Pool started with {} threads", pool.size())
for file_ in datafiles:
pool.add(process_file, file_)
contexts = pool.wait()
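    # Merge the per-worker coverage data and collect temporary files for removal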
toerase = set()
for context in contexts:
for fname, cov in context['covdata'].items():
if fname not in covdata:
covdata[fname] = cov
else:
covdata[fname].update(cov)
toerase.update(context['toerase'])
for filepath in toerase:
if os.path.exists(filepath):
os.remove(filepath)
|
13,415 | def test_12_a_system_dataset_can_be_moved_to_second_pool_root_dataset(request):
depends(request, ["second_pool"])
results = PUT("/systemdataset/", {'pool': 'second_pool'})
assert results.status_code == 200, results.text
assert isinstance(results.json(), int), results.text
job_status = wait_on_job(results.json(), 120)
assert job_status['state'] == 'SUCCESS', str(job_status['results'])
results = GET("/systemdataset/")
assert results.status_code == 200, results.text
assert isinstance(results.json(), dict), results.text
assert results.json()['pool'] == 'second_pool', results.text
assert results.json()['basename'] == 'second_pool/.system', results.text
| def test_12_move_sysds_to_second_pool(request):
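    # Move the system dataset to second_pool and verify the reported pool and basename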
depends(request, ["second_pool"])
results = PUT("/systemdataset/", {'pool': 'second_pool'})
assert results.status_code == 200, results.text
assert isinstance(results.json(), int), results.text
job_status = wait_on_job(results.json(), 120)
assert job_status['state'] == 'SUCCESS', str(job_status['results'])
results = GET("/systemdataset/")
assert results.status_code == 200, results.text
assert isinstance(results.json(), dict), results.text
assert results.json()['pool'] == 'second_pool', results.text
assert results.json()['basename'] == 'second_pool/.system', results.text
|
59,151 | def _regress_out_chunk(data):
# data is a tuple containing the selected columns from adata.X
# and the regressors dataFrame
data_chunk = data[0]
regressors = data[1]
variable_is_categorical = data[2]
responses_chunk_list = []
import statsmodels.api as sm
from statsmodels.tools.sm_exceptions import PerfectSeparationError
for col_index in range(data_chunk.shape[1]):
        # if all values are identical, statsmodels.api.GLM throws an error; but then no regression is necessary anyway...
if (data_chunk[:, col_index] != data_chunk[0, col_index]).sum() == 0:
responses_chunk_list.append(data_chunk[:, col_index])
continue
if variable_is_categorical:
regres = np.c_[np.ones(regressors.shape[0]), regressors[:, col_index]]
else:
regres = regressors
try:
result = sm.GLM(data_chunk[:, col_index], regres, family=sm.families.Gaussian()).fit()
new_column = result.resid_response
except PerfectSeparationError: # this emulates R's behavior
logg.warning('Encountered PerfectSeparationError, setting to 0 as in R.')
new_column = np.zeros(data_chunk.shape[0])
responses_chunk_list.append(new_column)
return np.vstack(responses_chunk_list)
| def _regress_out_chunk(data):
# data is a tuple containing the selected columns from adata.X
# and the regressors dataFrame
data_chunk = data[0]
regressors = data[1]
variable_is_categorical = data[2]
responses_chunk_list = []
import statsmodels.api as sm
from statsmodels.tools.sm_exceptions import PerfectSeparationError
for col_index in range(data_chunk.shape[1]):
        # if all values are identical, statsmodels.api.GLM throws an error;
        # but then no regression is necessary anyway...
if (data_chunk[:, col_index] != data_chunk[0, col_index]).sum() == 0:
responses_chunk_list.append(data_chunk[:, col_index])
continue
if variable_is_categorical:
regres = np.c_[np.ones(regressors.shape[0]), regressors[:, col_index]]
else:
regres = regressors
try:
result = sm.GLM(data_chunk[:, col_index], regres, family=sm.families.Gaussian()).fit()
new_column = result.resid_response
except PerfectSeparationError: # this emulates R's behavior
logg.warning('Encountered PerfectSeparationError, setting to 0 as in R.')
new_column = np.zeros(data_chunk.shape[0])
responses_chunk_list.append(new_column)
return np.vstack(responses_chunk_list)
|