id (int64, 11–59.9k) | original (string, 33–150k chars) | modified (string, 37–150k chars)
---|---|---
3,316 |
def _get_constrained_date_range(params, allow_minute_resolution=False):
interval = parse_stats_period(params.get("interval", "1h"))
interval = int(3600 if interval is None else interval.total_seconds())
smallest_interval = ONE_MINUTE if allow_minute_resolution else ONE_HOUR
if interval % smallest_interval != 0 or interval < smallest_interval:
interval_str = "one minute" if allow_minute_resolution else "one hour"
raise InvalidParams(
f"The interval has to be a multiple of the minimum interval of {interval_str}."
)
if interval > ONE_DAY:
raise InvalidParams("The interval has to be less than one day.")
if ONE_DAY % interval != 0:
raise InvalidParams("The interval should divide one day without a remainder.")
using_minute_resolution = interval % ONE_HOUR != 0
start, end = get_date_range_from_params(params)
now = datetime.now(tz=pytz.utc)
# if `end` is explicitly given, we add a second to it, so it is treated as
# inclusive. the rounding logic down below will take care of the rest.
if params.get("end"):
end += timedelta(seconds=1)
date_range = end - start
# round the range up to a multiple of the interval.
# the minimum is 1h so the "totals" will not go out of sync, as they will
# use the materialized storage due to no grouping on the `started` column.
# NOTE: we can remove the difference between `interval` / `rounding_interval`
# as soon as snuba can provide us with grouped totals in the same query
# as the timeseries (using `WITH ROLLUP` in clickhouse)
rounding_interval = int(math.ceil(interval / ONE_HOUR) * ONE_HOUR)
date_range = timedelta(
seconds=int(rounding_interval * math.ceil(date_range.total_seconds() / rounding_interval))
)
if using_minute_resolution:
if date_range.total_seconds() > 6 * ONE_HOUR:
raise InvalidParams(
"The time-range when using one-minute resolution intervals is restricted to 6 hours."
)
if (now - start).total_seconds() > 30 * ONE_DAY:
raise InvalidParams(
"The time-range when using one-minute resolution intervals is restricted to the last 30 days."
)
if date_range.total_seconds() / interval > MAX_POINTS:
raise InvalidParams(
"Your interval and date range would create too many results. "
"Use a larger interval, or a smaller date range."
)
end_ts = int(rounding_interval * math.ceil(to_timestamp(end) / rounding_interval))
end = to_datetime(end_ts)
# when expanding the rounding interval, we would adjust the end time too far
# to the future, in which case the start time would not actually contain our
# desired date range. adjust for this by extend the time by another interval.
# for example, when "45m" means the range from 08:49:00-09:34:00, our rounding
# has to go from 08:00:00 to 10:00:00.
if rounding_interval > interval and (end - date_range) > start:
date_range += timedelta(seconds=rounding_interval)
start = end - date_range
# snuba <-> sentry has a 5 minute cache for *exact* queries, which these
# are because of the way we do our rounding. For that reason we round the end
# of "realtime" queries to one minute to get a one-minute cache instead.
if end > now:
end = to_datetime(ONE_MINUTE * (math.floor(to_timestamp(now) / ONE_MINUTE) + 1))
return start, end, interval
|
def _get_constrained_date_range(params, allow_minute_resolution=False):
interval = parse_stats_period(params.get("interval", "1h"))
interval = int(3600 if interval is None else interval.total_seconds())
smallest_interval = ONE_MINUTE if allow_minute_resolution else ONE_HOUR
if interval % smallest_interval != 0 or interval < smallest_interval:
interval_str = "one minute" if allow_minute_resolution else "one hour"
raise InvalidParams(
f"The interval has to be a multiple of the minimum interval of {interval_str}."
)
if interval > ONE_DAY:
raise InvalidParams("The interval has to be less than one day.")
if ONE_DAY % interval != 0:
raise InvalidParams("The interval should divide one day without a remainder.")
using_minute_resolution = interval % ONE_HOUR != 0
start, end = get_date_range_from_params(params)
now = datetime.now(tz=pytz.utc)
# if `end` is explicitly given, we add a second to it, so it is treated as
# inclusive. the rounding logic down below will take care of the rest.
if params.get("end"):
end += timedelta(seconds=1)
date_range = end - start
# round the range up to a multiple of the interval.
# the minimum is 1h so the "totals" will not go out of sync, as they will
# use the materialized storage due to no grouping on the `started` column.
# NOTE: we can remove the difference between `interval` / `rounding_interval`
# as soon as snuba can provide us with grouped totals in the same query
# as the timeseries (using `WITH ROLLUP` in clickhouse)
rounding_interval = int(math.ceil(interval / ONE_HOUR) * ONE_HOUR)
date_range = timedelta(
seconds=int(rounding_interval * math.ceil(date_range.total_seconds() / rounding_interval))
)
if using_minute_resolution:
if date_range.total_seconds() > 6 * ONE_HOUR:
raise InvalidParams(
"The time-range when using one-minute resolution intervals is restricted to 6 hours."
)
if (now - start).total_seconds() > 30 * ONE_DAY:
raise InvalidParams(
"The time-range when using one-minute resolution intervals is restricted to the last 30 days."
)
if date_range.total_seconds() / interval > MAX_POINTS:
raise InvalidParams(
"Your interval and date range would create too many results. "
"Use a larger interval, or a smaller date range."
)
end_ts = int(rounding_interval * math.ceil(to_timestamp(end) / rounding_interval))
end = to_datetime(end_ts)
# when expanding the rounding interval, we would adjust the end time too far
# to the future, in which case the start time would not actually contain our
# desired date range. adjust for this by extend the time by another interval.
# for example, when "45m" means the range from 08:49:00-09:34:00, our rounding
# has to go from 08:00:00 to 10:00:00.
if rounding_interval > interval and (end - date_range) > start:
date_range += timedelta(seconds=rounding_interval)
start = end - date_range
# snuba <-> sentry has a 5 minute cache for *exact* queries, which these
# are because of the way we do our rounding. For that reason we round the end
# of "realtime" queries to one minute into the future to get a one-minute cache instead.
if end > now:
end = to_datetime(ONE_MINUTE * (math.floor(to_timestamp(now) / ONE_MINUTE) + 1))
return start, end, interval
|
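Between the two versions above only the comment about realtime-query rounding changes; the rounding arithmetic itself is untouched. As a minimal standalone sketch of that arithmetic (the `ONE_HOUR` constant and the timestamps below are illustrative assumptions):

```python
import math
from datetime import datetime, timedelta

ONE_HOUR = 3600  # assumed constant, matching the module above

def round_range_up(start: datetime, end: datetime, interval: int) -> timedelta:
    # round the interval itself up to whole hours, then round the requested
    # span up to a whole number of those hour-aligned intervals
    rounding_interval = int(math.ceil(interval / ONE_HOUR) * ONE_HOUR)
    span = (end - start).total_seconds()
    return timedelta(seconds=int(rounding_interval * math.ceil(span / rounding_interval)))

# a 45-minute request (08:49-09:34) rounds up to a full one-hour window; the
# function above then additionally aligns `end` to the rounding interval,
# which is how the 08:00-10:00 example in its comment comes about.
print(round_range_up(datetime(2021, 1, 1, 8, 49), datetime(2021, 1, 1, 9, 34), 45 * 60))  # 1:00:00
```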
8,388 |
def _chi_sqaure_for_templates(observed_spectrum, template_spectrum, resample_method):
"""
Resample the template spectrum to match the wavelength of the observed
spectrum. Then, calculate chi2 on the flux of the two spectra.
Parameters
----------
observed_spectrum : :class:`~specutils.Spectrum1D`
The observed spectrum.
template_spectrum : :class:`~specutils.Spectrum1D`
The template spectrum, which will be resampled to match the wavelength
of the observed spectrum.
Returns
-------
normalized_template_spectrum : :class:`~specutils.Spectrum1D`
The normalized spectrum template.
chi2 : `float`
The chi2 of the flux of the observed spectrum and the flux of the
normalized template spectrum.
"""
# Resample template
if _resample(resample_method) != 0:
fluxc_resample = _resample(resample_method)
template_obswavelength = fluxc_resample(template_spectrum,
observed_spectrum.wavelength)
# Normalize spectra
normalization = _normalize_for_template_matching(observed_spectrum,
template_obswavelength)
# Numerator
num_right = normalization * template_obswavelength.flux
num = observed_spectrum.flux - num_right
# Denominator
denom = observed_spectrum.uncertainty.array * observed_spectrum.flux.unit
# Get chi square
result = (num/denom)**2
chi2 = np.sum(result.value)
# Create normalized template spectrum, which will be returned with
# corresponding chi2
normalized_template_spectrum = Spectrum1D(
spectral_axis=template_spectrum.spectral_axis,
flux=template_spectrum.flux*normalization)
return normalized_template_spectrum, chi2
|
def _chi_square_for_templates(observed_spectrum, template_spectrum, resample_method):
"""
Resample the template spectrum to match the wavelength of the observed
spectrum. Then, calculate chi2 on the flux of the two spectra.
Parameters
----------
observed_spectrum : :class:`~specutils.Spectrum1D`
The observed spectrum.
template_spectrum : :class:`~specutils.Spectrum1D`
The template spectrum, which will be resampled to match the wavelength
of the observed spectrum.
Returns
-------
normalized_template_spectrum : :class:`~specutils.Spectrum1D`
The normalized spectrum template.
chi2 : `float`
The chi2 of the flux of the observed spectrum and the flux of the
normalized template spectrum.
"""
# Resample template
if _resample(resample_method) != 0:
fluxc_resample = _resample(resample_method)
template_obswavelength = fluxc_resample(template_spectrum,
observed_spectrum.wavelength)
# Normalize spectra
normalization = _normalize_for_template_matching(observed_spectrum,
template_obswavelength)
# Numerator
num_right = normalization * template_obswavelength.flux
num = observed_spectrum.flux - num_right
# Denominator
denom = observed_spectrum.uncertainty.array * observed_spectrum.flux.unit
# Get chi square
result = (num/denom)**2
chi2 = np.sum(result.value)
# Create normalized template spectrum, which will be returned with
# corresponding chi2
normalized_template_spectrum = Spectrum1D(
spectral_axis=template_spectrum.spectral_axis,
flux=template_spectrum.flux*normalization)
return normalized_template_spectrum, chi2
|
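The chi-square described in the docstring reduces to a sum of squared, uncertainty-weighted residuals. A standalone numpy sketch with illustrative arrays in place of the `Spectrum1D` objects:

```python
import numpy as np

def chi_square(observed_flux, template_flux, uncertainty, normalization=1.0):
    # squared residuals between the observed flux and the normalized template
    # flux, weighted by the per-pixel uncertainty, summed over all pixels
    residual = observed_flux - normalization * template_flux
    return np.sum((residual / uncertainty) ** 2)

obs = np.array([1.0, 2.0, 3.0])
tmpl = np.array([1.1, 1.9, 3.2])
err = np.array([0.1, 0.1, 0.1])
print(chi_square(obs, tmpl, err))  # ~6.0
```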
13,566 |
def eigs(A, E=None, k=3, which='LM', b=None, l=None, maxiter=1000, tol=1e-13):
"""Approximate a few eigenvalues of an |Operator|.
Computes `k` eigenvalues `w[i]` with corresponding eigenvectors `v[i]` which solve
the eigenvalue problem
.. math::
A v[i] = w[i] v[i]
or the generalized eigenvalue problem
.. math::
A v[i] = w[i] E v[i]
if `E` is not `None`.
The implementation is based on Algorithm 4.2 in [RL95]_.
Parameters
----------
A
The real |Operator| for which the eigenvalues are to be computed.
E
The |Operator| which defines the generalized eigenvalue problem.
k
The number of eigenvalues and eigenvectors which are to be computed.
which
A string specifying which `k` eigenvalues and eigenvectors to compute:
- `'LM'`: select eigenvalues with largest |v[i]|
- `'SM'`: select eigenvalues with smallest |v[i]|
- `'LR'`: select eigenvalues with largest Re(v[i])
- `'SR'`: select eigenvalues with smallest Re(v[i])
- `'LI'`: select eigenvalues with largest Im(v[i])
- `'SI'`: select eigenvalues with smallest Im(v[i])
b
Initial vector for Arnoldi iteration. Default is a random vector.
l
The size of the Arnoldi factorization. Default is `min(n - 1, max(2*k + 1, 20))`.
maxiter
The maximum number of iterations.
tol
The relative error tolerance for the ritz estimates.
Returns
-------
w
A |NumPy array| which contains the computed eigenvalues.
v
A |VectorArray| which contains the computed eigenvectors.
"""
n = A.source.dim
if l is None:
l = np.min((n - 1, np.max((2 * k + 1, 20))))
if E is None:
E = IdentityOperator(A.source)
assert A.source == A.range
assert E.source == A.source
assert E.range == A.source
assert k < n
assert l > k
if b is None:
b = A.source.random()
V, H, f = arnoldi(A, E, k, b)
k0 = k
i = 0
while True:
i = i + 1
V, H, f = extend_arnoldi(A, E, V, H, f, l - k)
ew, ev = spla.eig(H)
# truncate small imaginary parts
ew.imag[np.abs(ew.imag) / np.abs(ew) < 1e-12] = 0
if which == 'LM':
idx = np.argsort(-np.abs(ew))
elif which == 'SM':
idx = np.argsort(np.abs(ew))
elif which == 'LR':
idx = np.argsort(-np.real(ew))
elif which == 'SR':
idx = np.argsort(np.real(ew))
elif which == 'LI':
idx = np.argsort(-np.abs(np.imag(ew)))
elif which == 'SI':
idx = np.argsort(np.abs(np.imag(ew)))
k = k0
ews = ew[idx]
evs = ev[:, idx]
rres = f.l2_norm()[0] * np.abs(evs[l - 1]) / np.abs(ews)
# increase k by one in order to keep complex conjugate pairs together
if ews[k - 1].imag != 0 and ews[k - 1].imag + ews[k].imag < 1e-12:
k = k + 1
if np.all(rres[:k] <= tol) or i >= maxiter:
break
# increase k in order to prevent stagnation
k = np.min((l - 1, k + np.min((np.count_nonzero(rres[:k] <= tol), (l - k) // 2))))
# sort shifts for QR iteration based on their residual
shifts = ews[k:l]
srres = rres[k:l]
idx = np.argsort(-srres)
srres = srres[idx]
shifts = shifts[idx]
# don't use converged unwanted ritzvalues as shifts
shifts = np.delete(shifts, np.where(srres == 0))
k = k + np.count_nonzero(srres == 0)
if shifts[0].imag != 0 and shifts[0].imag + ews[1].imag >= 1e-12:
shifts = shifts[1:]
k = k + 1
H, Qs = QR_iteration(H, shifts)
V = V.lincomb(Qs.T)
f = V[k] * H[k, k - 1] + f * Qs[l - 1, k - 1]
V = V[:k]
H = H[:k, :k]
return ews[:k0], V.lincomb(evs[:, :k0].T)
|
def eigs(A, E=None, k=3, which='LM', b=None, l=None, maxiter=1000, tol=1e-13):
"""Approximate a few eigenvalues of an |Operator|.
Computes `k` eigenvalues `w[i]` with corresponding eigenvectors `v[i]` which solve
the eigenvalue problem
.. math::
A v[i] = w[i] v[i]
or the generalized eigenvalue problem
.. math::
A v[i] = w[i] E v[i]
if `E` is not `None`.
The implementation is based on Algorithm 4.2 in [RL95]_.
Parameters
----------
A
The real |Operator| for which the eigenvalues are to be computed.
E
The |Operator| which defines the generalized eigenvalue problem.
k
The number of eigenvalues and eigenvectors which are to be computed.
which
A string specifying which `k` eigenvalues and eigenvectors to compute:
- `'LM'`: select eigenvalues with largest |v[i]|
- `'SM'`: select eigenvalues with smallest |v[i]|
- `'LR'`: select eigenvalues with largest Re(v[i])
- `'SR'`: select eigenvalues with smallest Re(v[i])
- `'LI'`: select eigenvalues with largest Im(v[i])
- `'SI'`: select eigenvalues with smallest Im(v[i])
b
Initial vector for Arnoldi iteration. Default is a random vector.
l
The size of the Arnoldi factorization. Default is `min(n - 1, max(2*k + 1, 20))`.
maxiter
The maximum number of iterations.
tol
The relative error tolerance for the Ritz estimates.
Returns
-------
w
A |NumPy array| which contains the computed eigenvalues.
v
A |VectorArray| which contains the computed eigenvectors.
"""
n = A.source.dim
if l is None:
l = np.min((n - 1, np.max((2 * k + 1, 20))))
if E is None:
E = IdentityOperator(A.source)
assert A.source == A.range
assert E.source == A.source
assert E.range == A.source
assert k < n
assert l > k
if b is None:
b = A.source.random()
V, H, f = arnoldi(A, E, k, b)
k0 = k
i = 0
while True:
i = i + 1
V, H, f = extend_arnoldi(A, E, V, H, f, l - k)
ew, ev = spla.eig(H)
# truncate small imaginary parts
ew.imag[np.abs(ew.imag) / np.abs(ew) < 1e-12] = 0
if which == 'LM':
idx = np.argsort(-np.abs(ew))
elif which == 'SM':
idx = np.argsort(np.abs(ew))
elif which == 'LR':
idx = np.argsort(-np.real(ew))
elif which == 'SR':
idx = np.argsort(np.real(ew))
elif which == 'LI':
idx = np.argsort(-np.abs(np.imag(ew)))
elif which == 'SI':
idx = np.argsort(np.abs(np.imag(ew)))
k = k0
ews = ew[idx]
evs = ev[:, idx]
rres = f.l2_norm()[0] * np.abs(evs[l - 1]) / np.abs(ews)
# increase k by one in order to keep complex conjugate pairs together
if ews[k - 1].imag != 0 and ews[k - 1].imag + ews[k].imag < 1e-12:
k = k + 1
if np.all(rres[:k] <= tol) or i >= maxiter:
break
# increase k in order to prevent stagnation
k = np.min((l - 1, k + np.min((np.count_nonzero(rres[:k] <= tol), (l - k) // 2))))
# sort shifts for QR iteration based on their residual
shifts = ews[k:l]
srres = rres[k:l]
idx = np.argsort(-srres)
srres = srres[idx]
shifts = shifts[idx]
# don't use converged unwanted ritzvalues as shifts
shifts = np.delete(shifts, np.where(srres == 0))
k = k + np.count_nonzero(srres == 0)
if shifts[0].imag != 0 and shifts[0].imag + ews[1].imag >= 1e-12:
shifts = shifts[1:]
k = k + 1
H, Qs = QR_iteration(H, shifts)
V = V.lincomb(Qs.T)
f = V[k] * H[k, k - 1] + f * Qs[l - 1, k - 1]
V = V[:k]
H = H[:k, :k]
return ews[:k0], V.lincomb(evs[:, :k0].T)
|
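The `which` codes described in the docstring are implemented as plain `argsort` calls over the eigenvalue array; a self-contained numpy sketch with made-up eigenvalues:

```python
import numpy as np

ew = np.array([3 + 0j, -5 + 0j, 1 + 2j, 0.5 - 4j])
order = {
    'LM': np.argsort(-np.abs(ew)),        # largest magnitude first
    'SM': np.argsort(np.abs(ew)),         # smallest magnitude first
    'LR': np.argsort(-ew.real),           # largest real part first
    'SR': np.argsort(ew.real),            # smallest real part first
    'LI': np.argsort(-np.abs(ew.imag)),   # largest |imaginary part| first
    'SI': np.argsort(np.abs(ew.imag)),    # smallest |imaginary part| first
}
print(ew[order['LM'][:3]])  # the three of largest magnitude: -5, 0.5-4j, 3
```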
30,869 |
def query_malops_command():
total_result_limit = demisto.getArg('totalResultLimit')
per_group_limit = demisto.getArg('perGroupLimit')
template_context = demisto.getArg('templateContext')
filters = json.loads(demisto.getArg('filters')) if demisto.getArg('filters') else []
within_last_days = demisto.getArg('withinLastDays')
guid_list = argToList(demisto.getArg('malopGuid'))
if within_last_days:
current_timestamp = time.time()
current_datetime = datetime.fromtimestamp(current_timestamp)
within_last_days_datetime = current_datetime - timedelta(days=int(within_last_days))
within_last_days_timestamp = (time.mktime(
within_last_days_datetime.timetuple()) + within_last_days_datetime.microsecond / 1E6) # Converting datetime to time
within_last_days_timestamp *= 1000
filters.append({
'facetName': 'malopLastUpdateTime',
'values': [within_last_days_timestamp],
'filterType': 'GreaterThan'
})
malop_process_type, malop_loggon_session_type = query_malops(total_result_limit, per_group_limit,
template_context, filters, guid_list=guid_list)
outputs = []
for response in (malop_process_type, malop_loggon_session_type):
data = response.get('data', {})
malops_map = dict_safe_get(data, ['resultIdToElementDataMap'], {}, dict)
if not data or not malops_map:
continue
for guid, malop in malops_map.iteritems():
simple_values = dict_safe_get(malop, ['simpleValues'], {}, dict)
management_status = dict_safe_get(simple_values, ['managementStatus', 'values', 0], u'', unicode)
if management_status == 'CLOSED':
continue
creation_time = translate_timestamp(dict_safe_get(simple_values, ['creationTime', 'values', 0]))
malop_last_update_time = translate_timestamp(dict_safe_get(simple_values, ['malopLastUpdateTime', 'values', 0]))
raw_decision_failure = dict_safe_get(simple_values, ['decisionFeature', 'values', 0], u'', unicode)
decision_failure = raw_decision_failure.replace('Process.', '')
raw_suspects = dict_safe_get(malop, ['elementValues', 'suspects'], {}, dict)
suspects_string = ''
if raw_suspects:
suspects = dict_safe_get(raw_suspects, ['elementValues', 0], {}, dict)
suspects_string = '{}: {}'.format(suspects.get('elementType'), suspects.get('name'))
affected_machines = []
elementValues = dict_safe_get(malop, ['elementValues', 'affectedMachines', 'elementValues'], [], list)
for machine in elementValues:
if not isinstance(machine, dict):
raise ValueError("Cybereason raw response is not valid, machine in elementValues is not dict")
affected_machines.append(machine.get('name', ''))
involved_hashes = [] # type: List[str]
cause_elements_amount = dict_safe_get(simple_values, ['rootCauseElementHashes', 'totalValues'])
if cause_elements_amount != 0:
involved_hashes.append(cause_elements_amount)
malop_output = {
'GUID': guid,
'Link': SERVER + '/#/malop/' + guid,
'CreationTime': creation_time,
'DecisionFailure': re.sub(r'\([^)]*\)', '', decision_failure),
'Suspects': suspects_string,
'LastUpdateTime': malop_last_update_time,
'Status': management_status,
'AffectedMachine': affected_machines,
'InvolvedHash': involved_hashes
}
outputs.append(malop_output)
ec = {
'Cybereason.Malops(val.GUID && val.GUID === obj.GUID)': outputs
}
demisto.results({
'Type': entryTypes.get('note'),
'Contents': data,
'ContentsFormat': formats.get('json'),
'ReadableContentsFormat': formats.get('markdown'),
'HumanReadable': tableToMarkdown('Cybereason Malops',
outputs,
['GUID', 'Link', 'CreationTime', 'Status',
'LastUpdateTime', 'DecisionFailure', 'Suspects',
'AffectedMachine', 'InvolvedHash']) if outputs else 'No malops found',
'EntryContext': ec
})
|
def query_malops_command():
total_result_limit = demisto.getArg('totalResultLimit')
per_group_limit = demisto.getArg('perGroupLimit')
template_context = demisto.getArg('templateContext')
filters = json.loads(demisto.getArg('filters')) if demisto.getArg('filters') else []
within_last_days = demisto.getArg('withinLastDays')
guid_list = argToList(demisto.getArg('malopGuid'))
if within_last_days:
current_timestamp = time.time()
current_datetime = datetime.fromtimestamp(current_timestamp)
within_last_days_datetime = current_datetime - timedelta(days=int(within_last_days))
within_last_days_timestamp = (time.mktime(
within_last_days_datetime.timetuple()) + within_last_days_datetime.microsecond / 1E6) # Converting datetime to time
within_last_days_timestamp *= 1000
filters.append({
'facetName': 'malopLastUpdateTime',
'values': [within_last_days_timestamp],
'filterType': 'GreaterThan'
})
malop_process_type, malop_loggon_session_type = query_malops(total_result_limit, per_group_limit,
template_context, filters, guid_list=guid_list)
outputs = []
for response in (malop_process_type, malop_loggon_session_type):
data = response.get('data', {})
malops_map = dict_safe_get(data, ['resultIdToElementDataMap'], {}, dict)
if not data or not malops_map:
continue
for guid, malop in malops_map.iteritems():
simple_values = dict_safe_get(malop, ['simpleValues'], {}, dict)
management_status = dict_safe_get(simple_values, ['managementStatus', 'values', 0], u'', unicode)
if management_status == 'CLOSED':
continue
creation_time = translate_timestamp(dict_safe_get(simple_values, ['creationTime', 'values', 0]))
malop_last_update_time = translate_timestamp(dict_safe_get(simple_values, ['malopLastUpdateTime', 'values', 0]))
raw_decision_failure = dict_safe_get(simple_values, ['decisionFeature', 'values', 0], u'', unicode)
decision_failure = raw_decision_failure.replace('Process.', '')
raw_suspects = dict_safe_get(malop, ['elementValues', 'suspects'], {}, dict)
suspects_string = ''
if raw_suspects:
suspects = dict_safe_get(raw_suspects, ['elementValues', 0], {}, dict)
suspects_string = '{}: {}'.format(suspects.get('elementType'), suspects.get('name'))
affected_machines = []
elementValues = dict_safe_get(malop, ['elementValues', 'affectedMachines', 'elementValues'], [], list)
for machine in elementValues:
if not isinstance(machine, dict):
raise ValueError("Cybereason raw response is not valid, machine in elementValues is not a dict")
affected_machines.append(machine.get('name', ''))
involved_hashes = [] # type: List[str]
cause_elements_amount = dict_safe_get(simple_values, ['rootCauseElementHashes', 'totalValues'])
if cause_elements_amount != 0:
involved_hashes.append(cause_elements_amount)
malop_output = {
'GUID': guid,
'Link': SERVER + '/#/malop/' + guid,
'CreationTime': creation_time,
'DecisionFailure': re.sub(r'\([^)]*\)', '', decision_failure),
'Suspects': suspects_string,
'LastUpdateTime': malop_last_update_time,
'Status': management_status,
'AffectedMachine': affected_machines,
'InvolvedHash': involved_hashes
}
outputs.append(malop_output)
ec = {
'Cybereason.Malops(val.GUID && val.GUID === obj.GUID)': outputs
}
demisto.results({
'Type': entryTypes.get('note'),
'Contents': data,
'ContentsFormat': formats.get('json'),
'ReadableContentsFormat': formats.get('markdown'),
'HumanReadable': tableToMarkdown('Cybereason Malops',
outputs,
['GUID', 'Link', 'CreationTime', 'Status',
'LastUpdateTime', 'DecisionFailure', 'Suspects',
'AffectedMachine', 'InvolvedHash']) if outputs else 'No malops found',
'EntryContext': ec
})
|
46,534 |
def get_valid_early_derived_secret_reveal(spec, state, epoch=None):
current_epoch = spec.get_current_epoch(state)
revealed_index = spec.get_active_validator_indices(state, current_epoch)[-1]
masker_index = spec.get_active_validator_indices(state, current_epoch)[0]
if epoch is None:
epoch = current_epoch + spec.CUSTODY_PERIOD_TO_RANDAO_PADDING
# Generate the secret that is being revealed
reveal = bls_sign(
message_hash=spec.hash_tree_root(epoch),
privkey=privkeys[revealed_index],
domain=spec.get_domain(
state=state,
domain_type=spec.DOMAIN_RANDAO,
message_epoch=epoch,
),
)
# Generate the mask (any random 32 bytes will do)
mask = reveal[:32]
# Generate masker's signature on the mask
masker_signature = bls_sign(
message_hash=mask,
privkey=privkeys[masker_index],
domain=spec.get_domain(
state=state,
domain_type=spec.DOMAIN_RANDAO,
message_epoch=epoch,
),
)
masked_reveal = bls_aggregate_signatures([reveal, masker_signature])
return spec.EarlyDerivedSecretReveal(
revealed_index=revealed_index,
epoch=epoch,
reveal=masked_reveal,
masker_index=masker_index,
mask=mask,
)
|
def get_valid_early_derived_secret_reveal(spec, state, epoch=None):
current_epoch = spec.get_current_epoch(state)
revealed_index = spec.get_active_validator_indices(state, current_epoch)[-1]
masker_index = spec.get_active_validator_indices(state, current_epoch)[0]
if epoch is None:
epoch = current_epoch + spec.CUSTODY_PERIOD_TO_RANDAO_PADDING
# Generate the secret that is being revealed
reveal = bls_sign(
message_hash=spec.hash_tree_root(epoch),
privkey=privkeys[revealed_index],
domain=spec.get_domain(
state=state,
domain_type=spec.DOMAIN_RANDAO,
message_epoch=epoch,
),
)
# Generate the mask (any random 32 bytes will do)
mask = hash(reveal)
# Generate masker's signature on the mask
masker_signature = bls_sign(
message_hash=mask,
privkey=privkeys[masker_index],
domain=spec.get_domain(
state=state,
domain_type=spec.DOMAIN_RANDAO,
message_epoch=epoch,
),
)
masked_reveal = bls_aggregate_signatures([reveal, masker_signature])
return spec.EarlyDerivedSecretReveal(
revealed_index=revealed_index,
epoch=epoch,
reveal=masked_reveal,
masker_index=masker_index,
mask=mask,
)
|
5,420 |
def test__session_profile(aws_profile): # pylint: disable=no-self-use
"""
_session instantiates boto3.Session with the configured profile_name
"""
with patch.object(aws_kms, "_cfg", lambda k: aws_profile):
with patch("boto3.Session") as session:
aws_kms._session()
session.assert_called_with(profile_name=aws_profile)
|
def test__session_profile(aws_profile):
"""
_session instantiates boto3.Session with the configured profile_name
"""
with patch.object(aws_kms, "_cfg", lambda k: aws_profile):
with patch("boto3.Session") as session:
aws_kms._session()
session.assert_called_with(profile_name=aws_profile)
|
57,135 |
def in_terminal_state(job_state: str) -> bool:
"""Returns whether the job run has reached a terminal state and is no
longer executing.
Returns:
bool. Whether the job has reached a terminal state.
"""
return job_state in (
beam_job_models.BeamJobState.CANCELLED.value,
beam_job_models.BeamJobState.DRAINED.value,
beam_job_models.BeamJobState.UPDATED.value,
beam_job_models.BeamJobState.DONE.value,
beam_job_models.BeamJobState.FAILED.value,
)
|
def in_terminal_state(job_state: str) -> bool:
"""Returns whether the job state is a terminal state, meaning that the jobs is
longer executing.
Returns:
bool. Whether the job has reached a terminal state.
"""
return job_state in (
beam_job_models.BeamJobState.CANCELLED.value,
beam_job_models.BeamJobState.DRAINED.value,
beam_job_models.BeamJobState.UPDATED.value,
beam_job_models.BeamJobState.DONE.value,
beam_job_models.BeamJobState.FAILED.value,
)
|
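A minimal usage sketch, assuming the same `beam_job_models.BeamJobState` enum the module above imports (the `RUNNING` member is an assumption about that enum):

```python
# terminal state -> True; a still-executing state -> False
assert in_terminal_state(beam_job_models.BeamJobState.FAILED.value)
assert not in_terminal_state(beam_job_models.BeamJobState.RUNNING.value)  # assumed member
```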
24,427 |
def load_base_check(req_file, dependencies, errors, check_name=None):
for i, line in enumerate(stream_file_lines(req_file)):
line = line.strip()
if line.startswith('CHECKS_BASE_REQ'):
try:
dep = line.split(' = ')[1]
req = Requirement(dep.strip("'").strip('"'))
except (IndexError, InvalidRequirement) as e:
errors.append(f'File `{req_file}` has an invalid base check dependency: `{line}`\n{e}')
return
name = req.name.lower().replace('_', '-')
dependency = dependencies[name][req.specifier]
dependency.append(DependencyDefinition(name, req, req_file, i, check_name))
return
# no `CHECKS_BASE_REQ` found in setup.py file ..
errors.append(f'File `{req_file}` missing base check dependency `CHECKS_BASE_REQ`')
|
def load_base_check(req_file, dependencies, errors, check_name=None):
for i, line in enumerate(stream_file_lines(req_file)):
line = line.strip()
if line.startswith('CHECKS_BASE_REQ'):
try:
dep = line.split(' = ')[1]
req = Requirement(dep.strip("'\""))
except (IndexError, InvalidRequirement) as e:
errors.append(f'File `{req_file}` has an invalid base check dependency: `{line}`\n{e}')
return
name = req.name.lower().replace('_', '-')
dependency = dependencies[name][req.specifier]
dependency.append(DependencyDefinition(name, req, req_file, i, check_name))
return
# no `CHECKS_BASE_REQ` found in setup.py file ..
errors.append(f'File `{req_file}` missing base check dependency `CHECKS_BASE_REQ`')
|
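For reference, `str.strip` takes a set of characters rather than a literal string, so a single `strip("'\"")` removes either quote style from both ends; the requirement strings below are illustrative only:

```python
dep = "'datadog-checks-base>=11.0.0'"        # illustrative value for CHECKS_BASE_REQ
print(dep.strip("'").strip('"'))             # datadog-checks-base>=11.0.0
print(dep.strip("'\""))                      # datadog-checks-base>=11.0.0 (same result)
print('"some-check-dep==1.0"'.strip("'\""))  # some-check-dep==1.0
```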
34,540 |
def _subsample_array(
arr: List[Any],
max_values: int,
can_modify_incoming_array: bool = True,
rand: Optional["Random"] = None,
) -> List[Any]:
"""Shuffles the array and returns `max_values` number of elements."""
import random
if not can_modify_incoming_array:
arr = arr[:]
if rand is not None:
rand.shuffle(arr)
else:
random.shuffle(arr)
return arr[:max_values]
|
def _subsample_array(
arr: List[Any],
max_values: int,
can_modify_incoming_array: bool = True,
rand: Optional[random.Random] = None,
) -> List[Any]:
"""Shuffles the array and returns `max_values` number of elements."""
import random
if not can_modify_incoming_array:
arr = arr[:]
if rand is not None:
rand.shuffle(arr)
else:
random.shuffle(arr)
return arr[:max_values]
|
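A minimal usage sketch of the helper above; passing a seeded `random.Random` makes the subsample reproducible (values are illustrative):

```python
import random

values = list(range(10))
# note: with the default can_modify_incoming_array=True, `values` itself is shuffled
sample = _subsample_array(values, max_values=3, rand=random.Random(0))
print(len(sample))  # 3 elements, chosen deterministically for a fixed seed
```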
54,503 |
def test_plot_parallel_coordinate_unique_hyper_param() -> None:
# Test case when one unique value is suggested during the optimization.
study_categorical_params = create_study()
study_categorical_params.add_trial(
create_trial(
value=0.0,
params={"category_a": "preferred", "param_b": 30},
distributions={
"category_a": CategoricalDistribution(("preferred", "opt")),
"param_b": LogUniformDistribution(1, 1000),
},
)
)
# both hyperparameters contain unique values
figure = plot_parallel_coordinate(study_categorical_params)
assert len(figure.get_lines()) == 0
study_categorical_params.add_trial(
create_trial(
value=2.0,
params={"category_a": "preferred", "param_b": 20},
distributions={
"category_a": CategoricalDistribution(("preferred", "opt")),
"param_b": LogUniformDistribution(1, 1000),
},
)
)
# still "category_a" contains unique suggested value during the optimization.
figure = plot_parallel_coordinate(study_categorical_params)
assert len(figure.get_lines()) == 0
|
def test_plot_parallel_coordinate_unique_hyper_param() -> None:
# Test case when one unique value is suggested during the optimization.
study_categorical_params = create_study()
study_categorical_params.add_trial(
create_trial(
value=0.0,
params={"category_a": "preferred", "param_b": 30},
distributions={
"category_a": CategoricalDistribution(("preferred", "opt")),
"param_b": LogUniformDistribution(1, 1000),
},
)
)
# both hyperparameters contain unique values
figure = plot_parallel_coordinate(study_categorical_params)
assert len(figure.get_lines()) == 0
study_categorical_params.add_trial(
create_trial(
value=2.0,
params={"category_a": "preferred", "param_b": 20},
distributions={
"category_a": CategoricalDistribution(("preferred", "opt")),
"param_b": LogUniformDistribution(1, 1000),
},
)
)
# still "category_a" contains unique suggested value during the optimization.
figure = plot_parallel_coordinate(study_categorical_params)
assert len(figure.data[0]["dimensions"]) == 3
assert figure.data[0]["dimensions"][1]["label"] == "category_a"
assert figure.data[0]["dimensions"][1]["range"] == (0, 0)
assert figure.data[0]["dimensions"][1]["values"] == (0.0, 0.0)
assert figure.data[0]["dimensions"][1]["ticktext"] == ("preferred",)
|
57,773 |
def main() -> None:
params: any = demisto.params()
host: str = params.get('host')
port: int = int(params.get('port'))
args: any = demisto.args()
if "host" in args and "port" in args:
host: str = args.get('host')
port: int = int(args.get('port'))
command: str = demisto.command()
demisto.debug(f'Command being called is {command}')
commands = {
'arduino-set-pin': arduino_set_pin_command,
'arduino-get-pin': arduino_get_pin_command,
'arduino-send-data': arduino_send_data_command
}
# try:
server: Server = Server(host, port)
if demisto.command() == 'test-module':
return_results(test_module(server))
elif command in commands:
return_results(commands[command](server, args))
else:
return_error(f"{command} command not recognised")
|
def main() -> None:
params = demisto.params()
host = params.get('host')
port: int = int(params.get('port'))
args: any = demisto.args()
if "host" in args and "port" in args:
host: str = args.get('host')
port: int = int(args.get('port'))
command: str = demisto.command()
demisto.debug(f'Command being called is {command}')
commands = {
'arduino-set-pin': arduino_set_pin_command,
'arduino-get-pin': arduino_get_pin_command,
'arduino-send-data': arduino_send_data_command
}
# try:
server: Server = Server(host, port)
if demisto.command() == 'test-module':
return_results(test_module(server))
elif command in commands:
return_results(commands[command](server, args))
else:
return_error(f"{command} command not recognised")
|
10,894 |
def test_with_dependent_schema():
"""Tests a schema with dependent schema
https://github.com/fastavro/fastavro/issues/418"""
dependency = {
"type": "record",
"name": "Dependency",
"namespace": "test",
"fields": [{
"name": "_name",
"type": "string"
}]
}
schema = {
"type": "record",
"name": "Test",
"namespace": "test",
"fields": [{
"name": "_name",
"type": "string"
}, {
"name": "_dependency",
"type": "Dependency"
}]
}
records = [{
'_name': 'parent',
'_dependency': {
'_name': 'child'
}
}]
parse_schema(dependency, "/tmp/dir")
parse_schema(schema, "/tmp/dir")
new_records = roundtrip(schema, records)
assert records == new_records
|
def test_with_dependent_schema():
"""Tests a schema with dependent schema
https://github.com/fastavro/fastavro/issues/418"""
dependency = {
"type": "record",
"name": "Dependency",
"namespace": "test",
"fields": [{
"name": "_name",
"type": "string"
}]
}
schema = {
"type": "record",
"name": "Test",
"namespace": "test",
"fields": [{
"name": "_name",
"type": "string"
}, {
"name": "_dependency",
"type": "Dependency"
}]
}
records = [{
'_name': 'parent',
'_dependency': {
'_name': 'child'
}
}]
parse_schema(dependency)
parse_schema(schema, "/tmp/dir")
new_records = roundtrip(schema, records)
assert records == new_records
|
30,923 |
def set_group_state_command():
groupID = args.get('id')
on = args.get('on')
if on == "true" or on == "True":
on = True
elif on == "false" or on == "False":
on = False
hue = int(args.get('hue'))
bri = int(args.get('bri'))
sat = int(args.get('sat'))
lightID = args.get('id')
header = {
'Content-Type': 'application/json'
}
payload = {"hue": hue, "on": on, "sat": sat, "bri": bri}
page = 'groups/'
URL = baseURL + page + groupID + "/" + "action"
response = requests.request("PUT", URL, data=json.dumps(payload), headers=header)
if not response.ok:
error = "Error in request {} - {}".format(response.status_code, response.text)
raise Exception(text)
data = response.json()
return data
|
def set_group_state_command():
groupID = args.get('id')
on = args.get('on')
if on == "true" or on == "True":
on = True
elif on == "false" or on == "False":
on = False
hue = int(args.get('hue'))
bri = int(args.get('bri'))
sat = int(args.get('sat'))
lightID = args.get('id')
header = {
'Content-Type': 'application/json'
}
payload = {"hue": hue, "on": on, "sat": sat, "bri": bri}
page = 'groups/'
URL = baseURL + page + groupID + "/" + "action"
response = requests.request("PUT", URL, data=json.dumps(payload), headers=header)
if not response.ok:
error = "Error in request {} - {}".format(response.status_code, response.text)
raise Exception(error)
data = response.json()
return data
|
45,300 |
def _read(**kwargs) -> DataFrame:
"""
General documentation in `modin.pandas.read_csv`.
Experimental feature is simultaneous reading from multiple csv files which are
defined using glob pattern. Works only for local files.
Parameters
----------
**kwargs : dict
Keyword arguments in `modin.pandas.read_csv`.
Returns
-------
Modin DataFrame.
"""
from modin.data_management.factories.dispatcher import EngineDispatcher
Engine.subscribe(_update_engine)
try:
pd_obj = EngineDispatcher.read_csv_glob(**kwargs)
except AttributeError:
raise AttributeError("read_csv_glob() is only implemented for pandas on Ray.")
# This happens when `read_csv` returns a TextFileReader object for iterating through
if isinstance(pd_obj, pandas.io.parsers.TextFileReader):
reader = pd_obj.read
pd_obj.read = lambda *args, **kwargs: DataFrame(
query_compiler=reader(*args, **kwargs)
)
return pd_obj
return DataFrame(query_compiler=pd_obj)
|
def _read(**kwargs) -> DataFrame:
"""
General documentation in `modin.pandas.read_csv`.
Experimental feature is simultaneous reading from multiple csv files which are
defined using glob pattern. Works only for local files.
Parameters
----------
**kwargs : dict
Keyword arguments in `modin.pandas.read_csv`.
Returns
-------
modin.DataFrame
"""
from modin.data_management.factories.dispatcher import EngineDispatcher
Engine.subscribe(_update_engine)
try:
pd_obj = EngineDispatcher.read_csv_glob(**kwargs)
except AttributeError:
raise AttributeError("read_csv_glob() is only implemented for pandas on Ray.")
# This happens when `read_csv` returns a TextFileReader object for iterating through
if isinstance(pd_obj, pandas.io.parsers.TextFileReader):
reader = pd_obj.read
pd_obj.read = lambda *args, **kwargs: DataFrame(
query_compiler=reader(*args, **kwargs)
)
return pd_obj
return DataFrame(query_compiler=pd_obj)
|
55,536 |
def dict_equals(dict1, dict2):
"""Check whether two dictionaries are equal and raise an ``AssertionError`` if they aren't."""
dict1 = OrderedDict(sorted(dict1.items(), key=lambda item: item[0]))
dict2 = OrderedDict(sorted(dict2.items(), key=lambda item: item[0]))
for (key1, value1), (key2, value2) in itertools.zip_longest(
dict1.items(), dict2.items()
):
assert (key1 == key2) or (np.isnan(key1) and np.isnan(key2))
np.testing.assert_array_equal(value1, value2)
|
def dict_equals(dict1, dict2):
"""Check whether two dictionaries are equal and raise an ``AssertionError`` if they aren't."""
for key1, key2 in itertools.zip_longest(sorted(dict1), sorted(dict2)):
assert (key1 == key2) or (np.isnan(key1) and np.isnan(key2))
np.testing.assert_array_equal(dict1[key1], dict2[key2])
|
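A minimal usage sketch of the comparison helper above; it passes silently for equal dictionaries regardless of key order and raises ``AssertionError`` on any mismatch (toy values):

```python
import numpy as np

dict_equals({'a': np.array([1, 2]), 'b': np.array([3])},
            {'b': np.array([3]), 'a': np.array([1, 2])})   # passes: order-insensitive
```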
45,316 |
def _replace_doc(
source_obj, target_obj, overwrite, apilink, parent_cls=None, attr_name=None
):
"""
Replace docstring in `target_obj`, possibly taking from `source_obj` and augmenting.
Can append the link to pandas API online documentation.
Parameters
----------
source_obj : object
Any object from which to take docstring from.
target_obj : object
The object which docstring to replace.
overwrite : bool
Forces replacing the docstring with the one from `source_obj` even
if `target_obj` has its own non-empty docstring.
apilink : str
If non-empty, insert the link to pandas API documentation.
Should be the prefix part in the URL template, e.g. "pandas.DataFrame".
parent_cls : class, optional
If `target_obj` is an attribute of a class, `parent_cls` should be that class.
This is used for generating the API URL as well as for handling special cases
like `target_obj` being a property.
attr_name : str, optional
Gives the name to `target_obj` if it's an attribute of `parent_cls`.
Needed to handle some special cases and in most cases could be determined automatically.
"""
source_doc = source_obj.__doc__ or ""
target_doc = target_obj.__doc__ or ""
overwrite = overwrite or not target_doc
doc = source_doc if overwrite else target_doc
if parent_cls and not attr_name:
if isinstance(target_obj, property):
attr_name = target_obj.fget.__name__
elif isinstance(target_obj, (staticmethod, classmethod)):
attr_name = target_obj.__func__.__name__
else:
attr_name = target_obj.__name__
if (
source_doc.strip()
and apilink
and "`Pandas API documentation for " not in target_doc
and (not (attr_name or "").startswith("_"))
):
if attr_name:
token = f"{apilink}.{attr_name}"
else:
token = apilink
url = _make_api_url(token)
doc += f"\n{' ' * _get_indent(doc)}See `pandas API documentation for {token} <{url}>`_ for more.\n"
if parent_cls and isinstance(target_obj, property):
if overwrite:
target_obj.fget.__doc_inherited__ = True
setattr(
parent_cls,
target_obj.fget.__name__,
property(target_obj.fget, target_obj.fset, target_obj.fdel, doc),
)
else:
if overwrite:
target_obj.__doc_inherited__ = True
target_obj.__doc__ = doc
|
def _replace_doc(
source_obj, target_obj, overwrite, apilink, parent_cls=None, attr_name=None
):
"""
Replace docstring in `target_obj`, possibly taking from `source_obj` and augmenting.
Can append the link to pandas API online documentation.
Parameters
----------
source_obj : object
Any object from which to take docstring from.
target_obj : object
The object which docstring to replace.
overwrite : bool
Forces replacing the docstring with the one from `source_obj` even
if `target_obj` has its own non-empty docstring.
apilink : str
If non-empty, insert the link to pandas API documentation.
Should be the prefix part in the URL template, e.g. "pandas.DataFrame".
parent_cls : class, optional
If `target_obj` is an attribute of a class, `parent_cls` should be that class.
This is used for generating the API URL as well as for handling special cases
like `target_obj` being a property.
attr_name : str, optional
Gives the name to `target_obj` if it's an attribute of `parent_cls`.
Needed to handle some special cases and in most cases could be determined automatically.
"""
source_doc = source_obj.__doc__ or ""
target_doc = target_obj.__doc__ or ""
overwrite = overwrite or not target_doc
doc = source_doc if overwrite else target_doc
if parent_cls and not attr_name:
if isinstance(target_obj, property):
attr_name = target_obj.fget.__name__
elif isinstance(target_obj, (staticmethod, classmethod)):
attr_name = target_obj.__func__.__name__
else:
attr_name = target_obj.__name__
if (
source_doc.strip()
and apilink
and "`pandas API documentation for " not in target_doc
and (not (attr_name or "").startswith("_"))
):
if attr_name:
token = f"{apilink}.{attr_name}"
else:
token = apilink
url = _make_api_url(token)
doc += f"\n{' ' * _get_indent(doc)}See `pandas API documentation for {token} <{url}>`_ for more.\n"
if parent_cls and isinstance(target_obj, property):
if overwrite:
target_obj.fget.__doc_inherited__ = True
setattr(
parent_cls,
target_obj.fget.__name__,
property(target_obj.fget, target_obj.fset, target_obj.fdel, doc),
)
else:
if overwrite:
target_obj.__doc_inherited__ = True
target_obj.__doc__ = doc
|
7,368 |
def _masked_phase_cross_correlation(reference_image, moving_image,
reference_mask, moving_mask=None,
overlap_ratio=0.3):
"""Masked image translation registration by masked normalized
cross-correlation.
Parameters
----------
reference_image : ndarray
Reference image.
moving_image : ndarray
Image to register. Must be same dimensionality as ``reference_image``,
but not necessarily the same size.
reference_mask : ndarray
Boolean mask for ``reference_image``. The mask should evaluate
to ``True`` (or 1) on valid pixels. ``reference_mask`` should
have the same shape as ``reference_image``.
moving_mask : ndarray or None, optional
Boolean mask for ``moving_image``. The mask should evaluate to ``True``
(or 1) on valid pixels. ``moving_mask`` should have the same shape
as ``moving_image``. If ``None``, ``reference_mask`` will be used.
overlap_ratio : float, optional
Minimum allowed overlap ratio between images. The correlation for
translations corresponding with an overlap ratio lower than this
threshold will be ignored. A lower `overlap_ratio` leads to smaller
maximum translation, while a higher `overlap_ratio` leads to greater
robustness against spurious matches due to small overlap between
masked images.
Returns
-------
shifts : ndarray
Shift vector (in pixels) required to register ``moving_image``
with ``reference_image``. Axis ordering is consistent with
numpy (e.g. Z, Y, X)
References
----------
.. [1] Dirk Padfield. Masked Object Registration in the Fourier Domain.
IEEE Transactions on Image Processing, vol. 21(5),
pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402`
.. [2] D. Padfield. "Masked FFT registration". In Proc. Computer Vision and
Pattern Recognition, pp. 2918-2925 (2010).
:DOI:`10.1109/CVPR.2010.5540032`
"""
if moving_mask is None:
if reference_image.shape != moving_image.shape:
raise ValueError(
"Input images have different shapes, moving_mask must "
"be explicitely set.")
moving_mask = reference_mask.astype(bool)
# We need masks to be of the same size as their respective images
for (im, mask) in [(reference_image, reference_mask),
(moving_image, moving_mask)]:
if im.shape != mask.shape:
raise ValueError(
"Image sizes must match their respective mask sizes.")
xcorr = cross_correlate_masked(moving_image, reference_image,
moving_mask, reference_mask,
axes=tuple(range(moving_image.ndim)),
mode='full',
overlap_ratio=overlap_ratio)
# Generalize to the average of multiple equal maxima
maxima = np.stack(np.nonzero(xcorr == xcorr.max()), axis=1)
center = np.mean(maxima, axis=0)
shifts = center - np.array(reference_image.shape) + 1
# The mismatch in size will impact the center location of the
# cross-correlation
size_mismatch = (np.array(moving_image.shape)
- np.array(reference_image.shape))
return -shifts + (size_mismatch / 2)
|
def _masked_phase_cross_correlation(reference_image, moving_image,
reference_mask, moving_mask=None,
overlap_ratio=0.3):
"""Masked image translation registration by masked normalized
cross-correlation.
Parameters
----------
reference_image : ndarray
Reference image.
moving_image : ndarray
Image to register. Must be same dimensionality as ``reference_image``,
but not necessarily the same size.
reference_mask : ndarray
Boolean mask for ``reference_image``. The mask should evaluate
to ``True`` (or 1) on valid pixels. ``reference_mask`` should
have the same shape as ``reference_image``.
moving_mask : ndarray or None, optional
Boolean mask for ``moving_image``. The mask should evaluate to ``True``
(or 1) on valid pixels. ``moving_mask`` should have the same shape
as ``moving_image``. If ``None``, ``reference_mask`` will be used.
overlap_ratio : float, optional
Minimum allowed overlap ratio between images. The correlation for
translations corresponding with an overlap ratio lower than this
threshold will be ignored. A lower `overlap_ratio` leads to smaller
maximum translation, while a higher `overlap_ratio` leads to greater
robustness against spurious matches due to small overlap between
masked images.
Returns
-------
shifts : ndarray
Shift vector (in pixels) required to register ``moving_image``
with ``reference_image``. Axis ordering is consistent with
numpy (e.g. Z, Y, X)
References
----------
.. [1] Dirk Padfield. Masked Object Registration in the Fourier Domain.
IEEE Transactions on Image Processing, vol. 21(5),
pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402`
.. [2] D. Padfield. "Masked FFT registration". In Proc. Computer Vision and
Pattern Recognition, pp. 2918-2925 (2010).
:DOI:`10.1109/CVPR.2010.5540032`
"""
if moving_mask is None:
if reference_image.shape != moving_image.shape:
raise ValueError(
"Input images have different shapes, moving_mask must "
"be explicitely set.")
moving_mask = reference_mask.astype(bool)
# We need masks to be of the same size as their respective images
for (im, mask) in [(reference_image, reference_mask),
(moving_image, moving_mask)]:
if im.shape != mask.shape:
raise ValueError(
"Image shapes must match their respective mask shapes.")
xcorr = cross_correlate_masked(moving_image, reference_image,
moving_mask, reference_mask,
axes=tuple(range(moving_image.ndim)),
mode='full',
overlap_ratio=overlap_ratio)
# Generalize to the average of multiple equal maxima
maxima = np.stack(np.nonzero(xcorr == xcorr.max()), axis=1)
center = np.mean(maxima, axis=0)
shifts = center - np.array(reference_image.shape) + 1
# The mismatch in size will impact the center location of the
# cross-correlation
size_mismatch = (np.array(moving_image.shape)
- np.array(reference_image.shape))
return -shifts + (size_mismatch / 2)
|
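The peak-to-shift conversion in the body above is easy to check in one dimension; a standalone numpy sketch with an illustrative 'full'-mode correlation of two length-3 signals:

```python
import numpy as np

xcorr = np.array([0.1, 0.2, 0.9, 0.3, 0.1])    # full cross-correlation, length 2*3 - 1
maxima = np.stack(np.nonzero(xcorr == xcorr.max()), axis=1)
center = np.mean(maxima, axis=0)               # average of (possibly multiple) peaks
shifts = center - np.array([3]) + 1            # 3 == reference_image.shape[0]
print(shifts)  # [0.] -> a peak at the middle of a full correlation means zero shift
```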
32,534 |
def initialize_instance(args: Dict[str, str], params: Dict[str, str]):
global URL, API_KEY, USE_SSL, USE_URL_FILTERING, VSYS, DEVICE_GROUP, XPATH_SECURITY_RULES, XPATH_OBJECTS, \
XPATH_RULEBASE, TEMPLATE, PRE_POST
if not params.get('port'):
raise DemistoException('Set a port for the instance')
URL = params.get('server', '').rstrip('/:') + ':' + params.get('port', '') + '/api/'
API_KEY = str(params.get('key')) or str((params.get('credentials') or {}).get('password', '')) # type: ignore
if not API_KEY:
raise Exception('API Key must be provided.')
USE_SSL = not params.get('insecure')
USE_URL_FILTERING = params.get('use_url_filtering')
# determine a vsys or a device-group
VSYS = params.get('vsys', '')
if args and (device_group := (args.get('device-group') or args.get('device_group'))):
DEVICE_GROUP = device_group # type: ignore[assignment]
else:
DEVICE_GROUP = params.get('device_group', None) # type: ignore[arg-type]
if args and args.get('template'):
TEMPLATE = args.get('template') # type: ignore[assignment]
else:
TEMPLATE = params.get('template', None) # type: ignore[arg-type]
PRE_POST = args.get('pre_post', '')
# configuration check
if DEVICE_GROUP and VSYS:
raise DemistoException(
'Cannot configure both vsys and Device group. Set vsys for firewall, set Device group for Panorama.')
if not DEVICE_GROUP and not VSYS:
raise DemistoException('Set vsys for firewall or Device group for Panorama.')
# setting security xpath relevant to FW or panorama management
if DEVICE_GROUP:
device_group_shared = DEVICE_GROUP.lower()
if device_group_shared == 'shared':
XPATH_SECURITY_RULES = "/config/shared/"
DEVICE_GROUP = device_group_shared
else:
XPATH_SECURITY_RULES = "/config/devices/entry/device-group/entry[@name=\'" + DEVICE_GROUP + "\']/"
else:
XPATH_SECURITY_RULES = "/config/devices/entry/vsys/entry[@name=\'" + VSYS + "\']/rulebase/security/rules/entry"
# setting objects xpath relevant to FW or panorama management
if DEVICE_GROUP:
device_group_shared = DEVICE_GROUP.lower()
if DEVICE_GROUP == 'shared':
XPATH_OBJECTS = "/config/shared/"
DEVICE_GROUP = device_group_shared
else:
XPATH_OBJECTS = "/config/devices/entry/device-group/entry[@name=\'" + DEVICE_GROUP + "\']/"
else:
XPATH_OBJECTS = "/config/devices/entry/vsys/entry[@name=\'" + VSYS + "\']/" # ignore:
# setting security rulebase xpath relevant to FW or panorama management
if DEVICE_GROUP:
device_group_shared = DEVICE_GROUP.lower()
if DEVICE_GROUP == 'shared':
XPATH_RULEBASE = "/config/shared/"
DEVICE_GROUP = device_group_shared
else:
XPATH_RULEBASE = "/config/devices/entry[@name='localhost.localdomain']/device-group/entry[@name=\'" + \
DEVICE_GROUP + "\']/"
else:
XPATH_RULEBASE = f"/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'{VSYS}\']/"
|
def initialize_instance(args: Dict[str, str], params: Dict[str, str]):
global URL, API_KEY, USE_SSL, USE_URL_FILTERING, VSYS, DEVICE_GROUP, XPATH_SECURITY_RULES, XPATH_OBJECTS, \
XPATH_RULEBASE, TEMPLATE, PRE_POST
if not params.get('port'):
raise DemistoException('Set a port for the instance')
URL = params.get('server', '').rstrip('/:') + ':' + params.get('port', '') + '/api/'
API_KEY = str(params.get('key')) or str((params.get('credentials') or {}).get('password', '')) # type: ignore
if not API_KEY:
raise Exception('API Key must be provided.')
USE_SSL = not params.get('insecure')
USE_URL_FILTERING = params.get('use_url_filtering')
# determine a vsys or a device-group
VSYS = params.get('vsys', '')
DEVICE_GROUP = args and args.get('device-group') or params.get('device_group', None)
if args and args.get('template'):
TEMPLATE = args.get('template') # type: ignore[assignment]
else:
TEMPLATE = params.get('template', None) # type: ignore[arg-type]
PRE_POST = args.get('pre_post', '')
# configuration check
if DEVICE_GROUP and VSYS:
raise DemistoException(
'Cannot configure both vsys and Device group. Set vsys for firewall, set Device group for Panorama.')
if not DEVICE_GROUP and not VSYS:
raise DemistoException('Set vsys for firewall or Device group for Panorama.')
# setting security xpath relevant to FW or panorama management
if DEVICE_GROUP:
device_group_shared = DEVICE_GROUP.lower()
if device_group_shared == 'shared':
XPATH_SECURITY_RULES = "/config/shared/"
DEVICE_GROUP = device_group_shared
else:
XPATH_SECURITY_RULES = "/config/devices/entry/device-group/entry[@name=\'" + DEVICE_GROUP + "\']/"
else:
XPATH_SECURITY_RULES = "/config/devices/entry/vsys/entry[@name=\'" + VSYS + "\']/rulebase/security/rules/entry"
# setting objects xpath relevant to FW or panorama management
if DEVICE_GROUP:
device_group_shared = DEVICE_GROUP.lower()
if DEVICE_GROUP == 'shared':
XPATH_OBJECTS = "/config/shared/"
DEVICE_GROUP = device_group_shared
else:
XPATH_OBJECTS = "/config/devices/entry/device-group/entry[@name=\'" + DEVICE_GROUP + "\']/"
else:
XPATH_OBJECTS = "/config/devices/entry/vsys/entry[@name=\'" + VSYS + "\']/" # ignore:
# setting security rulebase xpath relevant to FW or panorama management
if DEVICE_GROUP:
device_group_shared = DEVICE_GROUP.lower()
if DEVICE_GROUP == 'shared':
XPATH_RULEBASE = "/config/shared/"
DEVICE_GROUP = device_group_shared
else:
XPATH_RULEBASE = "/config/devices/entry[@name='localhost.localdomain']/device-group/entry[@name=\'" + \
DEVICE_GROUP + "\']/"
else:
XPATH_RULEBASE = f"/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'{VSYS}\']/"
|
8,445 |
def _sdss_wcs_to_log_wcs(old_wcs):
"""
The WCS in the SDSS files does not appear to follow the WCS standard - it
claims to be linear, but is logarithmic in base-10.
The wavelength is given by:
λ = 10^(w0 + w1 * i)
with i being the pixel index starting from 0.
The FITS standard uses a natural log with a sightly different formulation,
see WCS Paper 3 (which discusses spectral WCS).
This function does the conversion from the SDSS WCS to FITS WCS.
"""
w0 = old_wcs.wcs.crval[0]
w1 = old_wcs.wcs.cd[0,0]
crval = 10 ** w0
cdelt = crval * w1 * log(10)
cunit = old_wcs.wcs.cunit[0] or Unit('Angstrom')
ctype = "WAVE-LOG"
w = WCS(naxis=1)
w.wcs.crval[0] = crval
w.wcs.cdelt[0] = cdelt
w.wcs.cunit[0] = cunit
w.wcs.ctype[0] = ctype
return w
|
def _sdss_wcs_to_log_wcs(old_wcs):
"""
The WCS in the SDSS files does not appear to follow the WCS standard - it
claims to be linear, but is logarithmic in base-10.
The wavelength is given by:
λ = 10^(w0 + w1 * i)
with i being the pixel index starting from 0.
The FITS standard uses a natural log with a sightly different formulation,
see WCS Paper 3 (which discusses spectral WCS).
This function does the conversion from the SDSS WCS to FITS WCS.
"""
w0 = old_wcs.wcs.crval[0]
w1 = old_wcs.wcs.cd[0,0]
crval = 10 ** w0
cdelt = crval * w1 * np.log(10)
cunit = old_wcs.wcs.cunit[0] or Unit('Angstrom')
ctype = "WAVE-LOG"
w = WCS(naxis=1)
w.wcs.crval[0] = crval
w.wcs.cdelt[0] = cdelt
w.wcs.cunit[0] = cunit
w.wcs.ctype[0] = ctype
return w
|
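A quick check of the conversion above, starting from the formula in the docstring: with λ(i) = 10^(w0 + w1 * i), the reference value is CRVAL1 = λ(0) = 10^w0 and the derivative is dλ/di = ln(10) * w1 * λ(i), so at the reference pixel CDELT1 = CRVAL1 * w1 * ln(10). That is exactly `cdelt = crval * w1 * log(10)` in the code, where the `log` must be the natural logarithm (as `np.log` in the second version makes explicit).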
27,903 |
def batch_renormalization(x, gamma, beta, rmax, dmax, eps=2e-5,
running_mean=None, running_var=None, decay=0.9,
update_statistics=False):
"""Batch renormalization function.
This is an extension of batch normalization, which ensures that the
training and inference models generate the same outputs that depend on
individual examples rather than the entire minibatch.
.. note::
This function does not perform in-place update to
``running_mean`` and ``running_var`` by default, contrary to
:func:`~chainer.functions.batch_normalization`.
If the function is called, it will not be possible to access the
updated running mean and variance statistics, because they are members
of the function object, which cannot be accessed by the caller.
If it is desired to update the running statistics, call the function
with ``update_statistics=True`` option.
.. note::
For the consistency with Batch Normalization, this function
intentionally ignores some of the theoretical flaws in Algorithm 1 of
the Batch Renormalization paper:
- The function maintains the moving average of variances
:math:`\\sigma^2`, while the original paper maintain the moving
average of standard deviations :math:`\\sigma`.
- The function applies Bessel's correction to update the moving average
of variances.
See: `Batch Renormalization: Towards Reducing Minibatch Dependence in \
Batch-Normalized Models <https://arxiv.org/abs/1702.03275>`_
.. seealso:: :class:`~chainer.links.BatchRenormalization`
"""
if running_mean is None:
raise TypeError('running_mean is required')
if running_var is None:
raise TypeError('running_var is required')
return BatchRenormalizationFunction(
eps, running_mean, running_var, decay, rmax, dmax, update_statistics
)(x, gamma, beta)
|
def batch_renormalization(x, gamma, beta, rmax, dmax, eps=2e-5,
running_mean=None, running_var=None, decay=0.9,
update_statistics=False):
"""Batch renormalization function.
This is an extension of batch normalization, which ensures that the
training and inference models generate the same outputs that depend on
individual examples rather than the entire minibatch.
.. note::
This function does not perform in-place update to
``running_mean`` and ``running_var`` by default, contrary to
:func:`~chainer.functions.batch_normalization`.
If the function is called, it will not be possible to access the
updated running mean and variance statistics, because they are members
of the function object, which cannot be accessed by the caller.
If it is desired to update the running statistics, call the function
with ``update_statistics=True`` option.
.. note::
For the consistency with Batch Normalization, this function
intentionally ignores some of the theoretical flaws in Algorithm 1 of
the Batch Renormalization paper:
- ``F.batch_renormalization`` maintains the moving average of variances
          :math:`\\sigma^2`, while the original paper maintains the moving
average of standard deviations :math:`\\sigma`.
- The function applies Bessel's correction to update the moving average
of variances.
See: `Batch Renormalization: Towards Reducing Minibatch Dependence in \
Batch-Normalized Models <https://arxiv.org/abs/1702.03275>`_
.. seealso:: :class:`~chainer.links.BatchRenormalization`
"""
if running_mean is None:
raise TypeError('running_mean is required')
if running_var is None:
raise TypeError('running_var is required')
return BatchRenormalizationFunction(
eps, running_mean, running_var, decay, rmax, dmax, update_statistics
)(x, gamma, beta)
|
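For context, a rough NumPy-only sketch of the r/d correction that distinguishes batch renormalization from plain batch normalization; it mirrors Algorithm 1 of the cited paper in spirit rather than the Chainer internals, and every array here is illustrative.

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(loc=2.0, scale=3.0, size=(32, 8))   # (batch, features), toy activations
running_mean = np.zeros(8)
running_var = np.ones(8)
rmax, dmax, eps = 3.0, 5.0, 2e-5

mu_b = x.mean(axis=0)
sigma_b = np.sqrt(x.var(axis=0) + eps)
sigma_r = np.sqrt(running_var + eps)

# r and d pull the minibatch statistics toward the moving averages and are
# treated as constants (no gradient flows through them) during backprop.
r = np.clip(sigma_b / sigma_r, 1.0 / rmax, rmax)
d = np.clip((mu_b - running_mean) / sigma_r, -dmax, dmax)

x_hat = (x - mu_b) / sigma_b * r + d   # renormalized activations (before gamma/beta)
print(x_hat.mean(axis=0).round(3))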
55,684 |
def set_random_seed(seed, deterministic=False):
"""Set random seed.
Args:
seed (int): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
|
def set_random_seed(seed, deterministic=False):
"""Set random seed.
Args:
seed (int): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Defaults to False.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
|
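A tiny reproducibility check built on the helper above (assumes the set_random_seed defined just above is in scope; the CUDA seeding call is silently ignored when no GPU is available).

import torch

set_random_seed(42, deterministic=True)
a = torch.randn(3)

set_random_seed(42, deterministic=True)
b = torch.randn(3)

# Re-seeding replays the same pseudo-random draws.
assert torch.equal(a, b)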
13,658 |
def password_reset_confirm_wrapper(request, uidb36=None, token=None):
"""
A wrapper around django.contrib.auth.views.password_reset_confirm.
Needed because we want to set the user as active at this step.
We also optionally do some additional password policy checks.
"""
# convert old-style base36-encoded user id to base64
uidb64 = uidb36_to_uidb64(uidb36)
platform_name = {
"platform_name": configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME)
}
try:
uid_int = base36_to_int(uidb36)
user = User.objects.get(id=uid_int)
except (ValueError, User.DoesNotExist):
# if there's any error getting a user, just let django's
# password_reset_confirm function handle it.
return password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=platform_name
)
if UserRetirementRequest.has_user_requested_retirement(user):
# Refuse to reset the password of any user that has requested retirement.
context = {
'validlink': True,
'form': None,
'title': _('Password reset unsuccessful'),
'err_msg': _('Error in resetting your password.'),
}
context.update(platform_name)
return TemplateResponse(
request, 'registration/password_reset_confirm.html', context
)
if waffle().is_enabled(PREVENT_AUTH_USER_WRITES):
context = {
'validlink': False,
'form': None,
'title': _('Password reset unsuccessful'),
'err_msg': SYSTEM_MAINTENANCE_MSG,
}
context.update(platform_name)
return TemplateResponse(
request, 'registration/password_reset_confirm.html', context
)
if request.method == 'POST':
if PASSWORD_UNICODE_NORMALIZE_FLAG.is_enabled():
# We have to make a copy of request.POST because it is a QueryDict object which is immutable until copied.
# We have to use request.POST because the password_reset_confirm method takes in the request and a user's
# password is set to the request.POST['new_password1'] field. We have to also normalize the new_password2
# field so it passes the equivalence check that new_password1 == new_password2
# In order to switch out of having to do this copy, we would want to move the normalize_password code into
# the User object's set_password method to ensure it is always happening upon calling set_password.
request.POST = request.POST.copy()
request.POST['new_password1'] = normalize_password(request.POST['new_password1'])
request.POST['new_password2'] = normalize_password(request.POST['new_password2'])
password = request.POST['new_password1']
try:
validate_password(password, user=user)
except ValidationError as err:
# We have a password reset attempt which violates some security
# policy, or any other validation. Use the existing Django template to communicate that
# back to the user.
context = {
'validlink': True,
'form': None,
'title': _('Password reset unsuccessful'),
'err_msg': ' '.join(err.messages),
}
context.update(platform_name)
return TemplateResponse(
request, 'registration/password_reset_confirm.html', context
)
# remember what the old password hash is before we call down
old_password_hash = user.password
response = password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=platform_name
)
# If password reset was unsuccessful a template response is returned (status_code 200).
# Check if form is invalid then show an error to the user.
# Note if password reset was successful we get response redirect (status_code 302).
if response.status_code == 200:
form_valid = response.context_data['form'].is_valid() if response.context_data['form'] else False
if not form_valid:
log.warning(
u'Unable to reset password for user [%s] because form is not valid. '
u'A possible cause is that the user had an invalid reset token',
user.username,
)
response.context_data['err_msg'] = _('Error in resetting your password. Please try again.')
return response
# get the updated user
updated_user = User.objects.get(id=uid_int)
# did the password hash change, if so record it in the PasswordHistory
if updated_user.password != old_password_hash:
entry = PasswordHistory()
entry.create(updated_user)
else:
response = password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=platform_name
)
response_was_successful = response.context_data.get('validlink')
if response_was_successful and not user.is_active:
user.is_active = True
user.save()
return response
|
def password_reset_confirm_wrapper(request, uidb36=None, token=None):
"""
A wrapper around django.contrib.auth.views.password_reset_confirm.
Needed because we want to set the user as active at this step.
We also optionally do some additional password policy checks.
"""
# convert old-style base36-encoded user id to base64
uidb64 = uidb36_to_uidb64(uidb36)
platform_name = {
"platform_name": configuration_helpers.get_value('platform_name', settings.PLATFORM_NAME)
}
try:
uid_int = base36_to_int(uidb36)
user = User.objects.get(id=uid_int)
except (ValueError, User.DoesNotExist):
# if there's any error getting a user, just let django's
# password_reset_confirm function handle it.
return password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=platform_name
)
if UserRetirementRequest.has_user_requested_retirement(user):
# Refuse to reset the password of any user that has requested retirement.
context = {
'validlink': True,
'form': None,
'title': _('Password reset unsuccessful'),
'err_msg': _('Error in resetting your password.'),
}
context.update(platform_name)
return TemplateResponse(
request, 'registration/password_reset_confirm.html', context
)
if waffle().is_enabled(PREVENT_AUTH_USER_WRITES):
context = {
'validlink': False,
'form': None,
'title': _('Password reset unsuccessful'),
'err_msg': SYSTEM_MAINTENANCE_MSG,
}
context.update(platform_name)
return TemplateResponse(
request, 'registration/password_reset_confirm.html', context
)
if request.method == 'POST':
if PASSWORD_UNICODE_NORMALIZE_FLAG.is_enabled():
# We have to make a copy of request.POST because it is a QueryDict object which is immutable until copied.
# We have to use request.POST because the password_reset_confirm method takes in the request and a user's
# password is set to the request.POST['new_password1'] field. We have to also normalize the new_password2
# field so it passes the equivalence check that new_password1 == new_password2
# In order to switch out of having to do this copy, we would want to move the normalize_password code into
# a custom User model's set_password method to ensure it is always happening upon calling set_password.
request.POST = request.POST.copy()
request.POST['new_password1'] = normalize_password(request.POST['new_password1'])
request.POST['new_password2'] = normalize_password(request.POST['new_password2'])
password = request.POST['new_password1']
try:
validate_password(password, user=user)
except ValidationError as err:
# We have a password reset attempt which violates some security
# policy, or any other validation. Use the existing Django template to communicate that
# back to the user.
context = {
'validlink': True,
'form': None,
'title': _('Password reset unsuccessful'),
'err_msg': ' '.join(err.messages),
}
context.update(platform_name)
return TemplateResponse(
request, 'registration/password_reset_confirm.html', context
)
# remember what the old password hash is before we call down
old_password_hash = user.password
response = password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=platform_name
)
# If password reset was unsuccessful a template response is returned (status_code 200).
# Check if form is invalid then show an error to the user.
# Note if password reset was successful we get response redirect (status_code 302).
if response.status_code == 200:
form_valid = response.context_data['form'].is_valid() if response.context_data['form'] else False
if not form_valid:
log.warning(
u'Unable to reset password for user [%s] because form is not valid. '
u'A possible cause is that the user had an invalid reset token',
user.username,
)
response.context_data['err_msg'] = _('Error in resetting your password. Please try again.')
return response
# get the updated user
updated_user = User.objects.get(id=uid_int)
# did the password hash change, if so record it in the PasswordHistory
if updated_user.password != old_password_hash:
entry = PasswordHistory()
entry.create(updated_user)
else:
response = password_reset_confirm(
request, uidb64=uidb64, token=token, extra_context=platform_name
)
response_was_successful = response.context_data.get('validlink')
if response_was_successful and not user.is_active:
user.is_active = True
user.save()
return response
|
10,470 |
def main():
# Module settings
argument_spec = dict(
bandwidth=dict(),
baseurl=dict(type='list'),
copr=dict(),
copr_api=dict(default='https://copr.fedorainfracloud.org', type='str'),
cost=dict(),
deltarpm_metadata_percentage=dict(),
deltarpm_percentage=dict(),
description=dict(),
enabled=dict(type='bool'),
enablegroups=dict(type='bool'),
exclude=dict(type='list'),
failovermethod=dict(choices=['roundrobin', 'priority']),
file=dict(),
gpgcakey=dict(),
gpgcheck=dict(type='bool'),
gpgkey=dict(type='list'),
http_caching=dict(choices=['all', 'packages', 'none']),
include=dict(),
includepkgs=dict(type='list'),
ip_resolve=dict(choices=['4', '6', 'IPv4', 'IPv6', 'whatever']),
keepalive=dict(type='bool'),
keepcache=dict(choices=['0', '1']),
metadata_expire=dict(),
metadata_expire_filter=dict(
choices=[
'never',
'read-only:past',
'read-only:present',
'read-only:future']),
metalink=dict(),
mirrorlist=dict(),
mirrorlist_expire=dict(),
name=dict(),
params=dict(type='dict'),
password=dict(no_log=True),
priority=dict(),
protect=dict(type='bool'),
proxy=dict(),
proxy_password=dict(no_log=True),
proxy_username=dict(),
repo_gpgcheck=dict(type='bool'),
reposdir=dict(default='/etc/yum.repos.d', type='path'),
retries=dict(),
s3_enabled=dict(type='bool'),
skip_if_unavailable=dict(type='bool'),
sslcacert=dict(aliases=['ca_cert']),
ssl_check_cert_permissions=dict(type='bool'),
sslclientcert=dict(aliases=['client_cert']),
sslclientkey=dict(aliases=['client_key']),
sslverify=dict(type='bool', aliases=['validate_certs']),
state=dict(choices=['present', 'absent'], default='present'),
throttle=dict(),
timeout=dict(),
ui_repoid_vars=dict(),
username=dict(),
)
argument_spec['async'] = dict(type='bool')
module = AnsibleModule(
argument_spec=argument_spec,
add_file_common_args=True,
supports_check_mode=True,
)
# Params was removed
# https://meetbot.fedoraproject.org/ansible-meeting/2017-09-28/ansible_dev_meeting.2017-09-28-15.00.log.html
if module.params['params']:
module.fail_json(msg="The params option to yum_repository was removed in Ansible 2.5 since it circumvents Ansible's option handling")
if module.params['name'] is None and module.params['copr'] is None:
        module.fail_json(msg="Either the 'name' or the 'copr' parameter needs to be set")
if module.params['copr']:
copr_api = module.params['copr_api']
copr_user, copr_project = module.params['copr'].split('/')
copr_server = generic_urlparse(list(urlparse(copr_api)))['hostname']
distro = get_distribution().lower()
if distro in ['centos', 'rhel', 'redhat']:
distro = 'epel'
# RHEL/CentOS will give a version like '7.8' or '8.2'
# but Fedora will give a version like '31' or '32' or 'Rawhide'
if distro == 'epel':
distro_version = get_distribution_version().split('.')[0]
else:
distro_version = get_distribution_version().lower()
copr_url = '{0}/coprs/{1}/{2}/repo/{3}-{4}/dnf.repo'.format(copr_api, copr_user, copr_project, distro, distro_version)
response, info = fetch_url(module, copr_url)
if info['status'] != 200:
module.fail_json(msg="failed to fetch COPR repo {0}, error was: {1}".format(copr_url, info['msg']))
copr_params = configparser.RawConfigParser()
# Python 2 ConfigParser doesn't have read_string and readfp is
# deprecated in Python 3
try:
copr_params.read_string(to_text(response.read()))
except AttributeError:
copr_params.readfp(StringIO(to_text(response.read())))
# Take all the parameters from the yum repo we just got from the COPR
# server and use them to set module.params for this module. We only set
# the variable if it hasn't bee set by the user already.
#
# What this means in practical terms is the user can specify 'baseurl:
# http://example.com/packages/' in their playbook and that will override
# the baseurl we get from the COPR server.
for key in dict(copr_params.items("copr:{0}:{1}:{2}".format(copr_server, copr_user, copr_project))).keys():
if not module.params.get(key, None):
module.params[key] = copr_params.get("copr:{0}:{1}:{2}".format(copr_server, copr_user, copr_project), key)
module.params['name'] = "copr:{0}:{1}:{2}".format(copr_server, copr_user, copr_project)
module.params['description'] = copr_params.get("copr:{0}:{1}:{2}".format(copr_server, copr_user, copr_project), 'name')
module.params['file'] = "_copr:{0}:{1}:{2}".format(copr_server, copr_user, copr_project)
# It is possible to have multiple entries for the baseurl and gpgkey but
# copr doesn't do this. this ansible module expects it to be a list so
# give it a list
for list_param in ['baseurl', 'gpgkey']:
if list_param in module.params and not isinstance(module.params[list_param], list):
module.params[list_param] = [module.params[list_param]]
name = module.params['name']
state = module.params['state']
# Check if required parameters are present
if state == 'present':
if (
module.params['baseurl'] is None and
module.params['metalink'] is None and
module.params['mirrorlist'] is None):
module.fail_json(
msg="Parameter 'baseurl', 'metalink' or 'mirrorlist' is required.")
if module.params['description'] is None:
module.fail_json(
msg="Parameter 'description' is required.")
# Rename "name" and "description" to ensure correct key sorting
module.params['repoid'] = module.params['name']
module.params['name'] = module.params['description']
del module.params['description']
# Change list type to string for baseurl and gpgkey
for list_param in ['baseurl', 'gpgkey']:
if (
list_param in module.params and
module.params[list_param] is not None):
module.params[list_param] = "\n".join(module.params[list_param])
# Define repo file name if it doesn't exist
if module.params['file'] is None:
module.params['file'] = module.params['repoid']
# Instantiate the YumRepo object
yumrepo = YumRepo(module)
# Get repo status before change
diff = {
'before_header': yumrepo.params['dest'],
'before': yumrepo.dump(),
'after_header': yumrepo.params['dest'],
'after': ''
}
# Perform action depending on the state
if state == 'present':
yumrepo.add()
elif state == 'absent':
yumrepo.remove()
# Get repo status after change
diff['after'] = yumrepo.dump()
# Compare repo states
changed = diff['before'] != diff['after']
# Save the file only if not in check mode and if there was a change
if not module.check_mode and changed:
yumrepo.save()
# Change file attributes if needed
if os.path.isfile(module.params['dest']):
file_args = module.load_file_common_arguments(module.params)
changed = module.set_fs_attributes_if_different(file_args, changed)
# Print status of the change
module.exit_json(changed=changed, repo=name, state=state, diff=diff)
|
def main():
# Module settings
argument_spec = dict(
bandwidth=dict(),
baseurl=dict(type='list'),
copr=dict(),
copr_api=dict(default='https://copr.fedorainfracloud.org', type='str'),
cost=dict(),
deltarpm_metadata_percentage=dict(),
deltarpm_percentage=dict(),
description=dict(),
enabled=dict(type='bool'),
enablegroups=dict(type='bool'),
exclude=dict(type='list'),
failovermethod=dict(choices=['roundrobin', 'priority']),
file=dict(),
gpgcakey=dict(),
gpgcheck=dict(type='bool'),
gpgkey=dict(type='list'),
http_caching=dict(choices=['all', 'packages', 'none']),
include=dict(),
includepkgs=dict(type='list'),
ip_resolve=dict(choices=['4', '6', 'IPv4', 'IPv6', 'whatever']),
keepalive=dict(type='bool'),
keepcache=dict(choices=['0', '1']),
metadata_expire=dict(),
metadata_expire_filter=dict(
choices=[
'never',
'read-only:past',
'read-only:present',
'read-only:future']),
metalink=dict(),
mirrorlist=dict(),
mirrorlist_expire=dict(),
name=dict(),
params=dict(type='dict'),
password=dict(no_log=True),
priority=dict(),
protect=dict(type='bool'),
proxy=dict(),
proxy_password=dict(no_log=True),
proxy_username=dict(),
repo_gpgcheck=dict(type='bool'),
reposdir=dict(default='/etc/yum.repos.d', type='path'),
retries=dict(),
s3_enabled=dict(type='bool'),
skip_if_unavailable=dict(type='bool'),
sslcacert=dict(aliases=['ca_cert']),
ssl_check_cert_permissions=dict(type='bool'),
sslclientcert=dict(aliases=['client_cert']),
sslclientkey=dict(aliases=['client_key']),
sslverify=dict(type='bool', aliases=['validate_certs']),
state=dict(choices=['present', 'absent'], default='present'),
throttle=dict(),
timeout=dict(),
ui_repoid_vars=dict(),
username=dict(),
)
argument_spec['async'] = dict(type='bool')
module = AnsibleModule(
argument_spec=argument_spec,
add_file_common_args=True,
supports_check_mode=True,
)
# Params was removed
# https://meetbot.fedoraproject.org/ansible-meeting/2017-09-28/ansible_dev_meeting.2017-09-28-15.00.log.html
if module.params['params']:
module.fail_json(msg="The params option to yum_repository was removed in Ansible 2.5 since it circumvents Ansible's option handling")
if module.params['name'] is None and module.params['copr'] is None:
        module.fail_json(msg="Either the 'name' or the 'copr' parameter needs to be set")
if module.params['copr']:
copr_api = module.params['copr_api']
copr_user, copr_project = module.params['copr'].split('/')
copr_server = generic_urlparse(list(urlparse(copr_api)))['hostname']
distro = get_distribution().lower()
if distro in ['centos', 'rhel', 'redhat']:
distro = 'epel'
# RHEL/CentOS will give a version like '7.8' or '8.2'
# but Fedora will give a version like '31' or '32' or 'Rawhide'
if distro == 'epel':
distro_version = get_distribution_version().split('.')[0]
else:
distro_version = get_distribution_version().lower()
copr_url = '{0}/coprs/{1}/{2}/repo/{3}-{4}/dnf.repo'.format(copr_api, copr_user, copr_project, distro, distro_version)
response, info = fetch_url(module, copr_url)
if info['status'] != 200:
module.fail_json(msg="failed to fetch COPR repo {0}, error was: {1}".format(copr_url, info['msg']))
copr_params = configparser.RawConfigParser()
# Python 2 ConfigParser doesn't have read_string and readfp is
# deprecated in Python 3
try:
copr_params.read_string(to_text(response.read()))
except AttributeError:
copr_params.readfp(StringIO(to_text(response.read())))
# Take all the parameters from the yum repo we just got from the COPR
# server and use them to set module.params for this module. We only set
# the variable if it hasn't been set by the user already.
#
# What this means in practical terms is the user can specify 'baseurl:
# http://example.com/packages/' in their playbook and that will override
# the baseurl we get from the COPR server.
for key in dict(copr_params.items("copr:{0}:{1}:{2}".format(copr_server, copr_user, copr_project))).keys():
if not module.params.get(key, None):
module.params[key] = copr_params.get("copr:{0}:{1}:{2}".format(copr_server, copr_user, copr_project), key)
module.params['name'] = "copr:{0}:{1}:{2}".format(copr_server, copr_user, copr_project)
module.params['description'] = copr_params.get("copr:{0}:{1}:{2}".format(copr_server, copr_user, copr_project), 'name')
module.params['file'] = "_copr:{0}:{1}:{2}".format(copr_server, copr_user, copr_project)
# It is possible to have multiple entries for the baseurl and gpgkey but
# copr doesn't do this. this ansible module expects it to be a list so
# give it a list
for list_param in ['baseurl', 'gpgkey']:
if list_param in module.params and not isinstance(module.params[list_param], list):
module.params[list_param] = [module.params[list_param]]
name = module.params['name']
state = module.params['state']
# Check if required parameters are present
if state == 'present':
if (
module.params['baseurl'] is None and
module.params['metalink'] is None and
module.params['mirrorlist'] is None):
module.fail_json(
msg="Parameter 'baseurl', 'metalink' or 'mirrorlist' is required.")
if module.params['description'] is None:
module.fail_json(
msg="Parameter 'description' is required.")
# Rename "name" and "description" to ensure correct key sorting
module.params['repoid'] = module.params['name']
module.params['name'] = module.params['description']
del module.params['description']
# Change list type to string for baseurl and gpgkey
for list_param in ['baseurl', 'gpgkey']:
if (
list_param in module.params and
module.params[list_param] is not None):
module.params[list_param] = "\n".join(module.params[list_param])
# Define repo file name if it doesn't exist
if module.params['file'] is None:
module.params['file'] = module.params['repoid']
# Instantiate the YumRepo object
yumrepo = YumRepo(module)
# Get repo status before change
diff = {
'before_header': yumrepo.params['dest'],
'before': yumrepo.dump(),
'after_header': yumrepo.params['dest'],
'after': ''
}
# Perform action depending on the state
if state == 'present':
yumrepo.add()
elif state == 'absent':
yumrepo.remove()
# Get repo status after change
diff['after'] = yumrepo.dump()
# Compare repo states
changed = diff['before'] != diff['after']
# Save the file only if not in check mode and if there was a change
if not module.check_mode and changed:
yumrepo.save()
# Change file attributes if needed
if os.path.isfile(module.params['dest']):
file_args = module.load_file_common_arguments(module.params)
changed = module.set_fs_attributes_if_different(file_args, changed)
# Print status of the change
module.exit_json(changed=changed, repo=name, state=state, diff=diff)
|
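For reference, a standalone sketch of how the COPR branch above consumes the downloaded dnf.repo file: it is a plain INI document whose single section is named copr:<server>:<user>:<project>, and configparser yields its keys exactly as the loop expects. The repo contents below are invented for illustration.

import configparser

repo_text = """\
[copr:copr.fedorainfracloud.org:someuser:someproject]
name=Copr repo for someproject owned by someuser
baseurl=https://download.copr.fedorainfracloud.org/results/someuser/someproject/fedora-$releasever-$basearch/
gpgcheck=1
enabled=1
"""

copr_params = configparser.RawConfigParser()
copr_params.read_string(repo_text)

section = "copr:copr.fedorainfracloud.org:someuser:someproject"
for key, value in copr_params.items(section):
    print(key, "=", value)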
11,959 |
def address_to_hash(addr: str, *, net=None) -> [int, bytes]:
"""Return the pubkey hash / witness program of an address"""
if net is None: net = constants.net
if not is_address(addr, net=net):
raise BitcoinException(f"invalid bitcoin address: {addr}")
witver, witprog = segwit_addr.decode(net.SEGWIT_HRP, addr)
if witprog is not None:
if len(witprog) == 20:
return WIF_SCRIPT_TYPES['p2wpkh'], bytes(witprog)
return WIF_SCRIPT_TYPES['p2wsh'], bytes(witprog)
addrtype, hash_160_ = b58_address_to_hash160(addr)
if addrtype == net.ADDRTYPE_P2PKH:
return WIF_SCRIPT_TYPES['p2pkh'], hash_160_
return WIF_SCRIPT_TYPES['p2sh'], hash_160_
|
def address_to_hash(addr: str, *, net=None) -> Tuple[int, bytes]:
"""Return the pubkey hash / witness program of an address"""
if net is None: net = constants.net
if not is_address(addr, net=net):
raise BitcoinException(f"invalid bitcoin address: {addr}")
witver, witprog = segwit_addr.decode(net.SEGWIT_HRP, addr)
if witprog is not None:
if len(witprog) == 20:
return WIF_SCRIPT_TYPES['p2wpkh'], bytes(witprog)
return WIF_SCRIPT_TYPES['p2wsh'], bytes(witprog)
addrtype, hash_160_ = b58_address_to_hash160(addr)
if addrtype == net.ADDRTYPE_P2PKH:
return WIF_SCRIPT_TYPES['p2pkh'], hash_160_
return WIF_SCRIPT_TYPES['p2sh'], hash_160_
|
54,791 |
def BS_mat(theta):
r"""Beam splitter matrix as implemented in hardware
"""
mat = np.array([[np.cos(theta), 1j*np.sin(theta)], [1j*np.sin(theta), np.cos(theta)]])
return mat
|
def BS_mat(theta):
r"""Beamsplitter matrix as implemented in hardware.
Args:
theta (float): Transmittivity angle :math:`\theta`. The transmission amplitude of
the beamsplitter is :math:`t = \cos(\theta)`.
"""
mat = np.array([[np.cos(theta), 1j*np.sin(theta)], [1j*np.sin(theta), np.cos(theta)]])
return mat
|
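A short numeric check of the matrix above: it is unitary and its transmission amplitude is cos(theta), which is what the added docstring states (the theta value is arbitrary).

import numpy as np

def BS_mat(theta):
    # same matrix as in the row above
    return np.array([[np.cos(theta), 1j * np.sin(theta)],
                     [1j * np.sin(theta), np.cos(theta)]])

theta = 0.3
U = BS_mat(theta)

assert np.allclose(U.conj().T @ U, np.eye(2))   # unitarity
assert np.isclose(U[0, 0], np.cos(theta))       # transmission amplitude t = cos(theta)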
23,670 |
def prilliman(temp_cell, wind_speed, unit_mass=11.1, coefficients=None):
"""
Smooth out short-term model transience using the Prilliman model [1]_.
The Prilliman et al. model applies an exponential moving average to
the output of a steady-state cell temperature model to account for a
module's thermal inertia and smooth out the cell temperature's response
to changing weather conditions.
.. warning::
This implementation requires the time series inputs to be regularly
sampled in time. Data with irregular time steps should be resampled
prior to using this function.
Parameters
----------
temp_cell : pandas Series
Cell temperature modeled with steady-state assumptions [C]
wind_speed : pandas Series
Wind speed, adjusted to correspond to array height [m/s]
unit_mass : float, default 11.1
Total mass of module divided by its one-sided surface area [kg/m^2]
coefficients : 4-element list-like, optional
Values for coefficients a_0–a_3 from [1]_
Returns
-------
temp_cell : pandas Series
Smoothed version of the input cell temperature [C]
Notes
-----
This smoothing model was developed and validated using the SAPM
model for the steady-state input.
References
----------
.. [1] M. Prilliman, J. S. Stein, D. Riley and G. Tamizhmani,
"Transient Weighted Moving-Average Model of Photovoltaic Module
Back-Surface Temperature," IEEE Journal of Photovoltaics, 2020.
:doi:`10.1109/JPHOTOV.2020.2992351`
"""
# TODO: check inputs to ensure regular spacing?
time_step = (temp_cell.index[1] - temp_cell.index[0]).total_seconds()
if time_step >= 1200:
# too coarsely sampled for smoothing to be relevant
return temp_cell
window = min(int(1200 / time_step), # time series > 20 minutes
len(temp_cell)) # time series < 20 minutes
# prefix with NaNs so that the rolling window is "full",
# even for the first actual value:
prefix = np.full(window, np.nan)
temp_cell_prefixed = np.append(prefix, temp_cell.values)
# get one row per 20-minute window
H = scipy.linalg.hankel(np.arange(window),
np.arange(window - 1, len(temp_cell_prefixed)))
subsets = temp_cell_prefixed[H].T
# calculate weights for the values in each window
if coefficients is not None:
a = coefficients
else:
# values from [1], Table II
a = [0.0046, 0.00046, -0.00023, -1.6e-5]
wind_speed = wind_speed.values
P = a[0] + a[1]*wind_speed + a[2]*unit_mass + a[3]*wind_speed*unit_mass
timedeltas = np.arange(window, 0, -1) * time_step
weights = np.exp(-P[:, np.newaxis] * timedeltas)
# set weights corresponding to the prefix values to zero; otherwise the
# denominator of the weighted average below would be wrong
mask_idx = np.triu_indices(window)
np.fliplr(weights)[mask_idx] = 0
# change the first row of weights from zero to nan -- this is a
# trick to prevent div by zero warning when dividing by summed weights
weights[0, :] = np.nan
# finally, take the weighted average of each window
numerator = np.nansum(subsets[:-1] * weights, axis=1)
denominator = np.sum(weights, axis=1)
smoothed = numerator / denominator
smoothed[0] = temp_cell.values[0]
smoothed = pd.Series(smoothed, index=temp_cell.index)
return smoothed
|
def prilliman(temp_cell, wind_speed, unit_mass=11.1, coefficients=None):
"""
Smooth out short-term model transience using the Prilliman model [1]_.
The Prilliman et al. model applies an exponential moving average to
the output of a steady-state cell temperature model to account for a
module's thermal inertia and smooth out the cell temperature's response
to changing weather conditions.
.. warning::
This implementation requires the time series inputs to be regularly
sampled in time. Data with irregular time steps should be resampled
prior to using this function.
Parameters
----------
temp_cell : pandas Series
Cell temperature modeled with steady-state assumptions [C]
wind_speed : pandas Series
Wind speed, adjusted to correspond to array height [m/s]
unit_mass : float, default 11.1
Total mass of module divided by its one-sided surface area [kg/m^2]
coefficients : 4-element list-like, optional
Values for coefficients a_0 through a_3, see Eq. 9 of [1]_
Returns
-------
temp_cell : pandas Series
Smoothed version of the input cell temperature [C]
Notes
-----
This smoothing model was developed and validated using the SAPM
model for the steady-state input.
References
----------
.. [1] M. Prilliman, J. S. Stein, D. Riley and G. Tamizhmani,
"Transient Weighted Moving-Average Model of Photovoltaic Module
Back-Surface Temperature," IEEE Journal of Photovoltaics, 2020.
:doi:`10.1109/JPHOTOV.2020.2992351`
"""
# TODO: check inputs to ensure regular spacing?
time_step = (temp_cell.index[1] - temp_cell.index[0]).total_seconds()
if time_step >= 1200:
# too coarsely sampled for smoothing to be relevant
return temp_cell
window = min(int(1200 / time_step), # time series > 20 minutes
len(temp_cell)) # time series < 20 minutes
# prefix with NaNs so that the rolling window is "full",
# even for the first actual value:
prefix = np.full(window, np.nan)
temp_cell_prefixed = np.append(prefix, temp_cell.values)
# get one row per 20-minute window
H = scipy.linalg.hankel(np.arange(window),
np.arange(window - 1, len(temp_cell_prefixed)))
subsets = temp_cell_prefixed[H].T
# calculate weights for the values in each window
if coefficients is not None:
a = coefficients
else:
# values from [1], Table II
a = [0.0046, 0.00046, -0.00023, -1.6e-5]
wind_speed = wind_speed.values
P = a[0] + a[1]*wind_speed + a[2]*unit_mass + a[3]*wind_speed*unit_mass
timedeltas = np.arange(window, 0, -1) * time_step
weights = np.exp(-P[:, np.newaxis] * timedeltas)
# set weights corresponding to the prefix values to zero; otherwise the
# denominator of the weighted average below would be wrong
mask_idx = np.triu_indices(window)
np.fliplr(weights)[mask_idx] = 0
# change the first row of weights from zero to nan -- this is a
# trick to prevent div by zero warning when dividing by summed weights
weights[0, :] = np.nan
# finally, take the weighted average of each window
numerator = np.nansum(subsets[:-1] * weights, axis=1)
denominator = np.sum(weights, axis=1)
smoothed = numerator / denominator
smoothed[0] = temp_cell.values[0]
smoothed = pd.Series(smoothed, index=temp_cell.index)
return smoothed
|
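To make the smoothing weights above concrete, a NumPy-only sketch of the exponential weights inside one 20-minute window; the coefficients are the Table II defaults copied from the function, while the wind speed and time step are illustrative.

import numpy as np

a = [0.0046, 0.00046, -0.00023, -1.6e-5]   # Table II defaults, as in the function
wind_speed = 2.0    # m/s, illustrative
unit_mass = 11.1    # kg/m^2, the function's default
time_step = 60.0    # seconds between samples, illustrative

P = a[0] + a[1] * wind_speed + a[2] * unit_mass + a[3] * wind_speed * unit_mass

window = int(1200 / time_step)               # samples spanning 20 minutes
timedeltas = np.arange(window, 0, -1) * time_step
weights = np.exp(-P * timedeltas)

# The newest sample carries the largest weight; older samples decay exponentially.
print(weights.round(4))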
54,026 |
def _get_ironpython_lib_path_mac(version):
lib_paths = {
'5.0': ['/', 'Applications', 'Rhinoceros.app', 'Contents'],
'6.0': ['/', 'Applications', 'Rhinoceros.app', 'Contents', 'Frameworks', 'RhCore.framework', 'Versions', 'A'],
'7.0': ['/', 'Applications', 'Rhino 7.app', 'Contents', 'Frameworks', 'RhCore.framework', 'Versions', 'A']
}
return os.path.join(*lib_paths.get(version) + ['Resources', 'ManagedPlugIns', 'RhinoDLR_Python.rhp', 'Lib'])
|
def _get_ironpython_lib_path_mac(version):
lib_paths = {
'5.0': ['/', 'Applications', 'Rhinoceros.app', 'Contents'],
'6.0': ['/', 'Applications', 'Rhinoceros.app', 'Contents', 'Frameworks', 'RhCore.framework', 'Versions', 'A'],
'7.0': ['/', 'Applications', 'Rhino 7.app', 'Contents', 'Frameworks', 'RhCore.framework', 'Versions', 'A'],
'WIP': ['/', 'Applications', 'RhinoWIP.app', 'Contents', 'Frameworks', 'RhCore.framework', 'Versions', 'A'],
}
return os.path.join(*lib_paths.get(version) + ['Resources', 'ManagedPlugIns', 'RhinoDLR_Python.rhp', 'Lib'])
|
59,700 |
def load_hotspots():
"""
Load a table with the locations, names, and suggested icon sizes of
hotspots.
This is the ``@hotspots.txt`` dataset used in the GMT tutorials.
The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
first time you invoke this function. Afterwards, it will load the data from
the cache. So you'll need an internet connection the first time around.
Returns
-------
data : pandas.DataFrame
The data table. Use ``print(data.describe())`` to see the available
columns.
"""
fname = which("@hotspots.txt", download="c")
with open(fname) as hotspot_text:
hotspot_text.readline()
hotspot_text.readline()
hotspot_text.readline()
hotspots = []
for line in hotspot_text:
line_split = line.strip().split("\t")
# Add coordinates and icon_size of hotspot
hotspot = [float(item.strip()) for item in line_split[0].split()]
hotspot.append(line_split[1].title()) # Add name of hotspot
hotspots.append(hotspot)
data = pd.DataFrame(
hotspots, columns=["longitude", "latitude", "icon_size", "name"]
)
return data
|
def load_hotspots():
"""
Load a table with the locations, names, and suggested icon sizes of
hotspots.
This is the ``@hotspots.txt`` dataset used in the GMT tutorials.
The data are downloaded to a cache directory (usually ``~/.gmt/cache``) the
first time you invoke this function. Afterwards, it will load the data from
the cache. So you'll need an internet connection the first time around.
Returns
-------
data : pandas.DataFrame
The data table. Use ``print(data.describe())`` to see the available
columns.
"""
fname = which("@hotspots.txt", download="c")
# Read first 3 columns (space-separated): longitude, latitude and icon_size
lon_lat_size = pd.read_table(
fname,
sep="\s+",
comment="#",
usecols=[0, 1, 2],
names=["longitude", "latitude", "icon_size"],
)
# Read last column (tab-separated): placename
placename = pd.read_table(fname, comment="#", usecols=[1], names=["placename"])
# Concat first 3 columns with last column to get a 4-column dataframe
data = pd.concat(objs=[lon_lat_size, placename], axis="columns")
return data
|
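A standalone illustration of the two-pass read in the modified version: the first three columns are whitespace-separated while the place name is a tab-separated field, so the file is parsed twice and the pieces concatenated. The two sample rows below are invented for demonstration.

import io

import pandas as pd

sample = (
    "# a comment line, skipped by comment='#'\n"
    "-155.28 19.41 1.0\tHAWAII\n"
    "-18.00 65.00 0.8\tICELAND\n"
)

lon_lat_size = pd.read_table(
    io.StringIO(sample),
    sep=r"\s+",
    comment="#",
    usecols=[0, 1, 2],
    names=["longitude", "latitude", "icon_size"],
)
placename = pd.read_table(
    io.StringIO(sample), comment="#", usecols=[1], names=["placename"]
)
print(pd.concat(objs=[lon_lat_size, placename], axis="columns"))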
39,201 |
def get_sinusoid(
*,
frequency: float = 300,
sample_rate: int = 16000,
duration: float = 1, # seconds
n_channels: int = 1,
dtype: Union[str, torch.dtype] = "float32",
device: Union[str, torch.device] = "cpu",
channels_first: bool = True,
):
"""Generate pseudo audio data with sine wave.
Args:
frequency: Frequency of sine wave
sample_rate: Sampling rate
duration: Length of the resulting Tensor in seconds.
n_channels: Number of channels
dtype: Torch dtype
device: device
Returns:
Tensor: shape of (n_channels, sample_rate * duration)
"""
if isinstance(dtype, str):
dtype = getattr(torch, dtype)
pie2 = 2 * 3.141592653589793
end = pie2 * frequency * duration
num_frames = int(sample_rate * duration)
# Randomize the initial phase. (except the first channel)
theta0 = pie2 * torch.randn(n_channels, 1, dtype=torch.float32, device=device)
theta0[0, :] = 0
theta = torch.linspace(0, end, num_frames, dtype=torch.float32, device=device)
theta = theta0.repeat(1, num_frames) + theta.unsqueeze(0)
tensor = torch.sin(theta, out=None)
if not channels_first:
tensor = tensor.t()
return convert_tensor_encoding(tensor, dtype)
|
def get_sinusoid(
*,
frequency: float = 300,
sample_rate: int = 16000,
duration: float = 1, # seconds
n_channels: int = 1,
dtype: Union[str, torch.dtype] = "float32",
device: Union[str, torch.device] = "cpu",
channels_first: bool = True,
):
"""Generate pseudo audio data with sine wave.
Args:
frequency: Frequency of sine wave
sample_rate: Sampling rate
duration: Length of the resulting Tensor in seconds.
n_channels: Number of channels
dtype: Torch dtype
device: device
Returns:
Tensor: shape of (n_channels, sample_rate * duration)
"""
if isinstance(dtype, str):
dtype = getattr(torch, dtype)
pie2 = 2 * 3.141592653589793
end = pie2 * frequency * duration
num_frames = int(sample_rate * duration)
# Randomize the initial phase. (except the first channel)
theta0 = pie2 * torch.randn(n_channels, 1, dtype=torch.float32, device=device)
theta0[0, :] = 0
theta = torch.linspace(0, end, num_frames, dtype=torch.float32, device=device)
theta = theta0 + theta
tensor = torch.sin(theta, out=None)
if not channels_first:
tensor = tensor.t()
return convert_tensor_encoding(tensor, dtype)
|
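The only functional change in this pair is dropping the explicit repeat/unsqueeze in favour of broadcasting; a tiny check that the two formulations agree (shapes are arbitrary).

import torch

n_channels, num_frames = 3, 8
theta0 = torch.randn(n_channels, 1)
theta = torch.linspace(0, 6.28, num_frames)

explicit = theta0.repeat(1, num_frames) + theta.unsqueeze(0)   # original form
broadcast = theta0 + theta                                     # modified form

assert broadcast.shape == (n_channels, num_frames)
assert torch.allclose(explicit, broadcast)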
7,146 |
def blob_dog(image, min_sigma=1, max_sigma=50, sigma_ratio=1.6, threshold=2.0,
overlap=.5, exclude_border=False):
r"""Finds blobs in the given grayscale image.
Blobs are found using the Difference of Gaussian (DoG) method [1]_.
For each blob found, the method returns its coordinates and the standard
deviation of the Gaussian kernel that detected the blob.
Parameters
----------
image : 2D or 3D ndarray
Input grayscale image, blobs are assumed to be light on dark
background (white on black).
min_sigma : scalar or sequence of scalars, optional
the minimum standard deviation for Gaussian kernel. Keep this low to
detect smaller blobs. The standard deviations of the Gaussian filter
are given for each axis as a sequence, or as a single number, in
which case it is equal for all axes.
max_sigma : scalar or sequence of scalars, optional
The maximum standard deviation for Gaussian kernel. Keep this high to
detect larger blobs. The standard deviations of the Gaussian filter
are given for each axis as a sequence, or as a single number, in
which case it is equal for all axes.
sigma_ratio : float, optional
The ratio between the standard deviation of Gaussian Kernels used for
computing the Difference of Gaussians
    threshold : float, optional
        The absolute lower bound for scale space maxima. Local maxima smaller
        than ``threshold`` are ignored. Reduce this to detect blobs with lower
        intensities.
overlap : float, optional
A value between 0 and 1. If the area of two blobs overlaps by a
fraction greater than `threshold`, the smaller blob is eliminated.
exclude_border : int or bool, optional
If nonzero int, `exclude_border` excludes blobs from
within `exclude_border`-pixels of the border of the image.
Returns
-------
A : (n, image.ndim + sigma) ndarray
A 2d array with each row representing 2 coordinate values for a 2D
image, and 3 coordinate values for a 3D image, plus the sigma(s) used.
When a single sigma is passed, outputs are:
``(r, c, sigma)`` or ``(p, r, c, sigma)`` where ``(r, c)`` or
``(p, r, c)`` are coordinates of the blob and ``sigma`` is the standard
deviation of the Gaussian kernel which detected the blob. When an
anisotropic gaussian is used (sigmas per dimension), the detected sigma
is returned for each dimension.
References
----------
.. [1] https://en.wikipedia.org/wiki/Blob_detection#The_difference_of_Gaussians_approach
Examples
--------
>>> from skimage import data, feature
>>> feature.blob_dog(data.coins(), threshold=.5, max_sigma=40)
array([[ 267. , 359. , 16.777216],
[ 267. , 115. , 10.48576 ],
[ 263. , 302. , 16.777216],
[ 263. , 245. , 16.777216],
[ 261. , 173. , 16.777216],
[ 260. , 46. , 16.777216],
[ 198. , 155. , 10.48576 ],
[ 196. , 43. , 10.48576 ],
[ 195. , 102. , 16.777216],
[ 194. , 277. , 16.777216],
[ 193. , 213. , 16.777216],
[ 185. , 347. , 16.777216],
[ 128. , 154. , 10.48576 ],
[ 127. , 102. , 10.48576 ],
[ 125. , 208. , 10.48576 ],
[ 125. , 45. , 16.777216],
[ 124. , 337. , 10.48576 ],
[ 120. , 272. , 16.777216],
[ 58. , 100. , 10.48576 ],
[ 54. , 276. , 10.48576 ],
[ 54. , 42. , 16.777216],
[ 52. , 216. , 16.777216],
[ 52. , 155. , 16.777216],
[ 45. , 336. , 16.777216]])
Notes
-----
The radius of each blob is approximately :math:`\sqrt{2}\sigma` for
a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image.
"""
image = img_as_float(image)
# Gaussian filter requires that sequence-type sigmas have same
# dimensionality as image. This broadcasts scalar kernels
if isinstance(max_sigma, (int, float)):
max_sigma = np.full(len(image.shape), max_sigma, dtype=np.float)
if isinstance(min_sigma, (int, float)):
min_sigma = np.full(len(image.shape), min_sigma, dtype=np.float)
# Convert sequence types to array
min_sigma = np.asarray(min_sigma, dtype=np.float)
max_sigma = np.asarray(max_sigma, dtype=np.float)
# k such that min_sigma*(sigma_ratio**k) > max_sigma
k = int(np.mean(np.log(max_sigma / min_sigma) / np.log(sigma_ratio) + 1))
# a geometric progression of standard deviations for gaussian kernels
sigma_list = np.array([min_sigma * (sigma_ratio ** i)
for i in range(k + 1)])
gaussian_images = [gaussian_filter(image, s) for s in sigma_list]
# computing difference between two successive Gaussian blurred images
# multiplying with average standard deviation provides scale invariance
dog_images = [(gaussian_images[i] - gaussian_images[i + 1])
* np.mean(sigma_list[i]) for i in range(k)]
image_cube = np.stack(dog_images, axis=-1)
# local_maxima = get_local_maxima(image_cube, threshold)
local_maxima = peak_local_max(image_cube, threshold_abs=threshold,
footprint=np.ones((3,) * (image.ndim + 1)),
threshold_rel=0.0,
exclude_border=exclude_border)
# Catch no peaks
if local_maxima.size == 0:
return np.empty((0, 3))
# Convert local_maxima to float64
lm = local_maxima.astype(np.float64)
# translate final column of lm, which contains the index of the
# sigma that produced the maximum intensity value, into the sigma
sigmas_of_peaks = sigma_list[local_maxima[:, -1]]
    # if the gaussian is isotropic, the stdevs across dimensions are
    # identical, so return only the stdev of the first dimension
if np.unique(min_sigma).shape == (1,) and np.unique(max_sigma).shape == (1,):
sigmas_of_peaks = sigmas_of_peaks[:, 0][:, None]
# Remove sigma index and replace with sigmas
lm = np.hstack([lm[:, :-1], sigmas_of_peaks])
return _prune_blobs(lm, overlap)
|
def blob_dog(image, min_sigma=1, max_sigma=50, sigma_ratio=1.6, threshold=2.0,
overlap=.5, exclude_border=False):
r"""Finds blobs in the given grayscale image.
Blobs are found using the Difference of Gaussian (DoG) method [1]_.
For each blob found, the method returns its coordinates and the standard
deviation of the Gaussian kernel that detected the blob.
Parameters
----------
image : 2D or 3D ndarray
Input grayscale image, blobs are assumed to be light on dark
background (white on black).
min_sigma : scalar or sequence of scalars, optional
the minimum standard deviation for Gaussian kernel. Keep this low to
detect smaller blobs. The standard deviations of the Gaussian filter
are given for each axis as a sequence, or as a single number, in
which case it is equal for all axes.
max_sigma : scalar or sequence of scalars, optional
The maximum standard deviation for Gaussian kernel. Keep this high to
detect larger blobs. The standard deviations of the Gaussian filter
are given for each axis as a sequence, or as a single number, in
which case it is equal for all axes.
sigma_ratio : float, optional
The ratio between the standard deviation of Gaussian Kernels used for
computing the Difference of Gaussians
    threshold : float, optional
        The absolute lower bound for scale space maxima. Local maxima smaller
        than ``threshold`` are ignored. Reduce this to detect blobs with lower
        intensities.
overlap : float, optional
A value between 0 and 1. If the area of two blobs overlaps by a
fraction greater than `threshold`, the smaller blob is eliminated.
exclude_border : int or bool, optional
If nonzero int, `exclude_border` excludes blobs from
within `exclude_border`-pixels of the border of the image.
Returns
-------
A : (n, image.ndim + sigma) ndarray
A 2d array with each row representing 2 coordinate values for a 2D
image, and 3 coordinate values for a 3D image, plus the sigma(s) used.
When a single sigma is passed, outputs are:
``(r, c, sigma)`` or ``(p, r, c, sigma)`` where ``(r, c)`` or
``(p, r, c)`` are coordinates of the blob and ``sigma`` is the standard
deviation of the Gaussian kernel which detected the blob. When an
anisotropic gaussian is used (sigmas per dimension), the detected sigma
is returned for each dimension.
References
----------
.. [1] https://en.wikipedia.org/wiki/Blob_detection#The_difference_of_Gaussians_approach
Examples
--------
>>> from skimage import data, feature
>>> feature.blob_dog(data.coins(), threshold=.5, max_sigma=40)
array([[ 267. , 359. , 16.777216],
[ 267. , 115. , 10.48576 ],
[ 263. , 302. , 16.777216],
[ 263. , 245. , 16.777216],
[ 261. , 173. , 16.777216],
[ 260. , 46. , 16.777216],
[ 198. , 155. , 10.48576 ],
[ 196. , 43. , 10.48576 ],
[ 195. , 102. , 16.777216],
[ 194. , 277. , 16.777216],
[ 193. , 213. , 16.777216],
[ 185. , 347. , 16.777216],
[ 128. , 154. , 10.48576 ],
[ 127. , 102. , 10.48576 ],
[ 125. , 208. , 10.48576 ],
[ 125. , 45. , 16.777216],
[ 124. , 337. , 10.48576 ],
[ 120. , 272. , 16.777216],
[ 58. , 100. , 10.48576 ],
[ 54. , 276. , 10.48576 ],
[ 54. , 42. , 16.777216],
[ 52. , 216. , 16.777216],
[ 52. , 155. , 16.777216],
[ 45. , 336. , 16.777216]])
Notes
-----
The radius of each blob is approximately :math:`\sqrt{2}\sigma` for
a 2-D image and :math:`\sqrt{3}\sigma` for a 3-D image.
"""
image = img_as_float(image)
# Gaussian filter requires that sequence-type sigmas have same
# dimensionality as image. This broadcasts scalar kernels
if isinstance(max_sigma, (int, float)):
max_sigma = np.full(len(image.shape), max_sigma, dtype=np.float)
if isinstance(min_sigma, (int, float)):
min_sigma = np.full(len(image.shape), min_sigma, dtype=np.float)
# Convert sequence types to array
min_sigma = np.asarray(min_sigma, dtype=np.float)
max_sigma = np.asarray(max_sigma, dtype=float)
# k such that min_sigma*(sigma_ratio**k) > max_sigma
k = int(np.mean(np.log(max_sigma / min_sigma) / np.log(sigma_ratio) + 1))
# a geometric progression of standard deviations for gaussian kernels
sigma_list = np.array([min_sigma * (sigma_ratio ** i)
for i in range(k + 1)])
gaussian_images = [gaussian_filter(image, s) for s in sigma_list]
# computing difference between two successive Gaussian blurred images
# multiplying with average standard deviation provides scale invariance
dog_images = [(gaussian_images[i] - gaussian_images[i + 1])
* np.mean(sigma_list[i]) for i in range(k)]
image_cube = np.stack(dog_images, axis=-1)
# local_maxima = get_local_maxima(image_cube, threshold)
local_maxima = peak_local_max(image_cube, threshold_abs=threshold,
footprint=np.ones((3,) * (image.ndim + 1)),
threshold_rel=0.0,
exclude_border=exclude_border)
# Catch no peaks
if local_maxima.size == 0:
return np.empty((0, 3))
# Convert local_maxima to float64
lm = local_maxima.astype(np.float64)
# translate final column of lm, which contains the index of the
# sigma that produced the maximum intensity value, into the sigma
sigmas_of_peaks = sigma_list[local_maxima[:, -1]]
    # if the gaussian is isotropic, the stdevs across dimensions are
    # identical, so return only the stdev of the first dimension
if np.unique(min_sigma).shape == (1,) and np.unique(max_sigma).shape == (1,):
sigmas_of_peaks = sigmas_of_peaks[:, 0][:, None]
# Remove sigma index and replace with sigmas
lm = np.hstack([lm[:, :-1], sigmas_of_peaks])
return _prune_blobs(lm, overlap)
|
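A small standalone computation of the scale ladder built inside blob_dog: the number of scales k and the geometric progression of sigmas for the default parameters, using the same formulas as the function body.

import numpy as np

min_sigma = np.array([1.0])
max_sigma = np.array([50.0])
sigma_ratio = 1.6

# k such that min_sigma * sigma_ratio**k > max_sigma
k = int(np.mean(np.log(max_sigma / min_sigma) / np.log(sigma_ratio) + 1))
sigma_list = np.array([min_sigma * (sigma_ratio ** i) for i in range(k + 1)])

print(k)                              # 9 for the defaults
print(sigma_list.ravel().round(2))    # 1.0, 1.6, 2.56, ... up to ~68.72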
50,640 |
def main() -> int:
"""Linter CLI entry point."""
cwd = pathlib.Path.cwd()
options = cli.get_config(sys.argv[1:])
initialize_logger(options.verbosity)
_logger.debug("Options: %s", options)
formatter_factory: Any = formatters.Formatter
if options.quiet:
formatter_factory = formatters.QuietFormatter
if options.parseable:
formatter_factory = formatters.ParseableFormatter
if options.parseable_severity:
formatter_factory = formatters.ParseableSeverityFormatter
formatter = formatter_factory(cwd, options.display_relative_path)
if options.use_default_rules:
rulesdirs = options.rulesdir + [DEFAULT_RULESDIR]
else:
rulesdirs = options.rulesdir or [DEFAULT_RULESDIR]
rules = RulesCollection(rulesdirs)
if options.listrules:
formatted_rules = rules if options.format == 'plain' else rules_as_rst(rules)
print(formatted_rules)
return 0
if options.listtags:
print(rules.listtags())
return 0
if isinstance(options.tags, str):
options.tags = options.tags.split(',')
skip = set()
for s in options.skip_list:
skip.update(str(s).split(','))
options.skip_list = frozenset(skip)
if not options.playbook:
# no args triggers auto-detection mode
playbooks = get_playbooks_and_roles(options=options)
else:
playbooks = sorted(set(options.playbook))
matches = list()
checked_files: Set[Any] = set()
for playbook in playbooks:
runner = Runner(rules, playbook, options.tags,
options.skip_list, options.exclude_paths,
options.verbosity, checked_files)
matches.extend(runner.run())
matches.sort(
key=lambda x: (
normpath(x.filename),
x.linenumber,
x.rule.id if hasattr(x.rule, 'id') else 0))
for match in matches:
print(formatter.format(match, options.colored))
if len(matches):
return 2
else:
return 0
|
def main() -> int:
"""Linter CLI entry point."""
cwd = pathlib.Path.cwd()
options = cli.get_config(sys.argv[1:])
initialize_logger(options.verbosity)
_logger.debug("Options: %s", options)
formatter_factory: Any = formatters.Formatter
if options.quiet:
formatter_factory = formatters.QuietFormatter
if options.parseable:
formatter_factory = formatters.ParseableFormatter
if options.parseable_severity:
formatter_factory = formatters.ParseableSeverityFormatter
formatter = formatter_factory(cwd, options.display_relative_path)
if options.use_default_rules:
rulesdirs = options.rulesdir + [DEFAULT_RULESDIR]
else:
rulesdirs = options.rulesdir or [DEFAULT_RULESDIR]
rules = RulesCollection(rulesdirs)
if options.listrules:
formatted_rules = rules if options.format == 'plain' else rules_as_rst(rules)
print(formatted_rules)
return 0
if options.listtags:
print(rules.listtags())
return 0
if isinstance(options.tags, str):
options.tags = options.tags.split(',')
skip = set()
for s in options.skip_list:
skip.update(str(s).split(','))
options.skip_list = frozenset(skip)
if not options.playbook:
# no args triggers auto-detection mode
playbooks = get_playbooks_and_roles(options=options)
else:
playbooks = sorted(set(options.playbook))
matches = list()
checked_files: Set[Any] = set()
for playbook in playbooks:
runner = Runner(rules, playbook, options.tags,
options.skip_list, options.exclude_paths,
options.verbosity, checked_files)
matches.extend(runner.run())
matches.sort(
key=lambda x: (
normpath(x.filename),
x.linenumber,
getattr(x.rule, 'id', 0),
),
)
for match in matches:
print(formatter.format(match, options.colored))
if len(matches):
return 2
else:
return 0
|
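The sort-key change in this pair replaces a hasattr/ternary with getattr and a default; a minimal illustration that the two spellings agree (the rule classes here are stand-ins, not ansible-lint's).

class RuleWithId:
    id = 101

class RuleWithoutId:
    pass

for rule in (RuleWithId(), RuleWithoutId()):
    old_style = rule.id if hasattr(rule, 'id') else 0
    new_style = getattr(rule, 'id', 0)
    assert old_style == new_style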
23,081 |
def merge_chunk(lhs, *args, **kwargs):
empty_index_dtype = kwargs.pop("empty_index_dtype", None)
categorical_columns = kwargs.pop("categorical_columns", None)
rhs, *args = args
left_index = kwargs.get("left_index", False)
right_index = kwargs.get("right_index", False)
if categorical_columns is not None and PANDAS_GT_100:
for col in categorical_columns:
left = None
right = None
if col in lhs:
left = lhs[col]
elif col == kwargs.get("right_on", None) and left_index:
if is_categorical_dtype(lhs.index):
left = lhs.index
if col in rhs:
right = rhs[col]
elif col == kwargs.get("left_on", None) and right_index:
if is_categorical_dtype(rhs.index):
right = rhs.index
dtype = "category"
if left is not None and right is not None:
dtype = union_categoricals(
[left.astype("category").values, right.astype("category").values]
).dtype
if left is not None:
if isinstance(left, pd.Index):
lhs.index = left.astype(dtype)
else:
lhs.assign(**{col: left.astype(dtype)})
if right is not None:
if isinstance(right, pd.Index):
rhs.index = right.astype(dtype)
else:
rhs.assign(**{col: right.astype(dtype)})
out = lhs.merge(rhs, *args, **kwargs)
# Workaround pandas bug where if the output result of a merge operation is
# an empty dataframe, the output index is `int64` in all cases, regardless
# of input dtypes.
if len(out) == 0 and empty_index_dtype is not None:
out.index = out.index.astype(empty_index_dtype)
return out
|
def merge_chunk(lhs, *args, **kwargs):
empty_index_dtype = kwargs.pop("empty_index_dtype", None)
categorical_columns = kwargs.pop("categorical_columns", None)
rhs, *args = args
left_index = kwargs.get("left_index", False)
right_index = kwargs.get("right_index", False)
if categorical_columns is not None and PANDAS_GT_100:
for col in categorical_columns:
left = None
right = None
if col in lhs:
left = lhs[col]
elif col == kwargs.get("right_on", None) and left_index:
if is_categorical_dtype(lhs.index):
left = lhs.index
if col in rhs:
right = rhs[col]
elif col == kwargs.get("left_on", None) and right_index:
if is_categorical_dtype(rhs.index):
right = rhs.index
dtype = "category"
if left is not None and right is not None:
dtype = union_categoricals(
[left.astype("category").values, right.astype("category").values]
).dtype
if left is not None:
if isinstance(left, pd.Index):
lhs.index = left.astype(dtype)
else:
lhs = lhs.assign(**{col: left.astype(dtype)})
if right is not None:
if isinstance(right, pd.Index):
rhs.index = right.astype(dtype)
else:
rhs.assign(**{col: right.astype(dtype)})
out = lhs.merge(rhs, *args, **kwargs)
# Workaround pandas bug where if the output result of a merge operation is
# an empty dataframe, the output index is `int64` in all cases, regardless
# of input dtypes.
if len(out) == 0 and empty_index_dtype is not None:
out.index = out.index.astype(empty_index_dtype)
return out
|
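The fix in this pair hinges on DataFrame.assign returning a new frame instead of mutating in place; a short pandas demonstration on a toy frame (unrelated to the dask internals).

import pandas as pd

df = pd.DataFrame({"col": ["a", "b", "a"]})

df.assign(col=df["col"].astype("category"))        # result discarded: df is unchanged
assert df["col"].dtype == object

df = df.assign(col=df["col"].astype("category"))   # rebinding, as in the fixed line
assert str(df["col"].dtype) == "category"

Note that the rhs.assign(...) call on the else branch above still discards its result; only the lhs side was rebound in this change.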
30,422 |
def get_remote_file(full_file_path, tag='master'):
    # 'origin/' prefix is used to compare with remote branches but it is not a part of the github url.
tag = tag.replace('origin/', '')
# The replace in the end is for Windows support
github_path = os.path.join(CONTENT_GITHUB_LINK, tag, full_file_path).replace('\\', '/')
try:
res = requests.get(github_path, verify=False)
res.raise_for_status()
except Exception as exc:
print_warning('Could not find the old entity file under "{}".\n'
'please make sure that you did not break backward compatibility. '
'Reason: {}'.format(github_path, exc))
return {}
if full_file_path.endswith('json'):
details = json.loads(res.content)
else:
details = yaml.safe_load(res.content)
return details
|
def get_remote_file(full_file_path, tag='master'):
    # 'origin/' prefix is used to compare with remote branches but it is not a part of the github url.
tag = tag.replace('origin/', '')
# The replace in the end is for Windows support
github_path = os.path.join(CONTENT_GITHUB_LINK, tag, full_file_path).replace('\\', '/')
try:
res = requests.get(github_path, verify=False)
res.raise_for_status()
except Exception as exc:
print_warning('Could not find the old entity file under "{}".\n'
'please make sure that you did not break backward compatibility. '
'Reason: {}'.format(github_path, str(exc)))
return {}
if full_file_path.endswith('json'):
details = json.loads(res.content)
else:
details = yaml.safe_load(res.content)
return details
|
32,400 |
def cyble_fetch_taxii(client, args):
'''
TAXII feed details will be pulled from server
:param client: instance of client to communicate with server
:param args: Parameters for fetching the feed
:return: TAXII feed details
'''
try:
args['begin'] = str(parser.parse(args.get('begin')).replace(tzinfo=pytz.UTC)) if args.get('begin', None) else None
args['end'] = str(parser.parse(args.get('end')).replace(tzinfo=pytz.UTC)) if args.get('end', None) else None
except Exception as e:
raise ValueError("Invalid date format received")
result, time = client.get_taxii(args)
indicators = client.build_indicators(args, result)
entry_result = camelize(indicators)
hr = tableToMarkdown('Indicators', entry_result, headers=['Type', 'Value', 'Title', 'Time', 'Rawjson'])
command_results = CommandResults(
readable_output=hr,
outputs_prefix='CybleIntel.Threat',
outputs_key_field='details',
outputs=indicators
)
return command_results
|
def cyble_fetch_taxii(client: Client, args: Dict[str, Any]):
'''
TAXII feed details will be pulled from server
:param client: instance of client to communicate with server
:param args: Parameters for fetching the feed
:return: TAXII feed details
'''
try:
args['begin'] = str(parser.parse(args.get('begin')).replace(tzinfo=pytz.UTC)) if args.get('begin', None) else None
args['end'] = str(parser.parse(args.get('end')).replace(tzinfo=pytz.UTC)) if args.get('end', None) else None
except Exception as e:
raise ValueError("Invalid date format received")
result, time = client.get_taxii(args)
indicators = client.build_indicators(args, result)
entry_result = camelize(indicators)
hr = tableToMarkdown('Indicators', entry_result, headers=['Type', 'Value', 'Title', 'Time', 'Rawjson'])
command_results = CommandResults(
readable_output=hr,
outputs_prefix='CybleIntel.Threat',
outputs_key_field='details',
outputs=indicators
)
return command_results
|
57,709 |
def workday_first_run_command(client, mapper_in):
report_data = client.get_full_report()
indicators = report_to_indicators(report_data.get('Report_Entry'), mapper_in)
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
demisto.results("Indicators were created successfully")
|
def workday_first_run_command(client, mapper_in, report_url):
report_data = client.get_full_report(report_url)
indicators = report_to_indicators(report_data.get('Report_Entry'), mapper_in)
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
demisto.results("Indicators were created successfully")
|
6,994 |
def execute():
if frappe.conf.get(BACKUP_ENCRYPTION_CONFIG_KEY):
return
backup_path = pathlib.Path(get_backup_path())
encrypted_backups_present = bool(list(backup_path.glob("*-enc*")))
if encrypted_backups_present:
update_site_config(BACKUP_ENCRYPTION_CONFIG_KEY, get_encryption_key())
|
def execute():
if frappe.conf.get(BACKUP_ENCRYPTION_CONFIG_KEY):
return
backup_path = pathlib.Path(get_backup_path())
encrypted_backups_present = bool(list(backup_path.glob("*-enc*")))
if encrypted_backups_present:
update_site_config(BACKUP_ENCRYPTION_CONFIG_KEY, frappe.conf.encryption_key)
|
41,822 |
def test_save_best_config() -> None:
with tempfile.TemporaryDirectory() as tmp_dir:
def objective(trial: optuna.Trial) -> float:
trial.suggest_uniform("DROPOUT", dropout, dropout)
executor = optuna.integration.AllenNLPExecutor(trial, config_file, tmp_dir)
return executor.run()
config_file = "tests/integration_tests/allennlp_tests/example.jsonnet"
dropout = 0.5
study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=1)
best_config = optuna.integration.allennlp._save_best_config(config_file, study)
model_config = best_config["model"]
target_config = model_config["text_field_embedder"]["token_embedders"]["token_characters"]
assert target_config["dropout"] == dropout
|
def test_save_best_config() -> None:
with tempfile.TemporaryDirectory() as tmp_dir:
def objective(trial: optuna.Trial) -> float:
trial.suggest_uniform("DROPOUT", dropout, dropout)
executor = optuna.integration.AllenNLPExecutor(trial, config_file, tmp_dir)
return executor.run()
config_file = "tests/integration_tests/allennlp_tests/example.jsonnet"
dropout = 0.5
study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=1)
best_config = optuna.integration.allennlp._dump_best_config(config_file, study)
model_config = best_config["model"]
target_config = model_config["text_field_embedder"]["token_embedders"]["token_characters"]
assert target_config["dropout"] == dropout
|
35,355 |
def general_plotter(
meshes,
points,
labels,
title="",
cpos=None,
show_bounds=False,
show_axes=True,
background=None,
off_screen=None,
savefig=None,
window_size=None,
notebook=None,
# add_mesh kwargs:
style=None,
color="w",
show_edges=None,
edge_color=None,
point_size=5.0,
line_width=None,
opacity=1.0,
flip_scalars=False,
lighting=None,
n_colors=256,
interpolate_before_map=True,
cmap=None,
render_points_as_spheres=False,
render_lines_as_tubes=False,
scalar_bar_args={},
smooth_shading=None,
show_scalar_bar=None,
# labels kwargs
font_size=None,
font_family=None,
text_color=None,
theme=None,
return_plotter=False,
return_cpos = False
):
"""General pymapdl plotter for APDL geometry and meshes.
Parameters
----------
title : str, optional
Add given title to plot.
cpos : list(tuple(floats)), str
The camera position to use. You can either use a saved camera
position or specify one of the following strings:
- ``"xy"``
- ``"xz"``
- ``"yz"``
- ``"yx"``
- ``"zx"``
- ``"zy"``
- ``"iso"``
off_screen : bool, optional
Renders off screen when ``True``. Useful for automated
screenshots.
window_size : list, optional
Window size in pixels. Defaults to ``[1024, 768]``
notebook : bool, optional
        When True, the resulting plot is placed inline in a jupyter
notebook. Assumes a jupyter console is active. Automatically
enables off_screen.
show_bounds : bool, optional
Shows mesh bounds when ``True``.
show_axes : bool, optional
Shows a vtk axes widget. Enabled by default.
savefig : str, optional
Saves screenshot to a file path.
style : string, optional
Visualization style of the mesh. One of the following:
``style='surface'``, ``style='wireframe'``,
``style='points'``. Defaults to ``'surface'``. Note that
``'wireframe'`` only shows a wireframe of the outer geometry.
color : string or 3 item list, optional
Use to make the entire mesh have a single solid color. Either
a string, RGB list, or hex color string. For example:
``color='white'``, ``color='w'``, ``color=[1, 1, 1]``, or
``color='#FFFFFF'``. Color will be overridden if scalars are
specified.
show_edges : bool, optional
Shows the edges of a mesh. Does not apply to a wireframe
representation.
edge_color : string or 3 item list, optional, defaults to black
The solid color to give the edges when ``show_edges=True``.
Either a string, RGB list, or hex color string.
point_size : float, optional
Point size of any nodes in the dataset plotted. Also applicable
when style='points'. Default ``5.0``
line_width : float, optional
Thickness of lines. Only valid for wireframe and surface
representations. Default None.
opacity : float, str, array-like
Opacity of the mesh. If a single float value is given, it will be
the global opacity of the mesh and uniformly applied everywhere -
should be between 0 and 1. A string can also be specified to map
the scalars range to a predefined opacity transfer function
(options include: 'linear', 'linear_r', 'geom', 'geom_r').
A string could also be used to map a scalars array from the mesh to
the opacity (must have same number of elements as the
``scalars`` argument). Or you can pass a custom made transfer
function that is an array either ``n_colors`` in length or shorter.
n_colors : int, optional
Number of colors to use when displaying scalars. Defaults to 256.
The scalar bar will also have this many colors.
cmap : str, list, optional
        Name of the Matplotlib colormap to use when mapping the
``scalars``. See available Matplotlib colormaps. Only
applicable for when displaying ``scalars``. Requires Matplotlib
to be installed. ``colormap`` is also an accepted alias for
this. If ``colorcet`` or ``cmocean`` are installed, their
colormaps can be specified by name.
You can also specify a list of colors to override an
existing colormap with a custom one. For example, to
create a three color colormap you might specify
``['green', 'red', 'blue']``
render_points_as_spheres : bool, optional
Render points as spheres.
render_lines_as_tubes : bool, optional
Renders lines as tubes.
smooth_shading : bool, optional
Smoothly render curved surfaces when plotting. Not helpful
for all meshes.
theme : pyvista.DefaultTheme, optional
PyVista theme. Defaults to PyMAPDL theme.
return_plotter : bool, optional
Return the plotting object rather than showing the plot and
returning the camera position. Default ``False``.
return_cpos : bool, optional
Returns the camera position as an array. Default ``False``.
Returns
-------
cpos or pyvista.Plotter
Camera position or instance of ``pyvista.Plotter`` depending
on ``return_plotter``.
Examples
--------
Plot areas and modify the background color to ``'black'``
>>> cpos = mapdl.aplot(background='black')
Enable smooth_shading on an element plot.
>>> cpos = mapdl.eplot(smooth_shading=True)
Return the plotting instance, modify it, and display the plot.
>>> pl = mapdl.aplot(return_plotter=True)
>>> pl.show_bounds()
>>> pl.set_background('black')
>>> pl.add_text('my text')
>>> pl.show()
Save a screenshot to disk without showing the plot.
>>> mapdl.eplot(background='w', show_edges=True, smooth_shading=True,
window_size=[1920, 1080], savefig='screenshot.png',
off_screen=True)
"""
if notebook:
off_screen = True
if theme is None:
theme = MapdlTheme()
pl = pv.Plotter(off_screen=off_screen, notebook=notebook, theme=theme)
if background:
pl.set_background(background)
for point in points:
pl.add_points(
point["points"],
scalars=point.get("scalars", None),
color=color,
show_edges=show_edges,
edge_color=edge_color,
point_size=point_size,
line_width=line_width,
opacity=opacity,
flip_scalars=flip_scalars,
lighting=lighting,
n_colors=n_colors,
interpolate_before_map=interpolate_before_map,
cmap=cmap,
render_points_as_spheres=render_points_as_spheres,
render_lines_as_tubes=render_lines_as_tubes,
)
for mesh in meshes:
pl.add_mesh(
mesh["mesh"],
scalars=mesh.get("scalars"),
scalar_bar_args=scalar_bar_args,
color=mesh.get("color", color),
style=mesh.get("style", style),
show_edges=show_edges,
edge_color=edge_color,
smooth_shading=smooth_shading,
point_size=point_size,
line_width=line_width,
show_scalar_bar=show_scalar_bar,
opacity=opacity,
flip_scalars=flip_scalars,
lighting=lighting,
n_colors=n_colors,
interpolate_before_map=interpolate_before_map,
cmap=cmap,
render_points_as_spheres=render_points_as_spheres,
render_lines_as_tubes=render_lines_as_tubes,
)
for label in labels:
# verify points are not duplicates
points, idx, _ = unique_rows(np.array(label["points"]))
labels = np.array(label["labels"])[idx].tolist()
pl.add_point_labels(
points,
labels,
show_points=False,
shadow=False,
font_size=font_size,
font_family=font_family,
text_color=text_color,
)
if cpos:
pl.camera_position = cpos
if show_bounds:
pl.show_bounds()
if show_axes:
pl.show_axes()
if title:
pl.add_title(title)
returns_parameter = []
if return_plotter:
returns_parameter.append(pl)
if return_cpos:
returns_parameter.append(pl.camera_position)
if not returns_parameter:
returns_parameter = None
else:
if len(returns_parameter) == 1:
returns_parameter = returns_parameter[0]
else:
returns_parameter = tuple(returns_parameter)
# permit user to save the figure as a screenshot
if savefig:
pl.show(title=title, auto_close=False, window_size=window_size, screenshot=True)
pl.screenshot(savefig)
# return unclosed plotter
if return_plotter:
return returns_parameter
# if not returning plotter, close right away
pl.close()
elif return_plotter:
return returns_parameter
else:
pl.show()
return returns_parameter
|
def general_plotter(
meshes,
points,
labels,
title="",
cpos=None,
show_bounds=False,
show_axes=True,
background=None,
off_screen=None,
savefig=None,
window_size=None,
notebook=None,
# add_mesh kwargs:
style=None,
color="w",
show_edges=None,
edge_color=None,
point_size=5.0,
line_width=None,
opacity=1.0,
flip_scalars=False,
lighting=None,
n_colors=256,
interpolate_before_map=True,
cmap=None,
render_points_as_spheres=False,
render_lines_as_tubes=False,
scalar_bar_args={},
smooth_shading=None,
show_scalar_bar=None,
# labels kwargs
font_size=None,
font_family=None,
text_color=None,
theme=None,
return_plotter=False,
return_cpos=False,
):
"""General pymapdl plotter for APDL geometry and meshes.
Parameters
----------
title : str, optional
Add given title to plot.
cpos : list(tuple(floats)), str
The camera position to use. You can either use a saved camera
position or specify one of the following strings:
- ``"xy"``
- ``"xz"``
- ``"yz"``
- ``"yx"``
- ``"zx"``
- ``"zy"``
- ``"iso"``
off_screen : bool, optional
Renders off screen when ``True``. Useful for automated
screenshots.
window_size : list, optional
Window size in pixels. Defaults to ``[1024, 768]``
notebook : bool, optional
        When True, the resulting plot is placed inline in a jupyter
notebook. Assumes a jupyter console is active. Automatically
enables off_screen.
show_bounds : bool, optional
Shows mesh bounds when ``True``.
show_axes : bool, optional
Shows a vtk axes widget. Enabled by default.
savefig : str, optional
Saves screenshot to a file path.
style : string, optional
Visualization style of the mesh. One of the following:
``style='surface'``, ``style='wireframe'``,
``style='points'``. Defaults to ``'surface'``. Note that
``'wireframe'`` only shows a wireframe of the outer geometry.
color : string or 3 item list, optional
Use to make the entire mesh have a single solid color. Either
a string, RGB list, or hex color string. For example:
``color='white'``, ``color='w'``, ``color=[1, 1, 1]``, or
``color='#FFFFFF'``. Color will be overridden if scalars are
specified.
show_edges : bool, optional
Shows the edges of a mesh. Does not apply to a wireframe
representation.
edge_color : string or 3 item list, optional, defaults to black
The solid color to give the edges when ``show_edges=True``.
Either a string, RGB list, or hex color string.
point_size : float, optional
Point size of any nodes in the dataset plotted. Also applicable
when style='points'. Default ``5.0``
line_width : float, optional
Thickness of lines. Only valid for wireframe and surface
representations. Default None.
opacity : float, str, array-like
Opacity of the mesh. If a single float value is given, it will be
the global opacity of the mesh and uniformly applied everywhere -
should be between 0 and 1. A string can also be specified to map
the scalars range to a predefined opacity transfer function
(options include: 'linear', 'linear_r', 'geom', 'geom_r').
A string could also be used to map a scalars array from the mesh to
the opacity (must have same number of elements as the
``scalars`` argument). Or you can pass a custom made transfer
function that is an array either ``n_colors`` in length or shorter.
n_colors : int, optional
Number of colors to use when displaying scalars. Defaults to 256.
The scalar bar will also have this many colors.
cmap : str, list, optional
        Name of the Matplotlib colormap to use when mapping the
``scalars``. See available Matplotlib colormaps. Only
applicable for when displaying ``scalars``. Requires Matplotlib
to be installed. ``colormap`` is also an accepted alias for
this. If ``colorcet`` or ``cmocean`` are installed, their
colormaps can be specified by name.
You can also specify a list of colors to override an
existing colormap with a custom one. For example, to
create a three color colormap you might specify
``['green', 'red', 'blue']``
render_points_as_spheres : bool, optional
Render points as spheres.
render_lines_as_tubes : bool, optional
Renders lines as tubes.
smooth_shading : bool, optional
Smoothly render curved surfaces when plotting. Not helpful
for all meshes.
theme : pyvista.DefaultTheme, optional
PyVista theme. Defaults to PyMAPDL theme.
return_plotter : bool, optional
Return the plotting object rather than showing the plot and
returning the camera position. Default ``False``.
return_cpos : bool, optional
Returns the camera position as an array. Default ``False``.
Returns
-------
cpos or pyvista.Plotter
Camera position or instance of ``pyvista.Plotter`` depending
on ``return_plotter``.
Examples
--------
Plot areas and modify the background color to ``'black'``
>>> cpos = mapdl.aplot(background='black')
Enable smooth_shading on an element plot.
>>> cpos = mapdl.eplot(smooth_shading=True)
Return the plotting instance, modify it, and display the plot.
>>> pl = mapdl.aplot(return_plotter=True)
>>> pl.show_bounds()
>>> pl.set_background('black')
>>> pl.add_text('my text')
>>> pl.show()
Save a screenshot to disk without showing the plot.
>>> mapdl.eplot(background='w', show_edges=True, smooth_shading=True,
window_size=[1920, 1080], savefig='screenshot.png',
off_screen=True)
"""
if notebook:
off_screen = True
if theme is None:
theme = MapdlTheme()
pl = pv.Plotter(off_screen=off_screen, notebook=notebook, theme=theme)
if background:
pl.set_background(background)
for point in points:
pl.add_points(
point["points"],
scalars=point.get("scalars", None),
color=color,
show_edges=show_edges,
edge_color=edge_color,
point_size=point_size,
line_width=line_width,
opacity=opacity,
flip_scalars=flip_scalars,
lighting=lighting,
n_colors=n_colors,
interpolate_before_map=interpolate_before_map,
cmap=cmap,
render_points_as_spheres=render_points_as_spheres,
render_lines_as_tubes=render_lines_as_tubes,
)
for mesh in meshes:
pl.add_mesh(
mesh["mesh"],
scalars=mesh.get("scalars"),
scalar_bar_args=scalar_bar_args,
color=mesh.get("color", color),
style=mesh.get("style", style),
show_edges=show_edges,
edge_color=edge_color,
smooth_shading=smooth_shading,
point_size=point_size,
line_width=line_width,
show_scalar_bar=show_scalar_bar,
opacity=opacity,
flip_scalars=flip_scalars,
lighting=lighting,
n_colors=n_colors,
interpolate_before_map=interpolate_before_map,
cmap=cmap,
render_points_as_spheres=render_points_as_spheres,
render_lines_as_tubes=render_lines_as_tubes,
)
for label in labels:
# verify points are not duplicates
points, idx, _ = unique_rows(np.array(label["points"]))
labels = np.array(label["labels"])[idx].tolist()
pl.add_point_labels(
points,
labels,
show_points=False,
shadow=False,
font_size=font_size,
font_family=font_family,
text_color=text_color,
)
if cpos:
pl.camera_position = cpos
if show_bounds:
pl.show_bounds()
if show_axes:
pl.show_axes()
if title:
pl.add_title(title)
returns_parameter = []
if return_plotter:
returns_parameter.append(pl)
if return_cpos:
returns_parameter.append(pl.camera_position)
if not returns_parameter:
returns_parameter = None
else:
if len(returns_parameter) == 1:
returns_parameter = returns_parameter[0]
else:
returns_parameter = tuple(returns_parameter)
# permit user to save the figure as a screenshot
if savefig:
pl.show(title=title, auto_close=False, window_size=window_size, screenshot=True)
pl.screenshot(savefig)
# return unclosed plotter
if return_plotter:
return returns_parameter
# if not returning plotter, close right away
pl.close()
elif return_plotter:
return returns_parameter
else:
pl.show()
return returns_parameter
|
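Both versions of general_plotter end with the same return convention: no flag requested returns None, a single flag returns the bare object, and both flags return a tuple. A tiny standalone sketch of that bookkeeping (the names here are illustrative, not part of pymapdl):

def resolve_returns(return_plotter, return_cpos, plotter="pl", cpos="cpos"):
    # Mirrors the returns_parameter logic at the end of general_plotter.
    out = []
    if return_plotter:
        out.append(plotter)
    if return_cpos:
        out.append(cpos)
    if not out:
        return None
    return out[0] if len(out) == 1 else tuple(out)

print(resolve_returns(False, False))  # None
print(resolve_returns(True, False))   # 'pl'
print(resolve_returns(True, True))    # ('pl', 'cpos')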
8,847 |
def find(*patterns):
"""Decorate a function to be called each time patterns is found in a line.
:param str patterns: one or more regular expression(s)
Each argument is a regular expression which will trigger the function::
@find('hello', 'here')
# will trigger once on "hello you"
# will trigger twice on "hello here"
# will trigger once on "I'm right here!"
This decorator can be used multiple times to add more rules::
@find('here')
@find('hello')
# will trigger once on "hello you"
# will trigger twice on "hello here"
# will trigger once on "I'm right here!"
    If the Sopel instance is in a channel, or sent a ``PRIVMSG``, the function
    will execute once for each non-overlapping match of the expression in the
    string said. Each match also contains the position at which it was found.
Inside the regular expression, some special directives can be used.
``$nick`` will be replaced with the nick of the bot and ``,`` or ``:``, and
``$nickname`` will be replaced with the nick of the bot::
@find('$nickname')
# will trigger for each time the bot's nick is in a trigger
.. versionadded:: 7.1
.. note::
        The regex rule matches each non-overlapping occurrence, from left
        to right, and the function will execute once for each of these matches.
To match only once from anywhere in the line, use the :func:`search`
decorator instead. To match only once from the start of the line,
use the :func:`rule` decorator instead.
"""
def add_attribute(function):
if not hasattr(function, "find_rules"):
function.find_rules = []
for value in patterns:
if value not in function.find_rules:
function.find_rules.append(value)
return function
return add_attribute
|
def find(*patterns):
"""Decorate a function to be called for each time a pattern is found in a line.
:param str patterns: one or more regular expression(s)
Each argument is a regular expression which will trigger the function::
@find('hello', 'here')
# will trigger once on "hello you"
# will trigger twice on "hello here"
# will trigger once on "I'm right here!"
This decorator can be used multiple times to add more rules::
@find('here')
@find('hello')
# will trigger once on "hello you"
# will trigger twice on "hello here"
# will trigger once on "I'm right here!"
    If the Sopel instance is in a channel, or sent a ``PRIVMSG``, the function
    will execute once for each non-overlapping match of the expression in the
    string said. Each match also contains the position at which it was found.
Inside the regular expression, some special directives can be used.
``$nick`` will be replaced with the nick of the bot and ``,`` or ``:``, and
``$nickname`` will be replaced with the nick of the bot::
@find('$nickname')
# will trigger for each time the bot's nick is in a trigger
.. versionadded:: 7.1
.. note::
        The regex rule matches each non-overlapping occurrence, from left
        to right, and the function will execute once for each of these matches.
To match only once from anywhere in the line, use the :func:`search`
decorator instead. To match only once from the start of the line,
use the :func:`rule` decorator instead.
"""
def add_attribute(function):
if not hasattr(function, "find_rules"):
function.find_rules = []
for value in patterns:
if value not in function.find_rules:
function.find_rules.append(value)
return function
return add_attribute
|
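The decorator above only records patterns on the function; it is Sopel's loader that later walks `find_rules` and fires the callback once per match. A hedged sketch of that interaction, with a made-up dispatcher standing in for the bot:

import re

def find(*patterns):
    # Same attribute-recording behaviour as above, trimmed for illustration.
    def add_attribute(function):
        if not hasattr(function, "find_rules"):
            function.find_rules = []
        for value in patterns:
            if value not in function.find_rules:
                function.find_rules.append(value)
        return function
    return add_attribute

@find('hello', 'here')
def greet(match):
    print('matched %r at position %d' % (match.group(0), match.start()))

# Hypothetical dispatcher: run once per non-overlapping match of every rule.
line = "hello you, I'm right here!"
for pattern in greet.find_rules:
    for match in re.finditer(pattern, line):
        greet(match)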
26,496 |
def do_setup():
verify_gpl_dependency()
write_version()
setup(
name='apache-airflow',
description='Programmatically author, schedule and monitor data pipelines',
license='Apache License 2.0',
version=version,
packages=find_packages(exclude=['tests*']),
package_data={'': ['airflow/alembic.ini', "airflow/git_version"]},
include_package_data=True,
zip_safe=False,
scripts=['airflow/bin/airflow'],
install_requires=[
'alembic>=0.9, <1.0',
'bleach~=2.1.3',
'configparser>=3.5.0, <3.6.0',
'croniter>=0.3.17, <0.4',
'dill>=0.2.2, <0.3',
'flask>=0.12.4, <0.13',
'flask-appbuilder==1.12.1',
'flask-admin==1.5.2',
'flask-caching>=1.3.3, <1.4.0',
'flask-login>=0.3, <0.5',
'flask-swagger==0.2.13',
'flask-wtf>=0.14.2, <0.15',
'funcsigs==1.0.0',
'future>=0.16.0, <0.17',
'gitpython>=2.0.2',
'gunicorn>=19.4.0, <20.0',
'iso8601>=0.1.12',
'json-merge-patch==0.2',
'jinja2>=2.7.3, <=2.10.0',
'markdown>=2.5.2, <3.0',
'pandas>=0.17.1, <1.0.0',
'pendulum==1.4.4',
'psutil>=4.2.0, <6.0.0',
'pygments>=2.0.1, <3.0',
'python-daemon>=2.1.1, <2.2',
'python-dateutil>=2.3, <3',
'python-nvd3==0.15.0',
'requests>=2.5.1, <3',
'setproctitle>=1.1.8, <2',
'sqlalchemy>=1.1.15, <1.3.0',
'tabulate>=0.7.5, <=0.8.2',
'tenacity==4.8.0',
'thrift>=0.9.2',
'tzlocal>=1.4',
'unicodecsv>=0.14.1',
'werkzeug>=0.14.1, <0.15.0',
'zope.deprecation>=4.0, <5.0',
],
setup_requires=[
'docutils>=0.14, <1.0',
],
extras_require={
'all': devel_all,
'devel_ci': devel_ci,
'all_dbs': all_dbs,
'atlas': atlas,
'async': async_packages,
'azure_blob_storage': azure_blob_storage,
'azure_data_lake': azure_data_lake,
'azure_cosmos': azure_cosmos,
'azure_container_instances': azure_container_instances,
'cassandra': cassandra,
'celery': celery,
'cgroups': cgroups,
'cloudant': cloudant,
'crypto': crypto,
'dask': dask,
'databricks': databricks,
'datadog': datadog,
'devel': devel_minreq,
'devel_hadoop': devel_hadoop,
'devel_azure': devel_azure,
'doc': doc,
'docker': docker,
'druid': druid,
'elasticsearch': elasticsearch,
'emr': emr,
'gcp_api': gcp_api,
'github_enterprise': github_enterprise,
'google_auth': google_auth,
'hdfs': hdfs,
'hive': hive,
'jdbc': jdbc,
'jira': jira,
'kerberos': kerberos,
'kubernetes': kubernetes,
'ldap': ldap,
'mongo': mongo,
'mssql': mssql,
'mysql': mysql,
'oracle': oracle,
'password': password,
'pinot': pinot,
'postgres': postgres,
'qds': qds,
'rabbitmq': rabbitmq,
'redis': redis,
's3': s3,
'salesforce': salesforce,
'samba': samba,
'sendgrid': sendgrid,
'segment': segment,
'slack': slack,
'snowflake': snowflake,
'ssh': ssh,
'statsd': statsd,
'vertica': vertica,
'webhdfs': webhdfs,
'winrm': winrm
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Topic :: System :: Monitoring',
],
author='Apache Software Foundation',
author_email='[email protected]',
url='http://airflow.apache.org/',
download_url=(
'https://dist.apache.org/repos/dist/release//airflow/' + version),
cmdclass={
'test': Tox,
'extra_clean': CleanCommand,
'compile_assets': CompileAssets
},
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
)
|
def do_setup():
verify_gpl_dependency()
write_version()
setup(
name='apache-airflow',
description='Programmatically author, schedule and monitor data pipelines',
license='Apache License 2.0',
version=version,
packages=find_packages(exclude=['tests*']),
package_data={'': ['airflow/alembic.ini', "airflow/git_version"]},
include_package_data=True,
zip_safe=False,
scripts=['airflow/bin/airflow'],
install_requires=[
'alembic>=0.9, <1.0',
'bleach~=2.1.3',
'configparser>=3.5.0, <3.6.0',
'croniter>=0.3.17, <0.4',
'dill>=0.2.2, <0.3',
'flask>=0.12.4, <0.13',
'flask-appbuilder==1.12.1',
'flask-admin==1.5.2',
'flask-caching>=1.3.3, <1.4.0',
'flask-login>=0.3, <0.5',
'flask-swagger==0.2.13',
'flask-wtf>=0.14.2, <0.15',
'funcsigs==1.0.0',
'future>=0.16.0, <0.17',
'gitpython>=2.0.2',
'gunicorn>=19.4.0, <20.0',
'iso8601>=0.1.12',
'json-merge-patch==0.2',
'jinja2>=2.7.3, <=2.10.0',
'markdown>=2.5.2, <3.0',
'pandas>=0.17.1, <1.0.0',
'pendulum==1.4.4',
'psutil>=4.2.0, <6.0.0',
'pygments>=2.0.1, <3.0',
'python-daemon>=2.1.1, <2.2',
'python-dateutil>=2.3, <3',
'python-nvd3==0.15.0',
'requests>=2.5.1, <3',
'setproctitle>=1.1.8, <2',
'sqlalchemy>=1.1.15, <1.3.0',
'tabulate>=0.7.5, <=0.8.2',
'tenacity==4.8.0',
'thrift>=0.9.2',
'tzlocal>=1.4',
'unicodecsv>=0.14.1',
'werkzeug>=0.14.1, <0.15.0',
'zope.deprecation>=4.0, <5.0',
],
setup_requires=[
'docutils>=0.14, <1.0',
],
extras_require={
'all': devel_all,
'devel_ci': devel_ci,
'all_dbs': all_dbs,
'atlas': atlas,
'async': async_packages,
'azure_blob_storage': azure_blob_storage,
'azure_data_lake': azure_data_lake,
'azure_cosmos': azure_cosmos,
'azure_container_instances': azure_container_instances,
'cassandra': cassandra,
'celery': celery,
'cgroups': cgroups,
'cloudant': cloudant,
'crypto': crypto,
'dask': dask,
'databricks': databricks,
'datadog': datadog,
'devel': devel_minreq,
'devel_hadoop': devel_hadoop,
'devel_azure': devel_azure,
'doc': doc,
'docker': docker,
'druid': druid,
'elasticsearch': elasticsearch,
'emr': emr,
'gcp_api': gcp_api,
'github_enterprise': github_enterprise,
'google_auth': google_auth,
'hdfs': hdfs,
'hive': hive,
'jdbc': jdbc,
'jira': jira,
'kerberos': kerberos,
'kubernetes': kubernetes,
'ldap': ldap,
'mongo': mongo,
'mssql': mssql,
'mysql': mysql,
'oracle': oracle,
'password': password,
'pinot': pinot,
'postgres': postgres,
'qds': qds,
'rabbitmq': rabbitmq,
'redis': redis,
's3': s3,
'salesforce': salesforce,
'samba': samba,
'sendgrid': sendgrid,
'segment': segment,
'slack': slack,
'snowflake': snowflake,
'ssh': ssh,
'statsd': statsd,
'vertica': vertica,
'webhdfs': webhdfs,
'winrm': winrm
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Topic :: System :: Monitoring',
],
author='Apache Software Foundation',
author_email='[email protected]',
url='http://airflow.apache.org/',
download_url=(
'https://dist.apache.org/repos/dist/release/airflow/' + version),
cmdclass={
'test': Tox,
'extra_clean': CleanCommand,
'compile_assets': CompileAssets
},
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*',
)
|
58,879 |
def tensor_train_cross(input_tensor, rank, tol=1e-4, n_iter_max=100, random_state=None):
"""TT (tensor-train) decomposition via cross-approximation (TTcross) [1]
Decomposes `input_tensor` into a sequence of order-3 tensors of given rank. (factors/cores)
Rather than directly decompose the whole tensor, we sample fibers based on skeleton decomposition.
We initialize a random tensor-train and sweep from left to right and right to left.
    On each core, we reshape the core as a matrix, choose the fiber indices by finding a maximum-volume submatrix, and update the core.
* Advantage: faster
The main advantage of TTcross is that it doesn't need to evaluate all the entries of the tensor.
For a tensor_shape^tensor_order tensor, SVD needs O(tensor_shape^tensor_order) runtime, but TTcross' runtime is linear in tensor_shape and tensor_order, which makes it feasible in high dimension.
* Disadvantage: less accurate
TTcross may underestimate the error, since it only evaluates partial entries of the tensor.
    Besides, in contrast to its fast performance in practice, there is no theoretical guarantee of its convergence.
Parameters
----------
input_tensor : tensorly.tensor
The tensor to decompose.
rank : {int, int list}
maximum allowable TT rank of the factors
if int, then this is the same for all the factors
if int list, then rank[k] is the rank of the kth factor
tol : float
accuracy threshold for outer while-loop
n_iter_max : int
maximum iterations of outer while-loop (the 'crosses' or 'sweeps' sampled)
random_state : {None, int, np.random.RandomState}
Returns
-------
factors : TT factors
order-3 tensors of the TT decomposition
Examples
--------
Generate a 5^3 tensor, and decompose it into tensor-train of 3 factors, with rank = [1,3,3,1]
>>> tensor = tl.tensor(np.arange(5**3).reshape(5,5,5))
>>> rank = [1, 3, 3, 1]
>>> factors = tensor_train_cross(tensor, rank)
>>> # print the first core:
>>> print(factors[0])
[[[ 24. 0. 4.]
[ 49. 25. 29.]
[ 74. 50. 54.]
[ 99. 75. 79.]
[124. 100. 104.]]]
Notes
-----
Pseudo-code [2]:
    1. Initialize tensor_order cores and column indices
2. while (error > tol)
3. update the tensor-train from left to right:
.. code:: python
for Core 1 to Core tensor_order:
approximate the skeleton-decomposition by QR and maxvol
4. update the tensor-train from right to left:
.. code:: python
for Core tensor_order to Core 1
approximate the skeleton-decomposition by QR and maxvol
5. end while
Acknowledgement: the main body of the code is modified based on TensorToolbox by Daniele Bigoni.
References
----------
.. [1] Ivan Oseledets and Eugene Tyrtyshnikov. Tt-cross approximation for multidimensional arrays.
       Linear Algebra and its Applications, 432(1):70–88, 2010.
.. [2] Sergey Dolgov and Robert Scheichl. A hybrid alternating least squares–tt cross algorithm for parametricpdes.
arXiv preprint arXiv:1707.04562, 2017.
"""
# Check user input for errors
tensor_shape = tl.shape(input_tensor)
tensor_order = tl.ndim(input_tensor)
if isinstance(rank, int):
rank = [rank] * (tensor_order + 1)
elif tensor_order + 1 != len(rank):
message = f"Provided incorrect number of ranks. Should verify len(rank) == tl.ndim(tensor)+1, but {len(rank) = } while tl.ndim(tensor) + 1 = {tensor_order}"
raise (ValueError(message))
    # Make sure rank is a list, not a tuple
rank = list(rank)
# Initialize rank
if rank[0] != 1:
print(
f"Provided {rank[0] = } but boundary conditions dictate rank[0] == rank[-1] == 1: setting rank[0] to 1."
)
rank[0] = 1
    if rank[-1] != 1:
        print(
            f"Provided {rank[-1] = } but boundary conditions dictate rank[0] == rank[-1] == 1: setting rank[-1] to 1."
        )
        rank[-1] = 1
# list col_idx: column indices (right indices) for skeleton-decomposition: indicate which columns used in each core.
# list row_idx: row indices (left indices) for skeleton-decomposition: indicate which rows used in each core.
    # Initialize indices: random selection of column indices
rng = tl.check_random_state(random_state)
col_idx = [None] * tensor_order
for k_col_idx in range(tensor_order - 1):
col_idx[k_col_idx] = []
for i in range(rank[k_col_idx + 1]):
newidx = tuple(
[
rng.randint(tensor_shape[j])
for j in range(k_col_idx + 1, tensor_order)
]
)
while newidx in col_idx[k_col_idx]:
newidx = tuple(
[
rng.randint(tensor_shape[j])
for j in range(k_col_idx + 1, tensor_order)
]
)
col_idx[k_col_idx].append(newidx)
# Initialize the cores of tensor-train
factor_old = [
tl.zeros((rank[k], tensor_shape[k], rank[k + 1]), **tl.context(input_tensor))
for k in range(tensor_order)
]
factor_new = [
tl.tensor(
rng.random_sample((rank[k], tensor_shape[k], rank[k + 1])),
**tl.context(input_tensor)
)
for k in range(tensor_order)
]
iter = 0
error = tl.norm(tt_to_tensor(factor_old) - tt_to_tensor(factor_new), 2)
threshold = tol * tl.norm(tt_to_tensor(factor_new), 2)
for iter in range(n_iter_max):
if error < threshold:
break
factor_old = factor_new
factor_new = [None for i in range(tensor_order)]
######################################
# left-to-right step
left_to_right_fiberlist = []
# list row_idx: list of (tensor_order-1) of lists of left indices
row_idx = [[()]]
for k in range(tensor_order - 1):
(next_row_idx, fibers_list) = left_right_ttcross_step(
input_tensor, k, rank, row_idx, col_idx
)
# update row indices
left_to_right_fiberlist.extend(fibers_list)
row_idx.append(next_row_idx)
# end left-to-right step
###############################################
###############################################
# right-to-left step
right_to_left_fiberlist = []
# list col_idx: list (tensor_order-1) of lists of right indices
col_idx = [None] * tensor_order
col_idx[-1] = [()]
for k in range(tensor_order, 1, -1):
(next_col_idx, fibers_list, Q_skeleton) = right_left_ttcross_step(
input_tensor, k, rank, row_idx, col_idx
)
# update col indices
right_to_left_fiberlist.extend(fibers_list)
col_idx[k - 2] = next_col_idx
# Compute cores
try:
factor_new[k - 1] = tl.transpose(Q_skeleton)
factor_new[k - 1] = tl.reshape(
factor_new[k - 1], (rank[k - 1], tensor_shape[k - 1], rank[k])
)
except:
# The rank should not be larger than the input tensor's size
raise (
ValueError(
"The rank is too large compared to the size of the tensor. Try with small rank."
)
)
# Add the last core
idx = (slice(None, None, None),) + tuple(zip(*col_idx[0]))
core = input_tensor[idx]
core = tl.reshape(core, (tensor_shape[0], 1, rank[1]))
core = tl.transpose(core, (1, 0, 2))
factor_new[0] = core
# end right-to-left step
################################################
# check the error for while-loop
error = tl.norm(tt_to_tensor(factor_old) - tt_to_tensor(factor_new), 2)
threshold = tol * tl.norm(tt_to_tensor(factor_new), 2)
# check convergence
if iter >= n_iter_max:
raise ValueError("Maximum number of iterations reached.")
if tl.norm(tt_to_tensor(factor_old) - tt_to_tensor(factor_new), 2) > tol * tl.norm(
tt_to_tensor(factor_new), 2
):
raise ValueError("Low Rank Approximation algorithm did not converge.")
return factor_new
|
def tensor_train_cross(input_tensor, rank, tol=1e-4, n_iter_max=100, random_state=None):
"""TT (tensor-train) decomposition via cross-approximation (TTcross) [1]
Decomposes `input_tensor` into a sequence of order-3 tensors of given rank. (factors/cores)
Rather than directly decompose the whole tensor, we sample fibers based on skeleton decomposition.
We initialize a random tensor-train and sweep from left to right and right to left.
    On each core, we reshape the core as a matrix, choose the fiber indices by finding a maximum-volume submatrix, and update the core.
* Advantage: faster
The main advantage of TTcross is that it doesn't need to evaluate all the entries of the tensor.
For a tensor_shape^tensor_order tensor, SVD needs O(tensor_shape^tensor_order) runtime, but TTcross' runtime is linear in tensor_shape and tensor_order, which makes it feasible in high dimension.
* Disadvantage: less accurate
TTcross may underestimate the error, since it only evaluates partial entries of the tensor.
    Besides, in contrast to its fast performance in practice, there is no theoretical guarantee of its convergence.
Parameters
----------
input_tensor : tensorly.tensor
The tensor to decompose.
rank : {int, int list}
maximum allowable TT rank of the factors
if int, then this is the same for all the factors
if int list, then rank[k] is the rank of the kth factor
tol : float
accuracy threshold for outer while-loop
n_iter_max : int
maximum iterations of outer while-loop (the 'crosses' or 'sweeps' sampled)
random_state : {None, int, np.random.RandomState}
Returns
-------
factors : TT factors
order-3 tensors of the TT decomposition
Examples
--------
Generate a 5^3 tensor, and decompose it into tensor-train of 3 factors, with rank = [1,3,3,1]
>>> tensor = tl.tensor(np.arange(5**3).reshape(5,5,5))
>>> rank = [1, 3, 3, 1]
>>> factors = tensor_train_cross(tensor, rank)
>>> # print the first core:
>>> print(factors[0])
[[[ 24. 0. 4.]
[ 49. 25. 29.]
[ 74. 50. 54.]
[ 99. 75. 79.]
[124. 100. 104.]]]
Notes
-----
Pseudo-code [2]:
    1. Initialize tensor_order cores and column indices
2. while (error > tol)
3. update the tensor-train from left to right:
.. code:: python
for Core 1 to Core tensor_order:
approximate the skeleton-decomposition by QR and maxvol
4. update the tensor-train from right to left:
.. code:: python
for Core tensor_order to Core 1
approximate the skeleton-decomposition by QR and maxvol
5. end while
Acknowledgement: the main body of the code is modified based on TensorToolbox by Daniele Bigoni.
References
----------
.. [1] Ivan Oseledets and Eugene Tyrtyshnikov. Tt-cross approximation for multidimensional arrays.
       Linear Algebra and its Applications, 432(1):70–88, 2010.
.. [2] Sergey Dolgov and Robert Scheichl. A hybrid alternating least squares–tt cross algorithm for parametricpdes.
arXiv preprint arXiv:1707.04562, 2017.
"""
# Check user input for errors
tensor_shape = tl.shape(input_tensor)
tensor_order = tl.ndim(input_tensor)
if isinstance(rank, int):
rank = [rank] * (tensor_order + 1)
elif tensor_order + 1 != len(rank):
message = f"Provided incorrect number of ranks. Should verify len(rank) == tl.ndim(tensor)+1, but {len(rank) = } while tl.ndim(tensor) + 1 = {tensor_order}"
raise (ValueError(message))
    # Make sure rank is a list, not a tuple
rank = list(rank)
# Initialize rank
if rank[0] != 1:
print(
f"Provided rank[0] = {rank[0]} but boundary conditions dictate "
+ "rank[0] == rank[-1] == 1: setting rank[0] to 1."
)
rank[0] = 1
    if rank[-1] != 1:
        print(
            f"Provided {rank[-1] = } but boundary conditions dictate rank[0] == rank[-1] == 1: setting rank[-1] to 1."
        )
        rank[-1] = 1
# list col_idx: column indices (right indices) for skeleton-decomposition: indicate which columns used in each core.
# list row_idx: row indices (left indices) for skeleton-decomposition: indicate which rows used in each core.
    # Initialize indices: random selection of column indices
rng = tl.check_random_state(random_state)
col_idx = [None] * tensor_order
for k_col_idx in range(tensor_order - 1):
col_idx[k_col_idx] = []
for i in range(rank[k_col_idx + 1]):
newidx = tuple(
[
rng.randint(tensor_shape[j])
for j in range(k_col_idx + 1, tensor_order)
]
)
while newidx in col_idx[k_col_idx]:
newidx = tuple(
[
rng.randint(tensor_shape[j])
for j in range(k_col_idx + 1, tensor_order)
]
)
col_idx[k_col_idx].append(newidx)
# Initialize the cores of tensor-train
factor_old = [
tl.zeros((rank[k], tensor_shape[k], rank[k + 1]), **tl.context(input_tensor))
for k in range(tensor_order)
]
factor_new = [
tl.tensor(
rng.random_sample((rank[k], tensor_shape[k], rank[k + 1])),
**tl.context(input_tensor)
)
for k in range(tensor_order)
]
iter = 0
error = tl.norm(tt_to_tensor(factor_old) - tt_to_tensor(factor_new), 2)
threshold = tol * tl.norm(tt_to_tensor(factor_new), 2)
for iter in range(n_iter_max):
if error < threshold:
break
factor_old = factor_new
factor_new = [None for i in range(tensor_order)]
######################################
# left-to-right step
left_to_right_fiberlist = []
# list row_idx: list of (tensor_order-1) of lists of left indices
row_idx = [[()]]
for k in range(tensor_order - 1):
(next_row_idx, fibers_list) = left_right_ttcross_step(
input_tensor, k, rank, row_idx, col_idx
)
# update row indices
left_to_right_fiberlist.extend(fibers_list)
row_idx.append(next_row_idx)
# end left-to-right step
###############################################
###############################################
# right-to-left step
right_to_left_fiberlist = []
# list col_idx: list (tensor_order-1) of lists of right indices
col_idx = [None] * tensor_order
col_idx[-1] = [()]
for k in range(tensor_order, 1, -1):
(next_col_idx, fibers_list, Q_skeleton) = right_left_ttcross_step(
input_tensor, k, rank, row_idx, col_idx
)
# update col indices
right_to_left_fiberlist.extend(fibers_list)
col_idx[k - 2] = next_col_idx
# Compute cores
try:
factor_new[k - 1] = tl.transpose(Q_skeleton)
factor_new[k - 1] = tl.reshape(
factor_new[k - 1], (rank[k - 1], tensor_shape[k - 1], rank[k])
)
except:
# The rank should not be larger than the input tensor's size
raise (
ValueError(
"The rank is too large compared to the size of the tensor. Try with small rank."
)
)
# Add the last core
idx = (slice(None, None, None),) + tuple(zip(*col_idx[0]))
core = input_tensor[idx]
core = tl.reshape(core, (tensor_shape[0], 1, rank[1]))
core = tl.transpose(core, (1, 0, 2))
factor_new[0] = core
# end right-to-left step
################################################
# check the error for while-loop
error = tl.norm(tt_to_tensor(factor_old) - tt_to_tensor(factor_new), 2)
threshold = tol * tl.norm(tt_to_tensor(factor_new), 2)
# check convergence
if iter >= n_iter_max:
raise ValueError("Maximum number of iterations reached.")
if tl.norm(tt_to_tensor(factor_old) - tt_to_tensor(factor_new), 2) > tol * tl.norm(
tt_to_tensor(factor_new), 2
):
raise ValueError("Low Rank Approximation algorithm did not converge.")
return factor_new
|
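A short usage sketch for the function above. The import paths follow recent tensorly releases and are assumptions; the check simply rebuilds the full tensor from the returned TT cores and reports the relative reconstruction error:

import numpy as np
import tensorly as tl
from tensorly import tt_to_tensor  # assumed top-level export
from tensorly.contrib.decomposition import tensor_train_cross  # assumed path

tensor = tl.tensor(np.arange(5 ** 3, dtype=float).reshape(5, 5, 5))
factors = tensor_train_cross(tensor, rank=[1, 3, 3, 1], tol=1e-6)

# Rebuild the full tensor from the cores and report the relative error.
approx = tt_to_tensor(factors)
rel_err = tl.norm(tensor - approx, 2) / tl.norm(tensor, 2)
print(f"relative reconstruction error: {rel_err:.2e}")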
55,516 |
def _assign_row_partitions_to_actors(
actors: List,
row_partitions,
data_for_aligning=None,
is_predict=False,
):
"""Assign row_partitions to actors.
In case of `is_predict` == False, `row_partitions` will be
assigned to actors according to their IPs. If distribution isn't
evenly, partitions will be moved from actor with excess of parts
to actor with lack of parts.
In case of `is_predict` == True, `row_partitions` will be
assigned evenly to actors in order.
Parameters
----------
actors : list
List of used actors.
row_partitions : list
Row partitions of data to assign.
data_for_aligning : dict, optional. Default is None
        Data whose order determines how `row_partitions`
        should be distributed. Used to align y with X.
    is_predict : boolean, optional. Default is False
        Whether the data is being split for prediction.
Returns
-------
dict
Dictionary of assigned to actors partitions
as {actor_rank: (partitions, order)}.
"""
num_actors = len(actors)
if not is_predict:
if data_for_aligning is None:
parts_ips_ref, parts_ref = zip(*row_partitions)
            # Group actors which are on the same ip
actor_ips = defaultdict(list)
for rank, ip in enumerate(
ray.get([actor.get_ip.remote() for actor in actors])
):
actor_ips[ip].append(rank)
# Get distribution of parts between nodes ({ip:[(part, position),..],..})
init_parts_distribution = defaultdict(list)
for idx, (ip, part_ref) in enumerate(
zip(ray.get(list(parts_ips_ref)), parts_ref)
):
init_parts_distribution[ip].append((part_ref, idx))
num_parts = len(parts_ref)
min_parts_per_actor = math.floor(num_parts / num_actors)
max_parts_per_actor = math.ceil(num_parts / num_actors)
num_actors_with_max_parts = num_parts % num_actors
row_partitions_by_actors = defaultdict(list)
# Fill actors without movement parts between ips
for actor_ip, ranks in actor_ips.items():
# Loop across actors which are placed on actor_ip
for rank in ranks:
num_parts_on_ip = len(init_parts_distribution[actor_ip])
                    # Check that we have something to distribute on this ip
if num_parts_on_ip == 0:
break
# Check that node with `actor_ip` has enough parts for minimal
# filling actor with `rank`
if num_parts_on_ip >= min_parts_per_actor:
# Check that node has enough parts for max filling
# actor with `rank`
if (
num_parts_on_ip >= max_parts_per_actor
and num_actors_with_max_parts > 0
):
pop_slice = slice(0, max_parts_per_actor)
num_actors_with_max_parts -= 1
else:
pop_slice = slice(0, min_parts_per_actor)
row_partitions_by_actors[rank].extend(
init_parts_distribution[actor_ip][pop_slice]
)
# Delete parts which we already assign
del init_parts_distribution[actor_ip][pop_slice]
else:
row_partitions_by_actors[rank].extend(
init_parts_distribution[actor_ip]
)
init_parts_distribution[actor_ip] = []
# Remove empty IPs
for ip in list(init_parts_distribution):
if len(init_parts_distribution[ip]) == 0:
init_parts_distribution.pop(ip)
# IP's aren't necessary now
init_parts_distribution = [
pair for pairs in init_parts_distribution.values() for pair in pairs
]
# Fill the actors with extra parts (movements data between nodes)
for rank in range(len(actors)):
num_parts_on_rank = len(row_partitions_by_actors[rank])
if num_parts_on_rank == max_parts_per_actor or (
num_parts_on_rank == min_parts_per_actor
and num_actors_with_max_parts == 0
):
continue
if num_actors_with_max_parts > 0:
pop_slice = slice(0, max_parts_per_actor - num_parts_on_rank)
num_actors_with_max_parts -= 1
else:
pop_slice = slice(0, min_parts_per_actor - num_parts_on_rank)
row_partitions_by_actors[rank].extend(
init_parts_distribution[pop_slice]
)
del init_parts_distribution[pop_slice]
if len(init_parts_distribution) != 0:
raise RuntimeError(
f"Not all partitions were ditributed between actors: {len(init_parts_distribution)} left."
)
row_parts_by_ranks = dict()
for rank, pairs_part_pos in dict(row_partitions_by_actors).items():
parts, order = zip(*pairs_part_pos)
row_parts_by_ranks[rank] = (list(parts), list(order))
else:
row_parts_by_ranks = {rank: ([], []) for rank in range(len(actors))}
for rank, (_, order_of_indexes) in data_for_aligning.items():
row_parts_by_ranks[rank][1].extend(order_of_indexes)
for row_idx in order_of_indexes:
row_parts_by_ranks[rank][0].append(row_partitions[row_idx])
else:
row_parts_by_ranks = defaultdict(list)
_, parts_ref = zip(*row_partitions)
num_parts = len(parts_ref)
min_parts_per_actor = math.floor(num_parts / num_actors)
max_parts_per_actor = math.ceil(num_parts / num_actors)
num_actors_with_max_parts = num_parts % num_actors
start_idx = 0
for rank, actor in enumerate(actors):
if num_actors_with_max_parts > 0:
num_actor_parts = max_parts_per_actor
num_actors_with_max_parts -= 1
else:
num_actor_parts = min_parts_per_actor
idx_slice = slice(start_idx, start_idx + num_actor_parts)
row_parts_by_ranks[rank].extend(parts_ref[idx_slice])
start_idx += num_actor_parts
return row_parts_by_ranks
|
def _assign_row_partitions_to_actors(
actors: List,
row_partitions,
data_for_aligning=None,
is_predict=False,
):
"""Assign row_partitions to actors.
In case of `is_predict` == False, `row_partitions` will be
assigned to actors according to their IPs. If distribution isn't
even, partitions will be moved from actor with excess partitions
to actor with lack of them.
In case of `is_predict` == True, `row_partitions` will be
assigned evenly to actors in order.
Parameters
----------
actors : list
List of used actors.
row_partitions : list
Row partitions of data to assign.
data_for_aligning : dict, optional. Default is None
        Data whose order determines how `row_partitions`
        should be distributed. Used to align y with X.
    is_predict : boolean, optional. Default is False
        Whether the data is being split for prediction.
Returns
-------
dict
Dictionary of assigned to actors partitions
as {actor_rank: (partitions, order)}.
"""
num_actors = len(actors)
if not is_predict:
if data_for_aligning is None:
parts_ips_ref, parts_ref = zip(*row_partitions)
            # Group actors which are on the same ip
actor_ips = defaultdict(list)
for rank, ip in enumerate(
ray.get([actor.get_ip.remote() for actor in actors])
):
actor_ips[ip].append(rank)
# Get distribution of parts between nodes ({ip:[(part, position),..],..})
init_parts_distribution = defaultdict(list)
for idx, (ip, part_ref) in enumerate(
zip(ray.get(list(parts_ips_ref)), parts_ref)
):
init_parts_distribution[ip].append((part_ref, idx))
num_parts = len(parts_ref)
min_parts_per_actor = math.floor(num_parts / num_actors)
max_parts_per_actor = math.ceil(num_parts / num_actors)
num_actors_with_max_parts = num_parts % num_actors
row_partitions_by_actors = defaultdict(list)
# Fill actors without movement parts between ips
for actor_ip, ranks in actor_ips.items():
# Loop across actors which are placed on actor_ip
for rank in ranks:
num_parts_on_ip = len(init_parts_distribution[actor_ip])
                    # Check that we have something to distribute on this ip
if num_parts_on_ip == 0:
break
# Check that node with `actor_ip` has enough parts for minimal
# filling actor with `rank`
if num_parts_on_ip >= min_parts_per_actor:
# Check that node has enough parts for max filling
# actor with `rank`
if (
num_parts_on_ip >= max_parts_per_actor
and num_actors_with_max_parts > 0
):
pop_slice = slice(0, max_parts_per_actor)
num_actors_with_max_parts -= 1
else:
pop_slice = slice(0, min_parts_per_actor)
row_partitions_by_actors[rank].extend(
init_parts_distribution[actor_ip][pop_slice]
)
# Delete parts which we already assign
del init_parts_distribution[actor_ip][pop_slice]
else:
row_partitions_by_actors[rank].extend(
init_parts_distribution[actor_ip]
)
init_parts_distribution[actor_ip] = []
# Remove empty IPs
for ip in list(init_parts_distribution):
if len(init_parts_distribution[ip]) == 0:
init_parts_distribution.pop(ip)
# IP's aren't necessary now
init_parts_distribution = [
pair for pairs in init_parts_distribution.values() for pair in pairs
]
# Fill the actors with extra parts (movements data between nodes)
for rank in range(len(actors)):
num_parts_on_rank = len(row_partitions_by_actors[rank])
if num_parts_on_rank == max_parts_per_actor or (
num_parts_on_rank == min_parts_per_actor
and num_actors_with_max_parts == 0
):
continue
if num_actors_with_max_parts > 0:
pop_slice = slice(0, max_parts_per_actor - num_parts_on_rank)
num_actors_with_max_parts -= 1
else:
pop_slice = slice(0, min_parts_per_actor - num_parts_on_rank)
row_partitions_by_actors[rank].extend(
init_parts_distribution[pop_slice]
)
del init_parts_distribution[pop_slice]
if len(init_parts_distribution) != 0:
raise RuntimeError(
f"Not all partitions were ditributed between actors: {len(init_parts_distribution)} left."
)
row_parts_by_ranks = dict()
for rank, pairs_part_pos in dict(row_partitions_by_actors).items():
parts, order = zip(*pairs_part_pos)
row_parts_by_ranks[rank] = (list(parts), list(order))
else:
row_parts_by_ranks = {rank: ([], []) for rank in range(len(actors))}
for rank, (_, order_of_indexes) in data_for_aligning.items():
row_parts_by_ranks[rank][1].extend(order_of_indexes)
for row_idx in order_of_indexes:
row_parts_by_ranks[rank][0].append(row_partitions[row_idx])
else:
row_parts_by_ranks = defaultdict(list)
_, parts_ref = zip(*row_partitions)
num_parts = len(parts_ref)
min_parts_per_actor = math.floor(num_parts / num_actors)
max_parts_per_actor = math.ceil(num_parts / num_actors)
num_actors_with_max_parts = num_parts % num_actors
start_idx = 0
for rank, actor in enumerate(actors):
if num_actors_with_max_parts > 0:
num_actor_parts = max_parts_per_actor
num_actors_with_max_parts -= 1
else:
num_actor_parts = min_parts_per_actor
idx_slice = slice(start_idx, start_idx + num_actor_parts)
row_parts_by_ranks[rank].extend(parts_ref[idx_slice])
start_idx += num_actor_parts
return row_parts_by_ranks
|
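Both branches above rely on the same floor/ceil split: with num_parts partitions and num_actors actors, the first num_parts % num_actors actors receive the ceiling and the rest receive the floor. A tiny standalone illustration (the helper name is made up):

import math

def split_counts(num_parts, num_actors):
    # How many partitions each actor rank receives under the scheme above.
    min_parts = math.floor(num_parts / num_actors)
    max_parts = math.ceil(num_parts / num_actors)
    actors_with_max = num_parts % num_actors
    return [max_parts if rank < actors_with_max else min_parts
            for rank in range(num_actors)]

print(split_counts(10, 4))  # [3, 3, 2, 2]
print(split_counts(8, 4))   # [2, 2, 2, 2]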
27,057 |
def render_template_string(
template_string: str,
context: Dict[str, Any],
autoescape: bool = True,
keep_trailing_newline: bool = False,
) -> str:
"""
Renders template based on it's name. Reads the template from <name> filein current dir.
:param template_string: string of the template to use
:param context: Jinja2 context
:param autoescape: Whether to autoescape HTML
:param keep_trailing_newline: Whether to keep the newline in rendered output
:return: rendered template
"""
import jinja2
template = jinja2.Environment(
loader=BaseLoader(),
undefined=jinja2.StrictUndefined,
autoescape=autoescape,
keep_trailing_newline=keep_trailing_newline,
).from_string(template_string)
content: str = template.render(context)
return content
|
def render_template_string(
template_string: str,
context: Dict[str, Any],
autoescape: bool = True,
keep_trailing_newline: bool = False,
) -> str:
"""
Renders template based on its name. Reads the template from <name> file in the current dir.
:param template_string: string of the template to use
:param context: Jinja2 context
:param autoescape: Whether to autoescape HTML
:param keep_trailing_newline: Whether to keep the newline in rendered output
:return: rendered template
"""
import jinja2
template = jinja2.Environment(
loader=BaseLoader(),
undefined=jinja2.StrictUndefined,
autoescape=autoescape,
keep_trailing_newline=keep_trailing_newline,
).from_string(template_string)
content: str = template.render(context)
return content
|
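A hedged usage sketch of the jinja2 pattern the helper wraps; `BaseLoader` here is jinja2's own class, which is presumably what the surrounding module imports, and `StrictUndefined` is what turns a missing context key into an error instead of silent empty output:

import jinja2

env = jinja2.Environment(
    loader=jinja2.BaseLoader(),
    undefined=jinja2.StrictUndefined,
    autoescape=True,
)
print(env.from_string("Hello {{ name }}!").render({"name": "Airflow"}))  # Hello Airflow!

# A missing variable is rejected rather than rendered as an empty string.
try:
    env.from_string("Hello {{ missing }}!").render({})
except jinja2.UndefinedError as err:
    print(f"missing variable rejected: {err}")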
45,921 |
def sample_is_valid_for_homography(points1: torch.Tensor, points2: torch.Tensor) -> torch.Tensor:
    '''Function, which implements oriented constraint check from :cite:`Marquez-Neila2015`
Analogous to https://github.com/opencv/opencv/blob/4.x/modules/calib3d/src/usac/degeneracy.cpp#L88
Args:
points1: A set of points in the first image with a tensor shape :math:`(B, 4, 2)`.
points2: A set of points in the second image with a tensor shape :math:`(B, 4, 2)`.
Returns:
mask: if the minimal sample is good for homography estimation:math:`(B, 3, 3)`.
'''
if points1.shape != points2.shape:
raise AssertionError(points1.shape)
if not (len(points1.shape) >= 1 and points1.shape[-1] == 2):
raise AssertionError(points1.shape)
if points1.shape[1] != 4:
raise AssertionError(points1.shape)
device = points1.device
idx_perm = torch.tensor([[0, 1, 2],
[0, 1, 3],
[0, 2, 3],
[1, 2, 3]], dtype=torch.long, device=device)
points_src_h = convert_points_to_homogeneous(points1)
points_dst_h = convert_points_to_homogeneous(points2)
src_perm = points_src_h[:, idx_perm]
dst_perm = points_dst_h[:, idx_perm]
left_sign = (torch.cross(src_perm[..., 1:2, :],
src_perm[..., 2:3, :]) @ src_perm[..., 0:1, :].permute(0, 1, 3, 2)).sign()
right_sign = (torch.cross(dst_perm[..., 1:2, :],
dst_perm[..., 2:3, :]) @ dst_perm[..., 0:1, :].permute(0, 1, 3, 2)).sign()
sample_is_valid = (left_sign == right_sign).view(-1, 4).min(dim=1)[0]
return sample_is_valid
|
def sample_is_valid_for_homography(points1: torch.Tensor, points2: torch.Tensor) -> torch.Tensor:
    '''Function, which implements oriented constraint check from :cite:`Marquez-Neila2015`
Analogous to https://github.com/opencv/opencv/blob/4.x/modules/calib3d/src/usac/degeneracy.cpp#L88
Args:
points1: A set of points in the first image with a tensor shape :math:`(B, 4, 2)`.
points2: A set of points in the second image with a tensor shape :math:`(B, 4, 2)`.
Returns:
Mask with the minimal sample is good for homography estimation:math:`(B, 3, 3)`.
'''
if points1.shape != points2.shape:
raise AssertionError(points1.shape)
if not (len(points1.shape) >= 1 and points1.shape[-1] == 2):
raise AssertionError(points1.shape)
if points1.shape[1] != 4:
raise AssertionError(points1.shape)
device = points1.device
idx_perm = torch.tensor([[0, 1, 2],
[0, 1, 3],
[0, 2, 3],
[1, 2, 3]], dtype=torch.long, device=device)
points_src_h = convert_points_to_homogeneous(points1)
points_dst_h = convert_points_to_homogeneous(points2)
src_perm = points_src_h[:, idx_perm]
dst_perm = points_dst_h[:, idx_perm]
left_sign = (torch.cross(src_perm[..., 1:2, :],
src_perm[..., 2:3, :]) @ src_perm[..., 0:1, :].permute(0, 1, 3, 2)).sign()
right_sign = (torch.cross(dst_perm[..., 1:2, :],
dst_perm[..., 2:3, :]) @ dst_perm[..., 0:1, :].permute(0, 1, 3, 2)).sign()
sample_is_valid = (left_sign == right_sign).view(-1, 4).min(dim=1)[0]
return sample_is_valid
|
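A brief usage sketch for the check above. The import location is an assumption (in kornia the function lives under kornia.geometry), and the point sets are chosen so that a pure translation preserves orientation while a mirror reverses it:

import torch
from kornia.geometry.homography import sample_is_valid_for_homography  # assumed location

pts1 = torch.tensor([[[0., 0.], [1., 0.], [1., 1.], [0., 1.]]])  # (1, 4, 2)
pts2_shift = pts1 + 0.5                        # translation keeps orientation
pts2_mirror = pts1 * torch.tensor([-1., 1.])   # x-flip reverses orientation

print(sample_is_valid_for_homography(pts1, pts2_shift))   # tensor([True])
print(sample_is_valid_for_homography(pts1, pts2_mirror))  # tensor([False])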
54,686 |
def _transform_eth_address(
ethereum: EthereumManager, given_address: str) -> ChecksumEvmAddress:
try:
address = to_checksum_address(given_address)
except ValueError:
# Validation will only let .eth names come here.
# So let's see if it resolves to anything
try:
resolved_address = ethereum.ens_lookup(given_address)
except (RemoteError, InputError) as e:
raise ValidationError(
f'Given ENS address {given_address} could not be resolved for Ethereum'
f' due to: {str(e)}',
field_name='address',
) from None
if resolved_address is None:
raise ValidationError(
f'Given ENS address {given_address} could not be resolved for Ethereum',
field_name='address',
) from None
address = to_checksum_address(resolved_address)
log.info(f'Resolved ENS {given_address} to {address}')
return address
|
def _transform_eth_address(
ethereum: EthereumManager,
given_address: str,
) -> ChecksumEvmAddress:
try:
address = to_checksum_address(given_address)
except ValueError:
# Validation will only let .eth names come here.
# So let's see if it resolves to anything
try:
resolved_address = ethereum.ens_lookup(given_address)
except (RemoteError, InputError) as e:
raise ValidationError(
f'Given ENS address {given_address} could not be resolved for Ethereum'
f' due to: {str(e)}',
field_name='address',
) from None
if resolved_address is None:
raise ValidationError(
f'Given ENS address {given_address} could not be resolved for Ethereum',
field_name='address',
) from None
address = to_checksum_address(resolved_address)
log.info(f'Resolved ENS {given_address} to {address}')
return address
|
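The checksum path above is plain EIP-55 checksumming; a quick illustration with eth_utils (the ENS fallback needs a live EthereumManager and is not exercised here):
from eth_utils import to_checksum_address

# Lower-case input comes back in its EIP-55 mixed-case form.
print(to_checksum_address('0xfb6916095ca1df60bb79ce92ce3ea74c37c5d359'))
# 0xfB6916095ca1df60bB79Ce92cE3Ea74c37c5d359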
21,832 |
def generate_worker_files(environ, config_path: str, data_dir: str):
"""Read the desired list of workers from environment variables and generate
shared homeserver, nginx and supervisord configs.
Args:
environ: _Environ[str]
config_path: Where to output the generated Synapse main worker config file.
data_dir: The location of the synapse data directory. Where log and
user-facing config files live.
"""
# Note that yaml cares about indentation, so care should be taken to insert lines
# into files at the correct indentation below.
# shared_config is the contents of a Synapse config file that will be shared amongst
# the main Synapse process as well as all workers.
# It is intended mainly for disabling functionality when certain workers are spun up,
# and adding a replication listener.
# First read the original config file and extract the listeners block. Then we'll add
# another listener for replication. Later we'll write out the result.
listeners = [
{
"port": 9093,
"bind_address": "127.0.0.1",
"type": "http",
"resources": [{"names": ["replication"]}],
}
]
with open(config_path) as file_stream:
original_config = yaml.safe_load(file_stream)
original_listeners = original_config.get("listeners")
if original_listeners:
listeners += original_listeners
# The shared homeserver config. The contents of which will be inserted into the
# base shared worker jinja2 template.
#
    # This config file will be passed to all workers, including Synapse's main process.
shared_config: dict = {"listeners": listeners}
# The supervisord config. The contents of which will be inserted into the
# base supervisord jinja2 template.
#
# Supervisord will be in charge of running everything, from redis to nginx to Synapse
# and all of its worker processes. Load the config template, which defines a few
# services that are necessary to run.
supervisord_config = ""
# Upstreams for load-balancing purposes. This dict takes the form of a worker type to the
# ports of each worker. For example:
# {
    #   worker_type: {1234, 1235, ...}
# }
# and will be used to construct 'upstream' nginx directives.
nginx_upstreams: Dict[str, Set[int]] = {}
# A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what will be
# placed after the proxy_pass directive. The main benefit to representing this data as a
# dict over a str is that we can easily deduplicate endpoints across multiple instances
# of the same worker.
#
# An nginx site config that will be amended to depending on the workers that are
# spun up. To be placed in /etc/nginx/conf.d.
nginx_locations = {}
# Read the desired worker configuration from the environment
worker_types = environ.get("SYNAPSE_WORKER_TYPES")
if worker_types is None:
# No workers, just the main process
worker_types = []
else:
# Split type names by comma
worker_types = worker_types.split(",")
# Create the worker configuration directory if it doesn't already exist
os.makedirs("/conf/workers", exist_ok=True)
# Start worker ports from this arbitrary port
worker_port = 18009
# A counter of worker_type -> int. Used for determining the name for a given
# worker type when generating its config file, as each worker's name is just
# worker_type + instance #
worker_type_counter: Dict[str, int] = {}
# For each worker type specified by the user, create config values
for worker_type in worker_types:
worker_type = worker_type.strip()
worker_config = WORKERS_CONFIG.get(worker_type)
if worker_config:
worker_config = worker_config.copy()
else:
log(worker_type + " is an unknown worker type! It will be ignored")
continue
new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1
worker_type_counter[worker_type] = new_worker_count
# Name workers by their type concatenated with an incrementing number
# e.g. federation_reader1
worker_name = worker_type + str(new_worker_count)
worker_config.update(
{"name": worker_name, "port": str(worker_port), "config_path": config_path}
)
# Update the shared config with any worker-type specific options
shared_config.update(cast(dict, worker_config["shared_extra_config"]))
# Check if more than one instance of this worker type has been specified
worker_type_total_count = worker_types.count(worker_type)
if worker_type_total_count > 1:
# Update the shared config with sharding-related options if necessary
add_sharding_to_shared_config(
shared_config, worker_type, worker_name, worker_port
)
# Enable the worker in supervisord
supervisord_config += SUPERVISORD_PROCESS_CONFIG_BLOCK.format_map(worker_config)
# Add nginx location blocks for this worker's endpoints (if any are defined)
for pattern in worker_config["endpoint_patterns"]:
# Determine whether we need to load-balance this worker
if worker_type_total_count > 1:
# Create or add to a load-balanced upstream for this worker
nginx_upstreams.setdefault(worker_type, set()).add(worker_port)
# Upstreams are named after the worker_type
upstream = "http://" + worker_type
else:
upstream = "http://localhost:%d" % (worker_port,)
# Note that this endpoint should proxy to this upstream
nginx_locations[pattern] = upstream
# Write out the worker's logging config file
# Check whether we should write worker logs to disk, in addition to the console
extra_log_template_args = {}
if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format(
dir=data_dir, name=worker_name
)
# Render and write the file
log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name)
convert(
"/conf/log.config",
log_config_filepath,
worker_name=worker_name,
**extra_log_template_args,
)
# Then a worker config file
convert(
"/conf/worker.yaml.j2",
"/conf/workers/{name}.yaml".format(name=worker_name),
**worker_config,
worker_log_config_filepath=log_config_filepath,
)
worker_port += 1
# Build the nginx location config blocks
nginx_location_config = ""
for endpoint, upstream in nginx_locations.items():
nginx_location_config += NGINX_LOCATION_CONFIG_BLOCK.format(
endpoint=endpoint,
upstream=upstream,
)
# Determine the load-balancing upstreams to configure
nginx_upstream_config = ""
# At the same time, prepare a list of internal endpoints to healthcheck
# starting with the main process which exists even if no workers do.
healthcheck_urls = ["http://localhost:8080/health"]
for upstream_worker_type, upstream_worker_ports in nginx_upstreams.items():
body = ""
for port in upstream_worker_ports:
body += " server localhost:%d;\n" % (port,)
healthcheck_urls.append("http://localhost:%d/health" % (port,))
# Add to the list of configured upstreams
nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
upstream_worker_type=upstream_worker_type,
body=body,
)
# Finally, we'll write out the config files.
# Shared homeserver config
convert(
"/conf/shared.yaml.j2",
"/conf/workers/shared.yaml",
shared_worker_config=yaml.dump(shared_config),
)
# Nginx config
convert(
"/conf/nginx.conf.j2",
"/etc/nginx/conf.d/matrix-synapse.conf",
worker_locations=nginx_location_config,
upstream_directives=nginx_upstream_config,
)
# Supervisord config
convert(
"/conf/supervisord.conf.j2",
"/etc/supervisor/conf.d/supervisord.conf",
main_config_path=config_path,
worker_config=supervisord_config,
)
# healthcheck config
convert(
"/conf/healthcheck.sh.j2",
"/healthcheck.sh",
healthcheck_urls=healthcheck_urls,
)
# Ensure the logging directory exists
log_dir = data_dir + "/logs"
if not os.path.exists(log_dir):
os.mkdir(log_dir)
|
def generate_worker_files(environ, config_path: str, data_dir: str):
"""Read the desired list of workers from environment variables and generate
shared homeserver, nginx and supervisord configs.
Args:
environ: _Environ[str]
config_path: Where to output the generated Synapse main worker config file.
data_dir: The location of the synapse data directory. Where log and
user-facing config files live.
"""
# Note that yaml cares about indentation, so care should be taken to insert lines
# into files at the correct indentation below.
# shared_config is the contents of a Synapse config file that will be shared amongst
# the main Synapse process as well as all workers.
# It is intended mainly for disabling functionality when certain workers are spun up,
# and adding a replication listener.
# First read the original config file and extract the listeners block. Then we'll add
# another listener for replication. Later we'll write out the result.
listeners = [
{
"port": 9093,
"bind_address": "127.0.0.1",
"type": "http",
"resources": [{"names": ["replication"]}],
}
]
with open(config_path) as file_stream:
original_config = yaml.safe_load(file_stream)
original_listeners = original_config.get("listeners")
if original_listeners:
listeners += original_listeners
# The shared homeserver config. The contents of which will be inserted into the
# base shared worker jinja2 template.
#
    # This config file will be passed to all workers, including Synapse's main process.
shared_config: Dict[str, Any] = {"listeners": listeners}
# The supervisord config. The contents of which will be inserted into the
# base supervisord jinja2 template.
#
# Supervisord will be in charge of running everything, from redis to nginx to Synapse
# and all of its worker processes. Load the config template, which defines a few
# services that are necessary to run.
supervisord_config = ""
# Upstreams for load-balancing purposes. This dict takes the form of a worker type to the
# ports of each worker. For example:
# {
    #   worker_type: {1234, 1235, ...}
# }
# and will be used to construct 'upstream' nginx directives.
nginx_upstreams: Dict[str, Set[int]] = {}
# A map of: {"endpoint": "upstream"}, where "upstream" is a str representing what will be
# placed after the proxy_pass directive. The main benefit to representing this data as a
# dict over a str is that we can easily deduplicate endpoints across multiple instances
# of the same worker.
#
# An nginx site config that will be amended to depending on the workers that are
# spun up. To be placed in /etc/nginx/conf.d.
nginx_locations = {}
# Read the desired worker configuration from the environment
worker_types = environ.get("SYNAPSE_WORKER_TYPES")
if worker_types is None:
# No workers, just the main process
worker_types = []
else:
# Split type names by comma
worker_types = worker_types.split(",")
# Create the worker configuration directory if it doesn't already exist
os.makedirs("/conf/workers", exist_ok=True)
# Start worker ports from this arbitrary port
worker_port = 18009
# A counter of worker_type -> int. Used for determining the name for a given
# worker type when generating its config file, as each worker's name is just
# worker_type + instance #
worker_type_counter: Dict[str, int] = {}
# For each worker type specified by the user, create config values
for worker_type in worker_types:
worker_type = worker_type.strip()
worker_config = WORKERS_CONFIG.get(worker_type)
if worker_config:
worker_config = worker_config.copy()
else:
log(worker_type + " is an unknown worker type! It will be ignored")
continue
new_worker_count = worker_type_counter.setdefault(worker_type, 0) + 1
worker_type_counter[worker_type] = new_worker_count
# Name workers by their type concatenated with an incrementing number
# e.g. federation_reader1
worker_name = worker_type + str(new_worker_count)
worker_config.update(
{"name": worker_name, "port": str(worker_port), "config_path": config_path}
)
# Update the shared config with any worker-type specific options
shared_config.update(cast(dict, worker_config["shared_extra_config"]))
# Check if more than one instance of this worker type has been specified
worker_type_total_count = worker_types.count(worker_type)
if worker_type_total_count > 1:
# Update the shared config with sharding-related options if necessary
add_sharding_to_shared_config(
shared_config, worker_type, worker_name, worker_port
)
# Enable the worker in supervisord
supervisord_config += SUPERVISORD_PROCESS_CONFIG_BLOCK.format_map(worker_config)
# Add nginx location blocks for this worker's endpoints (if any are defined)
for pattern in worker_config["endpoint_patterns"]:
# Determine whether we need to load-balance this worker
if worker_type_total_count > 1:
# Create or add to a load-balanced upstream for this worker
nginx_upstreams.setdefault(worker_type, set()).add(worker_port)
# Upstreams are named after the worker_type
upstream = "http://" + worker_type
else:
upstream = "http://localhost:%d" % (worker_port,)
# Note that this endpoint should proxy to this upstream
nginx_locations[pattern] = upstream
# Write out the worker's logging config file
# Check whether we should write worker logs to disk, in addition to the console
extra_log_template_args = {}
if environ.get("SYNAPSE_WORKERS_WRITE_LOGS_TO_DISK"):
extra_log_template_args["LOG_FILE_PATH"] = "{dir}/logs/{name}.log".format(
dir=data_dir, name=worker_name
)
# Render and write the file
log_config_filepath = "/conf/workers/{name}.log.config".format(name=worker_name)
convert(
"/conf/log.config",
log_config_filepath,
worker_name=worker_name,
**extra_log_template_args,
)
# Then a worker config file
convert(
"/conf/worker.yaml.j2",
"/conf/workers/{name}.yaml".format(name=worker_name),
**worker_config,
worker_log_config_filepath=log_config_filepath,
)
worker_port += 1
# Build the nginx location config blocks
nginx_location_config = ""
for endpoint, upstream in nginx_locations.items():
nginx_location_config += NGINX_LOCATION_CONFIG_BLOCK.format(
endpoint=endpoint,
upstream=upstream,
)
# Determine the load-balancing upstreams to configure
nginx_upstream_config = ""
# At the same time, prepare a list of internal endpoints to healthcheck
# starting with the main process which exists even if no workers do.
healthcheck_urls = ["http://localhost:8080/health"]
for upstream_worker_type, upstream_worker_ports in nginx_upstreams.items():
body = ""
for port in upstream_worker_ports:
body += " server localhost:%d;\n" % (port,)
healthcheck_urls.append("http://localhost:%d/health" % (port,))
# Add to the list of configured upstreams
nginx_upstream_config += NGINX_UPSTREAM_CONFIG_BLOCK.format(
upstream_worker_type=upstream_worker_type,
body=body,
)
# Finally, we'll write out the config files.
# Shared homeserver config
convert(
"/conf/shared.yaml.j2",
"/conf/workers/shared.yaml",
shared_worker_config=yaml.dump(shared_config),
)
# Nginx config
convert(
"/conf/nginx.conf.j2",
"/etc/nginx/conf.d/matrix-synapse.conf",
worker_locations=nginx_location_config,
upstream_directives=nginx_upstream_config,
)
# Supervisord config
convert(
"/conf/supervisord.conf.j2",
"/etc/supervisor/conf.d/supervisord.conf",
main_config_path=config_path,
worker_config=supervisord_config,
)
# healthcheck config
convert(
"/conf/healthcheck.sh.j2",
"/healthcheck.sh",
healthcheck_urls=healthcheck_urls,
)
# Ensure the logging directory exists
log_dir = data_dir + "/logs"
if not os.path.exists(log_dir):
os.mkdir(log_dir)
|
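A small sketch of how worker names fall out of SYNAPSE_WORKER_TYPES in the loop above (the worker types are illustrative); each name is the type plus a per-type counter:
worker_types = "federation_reader,federation_reader,event_persister".split(",")
counter = {}
for worker_type in worker_types:
    worker_type = worker_type.strip()
    counter[worker_type] = counter.setdefault(worker_type, 0) + 1
    print(worker_type + str(counter[worker_type]))
# federation_reader1
# federation_reader2
# event_persister1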
8,667 |
def enumerate_configs(config_dir, extension='.cfg'):
"""List configuration file from ``config_dir`` with ``extension``
:param str config_dir: path to the configuration directory
:param str extension: configuration file's extension (default to ``.cfg``)
:return: a list of configuration filename found in ``config_dir`` with
the correct ``extension``
:rtype: list
Example::
>>> from sopel import cli, config
>>> os.listdir(config.DEFAULT_HOMEDIR)
['config.cfg', 'extra.ini', 'module.cfg', 'README']
>>> cli.enumerate_configs(config.DEFAULT_HOMEDIR)
['config.cfg', 'module.cfg']
>>> cli.enumerate_configs(config.DEFAULT_HOMEDIR, '.ini')
['extra.ini']
"""
if not os.path.isdir(config_dir):
return
for item in os.listdir(config_dir):
if item.endswith(extension):
yield item
|
def enumerate_configs(config_dir, extension='.cfg'):
"""List configuration file from ``config_dir`` with ``extension``
:param str config_dir: path to the configuration directory
:param str extension: configuration file's extension (default to ``.cfg``)
:return: a list of configuration filenames found in ``config_dir`` with
the correct ``extension``
:rtype: list
Example::
>>> from sopel import cli, config
>>> os.listdir(config.DEFAULT_HOMEDIR)
['config.cfg', 'extra.ini', 'module.cfg', 'README']
>>> cli.enumerate_configs(config.DEFAULT_HOMEDIR)
['config.cfg', 'module.cfg']
>>> cli.enumerate_configs(config.DEFAULT_HOMEDIR, '.ini')
['extra.ini']
"""
if not os.path.isdir(config_dir):
return
for item in os.listdir(config_dir):
if item.endswith(extension):
yield item
|
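The function is a generator despite the list-flavoured docstring; a self-contained sketch against a throwaway directory (tempfile keeps it side-effect free):
import os
import tempfile

config_dir = tempfile.mkdtemp()
for name in ('config.cfg', 'extra.ini', 'module.cfg', 'README'):
    open(os.path.join(config_dir, name), 'w').close()
print(sorted(enumerate_configs(config_dir)))          # ['config.cfg', 'module.cfg']
print(sorted(enumerate_configs(config_dir, '.ini')))  # ['extra.ini']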
15,444 |
def to_futurenow_level(level):
"""Convert the given Home Assistant light level (0-255) to FutureNow (0-100)."""
return int((level * 100) / 255 + 0.5)
|
def to_futurenow_level(level):
"""Convert the given Home Assistant light level (0-255) to FutureNow (0-100)."""
return round((level * 100) / 255)
|
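A sanity sketch for the change above: over the valid 0-255 input range the two roundings agree, because (level * 100) / 255 never lands exactly on a .5 boundary:
for level in range(256):
    assert int((level * 100) / 255 + 0.5) == round((level * 100) / 255)
print("int(x + 0.5) and round(x) agree for all levels 0-255")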
31,581 |
def get_useragents_command(client, args):
ip_address = args.get('ipaddress')
page = int(args.get('page', 1))
params = {
"page": page
}
res = client.get_useragents(ip_address=ip_address, params=params)
records = res.get('records', [])
record_count = res.get('record_count', 0)
table_data = [{
"User Agent": x.get('user_agent'),
"OS Name": x.get('os', {}).get('name'),
"OS Platform": x.get('os', {}).get('platform'),
"OS Version": x.get('os', {}).get('version'),
"Browser Family": x.get('browser_family'),
"Last Seen": x.get('lastseen'),
"Device Type": x.get('device', {}).get('type'),
"Device Brand": x.get('device', {}).get('brand'),
"Device Model": x.get('device', {}).get('model'),
"Client Type": x.get('client', {}).get('type'),
"Client Name": x.get('client', {}).get('name'),
"Client Version": x.get('client', {}).get('version'),
"Client Engine": x.get('client', {}).get('engine'),
"Client Engine Verison": x.get('client', {}).get('engine_version'),
} for x in records]
md = tableToMarkdown(f"User Agents for {ip_address}:", table_data, [
'User Agent',
'OS Name',
'OS Platform',
'OS Version',
'Browser Family',
'Last Seen',
'Device Type',
'Device Brand',
'Device Model',
'Client Type',
'Client Name',
'Client Version',
'Client Engine',
        'Client Engine Version'
])
output_data = {
"ip": ip_address,
"useragents": records,
"useragent_records_count": record_count
}
command_results = CommandResults(
outputs_prefix=f"SecurityTrails.IP",
outputs_key_field="ip",
outputs=output_data,
readable_output=md
)
return_results(command_results)
|
def get_useragents_command(client, args):
ip_address = args.get('ipaddress')
page = int(args.get('page', 1))
params = {
"page": page
}
res = client.get_useragents(ip_address=ip_address, params=params)
records = res.get('records', [])
record_count = res.get('record_count', 0)
table_data = [{
"User Agent": x.get('user_agent'),
"OS Name": x.get('os', {}).get('name'),
"OS Platform": x.get('os', {}).get('platform'),
"OS Version": x.get('os', {}).get('version'),
"Browser Family": x.get('browser_family'),
"Last Seen": x.get('lastseen'),
"Device Type": x.get('device', {}).get('type'),
"Device Brand": x.get('device', {}).get('brand'),
"Device Model": x.get('device', {}).get('model'),
"Client Type": x.get('client', {}).get('type'),
"Client Name": x.get('client', {}).get('name'),
"Client Version": x.get('client', {}).get('version'),
"Client Engine": x.get('client', {}).get('engine'),
"Client Engine Verison": x.get('client', {}).get('engine_version'),
} for x in records]
md = tableToMarkdown(f"User Agents for {ip_address}:", table_data, [
'User Agent',
'OS Name',
'OS Platform',
'OS Version',
'Browser Family',
'Last Seen',
'Device Type',
'Device Brand',
'Device Model',
'Client Type',
'Client Name',
'Client Version',
'Client Engine',
        'Client Engine Version'
])
output_data = {
"ip": ip_address,
"useragents": records,
"useragent_records_count": record_count
}
command_results = CommandResults(
outputs_prefix="SecurityTrails.IP",
outputs_key_field="ip",
outputs=output_data,
readable_output=md
)
return_results(command_results)
|
54,160 |
def demo():
""" _test_mol
This demo tests the MOL learner on a file stream, which reads from
the music.csv file.
The test computes the performance of the MOL learner as well as
the time to create the structure and classify all the samples in
the file.
"""
# Setup logging
logging.basicConfig(format='%(message)s', level=logging.INFO)
# Setup the file stream
stream = FileStream("https://raw.githubusercontent.com/scikit-multiflow/streaming-datasets/"
"master/", 0, 6)
# Setup the classifier, by default it uses Logistic Regression
# classifier = MultiOutputLearner()
# classifier = MultiOutputLearner(base_estimator=SGDClassifier(n_iter=100))
classifier = MultiOutputLearner(base_estimator=Perceptron())
# Setup the pipeline
pipe = Pipeline([('classifier', classifier)])
pretrain_size = 150
logging.info('Pre training on %s samples', str(pretrain_size))
logging.info('Total %s samples', str(stream.n_samples))
X, y = stream.next_sample(pretrain_size)
# classifier.fit(X, y)
classes = stream.target_values
classes_flat = list(set([item for sublist in classes for item in sublist]))
pipe.partial_fit(X, y, classes=classes_flat)
count = 0
true_labels = []
predicts = []
init_time = timer()
logging.info('Evaluating...')
while stream.has_more_samples():
X, y = stream.next_sample()
# p = classifier.predict(X)
p = pipe.predict(X)
predicts.extend(p)
true_labels.extend(y)
count += 1
perf = hamming_score(true_labels, predicts)
logging.info('Evaluation time: %s s', str(timer() - init_time))
logging.info('Total samples analyzed: %s', str(count))
logging.info('The classifier\'s static Hamming score : %0.3f' % perf)
|
def demo():
""" _test_mol
This demo tests the MOL learner on a file stream, which reads from
the music.csv file.
The test computes the performance of the MOL learner as well as
the time to create the structure and classify all the samples in
the file.
"""
# Setup logging
logging.basicConfig(format='%(message)s', level=logging.INFO)
# Setup the file stream
stream = FileStream("https://raw.githubusercontent.com/scikit-multiflow/streaming-datasets/"
"master/music.csv", 0, 6)
# Setup the classifier, by default it uses Logistic Regression
# classifier = MultiOutputLearner()
# classifier = MultiOutputLearner(base_estimator=SGDClassifier(n_iter=100))
classifier = MultiOutputLearner(base_estimator=Perceptron())
# Setup the pipeline
pipe = Pipeline([('classifier', classifier)])
pretrain_size = 150
logging.info('Pre training on %s samples', str(pretrain_size))
logging.info('Total %s samples', str(stream.n_samples))
X, y = stream.next_sample(pretrain_size)
# classifier.fit(X, y)
classes = stream.target_values
classes_flat = list(set([item for sublist in classes for item in sublist]))
pipe.partial_fit(X, y, classes=classes_flat)
count = 0
true_labels = []
predicts = []
init_time = timer()
logging.info('Evaluating...')
while stream.has_more_samples():
X, y = stream.next_sample()
# p = classifier.predict(X)
p = pipe.predict(X)
predicts.extend(p)
true_labels.extend(y)
count += 1
perf = hamming_score(true_labels, predicts)
logging.info('Evaluation time: %s s', str(timer() - init_time))
logging.info('Total samples analyzed: %s', str(count))
logging.info('The classifier\'s static Hamming score : %0.3f' % perf)
|
4,884 |
def _deprecate_method_override(method, obj, **kwargs):
"""
Return ``obj.method`` with a deprecation if it was overridden, else None.
Parameters
----------
method
An unbound method, i.e. an expression of the form
``Class.method_name``. Remember that within the body of a method, one
can always use ``__class__`` to refer to the class that is currently
being defined.
obj
An object of the class where *method* is defined.
kwargs
Additional parameters passed to `warn_deprecated` to generate the
deprecation warning; must at least include the "since" key.
"""
name = method.__name__
bound_method = getattr(obj, name)
if bound_method != method.__get__(obj):
warn_deprecated(**{"name": name, "obj_type": "method", **kwargs})
return bound_method
return None
|
def _deprecate_method_override(method, obj, **kwargs):
"""
Return ``obj.method`` with a deprecation if it was overridden, else None.
Parameters
----------
method
An unbound method, i.e. an expression of the form
``Class.method_name``. Remember that within the body of a method, one
can always use ``__class__`` to refer to the class that is currently
being defined.
obj
An object of the class where *method* is defined.
**kwargs
Additional parameters passed to `warn_deprecated` to generate the
deprecation warning; must at least include the "since" key.
"""
name = method.__name__
bound_method = getattr(obj, name)
if bound_method != method.__get__(obj):
warn_deprecated(**{"name": name, "obj_type": "method", **kwargs})
return bound_method
return None
|
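The helper hinges on comparing bound methods; a minimal sketch of that mechanism, without the warn_deprecated machinery:
class Base:
    def draw(self): ...

class Child(Base):
    def draw(self): ...

b, c = Base(), Child()
print(b.draw != Base.draw.__get__(b))  # False -> draw was not overridden
print(c.draw != Base.draw.__get__(c))  # True  -> Child overrides draw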
34,518 |
def test_binary_featurizer_correctly_encodes_state():
"""
    Check that all the attributes are correctly featurized when they should be and not featurized when they shouldn't be.
"""
f = BinarySingleStateFeaturizer()
f._default_feature_states[INTENT] = {"a": 0, "b": 1}
f._default_feature_states[ACTION_NAME] = {"c": 0, "d": 1, "action_listen": 2}
f._default_feature_states[SLOTS] = {"e_0": 0, "f_0": 1, "g_0": 2}
f._default_feature_states[ACTIVE_LOOP] = {"h": 0, "i": 1, "j": 2, "k": 3}
encoded = f.encode_state(
{
"user": {"intent": "a"},
"prev_action": {"action_name": "d"},
"active_loop": {"name": "i"},
"slots": {"g": (1.0,)},
},
interpreter=None,
)
# user input is ignored as prev action is not action_listen;
assert list(encoded.keys()) == [ACTION_NAME, ACTIVE_LOOP, SLOTS]
assert (
encoded[ACTION_NAME][0].features != scipy.sparse.coo_matrix([[0, 1, 0]])
).nnz == 0
assert (
encoded[ACTIVE_LOOP][0].features != scipy.sparse.coo_matrix([[0, 1, 0, 0]])
).nnz == 0
assert (encoded[SLOTS][0].features != scipy.sparse.coo_matrix([[0, 0, 1]])).nnz == 0
encoded = f.encode_state(
{
"user": {"intent": "a"},
"prev_action": {"action_name": "action_listen"},
"active_loop": {"name": "k"},
"slots": {"e": (1.0,)},
},
interpreter=None,
)
assert list(encoded.keys()) == [INTENT, ACTION_NAME, ACTIVE_LOOP, SLOTS]
assert (encoded[INTENT][0].features != scipy.sparse.coo_matrix([[1, 0]])).nnz == 0
assert (
encoded[ACTION_NAME][0].features != scipy.sparse.coo_matrix([[0, 0, 1]])
).nnz == 0
assert (
encoded[ACTIVE_LOOP][0].features != scipy.sparse.coo_matrix([[0, 0, 0, 1]])
).nnz == 0
assert (encoded[SLOTS][0].features != scipy.sparse.coo_matrix([[1, 0, 0]])).nnz == 0
|
def test_binary_featurizer_correctly_encodes_state():
"""
    Check that all the attributes are correctly featurized when they should be and not featurized when they shouldn't be.
"""
f = SingleStateFeaturizer()
f._default_feature_states[INTENT] = {"a": 0, "b": 1}
f._default_feature_states[ACTION_NAME] = {"c": 0, "d": 1, "action_listen": 2}
f._default_feature_states[SLOTS] = {"e_0": 0, "f_0": 1, "g_0": 2}
f._default_feature_states[ACTIVE_LOOP] = {"h": 0, "i": 1, "j": 2, "k": 3}
encoded = f.encode_state(
{
"user": {"intent": "a"},
"prev_action": {"action_name": "d"},
"active_loop": {"name": "i"},
"slots": {"g": (1.0,)},
},
interpreter=None,
)
# user input is ignored as prev action is not action_listen;
assert list(encoded.keys()) == [ACTION_NAME, ACTIVE_LOOP, SLOTS]
assert (
encoded[ACTION_NAME][0].features != scipy.sparse.coo_matrix([[0, 1, 0]])
).nnz == 0
assert (
encoded[ACTIVE_LOOP][0].features != scipy.sparse.coo_matrix([[0, 1, 0, 0]])
).nnz == 0
assert (encoded[SLOTS][0].features != scipy.sparse.coo_matrix([[0, 0, 1]])).nnz == 0
encoded = f.encode_state(
{
"user": {"intent": "a"},
"prev_action": {"action_name": "action_listen"},
"active_loop": {"name": "k"},
"slots": {"e": (1.0,)},
},
interpreter=None,
)
assert list(encoded.keys()) == [INTENT, ACTION_NAME, ACTIVE_LOOP, SLOTS]
assert (encoded[INTENT][0].features != scipy.sparse.coo_matrix([[1, 0]])).nnz == 0
assert (
encoded[ACTION_NAME][0].features != scipy.sparse.coo_matrix([[0, 0, 1]])
).nnz == 0
assert (
encoded[ACTIVE_LOOP][0].features != scipy.sparse.coo_matrix([[0, 0, 0, 1]])
).nnz == 0
assert (encoded[SLOTS][0].features != scipy.sparse.coo_matrix([[1, 0, 0]])).nnz == 0
|
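The assertions above lean on a common scipy.sparse idiom: two sparse matrices are equal exactly when their element-wise != result has no stored entries:
import scipy.sparse

a = scipy.sparse.coo_matrix([[0, 1, 0]]).tocsr()
b = scipy.sparse.coo_matrix([[0, 1, 0]])
print((a != b).nnz == 0)  # True -- identical matrices leave nothing stored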
7,868 |
def test_get_atoms(res):
"""Tests evaluating single nuclide concentration."""
t, n = res.get_atoms("1", "Xe135")
t_ref = np.array([0.0, 1296000.0, 2592000.0, 3888000.0])
n_ref = np.array(
[6.67473282e+08, 3.76986925e+14, 3.68587383e+14, 3.91338675e+14])
np.testing.assert_allclose(t, t_ref)
np.testing.assert_allclose(n, n_ref)
# Check alternate units
volume = res[0].volume["1"]
t_days, n_cm3 = res.get_atoms("1", "Xe135", nuc_units="atoms/cm^3", time_units="d")
assert t_days == pytest.approx(t_ref / (60 * 60 * 24))
assert n_cm3 == pytest.approx(n_ref / volume)
_t, n_bcm = res.get_atoms("1", "Xe135", nuc_units="atoms/b/cm")
assert n_bcm == pytest.approx(n_cm3 * 1e-24)
|
def test_get_atoms(res):
"""Tests evaluating single nuclide concentration."""
t, n = res.get_atoms("1", "Xe135")
t_ref = np.array([0.0, 1296000.0, 2592000.0, 3888000.0])
n_ref = np.array(
[6.67473282e+08, 3.76986925e+14, 3.68587383e+14, 3.91338675e+14])
np.testing.assert_allclose(t, t_ref)
np.testing.assert_allclose(n, n_ref)
# Check alternate units
volume = res[0].volume["1"]
t_days, n_cm3 = res.get_atoms("1", "Xe135", nuc_units="atoms/cm^3", time_units="d")
assert t_days == pytest.approx(t_ref / (60 * 60 * 24))
assert n_cm3 == pytest.approx(n_ref / volume)
_t, n_bcm = res.get_atoms("1", "Xe135", nuc_units="atom/b-cm")
assert n_bcm == pytest.approx(n_cm3 * 1e-24)
|
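The worked unit conversion behind the test's approx checks, with a made-up volume (1 barn = 1e-24 cm^2, so atom/b-cm is just atoms/cm^3 scaled by 1e-24):
atoms = 3.76986925e14
volume_cm3 = 100.0                      # hypothetical cell volume
atoms_per_cm3 = atoms / volume_cm3
atoms_per_b_cm = atoms_per_cm3 * 1e-24
print(atoms_per_cm3, atoms_per_b_cm)    # 3769869250000.0 3.76986925e-12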
58,009 |
def cyble_fetch_iocs(client, method, args):
"""
Call the client module to fetch IOCs using the input parameters
    :param client: instance of client to communicate with server
:param method: Requests method to be used
:param args: parameters for fetching indicators
:return: indicators from server
"""
params = {
'token': args['token'],
'from': int(args['from']),
'limit': int(args['limit']),
'start_date': args['start_date'],
'end_date': args['end_date'],
'type': args['type'] if 'type' in args.keys() else "",
'keyword': args['keyword'] if 'keyword' in args.keys() else ""
}
ioc_url = r'/api/iocs'
result = client.get_iocs(method, ioc_url, params)
if result is not None:
return result
else:
return 'Failed to Fetch IOCs !!'
|
def cyble_fetch_iocs(client, method, args):
"""
Call the client module to fetch IOCs using the input parameters
    :param client: instance of client to communicate with server
:param method: Requests method to be used
:param args: parameters for fetching indicators
:return: indicators from server
"""
params = {
'token': args.get('token', ''),
'from': arg_to_number(args.get('from')),
'limit': arg_to_number(args.get('limit', '50')),
'start_date': args.get('start_date'),
'end_date': args.get('end_date'),
'type': args.get('type') or '',
        'keyword': args.get('keyword') or '',
}
ioc_url = r'/api/iocs'
result = client.get_iocs(method, ioc_url, params)
if result is not None:
return result
else:
return 'Failed to Fetch IOCs !!'
|
5,322 |
def get_health_checks_by_name(name, region=None, key=None, keyid=None, profile=None):
'''
Return detailed info about all the healthchecks with given name.
name
The name of the health check to lookup.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.
CLI Example:
.. code-block:: bash
salt myminion boto3_route53.get_health_checks_by_name ANAME \
profile='{"region": "us-east-1", "keyid": "A12345678AB", "key": "xblahblahblah"}'
'''
ids = get_health_check_ids_by_name(name, region, key, keyid, profile) or []
return [get_health_check(x, region, key, keyid, profile) for x in ids]
|
def get_health_checks_by_name(name, region=None, key=None, keyid=None, profile=None):
'''
Return detailed info about all the healthchecks with given name.
name
The name of the health check to lookup.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.
CLI Example:
.. code-block:: bash
salt myminion boto3_route53.get_health_checks_by_name ANAME \
profile='{"region": "us-east-1", "keyid": "A12345678AB", "key": "xblahblahblah"}'
'''
ids = get_health_check_ids_by_name(name, region, key, keyid, profile) or []
return [get_health_check(id, region, key, keyid, profile) for id in ids]
|
35,910 |
def get_new_validator_registry_delta_chain_tip(current_validator_registry_delta_chain_tip: Hash32,
validator_index: ValidatorIndex,
pubkey: BLSPubkey,
flag: int) -> Hash32:
"""
Compute the next hash in the validator registry delta hash chain.
"""
# TODO: switch to SSZ tree hashing
return ValidatorRegistryDeltaBlock(
latest_registry_delta_root=current_validator_registry_delta_chain_tip,
validator_index=validator_index,
pubkey=pubkey,
flag=flag,
).root
|
def get_new_validator_registry_delta_chain_tip(current_validator_registry_delta_chain_tip: Hash32,
validator_index: ValidatorIndex,
pubkey: BLSPubkey,
flag: ValidatorRegistryDeltaFlag) -> Hash32:
"""
Compute the next hash in the validator registry delta hash chain.
"""
# TODO: switch to SSZ tree hashing
return ValidatorRegistryDeltaBlock(
latest_registry_delta_root=current_validator_registry_delta_chain_tip,
validator_index=validator_index,
pubkey=pubkey,
flag=flag,
).root
|
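A generic illustration of the delta-hash-chain idea, not the real SSZ hashing: the spec code takes the root of a ValidatorRegistryDeltaBlock container, whereas this sketch simply chains sha256 over bytes:
import hashlib

def next_tip(current_tip: bytes, payload: bytes) -> bytes:
    # Each registry change folds the previous tip and the change into a new tip.
    return hashlib.sha256(current_tip + payload).digest()

tip = b"\x00" * 32
tip = next_tip(tip, b"validator-0|pubkey-0|ACTIVATION")
tip = next_tip(tip, b"validator-1|pubkey-1|ACTIVATION")
print(tip.hex())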
2,968 |
def nargsort(items, kind="quicksort", ascending: bool = True, na_position="last"):
"""
This is intended to be a drop-in replacement for np.argsort which
handles NaNs. It adds ascending and na_position parameters.
GH #6399, #5231
"""
items = extract_array(items)
mask = np.asarray(isna(items))
if is_extension_array_dtype(items):
items = items._values_for_argsort()
else:
items = np.asanyarray(items)
idx = np.arange(len(items))
non_nans = items[~mask]
non_nan_idx = idx[~mask]
nan_idx = np.nonzero(mask)[0]
if not ascending:
non_nans = non_nans[::-1]
non_nan_idx = non_nan_idx[::-1]
indexer = non_nan_idx[non_nans.argsort(kind=kind)]
if not ascending:
indexer = indexer[::-1]
# Finally, place the NaNs at the end or the beginning according to
# na_position
if na_position == "last":
indexer = np.concatenate([indexer, nan_idx])
elif na_position == "first":
indexer = np.concatenate([nan_idx, indexer])
else:
raise ValueError(f"invalid na_position: {na_position}")
return indexer
|
def nargsort(items, kind="quicksort", ascending: bool = True, na_position="last"):
"""
This is intended to be a drop-in replacement for np.argsort which
handles NaNs. It adds ascending and na_position parameters.
GH #6399, #5231
"""
items = extract_array(items)
mask = np.asarray(isna(items))
if is_extension_array_dtype(items):
items = items._values_for_argsort()
else:
items = np.asanyarray(items)
idx = np.arange(len(items))
non_nans = items[~mask]
non_nan_idx = idx[~mask]
nan_idx = np.nonzero(mask)[0]
if not ascending:
non_nans = non_nans[::-1]
non_nan_idx = non_nan_idx[::-1]
indexer = non_nan_idx[non_nans.argsort(kind=kind)]
if not ascending:
indexer = indexer[::-1]
# Finally, place the NaNs at the end or the beginning according to
# na_position
if na_position == "last":
indexer = np.concatenate([indexer, nan_idx])
elif na_position == "first":
indexer = np.concatenate([nan_idx, indexer])
else:
raise ValueError(f"invalid na_position: {repr(na_position)}")
return indexer
|
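A hedged usage sketch for the NaN-aware argsort above (in the upstream project it lives in pandas' internals; here it only needs the function as defined plus numpy):
import numpy as np

values = np.array([3.0, np.nan, 1.0, 2.0])
print(nargsort(values))                        # [2 3 0 1] -- NaN placed last
print(nargsort(values, ascending=False))       # [0 3 2 1] -- still last
print(nargsort(values, na_position="first"))   # [1 2 3 0]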
30,935 |
def get_feature_zip_file_path(feature_branch_name, job_num, zip_name):
"""Merge zip files and remove the unnecessary files.
Args:
feature_branch_name (str): The name of the feature branch.
job_num (str): Last successful create instance job of the feature branch.
zip_name (str): The zip we want to download (all_content or content_new).
"""
current_feature_zip_file_path = f'content/{feature_branch_name}/{job_num}/0/{zip_name}.zip'
zip_destination_path = f'{ARTIFACTS_PATH}feature_{zip_name}_zip'
feature_zip_file_path = download_zip_file_from_gcp(current_feature_zip_file_path, zip_destination_path)
return feature_zip_file_path, zip_destination_path
|
def get_feature_zip_file_path(feature_branch_name, job_num, zip_name):
"""Merge zip files and remove the unnecessary files.
Args:
feature_branch_name (str): The name of the feature branch.
job_num (str): Last successful create instance job of the feature branch.
zip_name (str): The zip we want to download (all_content or content_new).
"""
current_feature_branch_zip_file_path = f'content/{feature_branch_name}/{job_num}/0/{zip_name}.zip'
zip_destination_path = f'{ARTIFACTS_PATH}feature_{zip_name}_zip'
    feature_zip_file_path = download_zip_file_from_gcp(current_feature_branch_zip_file_path, zip_destination_path)
return feature_zip_file_path, zip_destination_path
|
31,384 |
def epoch_to_timestamp(epoch: int):
return datetime.utcfromtimestamp(epoch).strftime("%Y-%m-%d %H:%M:%S")
|
def epoch_to_timestamp(epoch: int) -> str:
return datetime.utcfromtimestamp(epoch).strftime("%Y-%m-%d %H:%M:%S")
|
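utcfromtimestamp() returns a naive datetime and is deprecated as of Python 3.12; a timezone-aware variant that yields the same string is sketched below:
from datetime import datetime, timezone

def epoch_to_timestamp_aware(epoch: int) -> str:
    return datetime.fromtimestamp(epoch, tz=timezone.utc).strftime("%Y-%m-%d %H:%M:%S")

print(epoch_to_timestamp_aware(0))  # 1970-01-01 00:00:00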
23,171 |
def test_getitem_avoids_large_chunks_missing(chunks):
# We cannot apply the "avoid large chunks" optimization when
# the chunks have unknown sizes.
with dask.config.set(
{"array.chunk-size": "0.1Mb", "array.slicing.split-large-chunks": True}
):
a = np.arange(4 * 500 * 500).reshape(4, 500, 500)
arr = da.from_array(a, chunks=(1, 500, 500))
arr._chunks = ((1, 1, 1, 1), (np.nan,), (np.nan,))
indexer = [0, 1] + [2] * 100 + [3]
expected = a[indexer]
result = arr[indexer]
assert_eq(result, expected)
|
def test_getitem_avoids_large_chunks_missing():
# We cannot apply the "avoid large chunks" optimization when
# the chunks have unknown sizes.
with dask.config.set(
{"array.chunk-size": "0.1Mb", "array.slicing.split-large-chunks": True}
):
a = np.arange(4 * 500 * 500).reshape(4, 500, 500)
arr = da.from_array(a, chunks=(1, 500, 500))
arr._chunks = ((1, 1, 1, 1), (np.nan,), (np.nan,))
indexer = [0, 1] + [2] * 100 + [3]
expected = a[indexer]
result = arr[indexer]
assert_eq(result, expected)
|
10,490 |
def install_deb(m, debs, cache, force, no_remove, install_recommends, allow_unauthenticated, dpkg_options):
changed = False
deps_to_install = []
pkgs_to_install = []
for deb_file in debs.split(','):
try:
pkg = apt.debfile.DebPackage(deb_file)
pkg_name = get_field_of_deb(m, deb_file, "Package")
pkg_version = get_field_of_deb(m, deb_file, "Version")
if len(apt_pkg.get_architectures()) > 1:
pkg_arch = get_field_of_deb(m, deb_file, "Architecture")
pkg_key = "%s:%s" % (pkg_name, pkg_arch)
else:
pkg_key = pkg_name
try:
installed_pkg = apt.Cache()[pkg_key]
installed_version = installed_pkg.installed.version
if package_version_compare(pkg_version, installed_version) == 0:
# Does not need to down-/upgrade, move on to next package
continue
except Exception:
# Must not be installed, continue with installation
pass
# Check if package is installable
if not pkg.check() and not force:
m.fail_json(msg=pkg._failure_string)
# add any missing deps to the list of deps we need
# to install so they're all done in one shot
deps_to_install.extend(pkg.missing_deps)
except Exception as e:
m.fail_json(msg="Unable to install package: %s" % to_native(e))
# and add this deb to the list of packages to install
pkgs_to_install.append(deb_file)
# install the deps through apt
retvals = {}
if deps_to_install:
(success, retvals) = install(m=m, pkgspec=deps_to_install, cache=cache,
install_recommends=install_recommends,
no_remove=no_remove,
allow_unauthenticated=allow_unauthenticated,
dpkg_options=expand_dpkg_options(dpkg_options))
if not success:
m.fail_json(**retvals)
changed = retvals.get('changed', False)
if pkgs_to_install:
options = ' '.join(["--%s" % x for x in dpkg_options.split(",")])
if m.check_mode:
options += " --simulate"
if force:
options += " --force-all"
cmd = "dpkg %s -i %s" % (options, " ".join(pkgs_to_install))
with PolicyRcD(m):
rc, out, err = m.run_command(cmd)
if "stdout" in retvals:
stdout = retvals["stdout"] + out
else:
stdout = out
if "diff" in retvals:
diff = retvals["diff"]
if 'prepared' in diff:
diff['prepared'] += '\n\n' + out
else:
diff = parse_diff(out)
if "stderr" in retvals:
stderr = retvals["stderr"] + err
else:
stderr = err
if rc == 0:
m.exit_json(changed=True, stdout=stdout, stderr=stderr, diff=diff)
else:
m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr)
else:
m.exit_json(changed=changed, stdout=retvals.get('stdout', ''), stderr=retvals.get('stderr', ''), diff=retvals.get('diff', ''))
|
def install_deb(m, debs, cache, force, fail_on_autoremove, install_recommends, allow_unauthenticated, dpkg_options):
changed = False
deps_to_install = []
pkgs_to_install = []
for deb_file in debs.split(','):
try:
pkg = apt.debfile.DebPackage(deb_file)
pkg_name = get_field_of_deb(m, deb_file, "Package")
pkg_version = get_field_of_deb(m, deb_file, "Version")
if len(apt_pkg.get_architectures()) > 1:
pkg_arch = get_field_of_deb(m, deb_file, "Architecture")
pkg_key = "%s:%s" % (pkg_name, pkg_arch)
else:
pkg_key = pkg_name
try:
installed_pkg = apt.Cache()[pkg_key]
installed_version = installed_pkg.installed.version
if package_version_compare(pkg_version, installed_version) == 0:
# Does not need to down-/upgrade, move on to next package
continue
except Exception:
# Must not be installed, continue with installation
pass
# Check if package is installable
if not pkg.check() and not force:
m.fail_json(msg=pkg._failure_string)
# add any missing deps to the list of deps we need
# to install so they're all done in one shot
deps_to_install.extend(pkg.missing_deps)
except Exception as e:
m.fail_json(msg="Unable to install package: %s" % to_native(e))
# and add this deb to the list of packages to install
pkgs_to_install.append(deb_file)
# install the deps through apt
retvals = {}
if deps_to_install:
(success, retvals) = install(m=m, pkgspec=deps_to_install, cache=cache,
install_recommends=install_recommends,
                                     fail_on_autoremove=fail_on_autoremove,
allow_unauthenticated=allow_unauthenticated,
dpkg_options=expand_dpkg_options(dpkg_options))
if not success:
m.fail_json(**retvals)
changed = retvals.get('changed', False)
if pkgs_to_install:
options = ' '.join(["--%s" % x for x in dpkg_options.split(",")])
if m.check_mode:
options += " --simulate"
if force:
options += " --force-all"
cmd = "dpkg %s -i %s" % (options, " ".join(pkgs_to_install))
with PolicyRcD(m):
rc, out, err = m.run_command(cmd)
if "stdout" in retvals:
stdout = retvals["stdout"] + out
else:
stdout = out
if "diff" in retvals:
diff = retvals["diff"]
if 'prepared' in diff:
diff['prepared'] += '\n\n' + out
else:
diff = parse_diff(out)
if "stderr" in retvals:
stderr = retvals["stderr"] + err
else:
stderr = err
if rc == 0:
m.exit_json(changed=True, stdout=stdout, stderr=stderr, diff=diff)
else:
m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr)
else:
m.exit_json(changed=changed, stdout=retvals.get('stdout', ''), stderr=retvals.get('stderr', ''), diff=retvals.get('diff', ''))
|
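How the comma-separated dpkg_options string becomes dpkg flags in the command built above (the option names shown are the module's usual defaults):
dpkg_options = "force-confdef,force-confold"
print(' '.join("--%s" % x for x in dpkg_options.split(",")))
# --force-confdef --force-confold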
9,044 |
def rate(
user: int = 0,
channel: int = 0,
server: int = 0,
*, message: typing.Optional[str] = None,
) -> typing.Callable:
"""Decorate a function to be rate-limited.
:param user: seconds between permitted calls of this function by the same
user
:param channel: seconds between permitted calls of this function in the
same channel, regardless of triggering user
:param server: seconds between permitted calls of this function no matter
who triggered it or where
:param message: optional keyword argument; default message send as notice
when a rate limit is reached
How often a function can be triggered on a per-user basis, in a channel,
or across the server (bot) can be controlled with this decorator. A value
of ``0`` means no limit. If a function is given a rate of 20, that
function may only be used once every 20 seconds in the scope corresponding
to the parameter::
from sopel import plugin
@plugin.rate(10)
# won't trigger if used more than once per 10s by a user
@plugin.rate(10, 10)
# won't trigger if used more than once per 10s by a user/channel
@plugin.rate(10, 10, 2)
# won't trigger if used more than once per 10s by a user/channel
# and never more than once every 2s
If a ``message`` is provided, it will be used as the default message sent
as a ``NOTICE`` to the user who hit the rate limit::
@rate(10, 10, 10, message='Hit the rate limit for this function.')
# will send a NOTICE
Rate-limited functions that use scheduled future commands should import
:class:`threading.Timer` instead of :mod:`sched`, or rate limiting will
not work properly.
.. versionchanged:: 8.0
Optional keyword argument ``message`` was added in Sopel 8.
.. note::
Users on the admin list in Sopel’s configuration are exempted from rate
limits.
.. seealso::
        You can control each rate limit separately, with its own custom
message using :func:`rate_user`, :func:`rate_channel`, or
:func:`rate_server`.
"""
def add_attribute(function):
if not hasattr(function, 'rate'):
function.rate = user
if not hasattr(function, 'channel_rate'):
function.channel_rate = channel
if not hasattr(function, 'global_rate'):
function.global_rate = server
function.default_rate_message = message
return function
return add_attribute
|
def rate(
user: int = 0,
channel: int = 0,
server: int = 0,
*, message: typing.Optional[str] = None,
) -> typing.Callable:
"""Decorate a function to be rate-limited.
:param user: seconds between permitted calls of this function by the same
user
:param channel: seconds between permitted calls of this function in the
same channel, regardless of triggering user
:param server: seconds between permitted calls of this function no matter
who triggered it or where
:param message: optional keyword argument; default message sent as NOTICE
when a rate limit is reached
How often a function can be triggered on a per-user basis, in a channel,
or across the server (bot) can be controlled with this decorator. A value
of ``0`` means no limit. If a function is given a rate of 20, that
function may only be used once every 20 seconds in the scope corresponding
to the parameter::
from sopel import plugin
@plugin.rate(10)
# won't trigger if used more than once per 10s by a user
@plugin.rate(10, 10)
# won't trigger if used more than once per 10s by a user/channel
@plugin.rate(10, 10, 2)
# won't trigger if used more than once per 10s by a user/channel
# and never more than once every 2s
If a ``message`` is provided, it will be used as the default message sent
as a ``NOTICE`` to the user who hit the rate limit::
@rate(10, 10, 10, message='Hit the rate limit for this function.')
# will send a NOTICE
Rate-limited functions that use scheduled future commands should import
:class:`threading.Timer` instead of :mod:`sched`, or rate limiting will
not work properly.
.. versionchanged:: 8.0
Optional keyword argument ``message`` was added in Sopel 8.
.. note::
Users on the admin list in Sopel’s configuration are exempted from rate
limits.
.. seealso::
        You can control each rate limit separately, with its own custom
message using :func:`rate_user`, :func:`rate_channel`, or
:func:`rate_server`.
"""
def add_attribute(function):
if not hasattr(function, 'rate'):
function.rate = user
if not hasattr(function, 'channel_rate'):
function.channel_rate = channel
if not hasattr(function, 'global_rate'):
function.global_rate = server
function.default_rate_message = message
return function
return add_attribute
|
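The decorator only attaches attributes that the bot later reads; a quick sketch showing what ends up on the wrapped function:
@rate(10, 5, 2, message='Slow down.')
def mycommand(bot, trigger):
    pass

print(mycommand.rate, mycommand.channel_rate, mycommand.global_rate)  # 10 5 2
print(mycommand.default_rate_message)                                 # Slow down.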
14,169 |
def _extra_link_args(lib_name=None, rpath=None):
"""
Add linker argument to load dependencies from the directory where vpi/vhpi/fli library is located
On osx use `install_name`.
Use `rpath` on all platforms
"""
args = []
if sys.platform == "darwin" and lib_name is not None:
args += ["-Wl,-install_name,@rpath/%s.so" % lib_name]
if rpath is not None:
args += ["-Wl,-rpath,%s" % rpath]
# allow 3bit cross compilation
if platform.architecture()[0] == "32bit":
args += ["-m32"]
return args
|
def _extra_link_args(lib_name=None, rpath=None):
"""
Add linker argument to load dependencies from the directory where vpi/vhpi/fli library is located
On osx use `install_name`.
Use `rpath` on all platforms
"""
args = []
if sys.platform == "darwin" and lib_name is not None:
args += ["-Wl,-install_name,@rpath/%s.so" % lib_name]
if rpath is not None:
args += ["-Wl,-rpath,%s" % rpath]
# allow 32bit cross compilation
if platform.architecture()[0] == "32bit":
args += ["-m32"]
return args
|
32,335 |
def from_bytes_to_text(mode: str, binary: bytes) -> str:
"""
Make a text from a binary.
:param mode: How to convert the binary to text.
:return: A text converted from the binary.
"""
if mode == 'text-based-protocol':
# Keep all the charactors used in text based protocols
# * The unicodedata category names of control code start with C
return ''.join(' '
if c == u'\ufffd'
or (c not in ('\n', '\r', '\t') and unicodedata.category(c)[0] == 'C')
else c
for c in binary.decode('utf-8', errors='replace'))
elif mode == 'human-readable':
return binary.decode('utf-8', errors='replace_with_space')
else:
raise ValueError(f'Unknown text conversion mode: {mode}')
|
def from_bytes_to_text(mode: str, binary: bytes) -> str:
"""
Make a text from a binary.
:param mode: How to convert the binary to text.
:return: A text converted from the binary.
"""
if mode == 'text-based-protocol':
# Keep all the characters used in text based protocols
# * The unicodedata category names of control code start with C
return ''.join(' '
if c == u'\ufffd'
or (c not in ('\n', '\r', '\t') and unicodedata.category(c)[0] == 'C')
else c
for c in binary.decode('utf-8', errors='replace'))
elif mode == 'human-readable':
return binary.decode('utf-8', errors='replace_with_space')
else:
raise ValueError(f'Unknown text conversion mode: {mode}')
|
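A quick check of the 'text-based-protocol' mode, assuming unicodedata is imported where the function is defined: control codes other than \n, \r, \t become spaces, and undecodable bytes do too:
raw = b'GET / HTTP/1.1\r\n\x00\x01Host: example.com\xff'
print(repr(from_bytes_to_text('text-based-protocol', raw)))
# 'GET / HTTP/1.1\r\n  Host: example.com '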
3,394 |
def _get_redis_key(public_key, project_id, organization_id):
if organization_id:
return f"relayconfig-debounce:o:{organization_id}"
elif project_id:
return f"relayconfig-debounce:p:{project_id}"
elif public_key:
return f"relayconfig-debounce:k:{project_id}"
else:
raise ValueError()
|
def _get_redis_key(public_key, project_id, organization_id):
if organization_id:
return f"relayconfig-debounce:o:{organization_id}"
elif project_id:
return f"relayconfig-debounce:p:{project_id}"
elif public_key:
return f"relayconfig-debounce:k:{public_key}"
else:
raise ValueError()
|
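Key precedence for the helper above, with made-up identifiers: organization wins over project, which wins over public key:
print(_get_redis_key("pk123", 42, 7))        # relayconfig-debounce:o:7
print(_get_redis_key("pk123", 42, None))     # relayconfig-debounce:p:42
print(_get_redis_key("pk123", None, None))   # relayconfig-debounce:k:pk123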
17,619 |
def create_pep_json(peps: list[parser.PEP], out_dir: str) -> None:
pep_list = [
{
"number": pep.number,
"title": pep.title,
"authors": ", ".join([pep.authors.nick for pep.authors in pep.authors]),
"status": pep.status,
"type": pep.pep_type,
"url": f"https://peps.python.org/pep-{pep.number:0>4}/",
}
for pep in sorted(peps)
]
out_file = os.path.join(out_dir, "peps.json")
with open(out_file, "w", encoding="UTF-8") as f:
json.dump(pep_list, f, indent=0)
|
def create_pep_json(peps: list[parser.PEP], out_dir: str) -> None:
pep_list = [
{
"number": pep.number,
"title": pep.title,
"authors": ", ".join([pep.authors.nick for pep.authors in pep.authors]),
"status": pep.status,
"type": pep.pep_type,
"url": f"https://peps.python.org/pep-{pep.number:0>4}/",
}
for pep in sorted(peps)
]
Path(out_dir, "peps.json").write_text(json.dumps(pep_list, indent=0), encoding="utf-8")
|
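The Path-based write in the newer version is equivalent to the earlier open()/json.dump pair; a self-contained sketch with a single illustrative record (the author nicks are assumptions, not pulled from the real index):
import json
import tempfile
from pathlib import Path

pep_list = [{"number": 8, "title": "Style Guide for Python Code",
             "authors": "gvanrossum, warsaw, ncoghlan", "status": "Active",
             "type": "Process", "url": "https://peps.python.org/pep-0008/"}]
out_dir = tempfile.mkdtemp()
Path(out_dir, "peps.json").write_text(json.dumps(pep_list, indent=0), encoding="utf-8")
print(Path(out_dir, "peps.json").read_text(encoding="utf-8"))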
28,248 |
def from_dict_to_current(dct: RunDescriberDicts) -> current.RunDescriber:
"""
Convert a dict into a RunDescriber of the current version
"""
dct_version = dct['version']
if dct_version == 0:
return current.RunDescriber._from_dict(cast(RunDescriberV0Dict, dct))
elif dct_version == 1:
return current.RunDescriber._from_dict(cast(RunDescriberV1Dict, dct))
elif dct_version >= 2:
return current.RunDescriber._from_dict(cast(RunDescriberV2Dict, dct))
elif dct_version == 3:
return current.RunDescriber._from_dict(cast(RunDescriberV3Dict, dct))
else:
raise RuntimeError(f"Unknown version of run describer dictionary, can't deserialize. The dictionary is {dct!r}")
|
def from_dict_to_current(dct: RunDescriberDicts) -> current.RunDescriber:
"""
Convert a dict into a RunDescriber of the current version
"""
dct_version = dct['version']
if dct_version == 0:
return current.RunDescriber._from_dict(cast(RunDescriberV0Dict, dct))
elif dct_version == 1:
return current.RunDescriber._from_dict(cast(RunDescriberV1Dict, dct))
    elif dct_version == 2:
return current.RunDescriber._from_dict(cast(RunDescriberV2Dict, dct))
elif dct_version >= 3:
return current.RunDescriber._from_dict(cast(RunDescriberV3Dict, dct))
else:
raise RuntimeError(f"Unknown version of run describer dictionary, can't deserialize. The dictionary is {dct!r}")
|
31,232 |
def feed_main(feed_name, params=None, prefix=''):
if not params:
params = assign_params(**demisto.params())
if 'feed_name' not in params:
params['feed_name'] = feed_name
feed_tags = argToList(demisto.params().get('feedTags'))
tlp_color = demisto.params().get('tlp_color')
client = Client(**params)
command = demisto.command()
if command != 'fetch-indicators':
demisto.info('Command being called is {}'.format(command))
if prefix and not prefix.endswith('-'):
prefix += '-'
# Switch case
commands: dict = {
'test-module': test_module,
f'{prefix}get-indicators': get_indicators_command
}
try:
if command == 'fetch-indicators':
indicators = fetch_indicators_command(client, feed_tags, tlp_color, params.get('indicator_type'),
params.get('auto_detect_type'))
# we submit the indicators in batches
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
else:
args = demisto.args()
args['feed_name'] = feed_name
if feed_tags:
args['feedTags'] = feed_tags
if tlp_color:
args['tlp_color'] = tlp_color
readable_output, outputs, raw_response = commands[command](client, args)
return_outputs(readable_output, outputs, raw_response)
except Exception as e:
err_msg = f'Error in {feed_name} integration [{e}]'
return_error(err_msg, error=e)
|
def feed_main(feed_name, params=None, prefix=''):
if not params:
params = assign_params(**demisto.params())
if 'feed_name' not in params:
params['feed_name'] = feed_name
feed_tags = argToList(demisto.params().get('feedTags'))
tlp_color = demisto.params().get('tlp_color')
client = Client(**params)
command = demisto.command()
if command != 'fetch-indicators':
demisto.info('Command being called is {}'.format(command))
if prefix and not prefix.endswith('-'):
prefix += '-'
# Switch case
commands: dict = {
'test-module': test_module,
f'{prefix}get-indicators': get_indicators_command
}
try:
if command == 'fetch-indicators':
indicators = fetch_indicators_command(client, feed_tags, tlp_color, params.get('indicator_type'),
params.get('auto_detect_type'))
# we submit the indicators in batches
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
else:
args = demisto.args()
args['feed_name'] = feed_name
if feed_tags:
args['feedTags'] = feed_tags
if tlp_color:
args['tlp_color'] = tlp_color
readable_output, outputs, raw_response = commands[command](client, args)
return_outputs(readable_output, outputs, raw_response)
except Exception as e:
err_msg = f'Error in {feed_name} integration:\n [{e}]'
return_error(err_msg, error=e)
|
13,760 |
def _get_expected_error_settings_dict():
"""
Returns a dict of dicts of expected error settings used for logging and monitoring.
    The contents of the EXPECTED_ERRORS Django Setting list are processed for efficient lookup by module:class.
Returns:
(dict): dict of dicts, mapping module-and-class name to settings for proper handling of expected errors.
Keys of the inner dicts use the lowercase version of the related Django Setting (e.g. ''REASON_EXPECTED' =>
'reason_expected').
Example return value::
{
'rest_framework.exceptions:PermissionDenied': {
'is_ignored': True,
'log_error': True,
'log_stack_trace': True,
'reason_expected': 'In most cases, signifies a user was trying to do something they couldn't.' /
'It is possible that there could be a bug, so this case should still be monitored at some level.'
}
...
}
"""
global _EXPECTED_ERROR_SETTINGS_DICT
# Return cached processed mappings if already processed
if _EXPECTED_ERROR_SETTINGS_DICT is not None:
return _EXPECTED_ERROR_SETTINGS_DICT
expected_errors = getattr(settings, 'EXPECTED_ERRORS', None)
if expected_errors is None:
return None
# Use temporary variable to build mappings to avoid multi-threading issue with a partially
# processed map. Worst case, it is processed more than once at start-up.
expected_error_settings_dict = {}
try:
for index, expected_error in enumerate(expected_errors):
module_and_class = expected_error.get('MODULE_AND_CLASS')
processed_expected_error = {
'is_ignored': expected_error.get('IS_IGNORED', True),
'log_error': expected_error.get('LOG_ERROR', False),
'log_stack_trace': expected_error.get('LOG_STACK_TRACE', False),
'reason_expected': expected_error.get('REASON_EXPECTED'),
}
# validate configuration
if not isinstance(module_and_class, str) or ':' not in module_and_class:
log.error(
"Skipping EXPECTED_ERRORS[%d] setting. 'MODULE_AND_CLASS' set to [%s] and should be module:class, "
"like 'rest_framework.exceptions:PermissionDenied'.",
index, module_and_class
)
continue
if not processed_expected_error['reason_expected']:
log.error(
"Skipping EXPECTED_ERRORS[%d] setting. 'REASON_EXPECTED' is required to document why %s is an "
"expected error.",
index, module_and_class
)
continue
expected_error_settings_dict[module_and_class] = processed_expected_error
except Exception as e: # pylint: disable=broad-except
set_custom_attribute('expected_errors_setting_misconfigured', repr(e))
log.exception(f'Error processing setting EXPECTED_ERRORS. {repr(e)}')
_EXPECTED_ERROR_SETTINGS_DICT = expected_error_settings_dict
return _EXPECTED_ERROR_SETTINGS_DICT
|
def _get_expected_error_settings_dict():
"""
Returns a dict of dicts of expected error settings used for logging and monitoring.
The contents of the EXPECTED_ERRORS Django Setting list is processed for efficient lookup by module:class.
Returns:
(dict): dict of dicts, mapping module-and-class name to settings for proper handling of expected errors.
Keys of the inner dicts use the lowercase version of the related Django Setting (e.g. 'REASON_EXPECTED' =>
'reason_expected').
Example return value::
{
'rest_framework.exceptions:PermissionDenied': {
'is_ignored': True,
'log_error': True,
'log_stack_trace': True,
'reason_expected': "In most cases, signifies a user was trying to do something they couldn't. "
"It is possible that there could be a bug, so this case should still be monitored at some level."
}
...
}
"""
global _EXPECTED_ERROR_SETTINGS_DICT
# Return cached processed mappings if already processed
if _EXPECTED_ERROR_SETTINGS_DICT is not None:
return _EXPECTED_ERROR_SETTINGS_DICT
expected_errors = getattr(settings, 'EXPECTED_ERRORS', None)
if expected_errors is None:
return None
# Use temporary variable to build mappings to avoid multi-threading issue with a partially
# processed map. Worst case, it is processed more than once at start-up.
expected_error_settings_dict = {}
try:
for index, expected_error in enumerate(expected_errors):
module_and_class = expected_error.get('MODULE_AND_CLASS')
processed_expected_error = {
'is_ignored': expected_error.get('IS_IGNORED', True),
'log_error': expected_error.get('LOG_ERROR', False),
'log_stack_trace': expected_error.get('LOG_STACK_TRACE', False),
'reason_expected': expected_error.get('REASON_EXPECTED'),
}
# validate configuration
if not isinstance(module_and_class, str) or ':' not in module_and_class:
log.error(
"Skipping EXPECTED_ERRORS[%d] setting. 'MODULE_AND_CLASS' set to [%s] and should be module:class, "
"like 'rest_framework.exceptions:PermissionDenied'.",
index, module_and_class
)
continue
if not processed_expected_error['reason_expected']:
log.error(
"Skipping EXPECTED_ERRORS[%d] setting. 'REASON_EXPECTED' is required to document why %s is an "
"expected error.",
index, module_and_class
)
continue
expected_error_settings_dict[module_and_class] = processed_expected_error
except Exception as e: # pylint: disable=broad-except
set_custom_attribute('expected_errors_setting_misconfigured', repr(e))
log.exception(f'Error processing setting EXPECTED_ERRORS. {repr(e)}')
_EXPECTED_ERROR_SETTINGS_DICT = expected_error_settings_dict
return _EXPECTED_ERROR_SETTINGS_DICT
|
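A sketch of a Django settings entry that _get_expected_error_settings_dict above would accept, based only on the keys the function reads; the specific module, class, and reason text are illustrative.
# Illustrative EXPECTED_ERRORS entry. Only MODULE_AND_CLASS and REASON_EXPECTED are
# required; omitted keys fall back to the defaults used in the function above
# (IS_IGNORED=True, LOG_ERROR=False, LOG_STACK_TRACE=False).
EXPECTED_ERRORS = [
    {
        'MODULE_AND_CLASS': 'rest_framework.exceptions:PermissionDenied',
        'IS_IGNORED': True,
        'LOG_ERROR': True,
        'LOG_STACK_TRACE': False,
        'REASON_EXPECTED': 'Signals a missing permission rather than a bug; still monitored at a low level.',
    },
]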
54,855 |
def reduce_dict_data(frames, reducer, order=None):
"""Merge tensor dictionaries into one. Resolve conflict fields using reducer.
Parameters
----------
frames : list[dict[str, Tensor]]
Input tensor dictionaries
reducer : str or callable function
One of "sum", "max", "min", "mean", "stack" or a callable function.
If a callable function is provided, the input argument is a list of tensors
from cross types, and the output of the function must be a single tensor.
order : list[Int], optional
Merge order hint. Useful for "stack" reducer.
If provided, each integer indicates the relative order
of the ``frames`` list. Frames are sorted according to this list
in ascending order. Tie is not handled so make sure the order values
are distinct.
Returns
-------
dict[str, Tensor]
Merged frame
"""
if len(frames) == 1 and reducer != 'stack':
# Directly return the only one input. Stack reducer requires
# modifying tensor shape.
return frames[0]
if callable(reducer):
merger = reducer
elif reducer == 'stack':
# Stack order does not matter. However, it must be consistent!
if order:
assert len(order) == len(frames)
sorted_with_key = sorted(zip(frames, order), key=lambda x: x[1])
frames = list(zip(*sorted_with_key))[0]
def merger(flist):
return F.stack(flist, 1)
else:
redfn = getattr(F, reducer, None)
if redfn is None:
raise DGLError('Invalid cross type reducer. Must be one of '
'"sum", "max", "min", "mean" or "stack".')
def merger(flist):
return redfn(F.stack(flist, 0), 0) if len(flist) > 1 else flist[0]
keys = set()
for frm in frames:
keys.update(frm.keys())
ret = {}
for k in keys:
flist = []
for frm in frames:
if k in frm:
flist.append(frm[k])
ret[k] = merger(flist)
return ret
|
def reduce_dict_data(frames, reducer, order=None):
"""Merge tensor dictionaries into one. Resolve conflict fields using reducer.
Parameters
----------
frames : list[dict[str, Tensor]]
Input tensor dictionaries
reducer : str or callable function
One of "sum", "max", "min", "mean", "stack" or a callable function.
If a callable function is provided, the input arguments must be a single list
of tensors containing aggregation results from each edge type, and the
output of function must be a single tensor.
order : list[Int], optional
Merge order hint. Useful for "stack" reducer.
If provided, each integer indicates the relative order
of the ``frames`` list. Frames are sorted according to this list
in ascending order. Tie is not handled so make sure the order values
are distinct.
Returns
-------
dict[str, Tensor]
Merged frame
"""
if len(frames) == 1 and reducer != 'stack':
# Directly return the only one input. Stack reducer requires
# modifying tensor shape.
return frames[0]
if callable(reducer):
merger = reducer
elif reducer == 'stack':
# Stack order does not matter. However, it must be consistent!
if order:
assert len(order) == len(frames)
sorted_with_key = sorted(zip(frames, order), key=lambda x: x[1])
frames = list(zip(*sorted_with_key))[0]
def merger(flist):
return F.stack(flist, 1)
else:
redfn = getattr(F, reducer, None)
if redfn is None:
raise DGLError('Invalid cross type reducer. Must be one of '
'"sum", "max", "min", "mean" or "stack".')
def merger(flist):
return redfn(F.stack(flist, 0), 0) if len(flist) > 1 else flist[0]
keys = set()
for frm in frames:
keys.update(frm.keys())
ret = {}
for k in keys:
flist = []
for frm in frames:
if k in frm:
flist.append(frm[k])
ret[k] = merger(flist)
return ret
|
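Since F in reduce_dict_data above is DGL's backend abstraction, here is a small plain-PyTorch sketch of the same merge semantics, assuming only that torch is installed; it illustrates the behaviour and is not DGL's implementation.
import torch

# Two per-type feature dictionaries with one conflicting key ('h') and one unique key ('x').
frame_a = {'h': torch.ones(3, 2), 'x': torch.zeros(3, 2)}
frame_b = {'h': torch.full((3, 2), 2.0)}

# "sum"-style reducer: conflicting keys are stacked along a new axis and reduced over it,
# while keys present in a single frame pass through unchanged.
merged = {}
for key in set(frame_a) | set(frame_b):
    tensors = [frm[key] for frm in (frame_a, frame_b) if key in frm]
    merged[key] = torch.stack(tensors, 0).sum(0) if len(tensors) > 1 else tensors[0]
print(merged['h'][0])     # tensor([3., 3.])
print(merged['x'].shape)  # torch.Size([3, 2])

# "stack"-style reducer: keep a per-frame axis at dim 1 instead of reducing it.
stacked_h = torch.stack([frame_a['h'], frame_b['h']], 1)
print(stacked_h.shape)    # torch.Size([3, 2, 2])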
17,692 |
def _run_with_exception_handler(cmdlineargs):
"""Execute the command and perform some reporting
normalization if it crashes, but otherwise just let it go"""
# otherwise - guard and only log the summary. Postmortem is not
# as convenient if being caught in this ultimate except
try:
return cmdlineargs.func(cmdlineargs)
# catch BaseException for KeyboardInterrupt
except BaseException as exc:
from datalad.support.exceptions import (
CapturedException,
InsufficientArgumentsError,
IncompleteResultsError,
CommandError,
)
ce = CapturedException(exc)
# we crashed, it has got to be non-zero for starters
exit_code = 1
if isinstance(exc, InsufficientArgumentsError):
# if the func reports inappropriate usage, give help output
lgr.error('%s (%s)', ce, exc.__class__.__name__)
cmdlineargs.subparser.print_usage(sys.stderr)
exit_code = 2
elif isinstance(exc, IncompleteResultsError):
# in general we do not want to see the error again, but
# present in debug output
lgr.debug('could not perform all requested actions: %s', ce)
elif isinstance(exc, CommandError):
exit_code = _communicate_commanderror(exc) or exit_code
elif isinstance(exc, KeyboardInterrupt):
from datalad.ui import ui
ui.error("\nInterrupted by user while doing magic: %s" % ce)
exit_code = 3
else:
# some unforeseen problem
lgr.error('%s (%s)', ce.message, exc.__class__.__name__)
sys.exit(exit_code)
|
def _run_with_exception_handler(cmdlineargs):
"""Execute the command and perform some reporting
normalization if it crashes, but otherwise just let it go"""
# otherwise - guard and only log the summary. Postmortem is not
# as convenient if being caught in this ultimate except
try:
return cmdlineargs.func(cmdlineargs)
# catch BaseException for KeyboardInterrupt
except BaseException as exc:
from datalad.support.exceptions import (
CapturedException,
InsufficientArgumentsError,
IncompleteResultsError,
CommandError,
)
ce = CapturedException(exc)
# we crashed, it has got to be non-zero for starters
exit_code = 1
if isinstance(exc, InsufficientArgumentsError):
# if the func reports inappropriate usage, give help output
lgr.error('%s (%s)', ce, exc.__class__.__name__)
cmdlineargs.subparser.print_usage(sys.stderr)
exit_code = 2
elif isinstance(exc, IncompleteResultsError):
# in general we do not want to see the error again, but
# present in debug output
lgr.debug('could not perform all requested actions: %s', ce)
elif isinstance(exc, CommandError):
exit_code = _communicate_commanderror(exc) or exit_code
elif isinstance(exc, KeyboardInterrupt):
from datalad.ui import ui
ui.error("\nInterrupted by user while doing magic: %s" % ce)
exit_code = 3
else:
# some unforeseen problem
lgr.error('%s (%s)', ce.message, ce.name)
sys.exit(exit_code)
|
42,343 |
def get_role_list(collection=None, playbook_dir=None, **kwargs):
'''
Run an ``ansible-doc`` command to get a list of installed collection roles.
Only roles that have an argument specification defined are returned.
.. note:: Version added: 2.2
:param str collection: A fully qualified collection name used to filter the results.
:param str playbook_dir: This parameter is used to set the relative path to handle playbook adjacent installed roles.
:param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
:param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
the work directory within container.
:param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param bool quiet: Disable all output
:param bool json_mode: Store event data in place of stdout on the console and in the stdout file
:param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
:param int timeout: The timeout value in seconds that will be passed to either the ``pexpect`` or ``subprocess`` invocation
(based on ``runner_mode`` selected) while executing the command. If the timeout is triggered, it will force cancel the execution.
:param bool process_isolation: Enable process isolation, using a container engine (e.g. podman).
:param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
:param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels'. (default: None)
:param list container_options: List of container options to pass to execution engine.
:param str container_workdir: The working directory within the container.
:param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param str private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param str ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param function status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout)
:param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and
the value is set to 'True' it will raise an 'AnsibleRunnerException' exception; if set to 'False' it logs a debug message and continues execution.
Default value is 'False'
:returns: A tuple of response and error string. The response is a python dictionary object
(as returned by ansible-doc JSON output) containing each role found, or an empty dict
if none are found.
'''
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
rd = DocConfig(**kwargs)
rd.prepare_role_list_command(collection, playbook_dir)
r = Runner(rd,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
r.run()
response = r.stdout.read()
error = r.stderr.read()
if response:
response = json.loads(sanitize_json_response(response))
return response, error
|
def get_role_list(collection=None, playbook_dir=None, **kwargs):
'''
Run an ``ansible-doc`` command to get a list of installed collection roles.
Only roles that have an argument specification defined are returned.
.. note:: Version added: 2.2
:param str collection: A fully qualified collection name used to filter the results.
:param str playbook_dir: This parameter is used to set the relative path to handle playbook adjacent installed roles.
:param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
:param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be
the work directory within container.
:param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param bool quiet: Disable all output
:param bool json_mode: Store event data in place of stdout on the console and in the stdout file
:param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
:param int timeout: The timeout value in seconds that will be passed to either the ``pexpect`` or ``subprocess`` invocation
(based on ``runner_mode`` selected) while executing the command. If the timeout is triggered, it will force cancel the execution.
:param bool process_isolation: Enable process isolation, using a container engine (e.g. podman).
:param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
:param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels'. (default: None)
:param list container_options: List of container options to pass to execution engine.
:param str container_workdir: The working directory within the container.
:param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param str private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param str ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param function status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout)
:param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and
the value is set to 'True' it will raise an 'AnsibleRunnerException' exception; if set to 'False' it logs a debug message and continues execution.
Default value is 'False'
:returns: A tuple of response and error string. The response is a dictionary object
(as returned by ansible-doc JSON output) containing each role found, or an empty dict
if none are found.
'''
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
rd = DocConfig(**kwargs)
rd.prepare_role_list_command(collection, playbook_dir)
r = Runner(rd,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
r.run()
response = r.stdout.read()
error = r.stderr.read()
if response:
response = json.loads(sanitize_json_response(response))
return response, error
|
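A hedged usage sketch for the get_role_list interface above; it assumes ansible-runner 2.2 or later, where this helper is importable from the top-level package, and the collection name and private_data_dir below are placeholder values.
import ansible_runner

# Ask ansible-doc for installed roles (only roles with an argument spec are returned).
response, error = ansible_runner.get_role_list(
    collection='community.general',        # optional filter; placeholder FQCN
    private_data_dir='/tmp/runner-demo',   # placeholder artifacts directory
    quiet=True,
)
if error:
    print('ansible-doc stderr:', error)
for role_name in (response or {}):
    print(role_name)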
27,326 |
def _build_stub(method_name, method, attribute_name):
"""
Build a stub for a transplanted method.
A transplanted stub is a dummy method that gets attached to a core class
(usually from :mod:`MDAnalysis.core.groups`) and raise a
:exc:`NoDataError`.
The stub mimics the original method for everything that has to do with the
documentation (docstring, name, signature). It gets overwritten by the
actual method when the latter is transplanted at universe creation.
Parameters
----------
method_name: str
The name of the attribute in the destination class.
method: Callable
The method to be mimicked.
attribute_name: str
The name of the topology attribute that is required for the method to be
relevant (e.g. masses, charges, ...)
Returns
-------
The stub.
"""
def stub_method(self, *args, **kwargs):
message = (
f'{self.__class__.__name__}.{method_name}() '
f'not available; this requires {attribute_name}'
)
raise NoDataError(message)
annotation = textwrap.dedent("""\
.. note::
This requires the underlying topology to have {}. Otherwise, a
:exc:`~MDAnalysis.exceptions.NoDataError` is raised.
""".format(attribute_name))
# The first line of the original docstring is not indented, but the
# subsequent lines are. We want to dedent the whole docstring.
first_line, other_lines = method.__doc__.split('\n', 1)
stub_method.__doc__ = (
first_line + '\n'
+ textwrap.dedent(other_lines)
+ '\n\n' + annotation
)
stub_method.__name__ = method_name
stub_method.__signature__ = inspect_signature(method)
return stub_method
|
def _build_stub(method_name, method, attribute_name):
"""
Build a stub for a transplanted method.
A transplanted stub is a dummy method that gets attached to a core class
(usually from :mod:`MDAnalysis.core.groups`) and raises a
:exc:`NoDataError`.
The stub mimics the original method for everything that has to do with the
documentation (docstring, name, signature). It gets overwritten by the
actual method when the latter is transplanted at universe creation.
Parameters
----------
method_name: str
The name of the attribute in the destination class.
method: Callable
The method to be mimicked.
attribute_name: str
The name of the topology attribute that is required for the method to be
relevant (e.g. masses, charges, ...)
Returns
-------
The stub.
"""
def stub_method(self, *args, **kwargs):
message = (
f'{self.__class__.__name__}.{method_name}() '
f'not available; this requires {attribute_name}'
)
raise NoDataError(message)
annotation = textwrap.dedent("""\
.. note::
This requires the underlying topology to have {}. Otherwise, a
:exc:`~MDAnalysis.exceptions.NoDataError` is raised.
""".format(attribute_name))
# The first line of the original docstring is not indented, but the
# subsequent lines are. We want to dedent the whole docstring.
first_line, other_lines = method.__doc__.split('\n', 1)
stub_method.__doc__ = (
first_line + '\n'
+ textwrap.dedent(other_lines)
+ '\n\n' + annotation
)
stub_method.__name__ = method_name
stub_method.__signature__ = inspect_signature(method)
return stub_method
|
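A self-contained sketch of the stub-factory pattern shown in _build_stub above; because NoDataError and inspect_signature come from MDAnalysis, this stand-in uses only the standard library and a plain RuntimeError to demonstrate the same idea.
import inspect
import textwrap


def real_total_mass(self):
    """Return the total mass of the group.

    Sums the ``masses`` attribute, which only exists when the topology provides masses.
    """
    return sum(self.masses)


def build_stub(method_name, method, attribute_name):
    # Placeholder that carries the real method's docstring, name, and signature,
    # but raises until the real method is attached.
    def stub_method(self, *args, **kwargs):
        raise RuntimeError(
            f'{self.__class__.__name__}.{method_name}() not available; '
            f'this requires {attribute_name}'
        )

    first_line, other_lines = method.__doc__.split('\n', 1)
    stub_method.__doc__ = first_line + '\n' + textwrap.dedent(other_lines)
    stub_method.__name__ = method_name
    stub_method.__signature__ = inspect.signature(method)
    return stub_method


class AtomGroup:
    total_mass = build_stub('total_mass', real_total_mass, 'masses')


try:
    AtomGroup().total_mass()
except RuntimeError as exc:
    print(exc)  # AtomGroup.total_mass() not available; this requires masses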
51,764 |
def matching_spec_from_env(spec):
"""
Returns a concrete spec, matching what is available in the environment.
If no matching spec is found in the environment, this will return the
given spec but concretized.
"""
env = spack.environment.get_env({}, cmd_name)
spec_from_env = None
if env:
spec_from_env = env.matching_spec(spec)
if spec_from_env:
spec = spec_from_env
else:
spec.concretize()
return spec
|
def matching_spec_from_env(spec):
"""
Returns a concrete spec, matching what is available in the environment.
If no matching spec is found in the environment, this will return the
given spec but concretized.
"""
env = spack.environment.get_env({}, cmd_name)
spec_from_env = None
if env:
return env.matching_spec(spec)
else:
return spec.concretized()
return spec
|
32,070 |
def print_debug_msg(msg, is_error):
"""
Prints a debug message with QRadarMsg prefix
"""
demisto.debug(f"QRadarMsg - {msg}")
|
def print_debug_msg(msg):
"""
Prints a debug message with QRadarMsg prefix
"""
demisto.debug(f"QRadarMsg - {msg}")
|
22,685 |
def prepare_and_parse_args(plugins, args, detect_defaults=False): # pylint: disable=too-many-statements
"""Returns parsed command line arguments.
:param .PluginsRegistry plugins: available plugins
:param list args: command line arguments with the program name removed
:returns: parsed command line arguments
:rtype: argparse.Namespace
"""
# pylint: disable=too-many-statements
helpful = HelpfulArgumentParser(args, plugins, detect_defaults)
_add_all_groups(helpful)
# --help is automatically provided by argparse
helpful.add(
None, "-v", "--verbose", dest="verbose_count", action="count",
default=flag_default("verbose_count"), help="This flag can be used "
"multiple times to incrementally increase the verbosity of output, "
"e.g. -vvv.")
helpful.add(
None, "-t", "--text", dest="text_mode", action="store_true",
default=flag_default("text_mode"), help=argparse.SUPPRESS)
helpful.add(
None, "--max-log-backups", type=nonnegative_int,
default=flag_default("max_log_backups"),
help="Specifies the maximum number of backup logs that should "
"be kept by Certbot's built in log rotation. Setting this "
"flag to 0 disables log rotation entirely, causing "
"Certbot to always append to the same log file.")
helpful.add(
[None, "automation", "run", "certonly", "enhance"],
"-n", "--non-interactive", "--noninteractive",
dest="noninteractive_mode", action="store_true",
default=flag_default("noninteractive_mode"),
help="Run without ever asking for user input. This may require "
"additional command line flags; the client will try to explain "
"which ones are required if it finds one missing")
helpful.add(
[None, "register", "run", "certonly", "enhance"],
constants.FORCE_INTERACTIVE_FLAG, action="store_true",
default=flag_default("force_interactive"),
help="Force Certbot to be interactive even if it detects it's not "
"being run in a terminal. This flag cannot be used with the "
"renew subcommand.")
helpful.add(
[None, "run", "certonly", "certificates", "enhance"],
"-d", "--domains", "--domain", dest="domains",
metavar="DOMAIN", action=_DomainsAction,
default=flag_default("domains"),
help="Domain names to apply. For multiple domains you can use "
"multiple -d flags or enter a comma separated list of domains "
"as a parameter. The first domain provided will be the "
"subject CN of the certificate, and all domains will be "
"Subject Alternative Names on the certificate. "
"The first domain will also be used in "
"some software user interfaces and as the file paths for the "
"certificate and related material unless otherwise "
"specified or you already have a certificate with the same "
"name. In the case of a name collision it will append a number "
"like 0001 to the file path name. (default: Ask)")
helpful.add(
[None, "run", "certonly", "register"],
"--eab-kid", dest="eab_kid",
metavar="EAB_KID",
help="Key Identifier for External Account Binding"
)
helpful.add(
[None, "run", "certonly", "register"],
"--eab-hmac-key", dest="eab_hmac_key",
metavar="EAB_HMAC_KEY",
help="HMAC key for External Account Binding"
)
helpful.add(
[None, "run", "certonly", "manage", "delete", "certificates",
"renew", "enhance"], "--cert-name", dest="certname",
metavar="CERTNAME", default=flag_default("certname"),
help="Certificate name to apply. This name is used by Certbot for housekeeping "
"and in file paths; it doesn't affect the content of the certificate itself. "
"To see certificate names, run 'certbot certificates'. "
"When creating a new certificate, specifies the new certificate's name. "
"(default: the first provided domain or the name of an existing "
"certificate on your system for the same domains)")
helpful.add(
[None, "testing", "renew", "certonly"],
"--dry-run", action="store_true", dest="dry_run",
default=flag_default("dry_run"),
help="Perform a test run of the client, obtaining test (invalid) certificates"
" but not saving them to disk. This can currently only be used"
" with the 'certonly' and 'renew' subcommands. \nNote: Although --dry-run"
" tries to avoid making any persistent changes on a system, it "
" is not completely side-effect free: if used with webserver authenticator plugins"
" like apache and nginx, it makes and then reverts temporary config changes"
" in order to obtain test certificates, and reloads webservers to deploy and then"
" roll back those changes. It also calls --pre-hook and --post-hook commands"
" if they are defined because they may be necessary to accurately simulate"
" renewal. --deploy-hook commands are not called.")
helpful.add(
["register", "automation"], "--register-unsafely-without-email", action="store_true",
default=flag_default("register_unsafely_without_email"),
help="Specifying this flag enables registering an account with no "
"email address. This is strongly discouraged, because in the "
"event of key loss or account compromise you will irrevocably "
"lose access to your account. You will also be unable to receive "
"notice about impending expiration or revocation of your "
"certificates. Updates to the Subscriber Agreement will still "
"affect you, and will be effective 14 days after posting an "
"update to the web site.")
# TODO: When `certbot register --update-registration` is fully deprecated,
# delete following helpful.add
helpful.add(
"register", "--update-registration", action="store_true",
default=flag_default("update_registration"), dest="update_registration",
help=argparse.SUPPRESS)
helpful.add(
["register", "update_account", "unregister", "automation"], "-m", "--email",
default=flag_default("email"),
help=config_help("email"))
helpful.add(["register", "update_account", "automation"], "--eff-email", action="store_true",
default=flag_default("eff_email"), dest="eff_email",
help="Share your e-mail address with EFF")
helpful.add(["register", "update_account", "automation"], "--no-eff-email",
action="store_false", default=flag_default("eff_email"), dest="eff_email",
help="Don't share your e-mail address with EFF")
helpful.add(
["automation", "certonly", "run"],
"--keep-until-expiring", "--keep", "--reinstall",
dest="reinstall", action="store_true", default=flag_default("reinstall"),
help="If the requested certificate matches an existing certificate, always keep the "
"existing one until it is due for renewal (for the "
"'run' subcommand this means reinstall the existing certificate). (default: Ask)")
helpful.add(
"automation", "--expand", action="store_true", default=flag_default("expand"),
help="If an existing certificate is a strict subset of the requested names, "
"always expand and replace it with the additional names. (default: Ask)")
helpful.add(
"automation", "--version", action="version",
version="%(prog)s {0}".format(certbot.__version__),
help="show program's version number and exit")
helpful.add(
["automation", "renew"],
"--force-renewal", "--renew-by-default", dest="renew_by_default",
action="store_true", default=flag_default("renew_by_default"),
help="If a certificate "
"already exists for the requested domains, renew it now, "
"regardless of whether it is near expiry. (Often "
"--keep-until-expiring is more appropriate). Also implies "
"--expand.")
helpful.add(
"automation", "--renew-with-new-domains", dest="renew_with_new_domains",
action="store_true", default=flag_default("renew_with_new_domains"),
help="If a "
"certificate already exists for the requested certificate name "
"but does not match the requested domains, renew it now, "
"regardless of whether it is near expiry.")
helpful.add(
"automation", "--reuse-key", dest="reuse_key",
action="store_true", default=flag_default("reuse_key"),
help="When renewing, use the same private key as the existing "
"certificate.")
helpful.add(
["automation", "renew", "certonly"],
"--allow-subset-of-names", action="store_true",
default=flag_default("allow_subset_of_names"),
help="When performing domain validation, do not consider it a failure "
"if authorizations can not be obtained for a strict subset of "
"the requested domains. This may be useful for allowing renewals for "
"multiple domains to succeed even if some domains no longer point "
"at this system. This option cannot be used with --csr.")
helpful.add(
"automation", "--agree-tos", dest="tos", action="store_true",
default=flag_default("tos"),
help="Agree to the ACME Subscriber Agreement (default: Ask)")
helpful.add(
["unregister", "automation"], "--account", metavar="ACCOUNT_ID",
default=flag_default("account"),
help="Account ID to use")
helpful.add(
"automation", "--duplicate", dest="duplicate", action="store_true",
default=flag_default("duplicate"),
help="Allow making a certificate lineage that duplicates an existing one "
"(both can be renewed in parallel)")
helpful.add(
"automation", "--os-packages-only", action="store_true",
default=flag_default("os_packages_only"),
help="(certbot-auto only) install OS package dependencies and then stop")
helpful.add(
"automation", "--no-self-upgrade", action="store_true",
default=flag_default("no_self_upgrade"),
help="(certbot-auto only) prevent the certbot-auto script from"
" upgrading itself to newer released versions (default: Upgrade"
" automatically)")
helpful.add(
"automation", "--no-bootstrap", action="store_true",
default=flag_default("no_bootstrap"),
help="(certbot-auto only) prevent the certbot-auto script from"
" installing OS-level dependencies (default: Prompt to install "
" OS-wide dependencies, but exit if the user says 'No')")
helpful.add(
["automation", "renew", "certonly", "run"],
"-q", "--quiet", dest="quiet", action="store_true",
default=flag_default("quiet"),
help="Silence all output except errors. Useful for automation via cron."
" Implies --non-interactive.")
# overwrites server, handled in HelpfulArgumentParser.parse_args()
helpful.add(["testing", "revoke", "run"], "--test-cert", "--staging",
dest="staging", action="store_true", default=flag_default("staging"),
help="Use the staging server to obtain or revoke test (invalid) certificates; equivalent"
" to --server " + constants.STAGING_URI)
helpful.add(
"testing", "--debug", action="store_true", default=flag_default("debug"),
help="Show tracebacks in case of errors, and allow certbot-auto "
"execution on experimental platforms")
helpful.add(
[None, "certonly", "run"], "--debug-challenges", action="store_true",
default=flag_default("debug_challenges"),
help="After setting up challenges, wait for user input before "
"submitting to CA")
helpful.add(
"testing", "--no-verify-ssl", action="store_true",
help=config_help("no_verify_ssl"),
default=flag_default("no_verify_ssl"))
helpful.add(
["testing", "standalone", "manual"], "--http-01-port", type=int,
dest="http01_port",
default=flag_default("http01_port"), help=config_help("http01_port"))
helpful.add(
["testing", "standalone"], "--http-01-address",
dest="http01_address",
default=flag_default("http01_address"), help=config_help("http01_address"))
helpful.add(
["testing", "nginx"], "--https-port", type=int,
default=flag_default("https_port"),
help=config_help("https_port"))
helpful.add(
"testing", "--break-my-certs", action="store_true",
default=flag_default("break_my_certs"),
help="Be willing to replace or renew valid certificates with invalid "
"(testing/staging) certificates")
helpful.add(
"security", "--rsa-key-size", type=int, metavar="N",
default=flag_default("rsa_key_size"), help=config_help("rsa_key_size"))
helpful.add(
"security", "--must-staple", action="store_true",
dest="must_staple", default=flag_default("must_staple"),
help=config_help("must_staple"))
helpful.add(
["security", "enhance"],
"--redirect", action="store_true", dest="redirect",
default=flag_default("redirect"),
help="Automatically redirect all HTTP traffic to HTTPS for the newly "
"authenticated vhost. (default: Ask)")
helpful.add(
"security", "--no-redirect", action="store_false", dest="redirect",
default=flag_default("redirect"),
help="Do not automatically redirect all HTTP traffic to HTTPS for the newly "
"authenticated vhost. (default: Ask)")
helpful.add(
["security", "enhance"],
"--hsts", action="store_true", dest="hsts", default=flag_default("hsts"),
help="Add the Strict-Transport-Security header to every HTTP response."
" Forcing browser to always use SSL for the domain."
" Defends against SSL Stripping.")
helpful.add(
"security", "--no-hsts", action="store_false", dest="hsts",
default=flag_default("hsts"), help=argparse.SUPPRESS)
helpful.add(
["security", "enhance"],
"--uir", action="store_true", dest="uir", default=flag_default("uir"),
help='Add the "Content-Security-Policy: upgrade-insecure-requests"'
' header to every HTTP response. Forcing the browser to use'
' https:// for every http:// resource.')
helpful.add(
"security", "--no-uir", action="store_false", dest="uir", default=flag_default("uir"),
help=argparse.SUPPRESS)
helpful.add(
"security", "--staple-ocsp", action="store_true", dest="staple",
default=flag_default("staple"),
help="Enables OCSP Stapling. A valid OCSP response is stapled to"
" the certificate that the server offers during TLS.")
helpful.add(
"security", "--no-staple-ocsp", action="store_false", dest="staple",
default=flag_default("staple"), help=argparse.SUPPRESS)
helpful.add(
"security", "--strict-permissions", action="store_true",
default=flag_default("strict_permissions"),
help="Require that all configuration files are owned by the current "
"user; only needed if your config is somewhere unsafe like /tmp/")
helpful.add(
["manual", "standalone", "certonly", "renew"],
"--preferred-challenges", dest="pref_challs",
action=_PrefChallAction, default=flag_default("pref_challs"),
help='A sorted, comma delimited list of the preferred challenge to '
'use during authorization with the most preferred challenge '
'listed first (Eg, "dns" or "http,dns"). '
'Not all plugins support all challenges. See '
'https://certbot.eff.org/docs/using.html#plugins for details. '
'ACME Challenges are versioned, but if you pick "http" rather '
'than "http-01", Certbot will select the latest version '
'automatically.')
helpful.add(
"renew", "--pre-hook",
help="Command to be run in a shell before obtaining any certificates."
" Intended primarily for renewal, where it can be used to temporarily"
" shut down a webserver that might conflict with the standalone"
" plugin. This will only be called if a certificate is actually to be"
" obtained/renewed. When renewing several certificates that have"
" identical pre-hooks, only the first will be executed.")
helpful.add(
"renew", "--post-hook",
help="Command to be run in a shell after attempting to obtain/renew"
" certificates. Can be used to deploy renewed certificates, or to"
" restart any servers that were stopped by --pre-hook. This is only"
" run if an attempt was made to obtain/renew a certificate. If"
" multiple renewed certificates have identical post-hooks, only"
" one will be run.")
helpful.add("renew", "--renew-hook",
action=_RenewHookAction, help=argparse.SUPPRESS)
helpful.add(
"renew", "--no-random-sleep-on-renew", action="store_false",
default=flag_default("random_sleep_on_renew"), dest="random_sleep_on_renew",
help=argparse.SUPPRESS)
helpful.add(
"renew", "--deploy-hook", action=_DeployHookAction,
help='Command to be run in a shell once for each successfully'
' issued certificate. For this command, the shell variable'
' $RENEWED_LINEAGE will point to the config live subdirectory'
' (for example, "/etc/letsencrypt/live/example.com") containing'
' the new certificates and keys; the shell variable'
' $RENEWED_DOMAINS will contain a space-delimited list of'
' renewed certificate domains (for example, "example.com'
' www.example.com")')
helpful.add(
"renew", "--disable-hook-validation",
action="store_false", dest="validate_hooks",
default=flag_default("validate_hooks"),
help="Ordinarily the commands specified for"
" --pre-hook/--post-hook/--deploy-hook will be checked for"
" validity, to see if the programs being run are in the $PATH,"
" so that mistakes can be caught early, even when the hooks"
" aren't being run just yet. The validation is rather"
" simplistic and fails if you use more advanced shell"
" constructs, so you can use this switch to disable it."
" (default: False)")
helpful.add(
"renew", "--no-directory-hooks", action="store_false",
default=flag_default("directory_hooks"), dest="directory_hooks",
help="Disable running executables found in Certbot's hook directories"
" during renewal. (default: False)")
helpful.add(
"renew", "--disable-renew-updates", action="store_true",
default=flag_default("disable_renew_updates"), dest="disable_renew_updates",
help="Disable automatic updates to your server configuration that"
" would otherwise be done by the selected installer plugin, and triggered"
" when the user executes \"certbot renew\", regardless of if the certificate"
" is renewed. This setting does not apply to important TLS configuration"
" updates.")
helpful.add(
"renew", "--no-autorenew", action="store_false",
default=flag_default("autorenew"), dest="autorenew",
help="Disable auto renewal of certificates.")
helpful.add_deprecated_argument("--agree-dev-preview", 0)
helpful.add_deprecated_argument("--dialog", 0)
# Deprecation of tls-related cli flags
# TODO: remove theses flags completely in few releases
class _DeprecatedTLSSNIAction(util._ShowWarning): # pylint: disable=protected-access
def __call__(self, parser, namespace, values, option_string=None):
super(_DeprecatedTLSSNIAction, self).__call__(parser, namespace, values, option_string)
namespace.https_port = values
helpful.add(
["testing", "standalone", "apache", "nginx"], "--tls-sni-01-port",
type=int, action=_DeprecatedTLSSNIAction, help=argparse.SUPPRESS)
helpful.add_deprecated_argument("--tls-sni-01-address", 1)
# Populate the command line parameters for new style enhancements
enhancements.populate_cli(helpful.add)
_create_subparsers(helpful)
_paths_parser(helpful)
# _plugins_parsing should be the last thing to act upon the main
# parser (--help should display plugin-specific options last)
_plugins_parsing(helpful, plugins)
if not detect_defaults:
global helpful_parser # pylint: disable=global-statement
helpful_parser = helpful
return helpful.parse_args()
|
def prepare_and_parse_args(plugins, args, detect_defaults=False): # pylint: disable=too-many-statements
"""Returns parsed command line arguments.
:param .PluginsRegistry plugins: available plugins
:param list args: command line arguments with the program name removed
:returns: parsed command line arguments
:rtype: argparse.Namespace
"""
# pylint: disable=too-many-statements
helpful = HelpfulArgumentParser(args, plugins, detect_defaults)
_add_all_groups(helpful)
# --help is automatically provided by argparse
helpful.add(
None, "-v", "--verbose", dest="verbose_count", action="count",
default=flag_default("verbose_count"), help="This flag can be used "
"multiple times to incrementally increase the verbosity of output, "
"e.g. -vvv.")
helpful.add(
None, "-t", "--text", dest="text_mode", action="store_true",
default=flag_default("text_mode"), help=argparse.SUPPRESS)
helpful.add(
None, "--max-log-backups", type=nonnegative_int,
default=flag_default("max_log_backups"),
help="Specifies the maximum number of backup logs that should "
"be kept by Certbot's built in log rotation. Setting this "
"flag to 0 disables log rotation entirely, causing "
"Certbot to always append to the same log file.")
helpful.add(
[None, "automation", "run", "certonly", "enhance"],
"-n", "--non-interactive", "--noninteractive",
dest="noninteractive_mode", action="store_true",
default=flag_default("noninteractive_mode"),
help="Run without ever asking for user input. This may require "
"additional command line flags; the client will try to explain "
"which ones are required if it finds one missing")
helpful.add(
[None, "register", "run", "certonly", "enhance"],
constants.FORCE_INTERACTIVE_FLAG, action="store_true",
default=flag_default("force_interactive"),
help="Force Certbot to be interactive even if it detects it's not "
"being run in a terminal. This flag cannot be used with the "
"renew subcommand.")
helpful.add(
[None, "run", "certonly", "certificates", "enhance"],
"-d", "--domains", "--domain", dest="domains",
metavar="DOMAIN", action=_DomainsAction,
default=flag_default("domains"),
help="Domain names to apply. For multiple domains you can use "
"multiple -d flags or enter a comma separated list of domains "
"as a parameter. The first domain provided will be the "
"subject CN of the certificate, and all domains will be "
"Subject Alternative Names on the certificate. "
"The first domain will also be used in "
"some software user interfaces and as the file paths for the "
"certificate and related material unless otherwise "
"specified or you already have a certificate with the same "
"name. In the case of a name collision it will append a number "
"like 0001 to the file path name. (default: Ask)")
helpful.add(
[None, "run", "certonly", "register"],
"--eab-kid", dest="eab_kid",
metavar="EAB_KID",
help="Key Identifier for External Account Binding"
)
helpful.add(
[None, "run", "certonly", "register"],
"--eab-hmac-key", dest="eab_hmac_key",
metavar="EAB_HMAC_KEY",
help="HMAC key for External Account Binding"
)
helpful.add(
[None, "run", "certonly", "manage", "delete", "certificates",
"renew", "enhance"], "--cert-name", dest="certname",
metavar="CERTNAME", default=flag_default("certname"),
help="Certificate name to apply. This name is used by Certbot for housekeeping "
"and in file paths; it doesn't affect the content of the certificate itself. "
"To see certificate names, run 'certbot certificates'. "
"When creating a new certificate, specifies the new certificate's name. "
"(default: the first provided domain or the name of an existing "
"certificate on your system for the same domains)")
helpful.add(
[None, "testing", "renew", "certonly"],
"--dry-run", action="store_true", dest="dry_run",
default=flag_default("dry_run"),
help="Perform a test run of the client, obtaining test (invalid) certificates"
" but not saving them to disk. This can currently only be used"
" with the 'certonly' and 'renew' subcommands. \nNote: Although --dry-run"
" tries to avoid making any persistent changes on a system, it "
" is not completely side-effect free: if used with webserver authenticator plugins"
" like apache and nginx, it makes and then reverts temporary config changes"
" in order to obtain test certificates, and reloads webservers to deploy and then"
" roll back those changes. It also calls --pre-hook and --post-hook commands"
" if they are defined because they may be necessary to accurately simulate"
" renewal. --deploy-hook commands are not called.")
helpful.add(
["register", "automation"], "--register-unsafely-without-email", action="store_true",
default=flag_default("register_unsafely_without_email"),
help="Specifying this flag enables registering an account with no "
"email address. This is strongly discouraged, because in the "
"event of key loss or account compromise you will irrevocably "
"lose access to your account. You will also be unable to receive "
"notice about impending expiration or revocation of your "
"certificates. Updates to the Subscriber Agreement will still "
"affect you, and will be effective 14 days after posting an "
"update to the web site.")
# TODO: When `certbot register --update-registration` is fully deprecated,
# delete following helpful.add
helpful.add(
"register", "--update-registration", action="store_true",
default=flag_default("update_registration"), dest="update_registration",
help=argparse.SUPPRESS)
helpful.add(
["register", "update_account", "unregister", "automation"], "-m", "--email",
default=flag_default("email"),
help=config_help("email"))
helpful.add(["register", "update_account", "automation"], "--eff-email", action="store_true",
default=flag_default("eff_email"), dest="eff_email",
help="Share your e-mail address with EFF")
helpful.add(["register", "update_account", "automation"], "--no-eff-email",
action="store_false", default=flag_default("eff_email"), dest="eff_email",
help="Don't share your e-mail address with EFF")
helpful.add(
["automation", "certonly", "run"],
"--keep-until-expiring", "--keep", "--reinstall",
dest="reinstall", action="store_true", default=flag_default("reinstall"),
help="If the requested certificate matches an existing certificate, always keep the "
"existing one until it is due for renewal (for the "
"'run' subcommand this means reinstall the existing certificate). (default: Ask)")
helpful.add(
"automation", "--expand", action="store_true", default=flag_default("expand"),
help="If an existing certificate is a strict subset of the requested names, "
"always expand and replace it with the additional names. (default: Ask)")
helpful.add(
"automation", "--version", action="version",
version="%(prog)s {0}".format(certbot.__version__),
help="show program's version number and exit")
helpful.add(
["automation", "renew"],
"--force-renewal", "--renew-by-default", dest="renew_by_default",
action="store_true", default=flag_default("renew_by_default"),
help="If a certificate "
"already exists for the requested domains, renew it now, "
"regardless of whether it is near expiry. (Often "
"--keep-until-expiring is more appropriate). Also implies "
"--expand.")
helpful.add(
"automation", "--renew-with-new-domains", dest="renew_with_new_domains",
action="store_true", default=flag_default("renew_with_new_domains"),
help="If a "
"certificate already exists for the requested certificate name "
"but does not match the requested domains, renew it now, "
"regardless of whether it is near expiry.")
helpful.add(
"automation", "--reuse-key", dest="reuse_key",
action="store_true", default=flag_default("reuse_key"),
help="When renewing, use the same private key as the existing "
"certificate.")
helpful.add(
["automation", "renew", "certonly"],
"--allow-subset-of-names", action="store_true",
default=flag_default("allow_subset_of_names"),
help="When performing domain validation, do not consider it a failure "
"if authorizations can not be obtained for a strict subset of "
"the requested domains. This may be useful for allowing renewals for "
"multiple domains to succeed even if some domains no longer point "
"at this system. This option cannot be used with --csr.")
helpful.add(
"automation", "--agree-tos", dest="tos", action="store_true",
default=flag_default("tos"),
help="Agree to the ACME Subscriber Agreement (default: Ask)")
helpful.add(
["unregister", "automation"], "--account", metavar="ACCOUNT_ID",
default=flag_default("account"),
help="Account ID to use")
helpful.add(
"automation", "--duplicate", dest="duplicate", action="store_true",
default=flag_default("duplicate"),
help="Allow making a certificate lineage that duplicates an existing one "
"(both can be renewed in parallel)")
helpful.add(
"automation", "--os-packages-only", action="store_true",
default=flag_default("os_packages_only"),
help="(certbot-auto only) install OS package dependencies and then stop")
helpful.add(
"automation", "--no-self-upgrade", action="store_true",
default=flag_default("no_self_upgrade"),
help="(certbot-auto only) prevent the certbot-auto script from"
" upgrading itself to newer released versions (default: Upgrade"
" automatically)")
helpful.add(
"automation", "--no-bootstrap", action="store_true",
default=flag_default("no_bootstrap"),
help="(certbot-auto only) prevent the certbot-auto script from"
" installing OS-level dependencies (default: Prompt to install "
" OS-wide dependencies, but exit if the user says 'No')")
helpful.add(
["automation", "renew", "certonly", "run"],
"-q", "--quiet", dest="quiet", action="store_true",
default=flag_default("quiet"),
help="Silence all output except errors. Useful for automation via cron."
" Implies --non-interactive.")
# overwrites server, handled in HelpfulArgumentParser.parse_args()
helpful.add(["testing", "revoke", "run"], "--test-cert", "--staging",
dest="staging", action="store_true", default=flag_default("staging"),
help="Use the staging server to obtain or revoke test (invalid) certificates; equivalent"
" to --server " + constants.STAGING_URI)
helpful.add(
"testing", "--debug", action="store_true", default=flag_default("debug"),
help="Show tracebacks in case of errors, and allow certbot-auto "
"execution on experimental platforms")
helpful.add(
[None, "certonly", "run"], "--debug-challenges", action="store_true",
default=flag_default("debug_challenges"),
help="After setting up challenges, wait for user input before "
"submitting to CA")
helpful.add(
"testing", "--no-verify-ssl", action="store_true",
help=config_help("no_verify_ssl"),
default=flag_default("no_verify_ssl"))
helpful.add(
["testing", "standalone", "manual"], "--http-01-port", type=int,
dest="http01_port",
default=flag_default("http01_port"), help=config_help("http01_port"))
helpful.add(
["testing", "standalone"], "--http-01-address",
dest="http01_address",
default=flag_default("http01_address"), help=config_help("http01_address"))
helpful.add(
["testing", "nginx"], "--https-port", type=int,
default=flag_default("https_port"),
help=config_help("https_port"))
helpful.add(
"testing", "--break-my-certs", action="store_true",
default=flag_default("break_my_certs"),
help="Be willing to replace or renew valid certificates with invalid "
"(testing/staging) certificates")
helpful.add(
"security", "--rsa-key-size", type=int, metavar="N",
default=flag_default("rsa_key_size"), help=config_help("rsa_key_size"))
helpful.add(
"security", "--must-staple", action="store_true",
dest="must_staple", default=flag_default("must_staple"),
help=config_help("must_staple"))
helpful.add(
["security", "enhance"],
"--redirect", action="store_true", dest="redirect",
default=flag_default("redirect"),
help="Automatically redirect all HTTP traffic to HTTPS for the newly "
"authenticated vhost. (default: Ask)")
helpful.add(
"security", "--no-redirect", action="store_false", dest="redirect",
default=flag_default("redirect"),
help="Do not automatically redirect all HTTP traffic to HTTPS for the newly "
"authenticated vhost. (default: Ask)")
helpful.add(
["security", "enhance"],
"--hsts", action="store_true", dest="hsts", default=flag_default("hsts"),
help="Add the Strict-Transport-Security header to every HTTP response."
" Forcing browser to always use SSL for the domain."
" Defends against SSL Stripping.")
helpful.add(
"security", "--no-hsts", action="store_false", dest="hsts",
default=flag_default("hsts"), help=argparse.SUPPRESS)
helpful.add(
["security", "enhance"],
"--uir", action="store_true", dest="uir", default=flag_default("uir"),
help='Add the "Content-Security-Policy: upgrade-insecure-requests"'
' header to every HTTP response. Forcing the browser to use'
' https:// for every http:// resource.')
helpful.add(
"security", "--no-uir", action="store_false", dest="uir", default=flag_default("uir"),
help=argparse.SUPPRESS)
helpful.add(
"security", "--staple-ocsp", action="store_true", dest="staple",
default=flag_default("staple"),
help="Enables OCSP Stapling. A valid OCSP response is stapled to"
" the certificate that the server offers during TLS.")
helpful.add(
"security", "--no-staple-ocsp", action="store_false", dest="staple",
default=flag_default("staple"), help=argparse.SUPPRESS)
helpful.add(
"security", "--strict-permissions", action="store_true",
default=flag_default("strict_permissions"),
help="Require that all configuration files are owned by the current "
"user; only needed if your config is somewhere unsafe like /tmp/")
helpful.add(
["manual", "standalone", "certonly", "renew"],
"--preferred-challenges", dest="pref_challs",
action=_PrefChallAction, default=flag_default("pref_challs"),
help='A sorted, comma delimited list of the preferred challenge to '
'use during authorization with the most preferred challenge '
'listed first (Eg, "dns" or "http,dns"). '
'Not all plugins support all challenges. See '
'https://certbot.eff.org/docs/using.html#plugins for details. '
'ACME Challenges are versioned, but if you pick "http" rather '
'than "http-01", Certbot will select the latest version '
'automatically.')
helpful.add(
"renew", "--pre-hook",
help="Command to be run in a shell before obtaining any certificates."
" Intended primarily for renewal, where it can be used to temporarily"
" shut down a webserver that might conflict with the standalone"
" plugin. This will only be called if a certificate is actually to be"
" obtained/renewed. When renewing several certificates that have"
" identical pre-hooks, only the first will be executed.")
helpful.add(
"renew", "--post-hook",
help="Command to be run in a shell after attempting to obtain/renew"
" certificates. Can be used to deploy renewed certificates, or to"
" restart any servers that were stopped by --pre-hook. This is only"
" run if an attempt was made to obtain/renew a certificate. If"
" multiple renewed certificates have identical post-hooks, only"
" one will be run.")
helpful.add("renew", "--renew-hook",
action=_RenewHookAction, help=argparse.SUPPRESS)
helpful.add(
"renew", "--no-random-sleep-on-renew", action="store_false",
default=flag_default("random_sleep_on_renew"), dest="random_sleep_on_renew",
help=argparse.SUPPRESS)
helpful.add(
"renew", "--deploy-hook", action=_DeployHookAction,
help='Command to be run in a shell once for each successfully'
' issued certificate. For this command, the shell variable'
' $RENEWED_LINEAGE will point to the config live subdirectory'
' (for example, "/etc/letsencrypt/live/example.com") containing'
' the new certificates and keys; the shell variable'
' $RENEWED_DOMAINS will contain a space-delimited list of'
' renewed certificate domains (for example, "example.com'
        ' www.example.com")')
helpful.add(
"renew", "--disable-hook-validation",
action="store_false", dest="validate_hooks",
default=flag_default("validate_hooks"),
help="Ordinarily the commands specified for"
" --pre-hook/--post-hook/--deploy-hook will be checked for"
" validity, to see if the programs being run are in the $PATH,"
" so that mistakes can be caught early, even when the hooks"
" aren't being run just yet. The validation is rather"
" simplistic and fails if you use more advanced shell"
" constructs, so you can use this switch to disable it."
" (default: False)")
helpful.add(
"renew", "--no-directory-hooks", action="store_false",
default=flag_default("directory_hooks"), dest="directory_hooks",
help="Disable running executables found in Certbot's hook directories"
" during renewal. (default: False)")
helpful.add(
"renew", "--disable-renew-updates", action="store_true",
default=flag_default("disable_renew_updates"), dest="disable_renew_updates",
help="Disable automatic updates to your server configuration that"
" would otherwise be done by the selected installer plugin, and triggered"
" when the user executes \"certbot renew\", regardless of if the certificate"
" is renewed. This setting does not apply to important TLS configuration"
" updates.")
helpful.add(
"renew", "--no-autorenew", action="store_false",
default=flag_default("autorenew"), dest="autorenew",
help="Disable auto renewal of certificates.")
helpful.add_deprecated_argument("--agree-dev-preview", 0)
helpful.add_deprecated_argument("--dialog", 0)
# Deprecation of tls-sni-01 related cli flags
    # TODO: remove these flags completely in a few releases
class _DeprecatedTLSSNIAction(util._ShowWarning): # pylint: disable=protected-access
def __call__(self, parser, namespace, values, option_string=None):
super(_DeprecatedTLSSNIAction, self).__call__(parser, namespace, values, option_string)
namespace.https_port = values
helpful.add(
["testing", "standalone", "apache", "nginx"], "--tls-sni-01-port",
type=int, action=_DeprecatedTLSSNIAction, help=argparse.SUPPRESS)
helpful.add_deprecated_argument("--tls-sni-01-address", 1)
# Populate the command line parameters for new style enhancements
enhancements.populate_cli(helpful.add)
_create_subparsers(helpful)
_paths_parser(helpful)
# _plugins_parsing should be the last thing to act upon the main
# parser (--help should display plugin-specific options last)
_plugins_parsing(helpful, plugins)
if not detect_defaults:
global helpful_parser # pylint: disable=global-statement
helpful_parser = helpful
return helpful.parse_args()
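# A minimal, standalone sketch of the deprecated-flag pattern used by
# _DeprecatedTLSSNIAction above: a custom argparse action warns and forwards the
# value to the attribute that the replacement flag would set. The flag names
# below are hypothetical and are not Certbot options.
import argparse
import warnings
class _DeprecatedPortAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
        warnings.warn("%s is deprecated; use --new-port instead." % option_string,
                      DeprecationWarning)
        namespace.new_port = values  # forward to the new flag's destination
_sketch_parser = argparse.ArgumentParser()
_sketch_parser.add_argument("--new-port", dest="new_port", type=int, default=443)
_sketch_parser.add_argument("--old-port", type=int, action=_DeprecatedPortAction,
                            help=argparse.SUPPRESS)
assert _sketch_parser.parse_args(["--old-port", "8443"]).new_port == 8443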
|
41,080 |
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('--no-cupy', action='store_true', default=False,
help='Disable CUPY tests')
parser.add_argument('--torch-version', default='0.4.1', type=str,
                        help='Torch version used to decide which extra libraries to check')
args = parser.parse_args(args)
# you should add the libraries which are not included in setup.py
MANUALLY_INSTALLED_LIBRARIES = [
('espnet', None),
('kaldiio', None),
('matplotlib', None),
('torch', ("0.4.1",
"1.0.0",
"1.0.1",
"1.0.1.post2",
"1.1.0",
"1.2.0",
"1.3.0",
"1.3.1",
"1.4.0")),
('chainer', ("6.0.0")),
('chainer_ctc', None),
('warprnnt_pytorch', ("0.1"))
]
if not args.no_cupy:
MANUALLY_INSTALLED_LIBRARIES.append(('cupy', ("6.0.0")))
if LooseVersion(args.torch_version) < LooseVersion('1.2.0'):
MANUALLY_INSTALLED_LIBRARIES.append(('warpctc_pytorch', ("0.1.1", "0.1.3")))
logging.basicConfig(
level=logging.INFO,
format="%(levelname)s: %(message)s")
logging.info("python version = " + sys.version)
library_list = MANUALLY_INSTALLED_LIBRARIES.copy()
# check library availableness
logging.info("library availableness check start.")
logging.info("# libraries to be checked = %d" % len(library_list))
is_correct_installed_list = []
for idx, (name, version) in enumerate(library_list):
try:
importlib.import_module(name)
logging.info("--> %s is installed." % name)
is_correct_installed_list.append(True)
if name=="torch":
if LooseVersion(importlib.import_module(name).__version__) > LooseVersion('1.2.0'):
try:
library_list.remove(('warpctc_pytorch', ("0.1.1", "0.1.3")))
MANUALLY_INSTALLED_LIBRARIES.remove(('warpctc_pytorch', ("0.1.1", "0.1.3")))
logging.info("Ignoring redundant check for warpctc_pytorch")
except ValueError:
pass
except ImportError:
logging.warning("--> %s is not installed." % name)
is_correct_installed_list.append(False)
logging.info("library availableness check done.")
logging.info("%d / %d libraries are correctly installed." % (
sum(is_correct_installed_list), len(library_list)))
if len(library_list) != sum(is_correct_installed_list):
logging.info("please try to setup again and then re-run this script.")
sys.exit(1)
# check library version
num_version_specified = sum([True if v is not None else False for n, v in library_list])
logging.info("library version check start.")
logging.info("# libraries to be checked = %d" % num_version_specified)
is_correct_version_list = []
for idx, (name, version) in enumerate(library_list):
if version is not None:
# Note: temp. fix for warprnnt_pytorch
# not found version with importlib
if name == "warprnnt_pytorch":
import pkg_resources
vers = pkg_resources.get_distribution(name).version
else:
vers = importlib.import_module(name).__version__
if vers != None:
is_correct = vers in version
if is_correct:
logging.info("--> %s version is matched." % name)
is_correct_version_list.append(True)
else:
logging.warning("--> %s version is not matched (%s is not in %s)." % (
name, vers, str(version)))
is_correct_version_list.append(False)
else:
logging.info("--> %s has no version info, but version is specified." % name)
logging.info("--> maybe it is better to reinstall the latest version.")
is_correct_version_list.append(False)
logging.info("library version check done.")
logging.info("%d / %d libraries are correct version." % (
sum(is_correct_version_list), num_version_specified))
if sum(is_correct_version_list) != num_version_specified:
logging.info("please try to setup again and then re-run this script.")
sys.exit(1)
# check cuda availableness
logging.info("cuda availableness check start.")
import chainer
import torch
try:
assert torch.cuda.is_available()
logging.info("--> cuda is available in torch.")
except AssertionError:
logging.warning("--> it seems that cuda is not available in torch.")
try:
assert torch.backends.cudnn.is_available()
logging.info("--> cudnn is available in torch.")
except AssertionError:
logging.warning("--> it seems that cudnn is not available in torch.")
try:
assert chainer.backends.cuda.available
logging.info("--> cuda is available in chainer.")
except AssertionError:
logging.warning("--> it seems that cuda is not available in chainer.")
try:
assert chainer.backends.cuda.cudnn_enabled
logging.info("--> cudnn is available in chainer.")
except AssertionError:
logging.warning("--> it seems that cudnn is not available in chainer.")
try:
from cupy.cuda import nccl # NOQA
logging.info("--> nccl is installed.")
except ImportError:
logging.warning("--> it seems that nccl is not installed. multi-gpu is not enabled.")
logging.warning("--> if you want to use multi-gpu, please install it and then re-setup.")
try:
assert torch.cuda.device_count() > 1
logging.info("--> multi-gpu is available (#gpus = %d)." % torch.cuda.device_count())
except AssertionError:
logging.warning("--> it seems that only single gpu is available.")
logging.warning('--> maybe your machine has only one gpu.')
logging.info("cuda availableness check done.")
|
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('--no-cupy', action='store_true', default=False,
help='Disable CUPY tests')
parser.add_argument('--torch-version', default='0.4.1', type=str,
                        help='Torch version used to decide which extra libraries to check')
args = parser.parse_args(args)
# you should add the libraries which are not included in setup.py
MANUALLY_INSTALLED_LIBRARIES = [
('espnet', None),
('kaldiio', None),
('matplotlib', None),
('torch', ("0.4.1",
"1.0.0",
"1.0.1",
"1.0.1.post2",
"1.1.0",
"1.2.0",
"1.3.0",
"1.3.1",
"1.4.0")),
('chainer', ("6.0.0")),
('chainer_ctc', None),
('warprnnt_pytorch', ("0.1"))
]
if not args.no_cupy:
MANUALLY_INSTALLED_LIBRARIES.append(('cupy', ("6.0.0")))
if LooseVersion(args.torch_version) < LooseVersion('1.2.0'):
MANUALLY_INSTALLED_LIBRARIES.append(('warpctc_pytorch', ("0.1.1", "0.1.3")))
logging.basicConfig(
level=logging.INFO,
format="%(levelname)s: %(message)s")
logging.info("python version = " + sys.version)
library_list = MANUALLY_INSTALLED_LIBRARIES.copy()
# check library availableness
logging.info("library availableness check start.")
logging.info("# libraries to be checked = %d" % len(library_list))
is_correct_installed_list = []
for idx, (name, version) in enumerate(library_list):
try:
importlib.import_module(name)
logging.info("--> %s is installed." % name)
is_correct_installed_list.append(True)
if name == "torch":
if LooseVersion(importlib.import_module(name).__version__) > LooseVersion('1.2.0'):
try:
library_list.remove(('warpctc_pytorch', ("0.1.1", "0.1.3")))
MANUALLY_INSTALLED_LIBRARIES.remove(('warpctc_pytorch', ("0.1.1", "0.1.3")))
logging.info("Ignoring redundant check for warpctc_pytorch")
except ValueError:
pass
except ImportError:
logging.warning("--> %s is not installed." % name)
is_correct_installed_list.append(False)
logging.info("library availableness check done.")
logging.info("%d / %d libraries are correctly installed." % (
sum(is_correct_installed_list), len(library_list)))
if len(library_list) != sum(is_correct_installed_list):
logging.info("please try to setup again and then re-run this script.")
sys.exit(1)
# check library version
num_version_specified = sum([True if v is not None else False for n, v in library_list])
logging.info("library version check start.")
logging.info("# libraries to be checked = %d" % num_version_specified)
is_correct_version_list = []
for idx, (name, version) in enumerate(library_list):
if version is not None:
# Note: temp. fix for warprnnt_pytorch
# not found version with importlib
if name == "warprnnt_pytorch":
import pkg_resources
vers = pkg_resources.get_distribution(name).version
else:
vers = importlib.import_module(name).__version__
if vers != None:
is_correct = vers in version
if is_correct:
logging.info("--> %s version is matched." % name)
is_correct_version_list.append(True)
else:
logging.warning("--> %s version is not matched (%s is not in %s)." % (
name, vers, str(version)))
is_correct_version_list.append(False)
else:
logging.info("--> %s has no version info, but version is specified." % name)
logging.info("--> maybe it is better to reinstall the latest version.")
is_correct_version_list.append(False)
logging.info("library version check done.")
logging.info("%d / %d libraries are correct version." % (
sum(is_correct_version_list), num_version_specified))
if sum(is_correct_version_list) != num_version_specified:
logging.info("please try to setup again and then re-run this script.")
sys.exit(1)
# check cuda availableness
logging.info("cuda availableness check start.")
import chainer
import torch
try:
assert torch.cuda.is_available()
logging.info("--> cuda is available in torch.")
except AssertionError:
logging.warning("--> it seems that cuda is not available in torch.")
try:
assert torch.backends.cudnn.is_available()
logging.info("--> cudnn is available in torch.")
except AssertionError:
logging.warning("--> it seems that cudnn is not available in torch.")
try:
assert chainer.backends.cuda.available
logging.info("--> cuda is available in chainer.")
except AssertionError:
logging.warning("--> it seems that cuda is not available in chainer.")
try:
assert chainer.backends.cuda.cudnn_enabled
logging.info("--> cudnn is available in chainer.")
except AssertionError:
logging.warning("--> it seems that cudnn is not available in chainer.")
try:
from cupy.cuda import nccl # NOQA
logging.info("--> nccl is installed.")
except ImportError:
logging.warning("--> it seems that nccl is not installed. multi-gpu is not enabled.")
logging.warning("--> if you want to use multi-gpu, please install it and then re-setup.")
try:
assert torch.cuda.device_count() > 1
logging.info("--> multi-gpu is available (#gpus = %d)." % torch.cuda.device_count())
except AssertionError:
logging.warning("--> it seems that only single gpu is available.")
logging.warning('--> maybe your machine has only one gpu.')
logging.info("cuda availableness check done.")
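# A minimal sketch of the version gate applied above: import a module, read its
# __version__, and compare it against an allowed set (plus an optional minimum)
# with LooseVersion. The package name and versions below are illustrative only.
import importlib
from distutils.version import LooseVersion
def _check_version(name, allowed=None, minimum=None):
    mod = importlib.import_module(name)
    vers = getattr(mod, "__version__", None)
    if vers is None:
        return False
    if minimum is not None and LooseVersion(vers) < LooseVersion(minimum):
        return False
    return allowed is None or vers in allowed
print(_check_version("numpy", minimum="1.0"))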
|
5,634 |
def upfirdn(h, x, up=1, down=1, axis=-1, mode='constant', cval=0):
"""Upsample, FIR filter, and downsample
Parameters
----------
h : array_like
1-dimensional FIR (finite-impulse response) filter coefficients.
x : array_like
Input signal array.
up : int, optional
Upsampling rate. Default is 1.
down : int, optional
Downsampling rate. Default is 1.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
mode : str, optional
The signal extension mode to use. The set
``{"constant", "symmetric", "reflect", "edge", "wrap"}`` correspond to
modes provided by ``numpy.pad``. ``"smooth"`` implements a smooth
extension by extending based on the slope of the last 2 points at each
end of the array. ``"antireflect"`` and ``"antisymmetric"`` are
anti-symmetric versions of ``"reflect"`` and ``"symmetric"``. The mode
`"line"` extends the signal based on a linear trend defined by the
first and last points along the ``axis``.
.. versionadded:: 1.4.0
cval : float, optional
The constant value to use when ``mode == "constant"``.
.. versionadded:: 1.4.0
Returns
-------
y : ndarray
The output signal array. Dimensions will be the same as `x` except
for along `axis`, which will change size according to the `h`,
`up`, and `down` parameters.
Notes
-----
The algorithm is an implementation of the block diagram shown on page 129
of the Vaidyanathan text [1]_ (Figure 4.3-8d).
.. [1] P. P. Vaidyanathan, Multirate Systems and Filter Banks,
Prentice Hall, 1993.
The direct approach of upsampling by factor of P with zero insertion,
FIR filtering of length ``N``, and downsampling by factor of Q is
O(N*Q) per output sample. The polyphase implementation used here is
O(N/P).
.. versionadded:: 0.18
Examples
--------
Simple operations:
>>> from scipy.signal import upfirdn
>>> upfirdn([1, 1, 1], [1, 1, 1]) # FIR filter
array([ 1., 2., 3., 2., 1.])
>>> upfirdn([1], [1, 2, 3], 3) # upsampling with zeros insertion
array([ 1., 0., 0., 2., 0., 0., 3., 0., 0.])
>>> upfirdn([1, 1, 1], [1, 2, 3], 3) # upsampling with sample-and-hold
array([ 1., 1., 1., 2., 2., 2., 3., 3., 3.])
>>> upfirdn([.5, 1, .5], [1, 1, 1], 2) # linear interpolation
array([ 0.5, 1. , 1. , 1. , 1. , 1. , 0.5, 0. ])
>>> upfirdn([1], np.arange(10), 1, 3) # decimation by 3
array([ 0., 3., 6., 9.])
>>> upfirdn([.5, 1, .5], np.arange(10), 2, 3) # linear interp, rate 2/3
array([ 0. , 1. , 2.5, 4. , 5.5, 7. , 8.5, 0. ])
Apply a single filter to multiple signals:
>>> x = np.reshape(np.arange(8), (4, 2))
>>> x
array([[0, 1],
[2, 3],
[4, 5],
[6, 7]])
Apply along the last dimension of ``x``:
>>> h = [1, 1]
>>> upfirdn(h, x, 2)
array([[ 0., 0., 1., 1.],
[ 2., 2., 3., 3.],
[ 4., 4., 5., 5.],
[ 6., 6., 7., 7.]])
Apply along the 0th dimension of ``x``:
>>> upfirdn(h, x, 2, axis=0)
array([[ 0., 1.],
[ 0., 1.],
[ 2., 3.],
[ 2., 3.],
[ 4., 5.],
[ 4., 5.],
[ 6., 7.],
[ 6., 7.]])
"""
x = np.asarray(x)
ufd = _UpFIRDn(h, x.dtype, up, down)
# This is equivalent to (but faster than) using np.apply_along_axis
return ufd.apply_filter(x, axis, mode, cval)
|
def upfirdn(h, x, up=1, down=1, axis=-1, mode='constant', cval=0):
"""Upsample, FIR filter, and downsample
Parameters
----------
h : array_like
1-dimensional FIR (finite-impulse response) filter coefficients.
x : array_like
Input signal array.
up : int, optional
Upsampling rate. Default is 1.
down : int, optional
Downsampling rate. Default is 1.
axis : int, optional
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
mode : str, optional
The signal extension mode to use. The set
``{"constant", "symmetric", "reflect", "edge", "wrap"}`` correspond to
modes provided by :func:`numpy.pad`. ``"smooth"`` implements a smooth
extension by extending based on the slope of the last 2 points at each
end of the array. ``"antireflect"`` and ``"antisymmetric"`` are
anti-symmetric versions of ``"reflect"`` and ``"symmetric"``. The mode
`"line"` extends the signal based on a linear trend defined by the
first and last points along the ``axis``.
.. versionadded:: 1.4.0
cval : float, optional
The constant value to use when ``mode == "constant"``.
.. versionadded:: 1.4.0
Returns
-------
y : ndarray
The output signal array. Dimensions will be the same as `x` except
for along `axis`, which will change size according to the `h`,
`up`, and `down` parameters.
Notes
-----
The algorithm is an implementation of the block diagram shown on page 129
of the Vaidyanathan text [1]_ (Figure 4.3-8d).
.. [1] P. P. Vaidyanathan, Multirate Systems and Filter Banks,
Prentice Hall, 1993.
The direct approach of upsampling by factor of P with zero insertion,
FIR filtering of length ``N``, and downsampling by factor of Q is
O(N*Q) per output sample. The polyphase implementation used here is
O(N/P).
.. versionadded:: 0.18
Examples
--------
Simple operations:
>>> from scipy.signal import upfirdn
>>> upfirdn([1, 1, 1], [1, 1, 1]) # FIR filter
array([ 1., 2., 3., 2., 1.])
>>> upfirdn([1], [1, 2, 3], 3) # upsampling with zeros insertion
array([ 1., 0., 0., 2., 0., 0., 3., 0., 0.])
>>> upfirdn([1, 1, 1], [1, 2, 3], 3) # upsampling with sample-and-hold
array([ 1., 1., 1., 2., 2., 2., 3., 3., 3.])
>>> upfirdn([.5, 1, .5], [1, 1, 1], 2) # linear interpolation
array([ 0.5, 1. , 1. , 1. , 1. , 1. , 0.5, 0. ])
>>> upfirdn([1], np.arange(10), 1, 3) # decimation by 3
array([ 0., 3., 6., 9.])
>>> upfirdn([.5, 1, .5], np.arange(10), 2, 3) # linear interp, rate 2/3
array([ 0. , 1. , 2.5, 4. , 5.5, 7. , 8.5, 0. ])
Apply a single filter to multiple signals:
>>> x = np.reshape(np.arange(8), (4, 2))
>>> x
array([[0, 1],
[2, 3],
[4, 5],
[6, 7]])
Apply along the last dimension of ``x``:
>>> h = [1, 1]
>>> upfirdn(h, x, 2)
array([[ 0., 0., 1., 1.],
[ 2., 2., 3., 3.],
[ 4., 4., 5., 5.],
[ 6., 6., 7., 7.]])
Apply along the 0th dimension of ``x``:
>>> upfirdn(h, x, 2, axis=0)
array([[ 0., 1.],
[ 0., 1.],
[ 2., 3.],
[ 2., 3.],
[ 4., 5.],
[ 4., 5.],
[ 6., 7.],
[ 6., 7.]])
"""
x = np.asarray(x)
ufd = _UpFIRDn(h, x.dtype, up, down)
# This is equivalent to (but faster than) using np.apply_along_axis
return ufd.apply_filter(x, axis, mode, cval)
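# A sketch of the naive pipeline the Notes section compares against: zero-insert
# upsampling, a full convolution, then decimation. On the docstring's small
# examples it matches upfirdn up to the trailing samples that upfirdn trims.
# The public scipy.signal.upfirdn is imported so the comparison is self-contained.
import numpy as np
from scipy.signal import upfirdn
def naive_upfirdn(h, x, up=1, down=1):
    x = np.asarray(x, dtype=float)
    upsampled = np.zeros(len(x) * up)
    upsampled[::up] = x                   # zero insertion
    return np.convolve(h, upsampled)[::down]
print(naive_upfirdn([1, 1, 1], [1, 2, 3], up=3))  # [1. 1. 1. 2. 2. 2. 3. 3. 3. 0. 0.]
print(upfirdn([1, 1, 1], [1, 2, 3], up=3))        # [1. 1. 1. 2. 2. 2. 3. 3. 3.]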
|
27,697 |
def idmaker(argnames, parametersets, idfn=None, ids=None, config=None, item=None):
ids = [
_idvalset(valindex, parameterset, argnames, idfn, ids, config=config, item=item)
for valindex, parameterset in enumerate(parametersets)
]
# All ids must be unique!
unique_ids = set(ids)
if len(unique_ids) != len(ids):
        # Record the number of occurrences of each test id
testid_counts = Counter(ids)
# Map the test id to its next suffix.
testid_suffixes = Counter(unique_ids)
for testid in testid_suffixes.keys():
testid_suffixes[testid] -= 1 # start each suffix at 0
# Suffix non-unique ids to make them unique:
for index, testid in enumerate(ids):
if testid_counts[testid] > 1:
ids[index] = f"{testid}{testid_suffixes[testid]}"
testid_suffixes[testid] += 1
return ids
|
def idmaker(argnames, parametersets, idfn=None, ids=None, config=None, item=None):
ids = [
_idvalset(valindex, parameterset, argnames, idfn, ids, config=config, item=item)
for valindex, parameterset in enumerate(parametersets)
]
# All ids must be unique!
unique_ids = set(ids)
if len(unique_ids) != len(ids):
        # Record the number of occurrences of each test id
testid_counts = Counter(ids)
# Map the test id to its next suffix.
testid_suffixes = Counter(unique_ids)
for testid in testid_suffixes.keys():
testid_suffixes[testid] -= 1 # start each suffix at 0
# Suffix non-unique ids to make them unique:
for index, testid in enumerate(ids):
if testid_counts[testid] > 1:
ids[index] = "{}{}".format(testid, testid_suffixes[testid])
testid_suffixes[testid] += 1
return ids
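# A standalone sketch of the de-duplication scheme above: ids that occur more
# than once receive a 0-based numeric suffix in order of appearance, while
# already-unique ids are left untouched. The input ids below are made up.
from collections import Counter
def _suffix_duplicates(ids):
    counts = Counter(ids)
    next_suffix = Counter()          # every duplicated id starts its suffix at 0
    out = []
    for testid in ids:
        if counts[testid] > 1:
            out.append("{}{}".format(testid, next_suffix[testid]))
            next_suffix[testid] += 1
        else:
            out.append(testid)
    return out
print(_suffix_duplicates(["a", "b", "a", "c", "a"]))  # ['a0', 'b', 'a1', 'c', 'a2']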
|
7,120 |
def blue_noise(shape, radius=0.01, k=30, seed=None):
"""
    Generate blue-noise (Poisson disk) sample positions over a two-dimensional domain.
Parameters
----------
shape : tuple
Two-dimensional domain (width x height) over which to sample noise
radius : float
Minimum distance between samples
k : int
Limit of samples to choose before rejection (typically k = 30)
seed : int
If provided, this will set the random seed before generating noise,
for valid pseudo-random comparisons.
Notes:
------
This function implements the method introduced in "Fast Poisson Disk
Sampling in Arbitrary Dimensions, Robert Bridson, Siggraph, 2007" for
generating (fast) blue noise.
See also:
---------
https://github.com/scikit-image/scikit-image/issues/2380
"""
def squared_distance(p0, p1):
return (p0[0]-p1[0])**2 + (p0[1]-p1[1])**2
def random_point_around(p, k=1):
# WARNING: This is not uniform around p but we can live with it
R = rng.uniform(radius, 2*radius, k)
T = rng.uniform(0, 2*np.pi, k)
P = np.empty((k, 2))
P[:, 0] = p[0]+R*np.sin(T)
P[:, 1] = p[1]+R*np.cos(T)
return P
def in_limits(p):
return 0 <= p[0] < width and 0 <= p[1] < height
def neighborhood(shape, index, n=2):
row, col = index
row0, row1 = max(row-n, 0), min(row+n+1, shape[0])
col0, col1 = max(col-n, 0), min(col+n+1, shape[1])
I = np.dstack(np.mgrid[row0:row1, col0:col1])
I = I.reshape(I.size//2, 2).tolist()
I.remove([row, col])
return I
def in_neighborhood(p):
i, j = int(p[0]/cellsize), int(p[1]/cellsize)
if M[i, j]:
return True
for (i, j) in N[(i, j)]:
if M[i, j] and squared_distance(p, P[i, j]) < squared_radius:
return True
return False
def add_point(p):
points.append(p)
i, j = int(p[0]/cellsize), int(p[1]/cellsize)
P[i, j], M[i, j] = p, True
# When given a seed, we use a private random generator in order to not
# disturb the default and global random generator
if seed is not None:
from numpy.random.mtrand import RandomState
rng = RandomState(seed=seed)
else:
rng = np.random
# Get width and height
width, height = shape
    # Here 2 corresponds to the number of dimensions
cellsize = radius/np.sqrt(2)
rows = int(np.ceil(width/cellsize))
cols = int(np.ceil(height/cellsize))
# Squared radius because we'll compare squared distance
squared_radius = radius*radius
# Positions cells
P = np.zeros((rows, cols, 2), dtype=np.float32)
M = np.zeros((rows, cols), dtype=bool)
# Cache generation for neighborhood
N = {}
for i in range(rows):
for j in range(cols):
N[(i, j)] = neighborhood(M.shape, (i, j), 2)
points = []
add_point((rng.uniform(width), rng.uniform(height)))
while len(points):
i = np.random.randint(len(points))
p = points[i]
del points[i]
Q = random_point_around(p, k)
for q in Q:
if in_limits(q) and not in_neighborhood(q):
add_point(q)
return P[M]
|
def blue_noise(shape, radius=0.01, k=30, seed=None):
"""
    Generate blue-noise (Poisson disk) sample positions over a two-dimensional domain.
Parameters
----------
shape : tuple
Two-dimensional domain (width x height) over which to sample noise
radius : float
Minimum distance between samples
k : int, optional
Limit of samples to choose before rejection (typically k = 30)
seed : int
If provided, this will set the random seed before generating noise,
for valid pseudo-random comparisons.
Notes:
------
This function implements the method introduced in "Fast Poisson Disk
Sampling in Arbitrary Dimensions, Robert Bridson, Siggraph, 2007" for
generating (fast) blue noise.
See also:
---------
https://github.com/scikit-image/scikit-image/issues/2380
"""
def squared_distance(p0, p1):
return (p0[0]-p1[0])**2 + (p0[1]-p1[1])**2
def random_point_around(p, k=1):
# WARNING: This is not uniform around p but we can live with it
R = rng.uniform(radius, 2*radius, k)
T = rng.uniform(0, 2*np.pi, k)
P = np.empty((k, 2))
P[:, 0] = p[0]+R*np.sin(T)
P[:, 1] = p[1]+R*np.cos(T)
return P
def in_limits(p):
return 0 <= p[0] < width and 0 <= p[1] < height
def neighborhood(shape, index, n=2):
row, col = index
row0, row1 = max(row-n, 0), min(row+n+1, shape[0])
col0, col1 = max(col-n, 0), min(col+n+1, shape[1])
I = np.dstack(np.mgrid[row0:row1, col0:col1])
I = I.reshape(I.size//2, 2).tolist()
I.remove([row, col])
return I
def in_neighborhood(p):
i, j = int(p[0]/cellsize), int(p[1]/cellsize)
if M[i, j]:
return True
for (i, j) in N[(i, j)]:
if M[i, j] and squared_distance(p, P[i, j]) < squared_radius:
return True
return False
def add_point(p):
points.append(p)
i, j = int(p[0]/cellsize), int(p[1]/cellsize)
P[i, j], M[i, j] = p, True
# When given a seed, we use a private random generator in order to not
# disturb the default and global random generator
if seed is not None:
from numpy.random.mtrand import RandomState
rng = RandomState(seed=seed)
else:
rng = np.random
# Get width and height
width, height = shape
    # Here 2 corresponds to the number of dimensions
cellsize = radius/np.sqrt(2)
rows = int(np.ceil(width/cellsize))
cols = int(np.ceil(height/cellsize))
# Squared radius because we'll compare squared distance
squared_radius = radius*radius
# Positions cells
P = np.zeros((rows, cols, 2), dtype=np.float32)
M = np.zeros((rows, cols), dtype=bool)
# Cache generation for neighborhood
N = {}
for i in range(rows):
for j in range(cols):
N[(i, j)] = neighborhood(M.shape, (i, j), 2)
points = []
add_point((rng.uniform(width), rng.uniform(height)))
while len(points):
i = np.random.randint(len(points))
p = points[i]
del points[i]
Q = random_point_around(p, k)
for q in Q:
if in_limits(q) and not in_neighborhood(q):
add_point(q)
return P[M]
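# Usage sketch for blue_noise() above: sample the unit square and check that no
# two returned points are closer than `radius`. This assumes numpy is imported
# as np, as the function body itself does.
_samples = blue_noise((1.0, 1.0), radius=0.05, seed=42)
_min_dist = min(
    np.hypot(p[0] - q[0], p[1] - q[1])
    for i, p in enumerate(_samples)
    for q in _samples[i + 1:]
)
print(len(_samples), "points, min pairwise distance:", _min_dist)  # expected >= radius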
|
32,502 |
def log_request_errors(response, **kwargs):
"""A requests hook for logging errors."""
def _truncate(text):
"""Truncate a string to a max of 1024 chars."""
if len(text) > 1024:
return f"{text[:1024]}...({len(text) - 1024} truncated)"
else:
return text
if response.status_code >= 400:
try:
response_text = _truncate(response.text)
except Exception:
response_text = "**No response text given**"
LOG(f"[{response.status_code}] {response.url}: {response_text}")
return response
|
def log_request_errors(response, **kwargs):
"""A requests hook for logging errors."""
def _truncate(text):
"""Truncate a string to a max of 1024 chars."""
if len(text) > 1024:
return f"{text[:1024]}...({len(text) - 1024} truncated)"
else:
return text
if response.status_code >= 400:
try:
response_text = _truncate(response.text)
except Exception:
response_text = "**No response text given**"
demisto.info(f"[{response.status_code}] {response.url}: {response_text}")
return response
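# Sketch of how the hook above is wired up: requests invokes every callable
# registered under the "response" event with the Response object. The URL is a
# placeholder, and running this still requires the integration context that
# provides the demisto logging object used inside the hook.
import requests
_session = requests.Session()
_session.hooks["response"].append(log_request_errors)
_session.get("https://example.com/api/does-not-exist")  # a 4xx/5xx response gets logged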
|
50,503 |
def parse_base_file():
r = requests.get(
url=PLOS_METRICS_INITIAL_FILE_DOWNLOAD_URL,
headers={'Content-type': 'application/CSV'},
auth=bearer_token_auth(PLOS_METRICS_OSF_TOKEN)
)
csvr = csv.DictReader(ensure_str(r.content).splitlines(), fieldnames=COL_HEADERS)
headers = next(csvr)
for k, v in headers.items():
assert k == v, f'Unexpected header row: {k} and {v} do not match'
return csvr
|
def parse_base_file():
r = requests.get(
url=PLOS_METRICS_INITIAL_FILE_DOWNLOAD_URL,
headers={'Content-type': 'application/CSV'},
auth=bearer_token_auth(PLOS_METRICS_OSF_TOKEN)
)
csvr = csv.DictReader(ensure_str(r.content).splitlines())
assert csvr.fieldnames == COL_HEADERS, f'Unexpected headers: expected {COL_HEADERS}, got {csvr.fieldnames}'
return csvr
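# A small csv.DictReader sketch of the change above: when no `fieldnames`
# argument is given, the first row is consumed as the header, so the manual
# next()/header-comparison step is no longer needed. The data is illustrative.
import csv
import io
_text = "doi,title\n10.1371/x,Example\n"
_reader = csv.DictReader(io.StringIO(_text))
print(_reader.fieldnames)  # ['doi', 'title'] -- taken from the first row
print(next(_reader))       # the first data row, keyed by those field names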
|
41,821 |
def save_best_config(input_config_file: str, output_config_file: str, study: optuna.Study) -> None:
"""Save resulting jsonnet replacing masks with best params in the experiment.
Args:
input_config_file:
Config file used in AllenNLPExecutor.
output_config_file:
            Path to output an optimized jsonnet config file.
        study:
            An optimized study instance.
``optimized`` means it requires ``study.best_trial_id`` is not empty.
"""
best_config = _save_best_config(input_config_file, study)
with open(output_config_file, "w") as f:
json.dump(best_config, f, indent=4)
|
def save_best_config(input_config_file: str, output_config_file: str, study: optuna.Study) -> None:
"""Save resulting jsonnet replacing masks with best params in the experiment.
Args:
input_config_file:
Config file used in AllenNLPExecutor.
output_config_file:
            Path to output an optimized jsonnet config file.
        study:
            An optimized study instance.
``optimized`` means it requires ``study.best_trial_id`` is not empty.
"""
best_config = _dump_best_config(input_config_file, study)
with open(output_config_file, "w") as f:
json.dump(best_config, f, indent=4)
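# A hedged sketch of the overall pattern (not Optuna's internal helper): run a
# study, overlay `study.best_params` onto a plain-dict stand-in for the loaded
# config, and dump it as JSON. Parameter names and file paths are illustrative.
import json
import optuna
def _objective(trial):
    return (trial.suggest_float("dropout", 0.0, 0.5) - 0.2) ** 2
_study = optuna.create_study(direction="minimize")
_study.optimize(_objective, n_trials=10)
_config = {"model": {"dropout": None}}       # stand-in for the parsed jsonnet
_config["model"]["dropout"] = _study.best_params["dropout"]
with open("best_config_sketch.json", "w") as f:
    json.dump(_config, f, indent=4)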
|
11,876 |
def getrgb(color):
"""
Convert a color string to an RGB tuple. If the string cannot be parsed,
this function raises a :py:exc:`ValueError` exception.
.. versionadded:: 1.1.4
:param color: A color string
:return: ``(red, green, blue[, alpha])``
"""
color = color.lower()
rgb = colormap.get(color, None)
if rgb:
if isinstance(rgb, tuple):
return rgb
colormap[color] = rgb = getrgb(rgb)
return rgb
# check for known string formats
if re.match("#[a-f0-9]{3}$", color):
return (int(color[1] * 2, 16), int(color[2] * 2, 16), int(color[3] * 2, 16))
if re.match("#[a-f0-9]{4}$", color):
return (
int(color[1] * 2, 16),
int(color[2] * 2, 16),
int(color[3] * 2, 16),
int(color[4] * 2, 16),
)
if re.match("#[a-f0-9]{6}$", color):
return (int(color[1:3], 16), int(color[3:5], 16), int(color[5:7], 16))
if re.match("#[a-f0-9]{8}$", color):
return (
int(color[1:3], 16),
int(color[3:5], 16),
int(color[5:7], 16),
int(color[7:9], 16),
)
m = re.match(r"rgb\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color)
if m:
return (int(m.group(1)), int(m.group(2)), int(m.group(3)))
m = re.match(r"rgb\(\s*(\d+)%\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color)
if m:
return (
int((int(m.group(1)) * 255) / 100.0 + 0.5),
int((int(m.group(2)) * 255) / 100.0 + 0.5),
int((int(m.group(3)) * 255) / 100.0 + 0.5),
)
m = re.match(
r"hsl\(\s*(\d+\.?\d*)\s*,\s*(\d+\.?\d*)%\s*,\s*(\d+\.?\d*)%\s*\)$", color
)
if m:
from colorsys import hls_to_rgb
rgb = hls_to_rgb(
float(m.group(1)) / 360.0,
float(m.group(3)) / 100.0,
float(m.group(2)) / 100.0,
)
return (
int(rgb[0] * 255 + 0.5),
int(rgb[1] * 255 + 0.5),
int(rgb[2] * 255 + 0.5),
)
m = re.match(
r"hs[bv]\(\s*(\d+\.?\d*)\s*,\s*(\d+\.?\d*)%\s*,\s*(\d+\.?\d*)%\s*\)$", color
)
if m:
from colorsys import hsv_to_rgb
rgb = hsv_to_rgb(
float(m.group(1)) / 360.0,
float(m.group(2)) / 100.0,
float(m.group(3)) / 100.0,
)
return (
int(rgb[0] * 255 + 0.5),
int(rgb[1] * 255 + 0.5),
int(rgb[2] * 255 + 0.5),
)
m = re.match(r"rgba\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color)
if m:
return (int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4)))
raise ValueError(f"unknown color specifier: {repr(color)}")
|
def getrgb(color):
"""
Convert a color string to an RGB tuple. If the string cannot be parsed,
this function raises a :py:exc:`ValueError` exception.
.. versionadded:: 1.1.4
:param color: A color string
:return: ``(red, green, blue[, alpha])``
"""
color = color.lower()
rgb = colormap.get(color, None)
if rgb:
if isinstance(rgb, tuple):
return rgb
colormap[color] = rgb = getrgb(rgb)
return rgb
# check for known string formats
if re.match("#[a-f0-9]{3}$", color):
return (int(color[1] * 2, 16), int(color[2] * 2, 16), int(color[3] * 2, 16))
if re.match("#[a-f0-9]{4}$", color):
return (
int(color[1] * 2, 16),
int(color[2] * 2, 16),
int(color[3] * 2, 16),
int(color[4] * 2, 16),
)
if re.match("#[a-f0-9]{6}$", color):
return (int(color[1:3], 16), int(color[3:5], 16), int(color[5:7], 16))
if re.match("#[a-f0-9]{8}$", color):
return (
int(color[1:3], 16),
int(color[3:5], 16),
int(color[5:7], 16),
int(color[7:9], 16),
)
m = re.match(r"rgb\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color)
if m:
return (int(m.group(1)), int(m.group(2)), int(m.group(3)))
m = re.match(r"rgb\(\s*(\d+)%\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color)
if m:
return (
int((int(m.group(1)) * 255) / 100.0 + 0.5),
int((int(m.group(2)) * 255) / 100.0 + 0.5),
int((int(m.group(3)) * 255) / 100.0 + 0.5),
)
m = re.match(
r"hsl\(\s*(\d+\.?\d*)\s*,\s*(\d+\.?\d*)%\s*,\s*(\d+\.?\d*)%\s*\)$", color
)
if m:
from colorsys import hls_to_rgb
rgb = hls_to_rgb(
float(m.group(1)) / 360.0,
float(m.group(3)) / 100.0,
float(m.group(2)) / 100.0,
)
return (
int(rgb[0] * 255 + 0.5),
int(rgb[1] * 255 + 0.5),
int(rgb[2] * 255 + 0.5),
)
m = re.match(
r"hs[bv]\(\s*(\d+\.?\d*)\s*,\s*(\d+\.?\d*)%\s*,\s*(\d+\.?\d*)%\s*\)$", color
)
if m:
from colorsys import hsv_to_rgb
rgb = hsv_to_rgb(
float(m.group(1)) / 360.0,
float(m.group(2)) / 100.0,
float(m.group(3)) / 100.0,
)
return (
int(rgb[0] * 255 + 0.5),
int(rgb[1] * 255 + 0.5),
int(rgb[2] * 255 + 0.5),
)
m = re.match(r"rgba\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color)
if m:
return (int(m.group(1)), int(m.group(2)), int(m.group(3)), int(m.group(4)))
raise ValueError(f"unknown color specifier: {color!r}")
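# Usage sketch for getrgb() above. The full module also provides a `colormap`
# table of named colors and imports `re`; a tiny stand-in table and the import
# are added here so the snippet is self-contained.
import re  # getrgb() resolves `re` from the module namespace
colormap = {"red": "#ff0000"}  # stand-in for the real named-color table
print(getrgb("red"))                   # (255, 0, 0)
print(getrgb("#0f0"))                  # (0, 255, 0)
print(getrgb("rgb(0, 0, 255)"))        # (0, 0, 255)
print(getrgb("hsl(0, 100%, 50%)"))     # (255, 0, 0)
print(getrgb("rgba(10, 20, 30, 40)"))  # (10, 20, 30, 40)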
|
14,090 |
def _explore(
df,
column=None,
cmap=None,
color=None,
m=None,
tiles="OpenStreetMap",
attr=None,
tooltip=True,
popup=False,
highlight=True,
categorical=False,
legend=True,
scheme=None,
k=5,
vmin=None,
vmax=None,
width="100%",
height="100%",
categories=None,
classification_kwds=None,
control_scale=True,
marker_type=None,
marker_kwds={},
style_kwds={},
highlight_kwds={},
missing_kwds={},
tooltip_kwds={},
popup_kwds={},
legend_kwds={},
**kwargs,
):
"""Interactive map based on GeoPandas and folium/leaflet.js
Generate an interactive leaflet map based on :class:`~geopandas.GeoDataFrame`
Parameters
----------
column : str, np.array, pd.Series (default None)
The name of the dataframe column, :class:`numpy.array`,
or :class:`pandas.Series` to be plotted. If :class:`numpy.array` or
:class:`pandas.Series` are used then it must have same length as dataframe.
cmap : str, matplotlib.Colormap, branca.colormap or function (default None)
The name of a colormap recognized by ``matplotlib``, a list-like of colors,
:class:`matplotlib.colors.Colormap`, a :class:`branca.colormap.ColorMap` or
function that returns a named color or hex based on the column
value, e.g.::
def my_colormap(value): # scalar value defined in 'column'
if value > 1:
return "green"
return "red"
color : str, array-like (default None)
Named color or a list-like of colors (named or hex).
m : folium.Map (default None)
Existing map instance on which to draw the plot.
tiles : str, xyzservices.TileProvider (default 'OpenStreetMap')
Map tileset to use. Can choose from this list of built-in tiles or pass
:class:`xyzservices.TileProvider`:
``["OpenStreetMap", "Stamen Terrain", “Stamen Toner", “Stamen Watercolor"
"CartoDB positron", “CartoDB dark_matter"]``
You can pass a custom tileset to Folium by passing a Leaflet-style URL
to the tiles parameter: ``http://{s}.yourtiles.com/{z}/{x}/{y}.png``.
Be sure to check their terms and conditions and to provide attribution with
the ``attr`` keyword.
attr : str (default None)
Map tile attribution; only required if passing custom tile URL.
tooltip : bool, str, int, list (default True)
Display GeoDataFrame attributes when hovering over the object.
Integer specifies first n columns to be included, ``True`` includes all
columns. ``False`` removes tooltip. Pass string or list of strings to specify a
column(s). Defaults to ``True``.
popup : bool, str, int, list (default False)
Input GeoDataFrame attributes for object displayed when clicking.
Integer specifies first n columns to be included, ``True`` includes all
        columns. ``False`` removes popup. Pass string or list of strings to specify a
column(s). Defaults to ``False``.
highlight : bool (default True)
Enable highlight functionality when hovering over a geometry.
categorical : bool (default False)
If ``False``, ``cmap`` will reflect numerical values of the
column being plotted. For non-numerical columns, this
will be set to True.
legend : bool (default True)
Plot a legend in choropleth plots.
Ignored if no ``column`` is given.
scheme : str (default None)
Name of a choropleth classification scheme (requires ``mapclassify`` >= 2.4.0).
A :func:`mapclassify.classify` will be used
under the hood. Supported are all schemes provided by ``mapclassify`` (e.g.
``'BoxPlot'``, ``'EqualInterval'``, ``'FisherJenks'``, ``'FisherJenksSampled'``,
``'HeadTailBreaks'``, ``'JenksCaspall'``, ``'JenksCaspallForced'``,
``'JenksCaspallSampled'``, ``'MaxP'``, ``'MaximumBreaks'``,
``'NaturalBreaks'``, ``'Quantiles'``, ``'Percentiles'``, ``'StdMean'``,
``'UserDefined'``). Arguments can be passed in ``classification_kwds``.
k : int (default 5)
Number of classes
vmin : None or float (default None)
Minimum value of ``cmap``. If ``None``, the minimum data value
in the column to be plotted is used. Cannot be higher than minimum data value.
vmax : None or float (default None)
Maximum value of ``cmap``. If ``None``, the maximum data value
in the column to be plotted is used. Cannot be lower than maximum data value.
width : pixel int or percentage string (default: '100%')
Width of the folium :class:`~folium.folium.Map`. If the argument
m is given explicitly, width is ignored.
height : pixel int or percentage string (default: '100%')
Height of the folium :class:`~folium.folium.Map`. If the argument
m is given explicitly, height is ignored.
categories : list-like
Ordered list-like object of categories to be used for categorical plot.
classification_kwds : dict (default None)
Keyword arguments to pass to mapclassify
control_scale : bool, (default True)
Whether to add a control scale on the map.
marker_type : str, folium.Circle, folium.CircleMarker, folium.Marker (default None)
Allowed string options are ('marker', 'circle', 'circle_marker')
marker_kwds: dict (default {})
Additional keywords to be passed to the selected ``marker_type``, e.g.:
radius : float
Radius of the circle, in meters (for ``'circle'``) or pixels
(for ``circle_marker``).
icon : folium.map.Icon
the :class:`folium.map.Icon` object to use to render the marker.
draggable : bool (default False)
Set to True to be able to drag the marker around the map.
style_kwds : dict (default {})
Additional style to be passed to folium ``style_function``:
stroke : bool (default True)
Whether to draw stroke along the path. Set it to ``False`` to
disable borders on polygons or circles.
color : str
Stroke color
weight : int
Stroke width in pixels
opacity : float (default 1.0)
Stroke opacity
fill : boolean (default True)
Whether to fill the path with color. Set it to ``False`` to
disable filling on polygons or circles.
fillColor : str
Fill color. Defaults to the value of the color option
fillOpacity : float (default 0.5)
Fill opacity.
Plus all supported by :func:`folium.vector_layers.path_options`.
highlight_kwds : dict (default {})
Style to be passed to folium highlight_function. Uses the same keywords
as ``style_kwds``. When empty, defaults to ``{"fillOpacity": 0.75}``.
tooltip_kwds : dict (default {})
Additional keywords to be passed to :class:`folium.features.GeoJsonTooltip`,
e.g. ``aliases``, ``labels``, or ``sticky``.
popup_kwds : dict (default {})
Additional keywords to be passed to :class:`folium.features.GeoJsonPopup`,
e.g. ``aliases`` or ``labels``.
legend_kwds : dict (default {})
Additional keywords to be passed to the legend.
Currently supported customisation:
caption : string
Custom caption of the legend. Defaults to the column name.
Additional accepted keywords when ``scheme`` is specified:
colorbar : bool (default True)
An option to control the style of the legend. If True, continuous
colorbar will be used. If False, categorical legend will be used for bins.
scale : bool (default True)
Scale bins along the colorbar axis according to the bin edges (True)
or use the equal length for each bin (False)
fmt : string (default "{:.2f}")
A formatting specification for the bin edges of the classes in the
legend. For example, to have no decimals: ``{"fmt": "{:.0f}"}``. Applies
if ``colorbar=False``.
labels : list-like
A list of legend labels to override the auto-generated labels.
Needs to have the same number of elements as the number of
classes (`k`). Applies if ``colorbar=False``.
interval : boolean (default False)
An option to control brackets from mapclassify legend.
If True, open/closed interval brackets are shown in the legend.
Applies if ``colorbar=False``.
max_labels : int, default 10
Maximum number of colorbar tick labels (requires branca>=0.5.0)
**kwargs : dict
Additional options to be passed on to the folium :class:`~folium.folium.Map`
or :class:`folium.features.GeoJson`.
Returns
-------
m : folium.folium.Map
folium :class:`~folium.folium.Map` instance
Examples
--------
>>> df = geopandas.read_file(geopandas.datasets.get_path("naturalearth_lowres"))
>>> df.head() # doctest: +SKIP
pop_est continent name iso_a3 \
gdp_md_est geometry
0 920938 Oceania Fiji FJI 8374.0 MULTIPOLY\
GON (((180.00000 -16.06713, 180.00000...
1 53950935 Africa Tanzania TZA 150600.0 POLYGON (\
(33.90371 -0.95000, 34.07262 -1.05982...
2 603253 Africa W. Sahara ESH 906.5 POLYGON (\
(-8.66559 27.65643, -8.66512 27.58948...
3 35623680 North America Canada CAN 1674000.0 MULTIPOLY\
GON (((-122.84000 49.00000, -122.9742...
4 326625791 North America United States of America USA 18560000.0 MULTIPOLY\
GON (((-122.84000 49.00000, -120.0000...
>>> df.explore("pop_est", cmap="Blues") # doctest: +SKIP
"""
try:
import branca as bc
import folium
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from mapclassify import classify
except ImportError:
raise ImportError(
"The 'folium', 'matplotlib' and 'mapclassify' packages are required for "
"'explore()'. You can install them using "
"'conda install -c conda-forge folium matplotlib mapclassify' "
"or 'pip install folium matplotlib mapclassify'."
)
gdf = df.copy()
# convert LinearRing to LineString
rings_mask = df.geom_type == "LinearRing"
if rings_mask.any():
gdf.geometry[rings_mask] = gdf.geometry[rings_mask].apply(
lambda g: LineString(g)
)
if gdf.crs is None:
kwargs["crs"] = "Simple"
tiles = None
elif not gdf.crs.equals(4326):
gdf = gdf.to_crs(4326)
# create folium.Map object
if m is None:
# Get bounds to specify location and map extent
bounds = gdf.total_bounds
location = kwargs.pop("location", None)
if location is None:
x = mean([bounds[0], bounds[2]])
y = mean([bounds[1], bounds[3]])
location = (y, x)
if "zoom_start" in kwargs.keys():
fit = False
else:
fit = True
else:
fit = False
# get a subset of kwargs to be passed to folium.Map
map_kwds = {i: kwargs[i] for i in kwargs.keys() if i in _MAP_KWARGS}
# xyzservices.providers object
if hasattr(tiles, "build_url"):
attr = attr if attr else tiles.attribution
map_kwds["min_zoom"] = tiles.get("min_zoom", 0)
map_kwds["max_zoom"] = tiles.get("max_zoom", 18)
tiles = tiles.build_url()
m = folium.Map(
location=location,
control_scale=control_scale,
tiles=tiles,
attr=attr,
width=width,
height=height,
**map_kwds,
)
# fit bounds to get a proper zoom level
if fit:
m.fit_bounds([[bounds[1], bounds[0]], [bounds[3], bounds[2]]])
for map_kwd in _MAP_KWARGS:
kwargs.pop(map_kwd, None)
nan_idx = None
if column is not None:
if pd.api.types.is_list_like(column):
if len(column) != gdf.shape[0]:
raise ValueError(
"The GeoDataframe and given column have different number of rows."
)
else:
column_name = "__plottable_column"
gdf[column_name] = column
column = column_name
elif pd.api.types.is_categorical_dtype(gdf[column]):
if categories is not None:
raise ValueError(
"Cannot specify 'categories' when column has categorical dtype"
)
categorical = True
elif gdf[column].dtype is np.dtype("O") or categories:
categorical = True
nan_idx = pd.isna(gdf[column])
if categorical:
cat = pd.Categorical(gdf[column][~nan_idx], categories=categories)
N = len(cat.categories)
cmap = cmap if cmap else "tab20"
# colormap exists in matplotlib
if cmap in plt.colormaps():
color = np.apply_along_axis(
colors.to_hex, 1, cm.get_cmap(cmap, N)(cat.codes)
)
legend_colors = np.apply_along_axis(
colors.to_hex, 1, cm.get_cmap(cmap, N)(range(N))
)
# colormap is matplotlib.Colormap
elif isinstance(cmap, colors.Colormap):
color = np.apply_along_axis(colors.to_hex, 1, cmap(cat.codes))
legend_colors = np.apply_along_axis(colors.to_hex, 1, cmap(range(N)))
# custom list of colors
elif pd.api.types.is_list_like(cmap):
if N > len(cmap):
cmap = cmap * (N // len(cmap) + 1)
color = np.take(cmap, cat.codes)
legend_colors = np.take(cmap, range(N))
else:
raise ValueError(
"'cmap' is invalid. For categorical plots, pass either valid "
"named matplotlib colormap or a list-like of colors."
)
elif callable(cmap):
# List of colors based on Branca colormaps or self-defined functions
color = list(map(lambda x: cmap(x), df[column]))
else:
vmin = gdf[column].min() if not vmin else vmin
vmax = gdf[column].max() if not vmax else vmax
if vmin > gdf[column].min():
warn(
"'vmin' cannot be higher than minimum value. "
"Setting vmin to minimum.",
UserWarning,
stacklevel=3,
)
vmin = gdf[column].min()
if vmax < gdf[column].max():
warn(
"'vmax' cannot be lower than maximum value. "
"Setting vmax to maximum.",
UserWarning,
stacklevel=3,
)
vmax = gdf[column].max()
# get bins
if scheme is not None:
if classification_kwds is None:
classification_kwds = {}
if "k" not in classification_kwds:
classification_kwds["k"] = k
binning = classify(
np.asarray(gdf[column][~nan_idx]), scheme, **classification_kwds
)
color = np.apply_along_axis(
colors.to_hex, 1, cm.get_cmap(cmap, k)(binning.yb)
)
else:
bins = np.linspace(vmin, vmax, 257)[1:]
binning = classify(
np.asarray(gdf[column][~nan_idx]), "UserDefined", bins=bins
)
color = np.apply_along_axis(
colors.to_hex, 1, cm.get_cmap(cmap, 256)(binning.yb)
)
# we cannot color default 'marker'
if marker_type is None:
marker_type = "circle"
# set default style
if "fillOpacity" not in style_kwds:
style_kwds["fillOpacity"] = 0.5
if "weight" not in style_kwds:
style_kwds["weight"] = 2
# specify color
if color is not None:
if (
isinstance(color, str)
and isinstance(gdf, geopandas.GeoDataFrame)
and color in gdf.columns
): # use existing column
def _style_color(x):
return {
"fillColor": x["properties"][color],
**style_kwds,
}
style_function = _style_color
else: # assign new column
if isinstance(gdf, geopandas.GeoSeries):
gdf = geopandas.GeoDataFrame(geometry=gdf)
if nan_idx is not None and nan_idx.any():
nan_color = missing_kwds.pop("color", None)
gdf["__folium_color"] = nan_color
gdf.loc[~nan_idx, "__folium_color"] = color
else:
gdf["__folium_color"] = color
stroke_color = style_kwds.pop("color", None)
if not stroke_color:
def _style_column(x):
return {
"fillColor": x["properties"]["__folium_color"],
"color": x["properties"]["__folium_color"],
**style_kwds,
}
style_function = _style_column
else:
def _style_stroke(x):
return {
"fillColor": x["properties"]["__folium_color"],
"color": stroke_color,
**style_kwds,
}
style_function = _style_stroke
else: # use folium default
def _style_default(x):
return {**style_kwds}
style_function = _style_default
if highlight:
if "fillOpacity" not in highlight_kwds:
highlight_kwds["fillOpacity"] = 0.75
def _style_highlight(x):
return {**highlight_kwds}
highlight_function = _style_highlight
else:
highlight_function = None
marker = marker_type
if marker_type is not None and isinstance(marker_type, str):
if marker_type == "marker":
marker = folium.Marker(**marker_kwds)
elif marker_type == "circle":
marker = folium.Circle(**marker_kwds)
elif marker_type == "circle_marker":
marker = folium.CircleMarker(**marker_kwds)
else:
raise ValueError(
"Only 'marker', 'circle', and 'circle_marker' are "
"supported as marker values"
)
# remove additional geometries
if isinstance(gdf, geopandas.GeoDataFrame):
non_active_geoms = [
name
for name, val in (gdf.dtypes == "geometry").items()
if val and name != gdf.geometry.name
]
gdf = gdf.drop(columns=non_active_geoms)
    # prepare tooltip and popup
if isinstance(gdf, geopandas.GeoDataFrame):
# add named index to the tooltip
if gdf.index.name is not None:
gdf = gdf.reset_index()
# specify fields to show in the tooltip
tooltip = _tooltip_popup("tooltip", tooltip, gdf, **tooltip_kwds)
popup = _tooltip_popup("popup", popup, gdf, **popup_kwds)
else:
tooltip = None
popup = None
# add dataframe to map
folium.GeoJson(
gdf.__geo_interface__,
tooltip=tooltip,
popup=popup,
marker=marker,
style_function=style_function,
highlight_function=highlight_function,
**kwargs,
).add_to(m)
if legend:
# NOTE: overlaps will be resolved in branca #88
caption = column if not column == "__plottable_column" else ""
caption = legend_kwds.pop("caption", caption)
if categorical:
categories = cat.categories.to_list()
legend_colors = legend_colors.tolist()
if nan_idx.any() and nan_color:
categories.append(missing_kwds.pop("label", "NaN"))
legend_colors.append(nan_color)
_categorical_legend(m, caption, categories, legend_colors)
elif column is not None:
cbar = legend_kwds.pop("colorbar", True)
colormap_kwds = {}
if "max_labels" in legend_kwds:
colormap_kwds["max_labels"] = legend_kwds.pop("max_labels")
if scheme:
cb_colors = np.apply_along_axis(
colors.to_hex, 1, cm.get_cmap(cmap, binning.k)(range(binning.k))
)
if cbar:
if legend_kwds.pop("scale", True):
index = [vmin] + binning.bins.tolist()
else:
index = None
colorbar = bc.colormap.StepColormap(
cb_colors,
vmin=vmin,
vmax=vmax,
caption=caption,
index=index,
**colormap_kwds,
)
else:
fmt = legend_kwds.pop("fmt", "{:.2f}")
if "labels" in legend_kwds:
categories = legend_kwds["labels"]
else:
categories = binning.get_legend_classes(fmt)
show_interval = legend_kwds.pop("interval", False)
if not show_interval:
categories = [c[1:-1] for c in categories]
if nan_idx.any() and nan_color:
categories.append(missing_kwds.pop("label", "NaN"))
cb_colors = np.append(cb_colors, nan_color)
_categorical_legend(m, caption, categories, cb_colors)
else:
if isinstance(cmap, bc.colormap.ColorMap):
colorbar = cmap
else:
mp_cmap = cm.get_cmap(cmap)
cb_colors = np.apply_along_axis(
colors.to_hex, 1, mp_cmap(range(mp_cmap.N))
)
# linear legend
if mp_cmap.N > 20:
colorbar = bc.colormap.LinearColormap(
cb_colors,
vmin=vmin,
vmax=vmax,
caption=caption,
**colormap_kwds,
)
# steps
else:
colorbar = bc.colormap.StepColormap(
cb_colors,
vmin=vmin,
vmax=vmax,
caption=caption,
**colormap_kwds,
)
if cbar:
if nan_idx.any() and nan_color:
_categorical_legend(
m, "", [missing_kwds.pop("label", "NaN")], [nan_color]
)
m.add_child(colorbar)
return m
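# Usage sketch mirroring the docstring example above: _explore() is normally
# reached as GeoDataFrame.explore, but the module-level function can be called
# directly. Requires geopandas plus the optional folium, matplotlib and
# mapclassify dependencies; the output path is illustrative.
import geopandas
_world = geopandas.read_file(geopandas.datasets.get_path("naturalearth_lowres"))
_map = _explore(_world, column="pop_est", cmap="Blues")
_map.save("pop_est_map.html")  # folium.Map objects can be written out as HTML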
|
def _explore(
df,
column=None,
cmap=None,
color=None,
m=None,
tiles="OpenStreetMap",
attr=None,
tooltip=True,
popup=False,
highlight=True,
categorical=False,
legend=True,
scheme=None,
k=5,
vmin=None,
vmax=None,
width="100%",
height="100%",
categories=None,
classification_kwds=None,
control_scale=True,
marker_type=None,
marker_kwds={},
style_kwds={},
highlight_kwds={},
missing_kwds={},
tooltip_kwds={},
popup_kwds={},
legend_kwds={},
**kwargs,
):
"""Interactive map based on GeoPandas and folium/leaflet.js
Generate an interactive leaflet map based on :class:`~geopandas.GeoDataFrame`
Parameters
----------
column : str, np.array, pd.Series (default None)
The name of the dataframe column, :class:`numpy.array`,
or :class:`pandas.Series` to be plotted. If :class:`numpy.array` or
:class:`pandas.Series` are used then it must have same length as dataframe.
cmap : str, matplotlib.Colormap, branca.colormap or function (default None)
The name of a colormap recognized by ``matplotlib``, a list-like of colors,
:class:`matplotlib.colors.Colormap`, a :class:`branca.colormap.ColorMap` or
function that returns a named color or hex based on the column
value, e.g.::
def my_colormap(value): # scalar value defined in 'column'
if value > 1:
return "green"
return "red"
color : str, array-like (default None)
Named color or a list-like of colors (named or hex).
m : folium.Map (default None)
Existing map instance on which to draw the plot.
tiles : str, xyzservices.TileProvider (default 'OpenStreetMap')
Map tileset to use. Can choose from this list of built-in tiles or pass
:class:`xyzservices.TileProvider`:
``["OpenStreetMap", "Stamen Terrain", “Stamen Toner", “Stamen Watercolor"
"CartoDB positron", “CartoDB dark_matter"]``
You can pass a custom tileset to Folium by passing a Leaflet-style URL
to the tiles parameter: ``http://{s}.yourtiles.com/{z}/{x}/{y}.png``.
Be sure to check their terms and conditions and to provide attribution with
the ``attr`` keyword.
attr : str (default None)
Map tile attribution; only required if passing custom tile URL.
tooltip : bool, str, int, list (default True)
Display GeoDataFrame attributes when hovering over the object.
Integer specifies first n columns to be included, ``True`` includes all
columns. ``False`` removes tooltip. Pass string or list of strings to specify a
column(s). Defaults to ``True``.
popup : bool, str, int, list (default False)
Input GeoDataFrame attributes for object displayed when clicking.
Integer specifies first n columns to be included, ``True`` includes all
        columns. ``False`` removes popup. Pass string or list of strings to specify a
column(s). Defaults to ``False``.
highlight : bool (default True)
Enable highlight functionality when hovering over a geometry.
categorical : bool (default False)
If ``False``, ``cmap`` will reflect numerical values of the
column being plotted. For non-numerical columns, this
will be set to True.
legend : bool (default True)
Plot a legend in choropleth plots.
Ignored if no ``column`` is given.
scheme : str (default None)
Name of a choropleth classification scheme (requires ``mapclassify`` >= 2.4.0).
A :func:`mapclassify.classify` will be used
under the hood. Supported are all schemes provided by ``mapclassify`` (e.g.
``'BoxPlot'``, ``'EqualInterval'``, ``'FisherJenks'``, ``'FisherJenksSampled'``,
``'HeadTailBreaks'``, ``'JenksCaspall'``, ``'JenksCaspallForced'``,
``'JenksCaspallSampled'``, ``'MaxP'``, ``'MaximumBreaks'``,
``'NaturalBreaks'``, ``'Quantiles'``, ``'Percentiles'``, ``'StdMean'``,
``'UserDefined'``). Arguments can be passed in ``classification_kwds``.
k : int (default 5)
Number of classes
vmin : None or float (default None)
Minimum value of ``cmap``. If ``None``, the minimum data value
in the column to be plotted is used. Cannot be higher than minimum data value.
vmax : None or float (default None)
Maximum value of ``cmap``. If ``None``, the maximum data value
in the column to be plotted is used. Cannot be lower than maximum data value.
width : pixel int or percentage string (default: '100%')
Width of the folium :class:`~folium.folium.Map`. If the argument
m is given explicitly, width is ignored.
height : pixel int or percentage string (default: '100%')
Height of the folium :class:`~folium.folium.Map`. If the argument
m is given explicitly, height is ignored.
categories : list-like
Ordered list-like object of categories to be used for categorical plot.
classification_kwds : dict (default None)
Keyword arguments to pass to mapclassify
control_scale : bool, (default True)
Whether to add a control scale on the map.
marker_type : str, folium.Circle, folium.CircleMarker, folium.Marker (default None)
Allowed string options are ('marker', 'circle', 'circle_marker')
marker_kwds: dict (default {})
Additional keywords to be passed to the selected ``marker_type``, e.g.:
radius : float
Radius of the circle, in meters (for ``'circle'``) or pixels
(for ``circle_marker``).
icon : folium.map.Icon
the :class:`folium.map.Icon` object to use to render the marker.
draggable : bool (default False)
Set to True to be able to drag the marker around the map.
style_kwds : dict (default {})
Additional style to be passed to folium ``style_function``:
stroke : bool (default True)
Whether to draw stroke along the path. Set it to ``False`` to
disable borders on polygons or circles.
color : str
Stroke color
weight : int
Stroke width in pixels
opacity : float (default 1.0)
Stroke opacity
fill : boolean (default True)
Whether to fill the path with color. Set it to ``False`` to
disable filling on polygons or circles.
fillColor : str
Fill color. Defaults to the value of the color option
fillOpacity : float (default 0.5)
Fill opacity.
Plus all supported by :func:`folium.vector_layers.path_options`.
highlight_kwds : dict (default {})
Style to be passed to folium highlight_function. Uses the same keywords
as ``style_kwds``. When empty, defaults to ``{"fillOpacity": 0.75}``.
tooltip_kwds : dict (default {})
Additional keywords to be passed to :class:`folium.features.GeoJsonTooltip`,
e.g. ``aliases``, ``labels``, or ``sticky``.
popup_kwds : dict (default {})
Additional keywords to be passed to :class:`folium.features.GeoJsonPopup`,
e.g. ``aliases`` or ``labels``.
legend_kwds : dict (default {})
Additional keywords to be passed to the legend.
Currently supported customisation:
caption : string
Custom caption of the legend. Defaults to the column name.
Additional accepted keywords when ``scheme`` is specified:
colorbar : bool (default True)
An option to control the style of the legend. If True, continuous
colorbar will be used. If False, categorical legend will be used for bins.
scale : bool (default True)
Scale bins along the colorbar axis according to the bin edges (True)
or use equal lengths for each bin (False)
fmt : string (default "{:.2f}")
A formatting specification for the bin edges of the classes in the
legend. For example, to have no decimals: ``{"fmt": "{:.0f}"}``. Applies
if ``colorbar=False``.
labels : list-like
A list of legend labels to override the auto-generated labels.
Needs to have the same number of elements as the number of
classes (`k`). Applies if ``colorbar=False``.
interval : boolean (default False)
An option to control brackets from mapclassify legend.
If True, open/closed interval brackets are shown in the legend.
Applies if ``colorbar=False``.
max_labels : int, default 10
Maximum number of colorbar tick labels (requires branca>=0.5.0)
**kwargs : dict
Additional options to be passed on to the folium :class:`~folium.folium.Map`
or :class:`folium.features.GeoJson`.
Returns
-------
m : folium.folium.Map
folium :class:`~folium.folium.Map` instance
Examples
--------
>>> df = geopandas.read_file(geopandas.datasets.get_path("naturalearth_lowres"))
>>> df.head() # doctest: +SKIP
pop_est continent name iso_a3 \
gdp_md_est geometry
0 920938 Oceania Fiji FJI 8374.0 MULTIPOLY\
GON (((180.00000 -16.06713, 180.00000...
1 53950935 Africa Tanzania TZA 150600.0 POLYGON (\
(33.90371 -0.95000, 34.07262 -1.05982...
2 603253 Africa W. Sahara ESH 906.5 POLYGON (\
(-8.66559 27.65643, -8.66512 27.58948...
3 35623680 North America Canada CAN 1674000.0 MULTIPOLY\
GON (((-122.84000 49.00000, -122.9742...
4 326625791 North America United States of America USA 18560000.0 MULTIPOLY\
GON (((-122.84000 49.00000, -120.0000...
>>> df.explore("pop_est", cmap="Blues") # doctest: +SKIP
"""
try:
import branca as bc
import folium
import matplotlib.cm as cm
import matplotlib.colors as colors
import matplotlib.pyplot as plt
from mapclassify import classify
except ImportError:
raise ImportError(
"The 'folium', 'matplotlib' and 'mapclassify' packages are required for "
"'explore()'. You can install them using "
"'conda install -c conda-forge folium matplotlib mapclassify' "
"or 'pip install folium matplotlib mapclassify'."
)
gdf = df.copy()
# convert LinearRing to LineString
rings_mask = df.geom_type == "LinearRing"
if rings_mask.any():
gdf.geometry[rings_mask] = gdf.geometry[rings_mask].apply(
lambda g: LineString(g)
)
if gdf.crs is None:
kwargs["crs"] = "Simple"
tiles = None
elif not gdf.crs.equals(4326):
gdf = gdf.to_crs(4326)
# create folium.Map object
if m is None:
# Get bounds to specify location and map extent
bounds = gdf.total_bounds
location = kwargs.pop("location", None)
if location is None:
x = mean([bounds[0], bounds[2]])
y = mean([bounds[1], bounds[3]])
location = (y, x)
if "zoom_start" in kwargs.keys():
fit = False
else:
fit = True
else:
fit = False
# get a subset of kwargs to be passed to folium.Map
map_kwds = {i: kwargs[i] for i in kwargs.keys() if i in _MAP_KWARGS}
# xyzservices.providers object
if hasattr(tiles, "build_url"):
attr = attr if attr else tiles.attribution
map_kwds["min_zoom"] = tiles.get("min_zoom", 0)
map_kwds["max_zoom"] = tiles.get("max_zoom", 18)
tiles = tiles.build_url()
m = folium.Map(
location=location,
control_scale=control_scale,
tiles=tiles,
attr=attr,
width=width,
height=height,
**map_kwds,
)
# fit bounds to get a proper zoom level
if fit:
m.fit_bounds([[bounds[1], bounds[0]], [bounds[3], bounds[2]]])
for map_kwd in _MAP_KWARGS:
kwargs.pop(map_kwd, None)
nan_idx = None
if column is not None:
if pd.api.types.is_list_like(column):
if len(column) != gdf.shape[0]:
raise ValueError(
"The GeoDataframe and given column have different number of rows."
)
else:
column_name = "__plottable_column"
gdf[column_name] = column
column = column_name
elif pd.api.types.is_categorical_dtype(gdf[column]):
if categories is not None:
raise ValueError(
"Cannot specify 'categories' when column has categorical dtype"
)
categorical = True
elif gdf[column].dtype is np.dtype("O") or categories:
categorical = True
nan_idx = pd.isna(gdf[column])
if categorical:
cat = pd.Categorical(gdf[column][~nan_idx], categories=categories)
N = len(cat.categories)
cmap = cmap if cmap else "tab20"
# colormap exists in matplotlib
if cmap in plt.colormaps():
color = np.apply_along_axis(
colors.to_hex, 1, cm.get_cmap(cmap, N)(cat.codes)
)
legend_colors = np.apply_along_axis(
colors.to_hex, 1, cm.get_cmap(cmap, N)(range(N))
)
# colormap is matplotlib.Colormap
elif isinstance(cmap, colors.Colormap):
color = np.apply_along_axis(colors.to_hex, 1, cmap(cat.codes))
legend_colors = np.apply_along_axis(colors.to_hex, 1, cmap(range(N)))
# custom list of colors
elif pd.api.types.is_list_like(cmap):
if N > len(cmap):
cmap = cmap * (N // len(cmap) + 1)
color = np.take(cmap, cat.codes)
legend_colors = np.take(cmap, range(N))
else:
raise ValueError(
"'cmap' is invalid. For categorical plots, pass either valid "
"named matplotlib colormap or a list-like of colors."
)
elif callable(cmap):
# List of colors based on Branca colormaps or self-defined functions
color = list(map(lambda x: cmap(x), df[column]))
else:
vmin = gdf[column].min() if not vmin else vmin
vmax = gdf[column].max() if not vmax else vmax
if vmin > gdf[column].min():
warn(
"'vmin' cannot be higher than minimum value. "
"Setting vmin to minimum.",
UserWarning,
stacklevel=3,
)
vmin = gdf[column].min()
if vmax < gdf[column].max():
warn(
"'vmax' cannot be lower than maximum value. "
"Setting vmax to maximum.",
UserWarning,
stacklevel=3,
)
vmax = gdf[column].max()
# get bins
if scheme is not None:
if classification_kwds is None:
classification_kwds = {}
if "k" not in classification_kwds:
classification_kwds["k"] = k
binning = classify(
np.asarray(gdf[column][~nan_idx]), scheme, **classification_kwds
)
color = np.apply_along_axis(
colors.to_hex, 1, cm.get_cmap(cmap, k)(binning.yb)
)
else:
bins = np.linspace(vmin, vmax, 257)[1:]
binning = classify(
np.asarray(gdf[column][~nan_idx]), "UserDefined", bins=bins
)
color = np.apply_along_axis(
colors.to_hex, 1, cm.get_cmap(cmap, 256)(binning.yb)
)
# we cannot color default 'marker'
if marker_type is None:
marker_type = "circle"
# set default style
if "fillOpacity" not in style_kwds:
style_kwds["fillOpacity"] = 0.5
if "weight" not in style_kwds:
style_kwds["weight"] = 2
# specify color
if color is not None:
if (
isinstance(color, str)
and isinstance(gdf, geopandas.GeoDataFrame)
and color in gdf.columns
): # use existing column
def _style_color(x):
return {
"fillColor": x["properties"][color],
**style_kwds,
}
style_function = _style_color
else: # assign new column
if isinstance(gdf, geopandas.GeoSeries):
gdf = geopandas.GeoDataFrame(geometry=gdf)
if nan_idx is not None and nan_idx.any():
nan_color = missing_kwds.pop("color", None)
gdf["__folium_color"] = nan_color
gdf.loc[~nan_idx, "__folium_color"] = color
else:
gdf["__folium_color"] = color
stroke_color = style_kwds.pop("color", None)
if not stroke_color:
def _style_column(x):
return {
"fillColor": x["properties"]["__folium_color"],
"color": x["properties"]["__folium_color"],
**style_kwds,
}
style_function = _style_column
else:
def _style_stroke(x):
return {
"fillColor": x["properties"]["__folium_color"],
"color": stroke_color,
**style_kwds,
}
style_function = _style_stroke
else: # use folium default
def _style_default(x):
return {**style_kwds}
style_function = _style_default
if highlight:
if "fillOpacity" not in highlight_kwds:
highlight_kwds["fillOpacity"] = 0.75
def _style_highlight(x):
return {**highlight_kwds}
highlight_function = _style_highlight
else:
highlight_function = None
marker = marker_type
if marker_type is not None and isinstance(marker_type, str):
if marker_type == "marker":
marker = folium.Marker(**marker_kwds)
elif marker_type == "circle":
marker = folium.Circle(**marker_kwds)
elif marker_type == "circle_marker":
marker = folium.CircleMarker(**marker_kwds)
else:
raise ValueError(
"Only 'marker', 'circle', and 'circle_marker' are "
"supported as marker values"
)
# remove additional geometries
if isinstance(gdf, geopandas.GeoDataFrame):
non_active_geoms = [
name
for name, val in (gdf.dtypes == "geometry").items()
if val and name != gdf.geometry.name
]
gdf = gdf.drop(columns=non_active_geoms)
# prepare tooltip and popup
if isinstance(gdf, geopandas.GeoDataFrame):
# add named index to the tooltip
if gdf.index.name is not None:
gdf = gdf.reset_index()
# specify fields to show in the tooltip
tooltip = _tooltip_popup("tooltip", tooltip, gdf, **tooltip_kwds)
popup = _tooltip_popup("popup", popup, gdf, **popup_kwds)
else:
tooltip = None
popup = None
# add dataframe to map
folium.GeoJson(
gdf.__geo_interface__,
tooltip=tooltip,
popup=popup,
marker=marker,
style_function=style_function,
highlight_function=highlight_function,
**kwargs,
).add_to(m)
if legend:
# NOTE: overlaps will be resolved in branca #88
caption = column if not column == "__plottable_column" else ""
caption = legend_kwds.pop("caption", caption)
if categorical:
categories = cat.categories.to_list()
legend_colors = legend_colors.tolist()
if nan_idx.any() and nan_color:
categories.append(missing_kwds.pop("label", "NaN"))
legend_colors.append(nan_color)
_categorical_legend(m, caption, categories, legend_colors)
elif column is not None:
cbar = legend_kwds.pop("colorbar", True)
colormap_kwds = {}
if "max_labels" in legend_kwds:
colormap_kwds["max_labels"] = legend_kwds.pop("max_labels")
if scheme:
cb_colors = np.apply_along_axis(
colors.to_hex, 1, cm.get_cmap(cmap, binning.k)(range(binning.k))
)
if cbar:
if legend_kwds.pop("scale", True):
index = [vmin] + binning.bins.tolist()
else:
index = None
colorbar = bc.colormap.StepColormap(
cb_colors,
vmin=vmin,
vmax=vmax,
caption=caption,
index=index,
**colormap_kwds,
)
else:
fmt = legend_kwds.pop("fmt", "{:.2f}")
if "labels" in legend_kwds:
categories = legend_kwds["labels"]
else:
categories = binning.get_legend_classes(fmt)
show_interval = legend_kwds.pop("interval", False)
if not show_interval:
categories = [c[1:-1] for c in categories]
if nan_idx.any() and nan_color:
categories.append(missing_kwds.pop("label", "NaN"))
cb_colors = np.append(cb_colors, nan_color)
_categorical_legend(m, caption, categories, cb_colors)
else:
if isinstance(cmap, bc.colormap.ColorMap):
colorbar = cmap
else:
mp_cmap = cm.get_cmap(cmap)
cb_colors = np.apply_along_axis(
colors.to_hex, 1, mp_cmap(range(mp_cmap.N))
)
# linear legend
if mp_cmap.N > 20:
colorbar = bc.colormap.LinearColormap(
cb_colors,
vmin=vmin,
vmax=vmax,
caption=caption,
**colormap_kwds,
)
# steps
else:
colorbar = bc.colormap.StepColormap(
cb_colors,
vmin=vmin,
vmax=vmax,
caption=caption,
**colormap_kwds,
)
if cbar:
if nan_idx.any() and nan_color:
_categorical_legend(
m, "", [missing_kwds.pop("label", "NaN")], [nan_color]
)
m.add_child(colorbar)
return m
|
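A minimal usage sketch for the `explore()` implementation above, assuming geopandas is installed together with folium, matplotlib and mapclassify. The dataset and column come from the docstring example; every other value is illustrative.

import geopandas

# Load the example dataset referenced in the docstring above.
world = geopandas.read_file(geopandas.datasets.get_path("naturalearth_lowres"))

# Classified choropleth: NaturalBreaks bins, a categorical legend instead of a
# colorbar, thin outlines and semi-transparent fills via style_kwds.
m = world.explore(
    column="pop_est",
    scheme="NaturalBreaks",
    k=5,
    cmap="Blues",
    legend_kwds={"colorbar": False, "fmt": "{:.0f}"},
    style_kwds={"weight": 1, "fillOpacity": 0.6},
    tooltip=["name", "pop_est"],
)
m.save("pop_est_map.html")  # the returned folium.Map can also be displayed in a notebook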
8,410 |
def identify_jwst_s2d_fits(origin, *args, **kwargs):
"""
Check whether the given file is a JWST s2d spectral data product.
"""
is_jwst = _identify_jwst_fits(args[0])
with fits.open(args[0], memmap=False) as hdulist:
if is_jwst and "SCI" in hdulist:
return True
else:
return False
|
def identify_jwst_s2d_fits(origin, *args, **kwargs):
"""
Check whether the given file is a JWST s2d spectral data product.
"""
is_jwst = _identify_jwst_fits(args[0])
with fits.open(args[0], memmap=False) as hdulist:
return is_jwst and "SCI" in hdulist
|
25,773 |
def _import_from_importer(network, importer, basename, skip_time=False):
"""
Import network data from importer.
Parameters
----------
skip_time : bool
Skip importing time
"""
attrs = importer.get_attributes()
current_pypsa_version = [int(s) for s in network.pypsa_version.split(".")]
pypsa_version = None
if attrs is not None:
network.name = attrs.pop('name')
try:
pypsa_version = [int(s) for s in attrs.pop("pypsa_version").split(".")]
except KeyError:
pypsa_version = None
for attr, val in attrs.items():
setattr(network, attr, val)
##https://docs.python.org/3/tutorial/datastructures.html#comparing-sequences-and-other-types
if pypsa_version is None or pypsa_version < current_pypsa_version:
logger.warning(dedent("""
Importing PyPSA from older version of PyPSA than current version {}.
Please read the release notes at https://pypsa.org/doc/release_notes.html
carefully to prepare your network for import.
""").format(network.pypsa_version))
importer.pypsa_version = pypsa_version
importer.current_pypsa_version = current_pypsa_version
# if there is snapshots.csv, read in snapshot data
df = importer.get_snapshots()
# read in investment period weightings
investment_periods = importer.get_investment_periods()
if df is not None:
# check if imported snapshots have MultiIndex
snapshot_levels = set(["period", "snapshot"]).intersection(df.columns)
if snapshot_levels:
df.set_index(sorted(snapshot_levels), inplace=True)
network.set_snapshots(df.index)
cols = ['objective', 'generators', 'stores']
if not df.columns.intersection(cols).empty:
network.snapshot_weightings = df.reindex(index=network.snapshots,
columns=cols)
elif "weightings" in df.columns:
network.snapshot_weightings = df["weightings"].reindex(network.snapshots)
network.set_snapshots(df.index)
if investment_periods is not None:
network._investment_period_weightings = (
investment_periods.reindex(network.investment_period_weightings.index))
imported_components = []
# now read in other components; make sure buses and carriers come first
for component in ["Bus", "Carrier"] + sorted(network.all_components - {"Bus", "Carrier", "SubNetwork"}):
list_name = network.components[component]["list_name"]
df = importer.get_static(list_name)
if df is None:
if component == "Bus":
logger.error("Error, no buses found")
return
else:
continue
import_components_from_dataframe(network, df, component)
if not skip_time:
for attr, df in importer.get_series(list_name):
df.set_index(network.snapshots, inplace=True)
import_series_from_dataframe(network, df, component, attr)
logger.debug(getattr(network,list_name))
imported_components.append(list_name)
logger.info("Imported network{} has {}".format(" " + basename, ", ".join(imported_components)))
|
def _import_from_importer(network, importer, basename, skip_time=False):
"""
Import network data from importer.
Parameters
----------
skip_time : bool
Skip importing time
"""
attrs = importer.get_attributes()
current_pypsa_version = [int(s) for s in network.pypsa_version.split(".")]
pypsa_version = None
if attrs is not None:
network.name = attrs.pop('name')
try:
pypsa_version = [int(s) for s in attrs.pop("pypsa_version").split(".")]
except KeyError:
pypsa_version = None
for attr, val in attrs.items():
setattr(network, attr, val)
##https://docs.python.org/3/tutorial/datastructures.html#comparing-sequences-and-other-types
if pypsa_version is None or pypsa_version < current_pypsa_version:
logger.warning(dedent("""
Importing PyPSA from older version of PyPSA than current version {}.
Please read the release notes at https://pypsa.org/doc/release_notes.html
carefully to prepare your network for import.
""").format(network.pypsa_version))
importer.pypsa_version = pypsa_version
importer.current_pypsa_version = current_pypsa_version
# if there is snapshots.csv, read in snapshot data
df = importer.get_snapshots()
# read in investment period weightings
investment_periods = importer.get_investment_periods()
if df is not None:
# check if imported snapshots have MultiIndex
snapshot_levels = set(["period", "snapshot"]).intersection(df.columns)
if snapshot_levels:
df.set_index(sorted(snapshot_levels), inplace=True)
network.set_snapshots(df.index)
cols = ['objective', 'generators', 'stores']
if not df.columns.intersection(cols).empty:
network.snapshot_weightings = df.reindex(index=network.snapshots,
columns=cols)
elif "weightings" in df.columns:
network.snapshot_weightings = df["weightings"].reindex(network.snapshots)
network.set_snapshots(df.index)
if investment_periods is not None:
network.investment_period_weightings = (
investment_periods.reindex(network.investment_period_weightings.index))
imported_components = []
# now read in other components; make sure buses and carriers come first
for component in ["Bus", "Carrier"] + sorted(network.all_components - {"Bus", "Carrier", "SubNetwork"}):
list_name = network.components[component]["list_name"]
df = importer.get_static(list_name)
if df is None:
if component == "Bus":
logger.error("Error, no buses found")
return
else:
continue
import_components_from_dataframe(network, df, component)
if not skip_time:
for attr, df in importer.get_series(list_name):
df.set_index(network.snapshots, inplace=True)
import_series_from_dataframe(network, df, component, attr)
logger.debug(getattr(network,list_name))
imported_components.append(list_name)
logger.info("Imported network{} has {}".format(" " + basename, ", ".join(imported_components)))
|
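A small self-contained illustration of the version check in `_import_from_importer` above: version strings are split into lists of integers so that Python's element-wise sequence comparison (the comparison the in-code link refers to) orders them correctly. The version numbers here are made up.

# Parsing mirrors the function above; the strings are hypothetical.
current_pypsa_version = [int(s) for s in "0.19.3".split(".")]   # -> [0, 19, 3]
imported_pypsa_version = [int(s) for s in "0.17.1".split(".")]  # -> [0, 17, 1]

# Integer lists compare element by element, so 0.17.1 < 0.19.3 as intended,
# whereas comparing the raw strings character by character gets this wrong:
assert imported_pypsa_version < current_pypsa_version
assert not ("0.9.0" < "0.19.3")  # lexicographic string comparison fails here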
18,016 |
def add_pattern_exemptions(line, codes):
"""Add a flake8 exemption to a line."""
if line.startswith('#'):
return line
line = line.rstrip('\n')
# Line is already ignored
if line.endswith('# noqa'):
return line + '\n'
orig_len = len(line)
codes = set(codes)
# don't add E501 unless the line is actually too long, as it can mask
# other errors like trailing whitespace
if orig_len <= 79 and "E501" in codes:
codes.remove("E501")
if not codes:
return line + "\n"
exemptions = ','.join(sorted(codes))
# append exemption to line
if '# noqa: ' in line:
line += ',{0}'.format(exemptions)
elif line: # ignore noqa on empty lines
line += ' # noqa: {0}'.format(exemptions)
# if THIS made the line too long, add an exemption for that
if len(line) > max_line_length and orig_len <= max_line_length:
line += ',E501'
return line + '\n'
|
def add_pattern_exemptions(line, codes):
"""Add a flake8 exemption to a line."""
if line.startswith('#'):
return line
line = line.rstrip('\n')
# Line is already ignored
if line.endswith('# noqa'):
return line + '\n'
orig_len = len(line)
codes = set(codes)
# don't add E501 unless the line is actually too long, as it can mask
# other errors like trailing whitespace
if orig_len <= max_line_length and "E501" in codes:
codes.remove("E501")
if not codes:
return line + "\n"
exemptions = ','.join(sorted(codes))
# append exemption to line
if '# noqa: ' in line:
line += ',{0}'.format(exemptions)
elif line: # ignore noqa on empty lines
line += ' # noqa: {0}'.format(exemptions)
# if THIS made the line too long, add an exemption for that
if len(line) > max_line_length and orig_len <= max_line_length:
line += ',E501'
return line + '\n'
|
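A quick sketch of how the modified `add_pattern_exemptions` behaves, assuming the function above is in scope and that its module defines `max_line_length` (the function reads that name as a global); the input line and codes are made up.

max_line_length = 79  # module-level setting the function above relies on

line = "x = frobnicate(value)\n"  # hypothetical flake8-flagged line
fixed = add_pattern_exemptions(line, {"E265", "E501"})
# E501 is dropped because the line already fits within max_line_length,
# so only the remaining code is appended:
assert fixed == "x = frobnicate(value) # noqa: E265\n"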
59,300 |
def assemble(asm):
if asm in assembly_cache:
return binascii.unhexlify(assembly_cache[asm])
return binascii.unhexlify(_ks_assemble(asm))
|
def assemble(asm: str) -> bytes:
"""
Assemble the given string.
An assembly cache is first checked, and if there is no entry there, then Keystone is used.
"""
if asm in assembly_cache:
return binascii.unhexlify(assembly_cache[asm])
return binascii.unhexlify(_ks_assemble(asm))
|
22,363 |
def make_same_length(list1, list2):
# If either list is 1 item, we'll append to it until its length is the same as the other.
if len(list1) == 1:
for _i in range(1, len(list2)):
list1.append(list1[0])
elif len(list2) == 1:
for _i in range(1, len(list1)):
list2.append(list2[0])
return list1, list2
|
def make_same_length(list1, list2):
# If either list is 1 item, we'll append to it until its length is the same as the other.
if len(list1) == 1:
for _ in range(1, len(list2)):
list1.append(list1[0])
elif len(list2) == 1:
for _ in range(1, len(list1)):
list2.append(list2[0])
return list1, list2
|
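For reference, a tiny usage example of `make_same_length` above; the inputs are arbitrary.

a, b = make_same_length([1], [4, 5, 6])
# The single-element list is padded in place by repeating its only item.
assert a == [1, 1, 1] and b == [4, 5, 6]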
31,862 |
def convert_date_to_unix(date_str, date_format="%m/%d/%Y"):
"""
Convert a given string with MM/DD/YYYY format to millis since epoch
"""
date = datetime.strptime(date_str, date_format)
return int((date - datetime.utcfromtimestamp(0)).total_seconds() * 1000)
|
def convert_date_to_unix(date_str, date_format="%m/%d/%Y"):
"""
Convert the given string in the given format (by default - MM/DD/YYYY) to millis since epoch
"""
date = datetime.strptime(date_str, date_format)
return int((date - datetime.utcfromtimestamp(0)).total_seconds() * 1000)
|
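A short usage example for `convert_date_to_unix` above; the dates are arbitrary but easy to check by hand (2020-01-01 00:00 UTC is 1,577,836,800 seconds after the epoch).

assert convert_date_to_unix("01/01/2020") == 1577836800000
# A different format string can be passed explicitly:
assert convert_date_to_unix("2020-01-01", date_format="%Y-%m-%d") == 1577836800000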
30,455 |
def get_self_deployed_token():
if not (AUTH_ID and SELF_TENANT_ID and APP_SECRET):
return_error('You must provide the Tenant ID, Application ID and Client Secret.')
integration_context = demisto.getIntegrationContext()
if integration_context and integration_context['token_expiration_time']:
token_expiration_time = integration_context['token_expiration_time']
now = int(time.time())
if token_expiration_time < now:
return integration_context['token']
url = 'https://login.windows.net/{}/oauth2/token'.format(SELF_TENANT_ID)
resource_app_id_uri = 'https://api.securitycenter.windows.com'
data = {
'resource': resource_app_id_uri,
'client_id': APP_ID,
'client_secret': APP_SECRET,
'grant_type': 'client_credentials'
}
response = requests.post(url, data, verify=USE_SSL)
body = response.json()
if response.status_code != 200:
return_error('Error in Microsoft authorization: {}'.format(str(body)))
demisto.setIntegrationContext({
'token_expiration_time': body['expires_on'],
'token': body['access_token']
})
return body['access_token']
|
def get_self_deployed_token():
if not (AUTH_ID and SELF_TENANT_ID and APP_SECRET):
return_error('You must provide the Tenant ID, Application ID and Client Secret.')
integration_context = demisto.getIntegrationContext()
if integration_context and integration_context['token_expiration_time']:
token_expiration_time = integration_context['token_expiration_time']
now = int(time.time())
if token_expiration_time < now:
return integration_context['token']
url = 'https://login.windows.net/{}/oauth2/token'.format(SELF_TENANT_ID)
resource_app_id_uri = 'https://api.securitycenter.windows.com'
data = {
'resource': resource_app_id_uri,
'client_id': APP_ID,
'client_secret': APP_SECRET,
'grant_type': 'client_credentials'
}
response = requests.post(url, data, verify=USE_SSL)
body = response.json()
if response.status_code != 200:
return_error('Error in Microsoft authorization: {}'.format(str(body)))
demisto.setIntegrationContext({
'token_expiration_time': body['expires_on'],
'token': body['access_token']
})
return body.get('access_token')
|
31,206 |
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
token = demisto.params().get('token')
# get the service API url
base_url = urljoin(demisto.params()['url'], '/api/rest')
verify_certificate = not demisto.params().get('insecure', False)
# How much time before the first fetch to retrieve incidents
first_fetch_time = demisto.params().get('fetch_time', '3 days').strip()
proxy = demisto.params().get('proxy', False)
headers = {
"Authorization": token
}
LOG(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
proxy=proxy,
headers=headers
)
args = demisto.args()
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
demisto.results(result)
elif demisto.command() == 'fetch-incidents':
# Set and define the fetch incidents command to run after activated via integration settings.
next_run, incidents = fetch_incidents(
client=client,
last_run=demisto.getLastRun(),
first_fetch_time=first_fetch_time)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == 'mantis-get-issue-by-id':
mantis_get_issue_by_id_command(client, args)
elif demisto.command() == 'mantis-get-issues':
mantis_get_all_issues_command(client, args)
elif demisto.command() == 'mantis-create-issue':
mantis_create_issue_command(client, args)
elif demisto.command() == 'mantis-add-note':
matis_create_note_command(client, args)
elif demisto.command() == 'mantis-close-issue':
mantis_close_issue_command(client, args)
# Log exceptions
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
token = demisto.params().get('token')
# get the service API url
base_url = urljoin(demisto.params()['url'], '/api/rest')
verify_certificate = not demisto.params().get('insecure', False)
# How much time before the first fetch to retrieve incidents
first_fetch_time = demisto.params().get('fetch_time', '3 days').strip()
proxy = demisto.params().get('proxy', False)
headers = {
"Authorization": token
}
LOG(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
proxy=proxy,
headers=headers
)
args = demisto.args()
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
return_results(test_module(client))
elif demisto.command() == 'fetch-incidents':
# Set and define the fetch incidents command to run after activated via integration settings.
next_run, incidents = fetch_incidents(
client=client,
last_run=demisto.getLastRun(),
first_fetch_time=first_fetch_time)
demisto.setLastRun(next_run)
demisto.incidents(incidents)
elif demisto.command() == 'mantis-get-issue-by-id':
mantis_get_issue_by_id_command(client, args)
elif demisto.command() == 'mantis-get-issues':
mantis_get_all_issues_command(client, args)
elif demisto.command() == 'mantis-create-issue':
mantis_create_issue_command(client, args)
elif demisto.command() == 'mantis-add-note':
matis_create_note_command(client, args)
elif demisto.command() == 'mantis-close-issue':
mantis_close_issue_command(client, args)
# Log exceptions
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
57,661 |
def get_endpoint_context(res=None, endpoint_id=None):
if res is None:
res = http_get('/endpoints/{}'.format(endpoint_id)).get('data', [])
endpoint_context = []
for endpoint in res:
endpoint_attributes = endpoint.get('attributes', {})
current_endpoint_context = {
'Hostname': endpoint_attributes.get('hostname'),
'ID': endpoint.get('id'),
'OS': endpoint_attributes.get('platform'),
'OSVersion': endpoint_attributes.get('operating_system'),
'IsIsolated': endpoint_attributes.get('is_isolated'),
'IsDecommissioned': endpoint_attributes.get('is_decommissioned'),
}
ip_addresses = []
mac_addresses = []
for address in endpoint_attributes.get('endpoint_network_addresses', []):
address_attributes = address.get('attributes', {})
ip_address_object = address_attributes.get('ip_address', {})
if ip_address_object:
ip_address_attributes = ip_address_object.get('attributes', {})
if ip_address_attributes:
ip_addresses.append(ip_address_attributes.get('ip_address'))
mac_address_object = address_attributes.get('mac_address', {})
if mac_address_object:
mac_address_attributes = mac_address_object.get('attributes', {})
if mac_address_attributes:
mac_addresses.append(mac_address_attributes.get('address'))
if ip_addresses:
current_endpoint_context['IPAddress'] = ip_addresses
if mac_addresses:
current_endpoint_context['MACAddress'] = mac_addresses
endpoint_context.append(current_endpoint_context)
return endpoint_context
|
def get_endpoint_context(res=None, endpoint_id=None):
if res is None:
res = http_get('/endpoints/{}'.format(endpoint_id)).get('data', [])
endpoint_context = []
for endpoint in res:
endpoint_attributes = endpoint.get('attributes', {})
current_endpoint_context = {
'Hostname': endpoint_attributes.get('hostname'),
'ID': endpoint.get('id'),
'OS': endpoint_attributes.get('platform'),
'OSVersion': endpoint_attributes.get('operating_system'),
'IsIsolated': endpoint_attributes.get('is_isolated'),
'IsDecommissioned': endpoint_attributes.get('is_decommissioned'),
}
ip_addresses = []
mac_addresses = []
for address in endpoint_attributes.get('endpoint_network_addresses', []):
address_attributes = address.get('attributes', {})
if address_attributes:
ip_address_object = address_attributes.get('ip_address', {})
if ip_address_object:
ip_address_attributes = ip_address_object.get('attributes', {})
if ip_address_attributes:
ip_addresses.append(ip_address_attributes.get('ip_address'))
mac_address_object = address_attributes.get('mac_address', {})
if mac_address_object:
mac_address_attributes = mac_address_object.get('attributes', {})
if mac_address_attributes:
mac_addresses.append(mac_address_attributes.get('address'))
if ip_addresses:
current_endpoint_context['IPAddress'] = ip_addresses
if mac_addresses:
current_endpoint_context['MACAddress'] = mac_addresses
endpoint_context.append(current_endpoint_context)
return endpoint_context
|
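A hedged sketch of why the added `if address_attributes:` guard in the modified `get_endpoint_context` matters: an endpoint record whose network-address `attributes` field is null would make the original version call `.get` on `None`. The payload below is entirely hypothetical and only mimics the shape the function expects.

res = [{
    "id": "ep-1",
    "attributes": {
        "hostname": "host-1",
        "platform": "windows",
        "endpoint_network_addresses": [
            {"attributes": None},  # skipped by the new guard; raises AttributeError in the original
            {"attributes": {
                "ip_address": {"attributes": {"ip_address": "10.0.0.5"}},
                "mac_address": {"attributes": {"address": "aa:bb:cc:dd:ee:ff"}},
            }},
        ],
    },
}]
ctx = get_endpoint_context(res=res)
assert ctx[0]["IPAddress"] == ["10.0.0.5"]
assert ctx[0]["MACAddress"] == ["aa:bb:cc:dd:ee:ff"]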
32,316 |
def main():
# Args is always stronger. Get last run even stronger
demisto_params = demisto.params() | demisto.args() | demisto.getLastRun()
events_to_add_per_request = demisto_params.get('events_to_add_per_request', 1000)
try:
events_to_add_per_request = int(events_to_add_per_request)
except ValueError:
events_to_add_per_request = 1000
headers = {'Authorization': f"Bearer {demisto_params['auth_credendtials']['password']}",
'Accept': 'application/vnd.github.v3+json'}
demisto_params['headers'] = headers
demisto_params['params'] = GithubParams(**demisto_params)
request = GithubEventsRequestConfig(**demisto_params)
options = IntegrationOptions.parse_obj(demisto_params)
client = GithubClient(request, options)
get_events = GithubGetEvents(client, options)
command = demisto.command()
try:
if command == 'test-module':
get_events.run()
return_results('ok')
elif command in ('github-get-events', 'fetch-events'):
events = get_events.run()
if command == 'fetch-events':
if events:
demisto.setLastRun(GithubGetEvents.get_last_run(events))
else:
send_events_to_xsiam([], 'github', demisto_params.get('product'))
elif command == 'github-get-events':
command_results = CommandResults(
readable_output=tableToMarkdown('Github Logs', events, headerTransform=pascalToSpace),
outputs_prefix='Github.Logs',
outputs_key_field='@timestamp',
outputs=events,
raw_response=events,
)
return_results(command_results)
while len(events) > 0:
send_events_to_xsiam(events[:events_to_add_per_request], 'github',
demisto_params.get('product'))
events = events[events_to_add_per_request:]
except Exception as e:
return_error(str(e))
|
def main():
# Args is always stronger. Get last run even stronger
demisto_params = demisto.params() | demisto.args() | demisto.getLastRun()
events_to_add_per_request = demisto_params.get('events_to_add_per_request', 1000)
try:
events_to_add_per_request = int(events_to_add_per_request)
except ValueError:
events_to_add_per_request = 1000
headers = {'Authorization': f"Bearer {demisto_params['auth_credendtials']['password']}",
'Accept': 'application/vnd.github.v3+json'}
demisto_params['headers'] = headers
demisto_params['params'] = GithubParams(**demisto_params)
request = GithubEventsRequestConfig(**demisto_params)
options = IntegrationOptions.parse_obj(demisto_params)
client = GithubClient(request, options)
get_events = GithubGetEvents(client, options)
command = demisto.command()
try:
if command == 'test-module':
get_events.run()
return_results('ok')
elif command in ('github-get-events', 'fetch-events'):
events = get_events.run()
if command == 'fetch-events':
if events:
demisto.setLastRun(GithubGetEvents.get_last_run(events))
else:
send_events_to_xsiam([], 'github', demisto_params.get('product'))
elif command == 'github-get-events':
command_results = CommandResults(
readable_output=tableToMarkdown('Github Logs', events, headerTransform=pascalToSpace),
outputs_prefix='Github.Logs',
outputs_key_field='@timestamp',
outputs=events,
raw_response=events,
)
return_results(command_results)
while events:
send_events_to_xsiam(events[:events_to_add_per_request], 'github',
demisto_params.get('product'))
events = events[events_to_add_per_request:]
except Exception as e:
return_error(str(e))
|
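The chunked-upload loop at the end of the modified `main()` above, shown in isolation with plain lists; the event values and batch size are stand-ins.

events = list(range(7))    # hypothetical stand-in for fetched events
batch = 3
while events:              # same truthiness test the modified code uses
    print(events[:batch])  # -> [0, 1, 2], then [3, 4, 5], then [6]
    events = events[batch:]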
46,575 |
def invalid_cases():
rng = Random(1234)
for (name, (typ, offsets)) in PRESET_CONTAINERS.items():
# using mode_max_count, so that the extra byte cannot be picked up as normal list content
yield f'{name}_extra_byte', \
invalid_test_case(lambda: serialize(
container_case_fn(rng, RandomizationMode.mode_max_count, typ)) + b'\xff')
if len(offsets) != 0:
# Note: there are many more ways to have invalid offsets,
# these are just example to get clients started looking into hardening ssz.
for mode in [RandomizationMode.mode_random,
RandomizationMode.mode_nil_count,
RandomizationMode.mode_one_count,
RandomizationMode.mode_max_count]:
if len(offsets) != 0:
for index, offset_index in enumerate(offsets):
yield f'{name}_{mode.to_name()}_offset_{offset_index}_plus_one', \
invalid_test_case(lambda: mod_offset(
b=serialize(container_case_fn(rng, mode, typ)),
offset_index=offset_index,
change=lambda x: x + 1
))
yield f'{name}_{mode.to_name()}_offset_{offset_index}_zeroed', \
invalid_test_case(lambda: mod_offset(
b=serialize(container_case_fn(rng, mode, typ)),
offset_index=offset_index,
change=lambda x: 0
))
if index == 0:
yield f'{name}_{mode.to_name()}_first offset_{offset_index}_minus_one', \
invalid_test_case(lambda: mod_offset(
b=serialize(container_case_fn(rng, mode, typ)),
offset_index=offset_index,
change=lambda x: x - 1
))
if mode == RandomizationMode.mode_max_count:
serialized = serialize(container_case_fn(rng, mode, typ))
serialized = serialized + serialized[0:2]
yield f'{name}_{mode.to_name()}_last offset_{offset_index}_overflow', \
invalid_test_case(lambda: serialized)
if mode == RandomizationMode.mode_one_count:
serialized = serialize(container_case_fn(rng, mode, typ))
serialized = serialized + serialized[0:1]
yield f'{name}_{mode.to_name()}_last offset_{offset_index}_wrong_byte_length', \
invalid_test_case(lambda: serialized)
|
def invalid_cases():
rng = Random(1234)
for (name, (typ, offsets)) in PRESET_CONTAINERS.items():
# using mode_max_count, so that the extra byte cannot be picked up as normal list content
yield f'{name}_extra_byte', \
invalid_test_case(lambda: serialize(
container_case_fn(rng, RandomizationMode.mode_max_count, typ)) + b'\xff')
if len(offsets) != 0:
# Note: there are many more ways to have invalid offsets,
# these are just example to get clients started looking into hardening ssz.
for mode in [RandomizationMode.mode_random,
RandomizationMode.mode_nil_count,
RandomizationMode.mode_one_count,
RandomizationMode.mode_max_count]:
if len(offsets) != 0:
for index, offset_index in enumerate(offsets):
yield f'{name}_{mode.to_name()}_offset_{offset_index}_plus_one', \
invalid_test_case(lambda: mod_offset(
b=serialize(container_case_fn(rng, mode, typ)),
offset_index=offset_index,
change=lambda x: x + 1
))
yield f'{name}_{mode.to_name()}_offset_{offset_index}_zeroed', \
invalid_test_case(lambda: mod_offset(
b=serialize(container_case_fn(rng, mode, typ)),
offset_index=offset_index,
change=lambda x: 0
))
if index == 0:
yield f'{name}_{mode.to_name()}_first offset_{offset_index}_minus_one', \
invalid_test_case(lambda: mod_offset(
b=serialize(container_case_fn(rng, mode, typ)),
offset_index=offset_index,
change=lambda x: x - 1
))
if mode == RandomizationMode.mode_max_count:
serialized = serialize(container_case_fn(rng, mode, typ))
serialized = serialized + serialized[:2]
yield f'{name}_{mode.to_name()}_last offset_{offset_index}_overflow', \
invalid_test_case(lambda: serialized)
if mode == RandomizationMode.mode_one_count:
serialized = serialize(container_case_fn(rng, mode, typ))
serialized = serialized + serialized[0:1]
yield f'{name}_{mode.to_name()}_last offset_{offset_index}_wrong_byte_length', \
invalid_test_case(lambda: serialized)
|
35,991 |
def traverse_graph(starting_pks, max_iterations=None, get_links=False, **kwargs):
"""
This function will return the set of all nodes that can be connected
to a list of initial nodes through any sequence of specified authorized
links and directions.
:param starting_pks:
A list with the (valid) pks of all starting nodes.
:param traverse_links:
A dictionary assigning a boolean value to each of the graph traversal
rules.
"""
# pylint: disable=too-many-locals,too-many-statements,too-many-branches
from aiida.orm import Node
from aiida.orm.querybuilder import QueryBuilder
from aiida.tools.graph.age_entities import Basket
from aiida.tools.graph.age_rules import UpdateRule, RuleSequence, RuleSaveWalkers, RuleSetWalkers
from aiida.common import exceptions
from aiida.common.links import GraphTraversalRules
follow_backwards = []
follow_forwards = []
# Create the dictionary with graph traversal rules to be applied
# (This uses the delete dict because it has the same names and directions
# as the export dict, which is all this function needs)
for name, rule in GraphTraversalRules.DELETE.value.items():
# Check that all rules are explicitly provided
if name not in kwargs:
raise exceptions.ValidationError('traversal rule {} was not provided'.format(name))
follow = kwargs.pop(name)
if not isinstance(follow, bool):
raise ValueError('the value of rule {} must be boolean, but it is: {}'.format(name, follow))
if follow:
if rule.direction == 'forward':
follow_forwards.append(rule.link_type.value)
elif rule.direction == 'backward':
follow_backwards.append(rule.link_type.value)
else:
raise exceptions.InternalError(
'unrecognized direction `{}` for graph traversal rule'.format(rule.direction)
)
if kwargs:
raise exceptions.ValidationError('unrecognized keywords: {}'.format(', '.join(kwargs.keys())))
links_backwards = {'type': {'in': follow_backwards}}
links_forwards = {'type': {'in': follow_forwards}}
operational_set = set(starting_pks)
if not operational_set:
if get_links:
output = {'nodes': set(), 'links': set()}
else:
output = {'nodes': set(), 'links': None}
return output
query_nodes = QueryBuilder()
query_nodes.append(Node, project=['id'], filters={'id': {'in': operational_set}})
existing_pks = {pk[0] for pk in query_nodes.all()}
missing_pks = operational_set.difference(existing_pks)
if missing_pks:
raise exceptions.NotExistent(
'The following pks are not in the database and must be pruned before this call: {}'.format(missing_pks)
)
basket = Basket(nodes=operational_set)
rules = []
if follow_forwards and follow_backwards:
stash = basket.get_template()
rules += [RuleSaveWalkers(stash)]
if follow_forwards:
query_outgoing = QueryBuilder()
query_outgoing.append(Node, tag='sources')
query_outgoing.append(Node, edge_filters=links_forwards, with_incoming='sources')
rule_outgoing = UpdateRule(query_outgoing, max_iterations=1, track_edges=get_links)
rules += [rule_outgoing]
if follow_forwards and follow_backwards:
rules += [RuleSetWalkers(stash)]
if follow_backwards:
query_incomong = QueryBuilder()
query_incomong.append(Node, tag='sources')
query_incomong.append(Node, edge_filters=links_backwards, with_outgoing='sources')
rule_incoming = UpdateRule(query_incomong, max_iterations=1, track_edges=get_links)
rules += [rule_incoming]
if max_iterations is None:
max_iterations = inf
else:
if isinstance(max_iterations, int):
pass
elif max_iterations is inf:
pass
else:
raise TypeError('Max_iterations has to be an integer or infinity')
rulesequence = RuleSequence(rules, max_iterations=max_iterations)
results = rulesequence.run(basket)
output = {}
output['nodes'] = results.nodes.get_keys()
if get_links:
output['links'] = results['nodes_nodes'].get_keys()
else:
output['links'] = None
return output
|
def traverse_graph(starting_pks, max_iterations=None, get_links=False, **kwargs):
"""
This function will return the set of all nodes that can be connected
to a list of initial nodes through any sequence of specified authorized
links and directions.
:param starting_pks:
A list with the (valid) pks of all starting nodes.
:param traverse_links:
A dictionary assigning a boolean value to each of the graph traversal
rules.
"""
# pylint: disable=too-many-locals,too-many-statements,too-many-branches
from aiida.orm import Node
from aiida.orm.querybuilder import QueryBuilder
from aiida.tools.graph.age_entities import Basket
from aiida.tools.graph.age_rules import UpdateRule, RuleSequence, RuleSaveWalkers, RuleSetWalkers
from aiida.common import exceptions
from aiida.common.links import GraphTraversalRules
follow_backwards = []
follow_forwards = []
# Create the dictionary with graph traversal rules to be applied
# (This uses the delete dict because it has the same names and directions
# as the export dict, which is all this function needs)
for name, rule in GraphTraversalRules.DELETE.value.items():
# Check that all rules are explicitly provided
if name not in kwargs:
raise exceptions.ValidationError('traversal rule {} was not provided'.format(name))
follow = kwargs.pop(name)
if not isinstance(follow, bool):
raise ValueError('the value of rule {} must be boolean, but it is: {}'.format(name, follow))
if follow:
if rule.direction == 'forward':
follow_forwards.append(rule.link_type.value)
elif rule.direction == 'backward':
follow_backwards.append(rule.link_type.value)
else:
raise exceptions.InternalError(
'unrecognized direction `{}` for graph traversal rule'.format(rule.direction)
)
if kwargs:
raise exceptions.ValidationError('unrecognized keywords: {}'.format(', '.join(kwargs.keys())))
links_backwards = {'type': {'in': follow_backwards}}
links_forwards = {'type': {'in': follow_forwards}}
operational_set = set(starting_pks)
if not operational_set:
if get_links:
output = {'nodes': set(), 'links': set()}
else:
output = {'nodes': set(), 'links': None}
return output
query_nodes = QueryBuilder()
query_nodes.append(Node, project=['id'], filters={'id': {'in': operational_set}})
existing_pks = {pk[0] for pk in query_nodes.all()}
missing_pks = operational_set.difference(existing_pks)
if missing_pks:
raise exceptions.NotExistent(
'The following pks are not in the database and must be pruned before this call: {}'.format(missing_pks)
)
basket = Basket(nodes=operational_set)
rules = []
if follow_forwards and follow_backwards:
stash = basket.get_template()
rules += [RuleSaveWalkers(stash)]
if follow_forwards:
query_outgoing = QueryBuilder()
query_outgoing.append(Node, tag='sources')
query_outgoing.append(Node, edge_filters=links_forwards, with_incoming='sources')
rule_outgoing = UpdateRule(query_outgoing, max_iterations=1, track_edges=get_links)
rules += [rule_outgoing]
if follow_forwards and follow_backwards:
rules += [RuleSetWalkers(stash)]
if follow_backwards:
query_incoming = QueryBuilder()
query_incoming.append(Node, tag='sources')
query_incoming.append(Node, edge_filters=links_backwards, with_outgoing='sources')
rule_incoming = UpdateRule(query_incoming, max_iterations=1, track_edges=get_links)
rules += [rule_incoming]
if max_iterations is None:
max_iterations = inf
else:
if isinstance(max_iterations, int):
pass
elif max_iterations is inf:
pass
else:
raise TypeError('Max_iterations has to be an integer or infinity')
rulesequence = RuleSequence(rules, max_iterations=max_iterations)
results = rulesequence.run(basket)
output = {}
output['nodes'] = results.nodes.get_keys()
if get_links:
output['links'] = results['nodes_nodes'].get_keys()
else:
output['links'] = None
return output
|
34,663 |
def _create_augmentation_summary(
pooled_intents: Set,
changed_intents: Set,
classification_report: Dict[Text, Dict[Text, float]],
intent_report: Dict[Text, float],
) -> Tuple[
Dict[Text, Dict[Text, float]], Dict[Text, float],
]:
intent_summary = collections.defaultdict(dict)
for intent in (
pooled_intents | changed_intents | {"micro avg", "macro avg", "weighted avg"}
):
if intent not in classification_report.keys():
continue
intent_results_original = classification_report[intent]
intent_results = intent_report[intent]
# Record performance changes for augmentation based on the diversity criterion
precision_change = (
intent_results["precision"] - intent_results_original["precision"]
)
recall_change = intent_results["recall"] - intent_results_original["recall"]
f1_change = intent_results["f1-score"] - intent_results_original["f1-score"]
intent_results["precision_change"] = intent_summary[intent][
"precision_change"
] = precision_change
intent_results["recall_change"] = intent_summary[intent][
"recall_change"
] = recall_change
intent_results["f1-score_change"] = intent_summary[intent][
"f1-score_change"
] = f1_change
intent_report[intent] = intent_results
return (intent_summary, intent_report)
|
def _create_augmentation_summary(
pooled_intents: Set,
changed_intents: Set,
classification_report: Dict[Text, Dict[Text, float]],
intent_report: Dict[Text, float],
) -> Tuple[
Dict[Text, Dict[Text, float]], Dict[Text, float],
]:
intent_summary = collections.defaultdict(dict)
for intent in (
pooled_intents | changed_intents | {"micro avg", "macro avg", "weighted avg"}
):
if intent not in classification_report:
continue
intent_results_original = classification_report[intent]
intent_results = intent_report[intent]
# Record performance changes for augmentation based on the diversity criterion
precision_change = (
intent_results["precision"] - intent_results_original["precision"]
)
recall_change = intent_results["recall"] - intent_results_original["recall"]
f1_change = intent_results["f1-score"] - intent_results_original["f1-score"]
intent_results["precision_change"] = intent_summary[intent][
"precision_change"
] = precision_change
intent_results["recall_change"] = intent_summary[intent][
"recall_change"
] = recall_change
intent_results["f1-score_change"] = intent_summary[intent][
"f1-score_change"
] = f1_change
intent_report[intent] = intent_results
return (intent_summary, intent_report)
|
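A tiny self-contained example of `_create_augmentation_summary` above with single-intent reports, just to show how the *_change fields are derived; all numbers are invented.

# Hypothetical reports for one intent.
classification_report = {"greet": {"precision": 0.80, "recall": 0.75, "f1-score": 0.77}}
intent_report = {"greet": {"precision": 0.85, "recall": 0.80, "f1-score": 0.82}}

summary, report = _create_augmentation_summary({"greet"}, set(), classification_report, intent_report)
print(round(summary["greet"]["f1-score_change"], 2))  # -> 0.05 (0.82 - 0.77)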
6,679 |
def make_einvoice(invoice, generate_ewb=False):
validate_mandatory_fields(invoice)
schema = read_json("einv_template")
transaction_details = get_transaction_details(invoice)
item_list = get_item_list(invoice)
doc_details = get_doc_details(invoice)
invoice_value_details = get_invoice_value_details(invoice)
seller_details = get_party_details(invoice.company_address)
if invoice.gst_category == "Overseas":
buyer_details = get_overseas_address_details(invoice.customer_address)
else:
buyer_details = get_party_details(invoice.customer_address)
place_of_supply = get_place_of_supply(invoice, invoice.doctype)
if place_of_supply:
place_of_supply = place_of_supply.split("-")[0]
else:
place_of_supply = sanitize_for_json(invoice.billing_address_gstin)[:2]
buyer_details.update(dict(place_of_supply=place_of_supply))
seller_details.update(dict(legal_name=invoice.company))
buyer_details.update(dict(legal_name=invoice.customer_name or invoice.customer))
shipping_details = payment_details = prev_doc_details = eway_bill_details = frappe._dict({})
if invoice.shipping_address_name and invoice.customer_address != invoice.shipping_address_name:
if invoice.gst_category == "Overseas":
shipping_details = get_overseas_address_details(invoice.shipping_address_name)
else:
shipping_details = get_party_details(invoice.shipping_address_name, skip_gstin_validation=True)
dispatch_details = frappe._dict({})
if invoice.dispatch_address_name:
dispatch_details = get_party_details(invoice.dispatch_address_name, skip_gstin_validation=True)
if invoice.is_pos and invoice.base_paid_amount:
payment_details = get_payment_details(invoice)
if invoice.is_return and invoice.return_against:
prev_doc_details = get_return_doc_reference(invoice)
if (
(
invoice.transporter
or (
(invoice.mode_of_transport and invoice.mode_of_transport != "Road")
or (invoice.mode_of_transport == "Road" and invoice.vehicle_no)
)
)
and not invoice.is_return
and generate_ewb == "true"
):
if invoice.mode_of_transport == "Road" and not invoice.vehicle_no:
frappe.throw(_("Eway Bill : Vehicle No is mandatory if Mode of Transport is Road"))
else:
eway_bill_details = get_eway_bill_details(invoice)
# not yet implemented
period_details = export_details = frappe._dict({})
einvoice = schema.format(
transaction_details=transaction_details,
doc_details=doc_details,
dispatch_details=dispatch_details,
seller_details=seller_details,
buyer_details=buyer_details,
shipping_details=shipping_details,
item_list=item_list,
invoice_value_details=invoice_value_details,
payment_details=payment_details,
period_details=period_details,
prev_doc_details=prev_doc_details,
export_details=export_details,
eway_bill_details=eway_bill_details,
)
try:
einvoice = safe_json_load(einvoice)
einvoice = santize_einvoice_fields(einvoice)
except json.JSONDecodeError:
raise
except Exception:
show_link_to_error_log(invoice, einvoice)
try:
validate_totals(einvoice)
except Exception:
log_error(einvoice)
raise
return einvoice
|
def make_einvoice(invoice, generate_ewb=False):
validate_mandatory_fields(invoice)
schema = read_json("einv_template")
transaction_details = get_transaction_details(invoice)
item_list = get_item_list(invoice)
doc_details = get_doc_details(invoice)
invoice_value_details = get_invoice_value_details(invoice)
seller_details = get_party_details(invoice.company_address)
if invoice.gst_category == "Overseas":
buyer_details = get_overseas_address_details(invoice.customer_address)
else:
buyer_details = get_party_details(invoice.customer_address)
place_of_supply = get_place_of_supply(invoice, invoice.doctype)
if place_of_supply:
place_of_supply = place_of_supply.split("-")[0]
else:
place_of_supply = sanitize_for_json(invoice.billing_address_gstin)[:2]
buyer_details.update(dict(place_of_supply=place_of_supply))
seller_details.update(dict(legal_name=invoice.company))
buyer_details.update(dict(legal_name=invoice.customer_name or invoice.customer))
shipping_details = payment_details = prev_doc_details = eway_bill_details = frappe._dict({})
if invoice.shipping_address_name and invoice.customer_address != invoice.shipping_address_name:
if invoice.gst_category == "Overseas":
shipping_details = get_overseas_address_details(invoice.shipping_address_name)
else:
shipping_details = get_party_details(invoice.shipping_address_name, skip_gstin_validation=True)
dispatch_details = frappe._dict({})
if invoice.dispatch_address_name:
dispatch_details = get_party_details(invoice.dispatch_address_name, skip_gstin_validation=True)
if invoice.is_pos and invoice.base_paid_amount:
payment_details = get_payment_details(invoice)
if invoice.is_return and invoice.return_against:
prev_doc_details = get_return_doc_reference(invoice)
if (
(
invoice.transporter
or (
(invoice.mode_of_transport and invoice.mode_of_transport != "Road")
or (invoice.mode_of_transport == "Road" and invoice.vehicle_no)
)
)
and not invoice.is_return
and generate_ewb == "true"
):
if invoice.mode_of_transport == "Road" and not invoice.vehicle_no:
frappe.throw(_("Vehicle No is mandatory to generate Eway Bill if Mode of Transport is Road"))
else:
eway_bill_details = get_eway_bill_details(invoice)
# not yet implemented
period_details = export_details = frappe._dict({})
einvoice = schema.format(
transaction_details=transaction_details,
doc_details=doc_details,
dispatch_details=dispatch_details,
seller_details=seller_details,
buyer_details=buyer_details,
shipping_details=shipping_details,
item_list=item_list,
invoice_value_details=invoice_value_details,
payment_details=payment_details,
period_details=period_details,
prev_doc_details=prev_doc_details,
export_details=export_details,
eway_bill_details=eway_bill_details,
)
try:
einvoice = safe_json_load(einvoice)
einvoice = santize_einvoice_fields(einvoice)
except json.JSONDecodeError:
raise
except Exception:
show_link_to_error_log(invoice, einvoice)
try:
validate_totals(einvoice)
except Exception:
log_error(einvoice)
raise
return einvoice
|
24,644 |
def _vspace_iterator(vspace, MAX_ITERATIONS=500, err=1e-10):
r"""
Returns an array of null point object, representing
the null points of the given vector space.
Parameters
----------
vspace: array_like
The vector space as constructed by the vector_space function which is
A 1 by 3 array with the first element containing the coordinates,
the second element containing the vector values,
and the third element containing the delta values for each dimension.
MAX_ITERATIONS: int
The maximum iterations of the Newton-Raphson method.
The default value is 500.
err: float
The threshold/error that determines if convergence has occured
using the Newton-Raphson method.
The default value is ``1e-10``.
Returns
-------
array_like of `~plasmapy.analysis.nullpoint.NullPoint`
An array of NullPoint objects representing the nullpoints
of the given vector space.
"""
nullpoints = []
for i in range(len(vspace[0][0]) - 1):
for j in range(len(vspace[0][0][0]) - 1):
for k in range(len(vspace[0][0][0][0]) - 1):
if _reduction(vspace, [i, j, k]):
if _trilinear_analysis(vspace, [i, j, k]):
loc = _locate_null_point(vspace, [i, j, k], MAX_ITERATIONS, err)
if loc is not None:
p = NullPoint(loc, "N/A")
if p not in nullpoints:
nullpoints.append(p)
return nullpoints
|
def _vspace_iterator(vspace, MAX_ITERATIONS=500, err=1e-10):
r"""
Returns an array of null point object, representing
the null points of the given vector space.
Parameters
----------
vspace: array_like
The vector space as constructed by the vector_space function which is
A 1 by 3 array with the first element containing the coordinates,
the second element containing the vector values,
and the third element containing the delta values for each dimension.
MAX_ITERATIONS: int
The maximum iterations of the Newton-Raphson method.
The default value is 500.
err: float
The threshold/error that determines if convergence has occurred
using the Newton-Raphson method.
The default value is ``1e-10``.
Returns
-------
array_like of `~plasmapy.analysis.nullpoint.NullPoint`
An array of NullPoint objects representing the nullpoints
of the given vector space.
"""
nullpoints = []
for i in range(len(vspace[0][0]) - 1):
for j in range(len(vspace[0][0][0]) - 1):
for k in range(len(vspace[0][0][0][0]) - 1):
if _reduction(vspace, [i, j, k]):
if _trilinear_analysis(vspace, [i, j, k]):
loc = _locate_null_point(vspace, [i, j, k], MAX_ITERATIONS, err)
if loc is not None:
p = NullPoint(loc, "N/A")
if p not in nullpoints:
nullpoints.append(p)
return nullpoints
|