id int64 11–59.9k | original stringlengths 33–150k | modified stringlengths 37–150k |
---|---|---|
35,043 | def _get_name_static(canonical, dtype, shape, batch_dim=None):
"""Get name for static shape tensor array op corresponding
to the canonical name"""
dim_names = []
for dim in shape:
if isinstance(dim, Any):
dim_names.append("any")
else:
dim_names.append(str(dim))
shape_str = "_".join(dim_names)
if len(shape_str) == 0:
shape_str = "scalar"
if canonical == "tensor_t":
return "static_tensor_{}_{}_t".format(dtype, shape_str)
if not batch_dim or canonical == "tensor_constructor" or canonical == "tensor_nil":
return "{}_{}_{}".format(canonical, dtype, shape_str)
if batch_dim != 1:
return "{}_{}_{}".format(canonical, dtype, shape_str)
else:
return "{}_{}_batch{}_{}".format(canonical, dtype, str(batch_dim), shape_str)
| def _get_name_static(canonical, dtype, shape, batch_dim=None):
"""Get name for static shape tensor array op corresponding
to the canonical name"""
dim_names = []
for dim in shape:
if isinstance(dim, Any):
dim_names.append("any")
else:
dim_names.append(str(dim))
shape_str = "_".join(dim_names)
if len(shape_str) == 0:
shape_str = "scalar"
if canonical == "tensor_t":
return "static_tensor_{}_{}_t".format(dtype, shape_str)
if not batch_dim or canonical == "tensor_constructor" or canonical == "tensor_nil":
return "{}_{}_{}".format(canonical, dtype, shape_str)
if batch_dim != 1:
return "{}_{}_{}".format(canonical, dtype, shape_str)
return "{}_{}_batch{}_{}".format(canonical, dtype, str(batch_dim), shape_str)
|
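
A quick illustration of the naming scheme in the pair above (editor's sketch, not part of the dataset): the helper joins each static dimension with underscores, writes dynamic dimensions as `any`, and falls back to `scalar` for rank-0 shapes. The `Any` class below is a hypothetical stand-in for TVM's `tvm.relay.Any` so the snippet runs without TVM installed.

```python
# Standalone sketch of the shape-string naming used by _get_name_static.
# "Any" here is a hypothetical stand-in for tvm.relay.Any (a dynamic dim).
class Any:
    pass

def shape_str(shape):
    dims = ["any" if isinstance(d, Any) else str(d) for d in shape]
    return "_".join(dims) or "scalar"

print(shape_str((1, Any(), 128)))  # 1_any_128
print(shape_str(()))               # scalar
print("static_tensor_{}_{}_t".format("float32", shape_str((1, Any(), 128))))
# static_tensor_float32_1_any_128_t
```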
46,628 | def meta_fcnet(fname_objective: str, fname_cost: str, noise: bool=True) -> Tuple[UserFunctionWrapper, ParameterSpace]:
"""
Interface to the Meta-FCNet benchmark which imitates the hyperparameter optimization of a
fully connected neural network on OpenML like classification datasets.
Offline generated function samples can be download here:
http://www.ml4aad.org/wp-content/uploads/2019/05/profet_data.tar.gz
NOTE: make sure that the index for the objective function and the cost function match,
e.g for sample_objective_i.pkl and sample_cost.pkl the index i should be the same.
For further information about Profet and the generated meta-surrogate benchmarks see:
Meta-Surrogate Benchmarking for Hyperparameter Optimization
A. Klein and Z. Dai and F. Hutter and N. Lawrence and J. Gonzalez
arXiv:1905.12982 [cs.LG] (2019)
:param fname_objective: filename for the objective function
:param fname_cost: filename for the cost function
:param noise: determines whether to add noise on the function value or not
:return: Tuple of user function object and parameter space
"""
parameter_space = ParameterSpace([
ContinuousParameter('lr', 0, 1), # original space [1e-6, 1e-1]
ContinuousParameter('batch_size', 0, 1), # original space [8, 128]
ContinuousParameter('n_units_1', 0, 1), # original space [16, 512]
ContinuousParameter('n_units_2', 0, 1), # original space [16, 512]
ContinuousParameter('dropout_1', 0, 1), # original space [0, 0.99]
ContinuousParameter('dropout_2', 0, 1), # original space [0, 0.99]
])
data = pickle.load(open(fname_objective, "rb"))
x_mean_objective = data["x_mean"]
x_std_objective = data["x_std"]
task_feature_objective = data["task_feature"]
objective = get_default_architecture_classification(x_mean_objective.shape[0]).float()
objective.load_state_dict(data["state_dict"])
data = pickle.load(open(fname_cost, "rb"))
x_mean_cost = data["x_mean"]
x_std_cost = data["x_std"]
y_mean_cost = data["y_mean"]
y_std_cost = data["y_std"]
task_feature_cost = data["task_feature"]
cost = get_default_architecture_cost(x_mean_cost.shape[0]).float()
cost.load_state_dict(data["state_dict"])
def objective_function(config, with_noise=True):
Ht = np.repeat(task_feature_objective[None, :], config.shape[0], axis=0)
x = np.concatenate((config, Ht), axis=1)
x_norm = torch.from_numpy((x - x_mean_objective) / x_std_objective).float()
o = objective.forward(x_norm).data.numpy()
m = o[:, 0]
log_v = o[:, 1]
if with_noise:
feval = np.random.randn() * np.sqrt(np.exp(log_v)) + m
else:
feval = m
Ht = np.repeat(task_feature_cost[None, :], config.shape[0], axis=0)
x = np.concatenate((config, Ht), axis=1)
x_norm = torch.from_numpy((x - x_mean_cost) / x_std_cost).float()
o = cost.forward(x_norm).data.numpy()
log_m = o[:, 0] * y_std_cost + y_mean_cost
log_log_v = o[:, 1] * y_std_cost ** 2
if with_noise:
log_c = np.random.randn() * np.sqrt(np.exp(log_log_v)) + log_m
else:
log_c = log_m
return feval[:, None], np.exp(log_c)[:, None]
f = partial(objective_function, with_noise=noise)
return f, parameter_space
| def meta_fcnet(fname_objective: str, fname_cost: str, noise: bool=True) -> Tuple[UserFunctionWrapper, ParameterSpace]:
"""
Interface to the Meta-FCNet benchmark which imitates the hyperparameter optimization of a
fully connected neural network on OpenML like classification datasets.
Offline generated function samples can be download here:
http://www.ml4aad.org/wp-content/uploads/2019/05/profet_data.tar.gz
NOTE: make sure that the index for the objective function and the cost function match,
e.g for sample_objective_i.pkl and sample_cost_i.pkl the index i should be the same.
For further information about Profet and the generated meta-surrogate benchmarks see:
Meta-Surrogate Benchmarking for Hyperparameter Optimization
A. Klein and Z. Dai and F. Hutter and N. Lawrence and J. Gonzalez
arXiv:1905.12982 [cs.LG] (2019)
:param fname_objective: filename for the objective function
:param fname_cost: filename for the cost function
:param noise: determines whether to add noise on the function value or not
:return: Tuple of user function object and parameter space
"""
parameter_space = ParameterSpace([
ContinuousParameter('lr', 0, 1), # original space [1e-6, 1e-1]
ContinuousParameter('batch_size', 0, 1), # original space [8, 128]
ContinuousParameter('n_units_1', 0, 1), # original space [16, 512]
ContinuousParameter('n_units_2', 0, 1), # original space [16, 512]
ContinuousParameter('dropout_1', 0, 1), # original space [0, 0.99]
ContinuousParameter('dropout_2', 0, 1), # original space [0, 0.99]
])
data = pickle.load(open(fname_objective, "rb"))
x_mean_objective = data["x_mean"]
x_std_objective = data["x_std"]
task_feature_objective = data["task_feature"]
objective = get_default_architecture_classification(x_mean_objective.shape[0]).float()
objective.load_state_dict(data["state_dict"])
data = pickle.load(open(fname_cost, "rb"))
x_mean_cost = data["x_mean"]
x_std_cost = data["x_std"]
y_mean_cost = data["y_mean"]
y_std_cost = data["y_std"]
task_feature_cost = data["task_feature"]
cost = get_default_architecture_cost(x_mean_cost.shape[0]).float()
cost.load_state_dict(data["state_dict"])
def objective_function(config, with_noise=True):
Ht = np.repeat(task_feature_objective[None, :], config.shape[0], axis=0)
x = np.concatenate((config, Ht), axis=1)
x_norm = torch.from_numpy((x - x_mean_objective) / x_std_objective).float()
o = objective.forward(x_norm).data.numpy()
m = o[:, 0]
log_v = o[:, 1]
if with_noise:
feval = np.random.randn() * np.sqrt(np.exp(log_v)) + m
else:
feval = m
Ht = np.repeat(task_feature_cost[None, :], config.shape[0], axis=0)
x = np.concatenate((config, Ht), axis=1)
x_norm = torch.from_numpy((x - x_mean_cost) / x_std_cost).float()
o = cost.forward(x_norm).data.numpy()
log_m = o[:, 0] * y_std_cost + y_mean_cost
log_log_v = o[:, 1] * y_std_cost ** 2
if with_noise:
log_c = np.random.randn() * np.sqrt(np.exp(log_log_v)) + log_m
else:
log_c = log_m
return feval[:, None], np.exp(log_c)[:, None]
f = partial(objective_function, with_noise=noise)
return f, parameter_space
|
8,066 | def test_simple_write_compressed(tmpdir):
data, header = sunpy.io.fits.read(AIA_171_IMAGE)[0]
outfile = tmpdir / "test.fits"
sunpy.io.fits.write(str(outfile), data, header, hdu_type=fits.CompImageHDU)
assert outfile.exists()
with fits.open(str(outfile)) as hdul:
assert len(hdul) == 2
assert isinstance(hdul[1], fits.CompImageHDU)
| def test_simple_write_compressed(tmpdir):
outfile = os.path.join(tmpdir, "test.fits")
outfile = tmpdir / "test.fits"
sunpy.io.fits.write(str(outfile), data, header, hdu_type=fits.CompImageHDU)
assert outfile.exists()
with fits.open(str(outfile)) as hdul:
assert len(hdul) == 2
assert isinstance(hdul[1], fits.CompImageHDU)
|
14,021 | def write_to_db(gdf, engine, index, tbl, srid, geom_name, if_exists):
import io
import csv
# Convert columns to lists and make a generator
args = [list(gdf[i]) for i in gdf.columns]
if index:
args.insert(0, list(gdf.index))
data_iter = zip(*args)
# get list of columns using pandas
keys = tbl.insert_data()[0]
columns = ", ".join('"{}"'.format(k) for k in list(keys))
s_buf = io.StringIO()
writer = csv.writer(s_buf)
writer.writerows(data_iter)
s_buf.seek(0)
conn = engine.raw_connection()
cur = conn.cursor()
try:
# If appending to an existing table, temporarily change
# the srid to 0, and update the SRID afterwards
if if_exists == "append":
sql = "SELECT UpdateGeometrySRID('{schema}','{tbl}','{geom}',{crs})".format(
schema=tbl.table.schema, tbl=tbl.table.name, geom=geom_name, crs=0
)
cur.execute(sql)
sql = "COPY {} ({}) FROM STDIN WITH CSV".format(tbl.table.fullname, columns)
cur.copy_expert(sql=sql, file=s_buf)
# SRID needs to be updated afterwards as the current approach does not support
# the use of EWKT geometries.
sql = "SELECT UpdateGeometrySRID('{schema}','{tbl}','{geom}',{srid})".format(
schema=tbl.table.schema, tbl=tbl.table.name, geom=geom_name, srid=srid
)
cur.execute(sql)
conn.commit()
except Exception as e:
conn.connection.rollback()
raise e
conn.close()
| def _write_to_db(gdf, engine, index, tbl, srid, geom_name, if_exists):
import io
import csv
# Convert columns to lists and make a generator
args = [list(gdf[i]) for i in gdf.columns]
if index:
args.insert(0, list(gdf.index))
data_iter = zip(*args)
# get list of columns using pandas
keys = tbl.insert_data()[0]
columns = ", ".join('"{}"'.format(k) for k in list(keys))
s_buf = io.StringIO()
writer = csv.writer(s_buf)
writer.writerows(data_iter)
s_buf.seek(0)
conn = engine.raw_connection()
cur = conn.cursor()
try:
# If appending to an existing table, temporarily change
# the srid to 0, and update the SRID afterwards
if if_exists == "append":
sql = "SELECT UpdateGeometrySRID('{schema}','{tbl}','{geom}',{crs})".format(
schema=tbl.table.schema, tbl=tbl.table.name, geom=geom_name, crs=0
)
cur.execute(sql)
sql = "COPY {} ({}) FROM STDIN WITH CSV".format(tbl.table.fullname, columns)
cur.copy_expert(sql=sql, file=s_buf)
# SRID needs to be updated afterwards as the current approach does not support
# the use of EWKT geometries.
sql = "SELECT UpdateGeometrySRID('{schema}','{tbl}','{geom}',{srid})".format(
schema=tbl.table.schema, tbl=tbl.table.name, geom=geom_name, srid=srid
)
cur.execute(sql)
conn.commit()
except Exception as e:
conn.connection.rollback()
raise e
conn.close()
|
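
For context on the COPY path in the pair above, here is a minimal, hedged sketch of the CSV-buffer-to-`copy_expert` pattern it relies on. The table name, columns, and cursor are hypothetical; only the psycopg2 `copy_expert(sql, file)` call and the `io`/`csv` usage mirror the function.

```python
import csv
import io

# Hypothetical rows standing in for the GeoDataFrame columns.
rows = [(1, "POINT(0 0)"), (2, "POINT(1 1)")]

buf = io.StringIO()
csv.writer(buf).writerows(rows)
buf.seek(0)

sql = 'COPY my_table ("id", "geom") FROM STDIN WITH CSV'
# With an open psycopg2 cursor `cur`, the bulk load would be:
# cur.copy_expert(sql=sql, file=buf)
```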
8,682 | def handle_init(options):
"""Use config's wizard to initialize a new configuration file for the bot
:param options: argument parser's parsed options
.. note::
Due to how the config's wizard works, the configuration filename's
extension must be ``.cfg``.
"""
config_filename = utils.find_config(
config.DEFAULT_HOMEDIR,
getattr(options, 'config', None) or 'default')
config_name, ext = os.path.splitext(config_filename)
if ext and ext != '.cfg':
tools.stderr('Configuration wizard accepts .cfg file only')
return 1
elif not ext:
config_filename = config_name + '.cfg'
if os.path.isfile(config_filename):
tools.stderr('Configuration file %s already exists' % config_filename)
return 1
print('Starting Sopel config wizard for: %s' % config_filename)
config._wizard('all', config_name)
| def handle_init(options):
"""Use config wizard to initialize a new configuration file for the bot
:param options: argument parser's parsed options
.. note::
Due to how the config's wizard works, the configuration filename's
extension must be ``.cfg``.
"""
config_filename = utils.find_config(
config.DEFAULT_HOMEDIR,
getattr(options, 'config', None) or 'default')
config_name, ext = os.path.splitext(config_filename)
if ext and ext != '.cfg':
tools.stderr('Configuration wizard accepts .cfg file only')
return 1
elif not ext:
config_filename = config_name + '.cfg'
if os.path.isfile(config_filename):
tools.stderr('Configuration file %s already exists' % config_filename)
return 1
print('Starting Sopel config wizard for: %s' % config_filename)
config._wizard('all', config_name)
|
16,110 | def setup(hass: HomeAssistantType, config: Any) -> bool:
"""Set up the Amcrest IP Camera component."""
hass.data.setdefault(DATA_AMCREST, {DEVICES: {}, CAMERAS: []})
for device in config[DOMAIN]:
name: str = device[CONF_NAME]
username: str = device[CONF_USERNAME]
password: str = device[CONF_PASSWORD]
api = AmcrestChecker(
hass, name, device[CONF_HOST], device[CONF_PORT], username, password
)
ffmpeg_arguments = device[CONF_FFMPEG_ARGUMENTS]
resolution = RESOLUTION_LIST[device[CONF_RESOLUTION]]
binary_sensors = device.get(CONF_BINARY_SENSORS)
sensors = device.get(CONF_SENSORS)
stream_source = device[CONF_STREAM_SOURCE]
control_light = device.get(CONF_CONTROL_LIGHT)
# currently aiohttp only works with basic authentication
# only valid for mjpeg streaming
if device[CONF_AUTHENTICATION] == HTTP_BASIC_AUTHENTICATION:
authentication: aiohttp.BasicAuth | None = aiohttp.BasicAuth(
username, password
)
else:
authentication = None
hass.data[DATA_AMCREST][DEVICES][name] = AmcrestDevice(
api,
authentication,
ffmpeg_arguments,
stream_source,
resolution,
control_light,
)
discovery.load_platform(hass, CAMERA, DOMAIN, {CONF_NAME: name}, config)
event_codes = []
if binary_sensors:
discovery.load_platform(
hass,
BINARY_SENSOR,
DOMAIN,
{CONF_NAME: name, CONF_BINARY_SENSORS: binary_sensors},
config,
)
maybe_event_codes = [
BINARY_SENSORS[sensor_type].code
for sensor_type in binary_sensors
if sensor_type not in BINARY_POLLED_SENSORS
]
event_codes = [code for code in maybe_event_codes if code is not None]
_start_event_monitor(hass, name, api, event_codes)
if sensors:
discovery.load_platform(
hass, SENSOR, DOMAIN, {CONF_NAME: name, CONF_SENSORS: sensors}, config
)
if not hass.data[DATA_AMCREST][DEVICES]:
return False
def have_permission(user: User | None, entity_id: str) -> bool:
return not user or user.permissions.check_entity(entity_id, POLICY_CONTROL)
async def async_extract_from_service(call: ServiceCallType) -> list[str]:
if call.context.user_id:
user = await hass.auth.async_get_user(call.context.user_id)
if user is None:
raise UnknownUser(context=call.context)
else:
user = None
if call.data.get(ATTR_ENTITY_ID) == ENTITY_MATCH_ALL:
# Return all entity_ids user has permission to control.
return [
entity_id
for entity_id in hass.data[DATA_AMCREST][CAMERAS]
if have_permission(user, entity_id)
]
if call.data.get(ATTR_ENTITY_ID) == ENTITY_MATCH_NONE:
return []
call_ids = await async_extract_entity_ids(hass, call)
entity_ids = []
for entity_id in hass.data[DATA_AMCREST][CAMERAS]:
if entity_id not in call_ids:
continue
if not have_permission(user, entity_id):
raise Unauthorized(
context=call.context, entity_id=entity_id, permission=POLICY_CONTROL
)
entity_ids.append(entity_id)
return entity_ids
async def async_service_handler(call: ServiceCallType) -> None:
args = []
for arg in CAMERA_SERVICES[call.service][2]:
args.append(call.data[arg])
for entity_id in await async_extract_from_service(call):
async_dispatcher_send(hass, service_signal(call.service, entity_id), *args)
for service, params in CAMERA_SERVICES.items():
hass.services.register(DOMAIN, service, async_service_handler, params[0])
return True
| def setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Set up the Amcrest IP Camera component."""
hass.data.setdefault(DATA_AMCREST, {DEVICES: {}, CAMERAS: []})
for device in config[DOMAIN]:
name: str = device[CONF_NAME]
username: str = device[CONF_USERNAME]
password: str = device[CONF_PASSWORD]
api = AmcrestChecker(
hass, name, device[CONF_HOST], device[CONF_PORT], username, password
)
ffmpeg_arguments = device[CONF_FFMPEG_ARGUMENTS]
resolution = RESOLUTION_LIST[device[CONF_RESOLUTION]]
binary_sensors = device.get(CONF_BINARY_SENSORS)
sensors = device.get(CONF_SENSORS)
stream_source = device[CONF_STREAM_SOURCE]
control_light = device.get(CONF_CONTROL_LIGHT)
# currently aiohttp only works with basic authentication
# only valid for mjpeg streaming
if device[CONF_AUTHENTICATION] == HTTP_BASIC_AUTHENTICATION:
authentication: aiohttp.BasicAuth | None = aiohttp.BasicAuth(
username, password
)
else:
authentication = None
hass.data[DATA_AMCREST][DEVICES][name] = AmcrestDevice(
api,
authentication,
ffmpeg_arguments,
stream_source,
resolution,
control_light,
)
discovery.load_platform(hass, CAMERA, DOMAIN, {CONF_NAME: name}, config)
event_codes = []
if binary_sensors:
discovery.load_platform(
hass,
BINARY_SENSOR,
DOMAIN,
{CONF_NAME: name, CONF_BINARY_SENSORS: binary_sensors},
config,
)
maybe_event_codes = [
BINARY_SENSORS[sensor_type].code
for sensor_type in binary_sensors
if sensor_type not in BINARY_POLLED_SENSORS
]
event_codes = [code for code in maybe_event_codes if code is not None]
_start_event_monitor(hass, name, api, event_codes)
if sensors:
discovery.load_platform(
hass, SENSOR, DOMAIN, {CONF_NAME: name, CONF_SENSORS: sensors}, config
)
if not hass.data[DATA_AMCREST][DEVICES]:
return False
def have_permission(user: User | None, entity_id: str) -> bool:
return not user or user.permissions.check_entity(entity_id, POLICY_CONTROL)
async def async_extract_from_service(call: ServiceCallType) -> list[str]:
if call.context.user_id:
user = await hass.auth.async_get_user(call.context.user_id)
if user is None:
raise UnknownUser(context=call.context)
else:
user = None
if call.data.get(ATTR_ENTITY_ID) == ENTITY_MATCH_ALL:
# Return all entity_ids user has permission to control.
return [
entity_id
for entity_id in hass.data[DATA_AMCREST][CAMERAS]
if have_permission(user, entity_id)
]
if call.data.get(ATTR_ENTITY_ID) == ENTITY_MATCH_NONE:
return []
call_ids = await async_extract_entity_ids(hass, call)
entity_ids = []
for entity_id in hass.data[DATA_AMCREST][CAMERAS]:
if entity_id not in call_ids:
continue
if not have_permission(user, entity_id):
raise Unauthorized(
context=call.context, entity_id=entity_id, permission=POLICY_CONTROL
)
entity_ids.append(entity_id)
return entity_ids
async def async_service_handler(call: ServiceCallType) -> None:
args = []
for arg in CAMERA_SERVICES[call.service][2]:
args.append(call.data[arg])
for entity_id in await async_extract_from_service(call):
async_dispatcher_send(hass, service_signal(call.service, entity_id), *args)
for service, params in CAMERA_SERVICES.items():
hass.services.register(DOMAIN, service, async_service_handler, params[0])
return True
|
30,458 | def role_to_entry(title, role):
context = {
'ETag': role.get('etag').strip('"'),
'IsSuperAdminRole': bool(role.get('isSuperAdminRole')) if role.get('isSuperAdminRole') else False,
'IsSystemRole': bool(role.get('isSystemRole')) if role.get('isSystemRole') else False,
'Kind': role.get('kind'),
'Description': role.get('roleDescription'),
'ID': role.get('roleId'),
'Name': role.get('roleName'),
'Privilege': parse_privileges(role.get('rolePrivileges'))
}
headers = ['ETag', 'IsSuperAdminRole', 'IsSystemRole', 'Kind', 'Description',
'ID', 'Name']
details_hr = tableToMarkdown(title, context, headers, removeNull=True)
privileges = context.get('Privilege', [])
privileges_headers = ['ServiceID', 'Name']
privileges_title = 'Role {} privileges:'.format(context.get('ID'))
privileges_hr = tableToMarkdown(privileges_title, privileges, privileges_headers, removeNull=True)
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': context,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': details_hr + privileges_hr,
'EntryContext': {'Gmail.Role(val.ID && val.ID == obj.ID)': context}
}
| def role_to_entry(title, role):
context = {
'ETag': role.get('etag').strip('"'),
'IsSuperAdminRole': bool(role.get('isSuperAdminRole')) if role.get('isSuperAdminRole') else False,
'IsSystemRole': bool(role.get('isSystemRole')) if role.get('isSystemRole') else False,
'Kind': role.get('kind'),
'Description': role.get('roleDescription'),
'ID': role.get('roleId'),
'Name': role.get('roleName'),
'Privilege': parse_privileges(role.get('rolePrivileges', []))
}
headers = ['ETag', 'IsSuperAdminRole', 'IsSystemRole', 'Kind', 'Description',
'ID', 'Name']
details_hr = tableToMarkdown(title, context, headers, removeNull=True)
privileges = context.get('Privilege', [])
privileges_headers = ['ServiceID', 'Name']
privileges_title = 'Role {} privileges:'.format(context.get('ID'))
privileges_hr = tableToMarkdown(privileges_title, privileges, privileges_headers, removeNull=True)
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': context,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': details_hr + privileges_hr,
'EntryContext': {'Gmail.Role(val.ID && val.ID == obj.ID)': context}
}
|
5,643 | def _fftconv_faster(x, h, mode, test=True):
"""
See if using fftconvolve or convolve is faster.
Parameters
----------
x : np.ndarray
Signal
h : np.ndarray
Kernel
mode : str
Mode passed to convolve
Returns
-------
fft_faster : bool
Notes
-----
The big O ratios were found to hold to different machines, which makes
sense as it's the ratio that matters (the effective speed of the computer
is found in both big O constants). Regardless, this had been tuned on an
mid 2014 15-inch MacBook Pro with 16GB of RAM and an Intel 2.5GHz i7
processor.
"""
fft_ops, direct_ops = _conv_ops(x.shape, h.shape, mode)
big_O_constant = _get_fft_constant(mode, x.ndim, _prod(x.shape), _prod(h.shape))
return big_O_constant * fft_ops < direct_ops
| def _fftconv_faster(x, h, mode, test=True):
"""
See if using fftconvolve or convolve is faster.
Parameters
----------
x : np.ndarray
Signal
h : np.ndarray
Kernel
mode : str
Mode passed to convolve
Returns
-------
fft_faster : bool
Notes
-----
The big O ratios were found to hold to different machines, which makes
sense as it's the ratio that matters (the effective speed of the computer
is found in both big O constants). Regardless, this had been tuned on an
mid 2014 15-inch MacBook Pro with 16GB of RAM and an Intel 2.5GHz i7
processor.
"""
fft_ops, direct_ops = _conv_ops(x.shape, h.shape, mode)
big_O_constant = _get_fft_constant(mode, x.ndim, x.size, h.size)
return big_O_constant * fft_ops < direct_ops
|
21,251 | def save_and_upload_disclosure(
interface, disclosure_key, response, data
) -> Optional[FinancialDisclosure]:
sha1_hash = sha1(response.content)
if FinancialDisclosure.objects.filter(sha1=sha1_hash).exists():
logger.error(
"PDF already in system.",
extra={"disclosure_id": disclosure_key},
)
interface.delete(disclosure_key)
return
page_count = microservice(
service="page-count",
filename=data["url"].split("/")[-1],
file=response.content,
).content
if not page_count:
logger.error(
msg=f"Page count failed",
extra={"disclosure_id": disclosure_key, "url": data["url"]},
)
interface.delete(disclosure_key)
return
# Make disclosure
disclosure = FinancialDisclosure(
year=int(data["year"]),
page_count=page_count,
person=Person.objects.get(id=data["person_id"]),
sha1=sha1_hash,
has_been_extracted=False,
report_type=data.get("report_type", -1),
download_filepath=data.get("url"),
)
# Save and upload & generate thumbnail
disclosure.filepath.save(
f"{disclosure.person.slug}-disclosure.{data['year']}.pdf",
ContentFile(response.content),
)
logger.info(
f"Uploaded to https://{settings.AWS_S3_CUSTOM_DOMAIN}/"
f"{disclosure.filepath}"
)
return disclosure
| def save_and_upload_disclosure(
interface, disclosure_key, response, data
) -> Optional[FinancialDisclosure]:
sha1_hash = sha1(response.content)
if FinancialDisclosure.objects.filter(sha1=sha1_hash).exists():
logger.error(
"PDF already in system.",
extra={"disclosure_id": disclosure_key},
)
interface.delete(disclosure_key)
return
page_count = microservice(
service="page-count",
filename=data["url"].split("/")[-1],
file=response.content,
).content
if not page_count:
logger.error(
msg=f"Page count failed",
extra={"disclosure_id": disclosure_key, "url": data["url"]},
)
interface.delete(disclosure_key)
return
# Make disclosure
disclosure = FinancialDisclosure(
year=int(data["year"]),
page_count=page_count,
person=Person.objects.get(id=data["person_id"]),
sha1=sha1_hash,
has_been_extracted=False,
report_type=data.get("report_type", REPORT_TYPES.UNKNOWN),
download_filepath=data.get("url"),
)
# Save and upload & generate thumbnail
disclosure.filepath.save(
f"{disclosure.person.slug}-disclosure.{data['year']}.pdf",
ContentFile(response.content),
)
logger.info(
f"Uploaded to https://{settings.AWS_S3_CUSTOM_DOMAIN}/"
f"{disclosure.filepath}"
)
return disclosure
|
38,273 | def get_status_code_distribution(db_session, endpoint_id):
results = db_session.query(Request.status_code, func.count(Request.status_code)).filter(
Request.endpoint_id == endpoint_id, Request.status_code.isnot(None)).group_by(Request.status_code).all()
total_count = 0
for (_, frequency) in results:
total_count += frequency
distribution = {}
for (status_code, frequency) in results:
distribution[status_code] = frequency / total_count
return distribution
| def get_status_code_distribution(db_session, endpoint_id):
results = db_session.query(Request.status_code, func.count(Request.status_code)).filter(
Request.endpoint_id == endpoint_id, Request.status_code.isnot(None)).group_by(Request.status_code).all()
total_count = sum([f for (_, f) in results])
for (_, frequency) in results:
total_count += frequency
distribution = {}
for (status_code, frequency) in results:
distribution[status_code] = frequency / total_count
return distribution
|
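
The normalization step in the pair above is easy to see in isolation; the sketch below uses hypothetical (status_code, count) pairs in place of the SQLAlchemy query result.

```python
# Hypothetical query output: (status_code, frequency) pairs.
results = [(200, 150), (404, 30), (500, 20)]

total_count = sum(frequency for _, frequency in results)
distribution = {code: frequency / total_count for code, frequency in results}
print(distribution)  # {200: 0.75, 404: 0.15, 500: 0.1}
```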
30,768 | def list_of_objects_to_readable_output(name, items, headers):
"""Creates readable output from list of items
:type name: ``str``
:param name: readable output table name
:type items: ``List[Dict[str, any]]``
:param items: original list of objects
:type headers: ``List[Dict[str, any]]``
:param headers: original list of objects
:return:
returns an ``str`` with markdown format
:rtype: ``str``
"""
return tableToMarkdown(name, list_of_object_to_list_subset(items, *headers), headers)
| def list_of_objects_to_readable_output(name, items, headers):
"""Creates readable output from list of items
:type name: ``str``
:param name: readable output table name
:type items: ``List[Dict[str, any]]``
:param items: original list of objects
:type headers: ``List[Dict[str, any]]``
:param headers: original list of objects
:return:
returns an ``str`` with markdown format
:rtype: ``str``
"""
return tableToMarkdown(name, list_of_object_to_list_subset(items, *headers), headers, removeNull=True)
|
31,767 | def main():
params = demisto.params()
args = demisto.args()
url = params.get('url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {}
headers['PRIVATE-TOKEN'] = f'{params["api_key"]}'
command = demisto.command()
LOG(f'Command being called is {command}')
try:
urllib3.disable_warnings()
client = Client(urljoin(url, ""), verify_certificate, proxy, headers=headers)
commands = {
'gitlab-get-projects': get_projects_command,
'gitlab-projects-get-access-requests': projects_get_access_requests_command,
'gitlab-projects-request-access': projects_request_access_command,
'gitlab-projects-approve-access': projects_approve_access_command,
'gitlab-projects-deny-access': projects_deny_access_command,
'gitlab-projects-get-repository-branches': projects_get_repository_branches_command,
'gitlab-projects-create-repository-branch': projects_create_repository_branch_command,
'gitlab-projects-delete-repository-branch': projects_delete_repository_branch_command,
'gitlab-projects-delete-repository-merged-branches': projects_delete_repository_merged_branches_command,
'gitlab-get-version': get_version_command,
'gitlab-pipelines-schedules-list': gitlab_pipelines_schedules_list_command,
'gitlab-pipelines-list': gitlab_pipelines_list_command,
'gitlab-jobs-list': gitlab_jobs_list_command,
'gitlab-artifact-get': gitlab_artifact_get_command,
'gitlab-merge-requests-list': gitlab_merge_requests_list_command,
'gitlab-get-merge-request': gitlab_get_merge_request_command,
'gitlab-issues-list': gitlab_issues_list_command,
'gitlab-create-issue': gitlab_create_issue_command,
'gitlab-edit-issue': gitlab_edit_issue_command,
'gitlab-group-projects-list': gitlab_group_projects_list_command,
'gitlab-get-raw-file': gitlab_get_raw_file_command
}
if command == 'test-module':
test_module(client)
else:
return_results(commands[command](client, args))
except Exception as e:
return_error(str(e))
| def main():
params = demisto.params()
args = demisto.args()
url = params.get('url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
headers = {}
headers['PRIVATE-TOKEN'] = f'{params["api_key"]}'
command = demisto.command()
LOG(f'Command being called is {command}')
try:
urllib3.disable_warnings()
client = Client(urljoin(url, ""), verify_certificate, proxy, headers=headers)
commands = {
'gitlab-get-projects': get_projects_command,
'gitlab-projects-get-access-requests': projects_get_access_requests_command,
'gitlab-projects-request-access': projects_request_access_command,
'gitlab-projects-approve-access': projects_approve_access_command,
'gitlab-projects-deny-access': projects_deny_access_command,
'gitlab-projects-get-repository-branches': projects_get_repository_branches_command,
'gitlab-projects-create-repository-branch': projects_create_repository_branch_command,
'gitlab-projects-delete-repository-branch': projects_delete_repository_branch_command,
'gitlab-projects-delete-repository-merged-branches': projects_delete_repository_merged_branches_command,
'gitlab-get-version': get_version_command,
'gitlab-pipelines-schedules-list': gitlab_pipelines_schedules_list_command,
'gitlab-pipelines-list': gitlab_pipelines_list_command,
'gitlab-jobs-list': gitlab_jobs_list_command,
'gitlab-artifact-get': gitlab_artifact_get_command,
'gitlab-merge-requests-list': gitlab_merge_requests_list_command,
'gitlab-get-merge-request': gitlab_get_merge_request_command,
'gitlab-issues-list': gitlab_issues_list_command,
'gitlab-create-issue': gitlab_create_issue_command,
'gitlab-issue-edit': gitlab_edit_issue_command,
'gitlab-group-projects-list': gitlab_group_projects_list_command,
'gitlab-get-raw-file': gitlab_get_raw_file_command
}
if command == 'test-module':
test_module(client)
else:
return_results(commands[command](client, args))
except Exception as e:
return_error(str(e))
|
3,584 | def _unresolve_domain(domain):
"""
Unresolve domain.
:param str domain: Domain to extrac the project slug from.
:returns: A tuple with the project slug, domain object, and if the domain
is external.
"""
public_domain = _get_domain_from_host(settings.PUBLIC_DOMAIN)
external_domain = _get_domain_from_host(settings.RTD_EXTERNAL_VERSION_DOMAIN)
subdomain, *rest_of_domain = domain.split(".", maxsplit=1)
rest_of_domain = rest_of_domain[0] if rest_of_domain else ""
if public_domain in domain:
# Serve from the PUBLIC_DOMAIN, ensuring it looks like `foo.PUBLIC_DOMAIN`.
if public_domain == rest_of_domain:
project_slug = subdomain
return project_slug, None, False
# TODO: This can catch some possibly valid domains (docs.readthedocs.io.com) for example,
# but these might be phishing, so let's ignore them for now.
return None, None, False
if external_domain in domain:
# Serve custom versions on external-host-domain.
if external_domain == rest_of_domain:
try:
project_slug, _ = subdomain.rsplit("--", maxsplit=1)
return project_slug, None, True
except ValueError:
return None, None, False
# Custom domain.
domain_object = (
Domain.objects.filter(domain=domain).prefetch_related("project").first()
)
if domain_object:
project_slug = domain_object.project.slug
return project_slug, domain_object, False
return None, None, None
| def _unresolve_domain(domain):
"""
Unresolve domain.
:param str domain: Domain to extract the project slug from.
:returns: A tuple with the project slug, domain object, and if the domain
is external.
"""
public_domain = _get_domain_from_host(settings.PUBLIC_DOMAIN)
external_domain = _get_domain_from_host(settings.RTD_EXTERNAL_VERSION_DOMAIN)
subdomain, *rest_of_domain = domain.split(".", maxsplit=1)
rest_of_domain = rest_of_domain[0] if rest_of_domain else ""
if public_domain in domain:
# Serve from the PUBLIC_DOMAIN, ensuring it looks like `foo.PUBLIC_DOMAIN`.
if public_domain == rest_of_domain:
project_slug = subdomain
return project_slug, None, False
# TODO: This can catch some possibly valid domains (docs.readthedocs.io.com) for example,
# but these might be phishing, so let's ignore them for now.
return None, None, False
if external_domain in domain:
# Serve custom versions on external-host-domain.
if external_domain == rest_of_domain:
try:
project_slug, _ = subdomain.rsplit("--", maxsplit=1)
return project_slug, None, True
except ValueError:
return None, None, False
# Custom domain.
domain_object = (
Domain.objects.filter(domain=domain).prefetch_related("project").first()
)
if domain_object:
project_slug = domain_object.project.slug
return project_slug, domain_object, False
return None, None, None
|
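
As a quick illustration of the domain parsing in the pair above: the host is split once on the first dot, and external (version) subdomains encode the project slug before a `--` separator. The host names below are hypothetical.

```python
# First split: subdomain vs. the rest of the host.
domain = "my-project.readthedocs.io"  # hypothetical public-domain host
subdomain, *rest_of_domain = domain.split(".", maxsplit=1)
rest_of_domain = rest_of_domain[0] if rest_of_domain else ""
print(subdomain, "|", rest_of_domain)  # my-project | readthedocs.io

# External domains put the version after "--" in the subdomain.
external_subdomain = "my-project--123"
project_slug, _ = external_subdomain.rsplit("--", maxsplit=1)
print(project_slug)  # my-project
```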
31,344 | def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
''' EXECUTION '''
#LOG('command is %s' % (demisto.command(), ))
demisto.debug(f'Command being called is {demisto.command()}')
try:
LOG('Command being called is {command}'.format(command=demisto.command()))
if demisto.command() == 'Picus-GetAccessToken':
getAccessToken()
elif demisto.command() == 'Picus-Vector-Compare': # Makes a comparison of the given vector's results
token = getAccessToken()
demisto.results(vectorCompare(token))
elif demisto.command() == 'Picus-Attack-Result-List': # Returns the list of the attack results\nhave optional parameters for pagination and filtration
token = getAccessToken()
demisto.results(attackResultList(token))
elif demisto.command() == 'Picus-Specific-Threats-Results': # Returns the list of the attack results of a single threat\nhave optional
token = getAccessToken()
demisto.results(specificThreatsResults(token))
elif demisto.command() == 'Picus-Peer-List': # Returns the peer list with current statuses
token = getAccessToken()
demisto.results(peerList(token))
elif demisto.command() == 'Picus-EMail-Peer-List': # Returns the E-Mail peer list with current statuses
token = getAccessToken()
demisto.results(eMailPeerList(token))
elif demisto.command() == 'Picus-Attack-All-Vectors': # Schedules given attack on all possible vectors
token = getAccessToken()
demisto.results(attackAllVectors(token))
elif demisto.command() == 'Picus-Attack-Single': # Schedules a single attack on requested vector
token = getAccessToken()
demisto.results(attackSingle(token))
elif demisto.command() == 'Picus-Trigger-Update': # Triggers the update mechanism manually, returns if the update-command is taken successfully
token = getAccessToken()
demisto.results(triggerUpdate(token))
elif demisto.command() == 'Picus-Version': # Returns the current version and the update time config
token = getAccessToken()
demisto.results(version(token))
elif demisto.command() == 'Picus-Threat-List': # Returns the list of the threats\nhave optional parameters for pagination and filtration
token = getAccessToken()
demisto.results(threatList(token))
elif demisto.command() == 'Picus-Mitigation-List': # Returns the list of the mitigations of threats\nhave optional parameters for pagination and filtration, this route may not be used associated with your license
token = getAccessToken()
demisto.results(mitigationList(token))
elif demisto.command() == 'Picus-Mitre-Matrix': # Returns the mitre matrix metadata\ntakes no parameters
token = getAccessToken()
demisto.results(mitreMatrix(token))
elif demisto.command() == 'Picus-Sigma-Rules-List': # Returns the list of the sigma rules of scenario actions\nhave optional parameters for pagination and filtration, this route may not be used associated with your license
token = getAccessToken()
demisto.results(sigmaRulesList(token))
elif demisto.command() == 'Picus-Vector-List': # Returns the list of the vectors all disabled and enabled ones\nhave optional parameters for pagination
token = getAccessToken()
demisto.results(vectorList(token))
elif demisto.command() == 'test-module':
demisto.results(test_module())
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
| def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
''' EXECUTION '''
#LOG('command is %s' % (demisto.command(), ))
demisto.debug(f'Command being called is {demisto.command()}')
try:
LOG('Command being called is {command}'.format(command=demisto.command()))
if demisto.command() == 'Picus-GetAccessToken':
getAccessToken()
elif demisto.command() == 'Picus-Vector-Compare': # Makes a comparison of the given vector's results
token = getAccessToken()
demisto.results(vectorCompare(token))
elif demisto.command() == 'picus-attack-result-list': # Returns the list of the attack results\nhave optional parameters for pagination and filtration
token = getAccessToken()
demisto.results(attackResultList(token))
elif demisto.command() == 'Picus-Specific-Threats-Results': # Returns the list of the attack results of a single threat\nhave optional
token = getAccessToken()
demisto.results(specificThreatsResults(token))
elif demisto.command() == 'Picus-Peer-List': # Returns the peer list with current statuses
token = getAccessToken()
demisto.results(peerList(token))
elif demisto.command() == 'Picus-EMail-Peer-List': # Returns the E-Mail peer list with current statuses
token = getAccessToken()
demisto.results(eMailPeerList(token))
elif demisto.command() == 'Picus-Attack-All-Vectors': # Schedules given attack on all possible vectors
token = getAccessToken()
demisto.results(attackAllVectors(token))
elif demisto.command() == 'Picus-Attack-Single': # Schedules a single attack on requested vector
token = getAccessToken()
demisto.results(attackSingle(token))
elif demisto.command() == 'Picus-Trigger-Update': # Triggers the update mechanism manually, returns if the update-command is taken successfully
token = getAccessToken()
demisto.results(triggerUpdate(token))
elif demisto.command() == 'Picus-Version': # Returns the current version and the update time config
token = getAccessToken()
demisto.results(version(token))
elif demisto.command() == 'Picus-Threat-List': # Returns the list of the threats\nhave optional parameters for pagination and filtration
token = getAccessToken()
demisto.results(threatList(token))
elif demisto.command() == 'Picus-Mitigation-List': # Returns the list of the mitigations of threats\nhave optional parameters for pagination and filtration, this route may not be used associated with your license
token = getAccessToken()
demisto.results(mitigationList(token))
elif demisto.command() == 'Picus-Mitre-Matrix': # Returns the mitre matrix metadata\ntakes no parameters
token = getAccessToken()
demisto.results(mitreMatrix(token))
elif demisto.command() == 'Picus-Sigma-Rules-List': # Returns the list of the sigma rules of scenario actions\nhave optional parameters for pagination and filtration, this route may not be used associated with your license
token = getAccessToken()
demisto.results(sigmaRulesList(token))
elif demisto.command() == 'Picus-Vector-List': # Returns the list of the vectors all disabled and enabled ones\nhave optional parameters for pagination
token = getAccessToken()
demisto.results(vectorList(token))
elif demisto.command() == 'test-module':
demisto.results(test_module())
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {demisto.command()} command.\nError:\n{str(e)}')
|
52,007 | def test_windows_dumpfiles(image, volatility, python):
json_file = open('./test/known_files.json')
known_files = json.load(json_file)
failed_chksms = 0
if sys.platform == 'win32':
file_name = ntpath.basename(image)
else:
file_name = os.path.basename(image)
try:
for addr in known_files["windows_dumpfiles"][file_name]:
path = tempfile.mkdtemp()
rc, out, err = runvol_plugin("windows.dumpfiles.DumpFiles", image, volatility, python, globalargs=["-o", path], pluginargs=["--virtaddr", addr])
for file in os.listdir(path):
fp = open(os.path.join(path, file), "rb")
if hashlib.md5(fp.read()).hexdigest() not in known_files["windows_dumpfiles"][file_name][addr]:
failed_chksms += 1
fp.close()
shutil.rmtree(path)
json_file.close()
assert failed_chksms == 0
assert rc == 0
except Exception as e:
json_file.close()
print("Key Error raised on " + str(e))
assert False
| def test_windows_dumpfiles(image, volatility, python):
json_file = open('./test/known_files.json')
known_files = json.load(json_file)
failed_chksms = 0
if sys.platform == 'win32':
file_name = ntpath.basename(image)
else:
file_name = os.path.basename(image)
try:
for addr in known_files["windows_dumpfiles"][file_name]:
path = tempfile.mkdtemp()
rc, out, err = runvol_plugin("windows.dumpfiles.DumpFiles", image, volatility, python, globalargs=["-o", path], pluginargs=["--virtaddr", addr])
for file in os.listdir(path):
with open(os.path.join(path, file), "rb") as fp:
if hashlib.md5(fp.read()).hexdigest() not in known_files["windows_dumpfiles"][file_name][addr]:
failed_chksms += 1
shutil.rmtree(path)
json_file.close()
assert failed_chksms == 0
assert rc == 0
except Exception as e:
json_file.close()
print("Key Error raised on " + str(e))
assert False
|
6,511 | def get_context(context):
settings = frappe.get_doc("E Commerce Settings")
context.categories_enabled = settings.enable_field_filters
if context.categories_enabled:
categories = [row.fieldname for row in settings.filter_fields]
context.tabs = get_tabs(categories)
if settings.slideshow:
context.slideshow = get_slideshow(settings.slideshow)
context.no_cache = 1
| def get_context(context):
settings = frappe.get_cached_doc("E Commerce Settings")
context.categories_enabled = settings.enable_field_filters
if context.categories_enabled:
categories = [row.fieldname for row in settings.filter_fields]
context.tabs = get_tabs(categories)
if settings.slideshow:
context.slideshow = get_slideshow(settings.slideshow)
context.no_cache = 1
|
3,294 | def find_roots(stack_path, source_path):
"""
Returns a tuple containing the stack_root, and the source_root.
If there is no overlap, raise an exception since this should not happen
"""
overlap_to_check = stack_path
stack_root = ""
while overlap_to_check:
# see if our path ends with the overlap we want
if source_path.endswith(overlap_to_check):
# determine the source root by removing the overlap
source_root = source_path.rpartition(overlap_to_check)[0]
return (stack_root, source_root)
# increase the stack root specificity
# while decreasing the overlap
stack_root += overlap_to_check[0]
overlap_to_check = overlap_to_check[1:]
# validate_source_url should have ensured the file names match
# so if we get here something went wrong and there is a bug
raise Exception("Could not common root from paths")
| def find_roots(stack_path, source_path):
"""
Returns a tuple containing the stack_root, and the source_root.
If there is no overlap, raise an exception since this should not happen
"""
overlap_to_check = stack_path
stack_root = ""
while overlap_to_check:
# see if our path ends with the overlap we want
if source_path.endswith(overlap_to_check):
# determine the source root by removing the overlap
source_root = source_path.rpartition(overlap_to_check)[0]
return (stack_root, source_root)
# increase the stack root specificity
# while decreasing the overlap
stack_root += overlap_to_check[0]
overlap_to_check = overlap_to_check[1:]
# validate_source_url should have ensured the file names match
# so if we get here something went wrong and there is a bug
raise Exception("Could not find common root from paths")
|
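
A worked example of the overlap trimming in the pair above (editor's sketch with hypothetical paths): characters are moved from the front of the stack path into `stack_root` until the remaining suffix matches the end of the source path.

```python
def find_roots(stack_path, source_path):
    # Same overlap-trimming logic as above, reproduced for a runnable example.
    overlap_to_check = stack_path
    stack_root = ""
    while overlap_to_check:
        if source_path.endswith(overlap_to_check):
            source_root = source_path.rpartition(overlap_to_check)[0]
            return (stack_root, source_root)
        stack_root += overlap_to_check[0]
        overlap_to_check = overlap_to_check[1:]
    raise Exception("Could not find common root from paths")

print(find_roots("/app/src/main.py", "/home/dev/project/src/main.py"))
# ('/app', '/home/dev/project')
```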
58,181 | def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
secret_key = demisto.params().get('credentials').get('password')
client_key = demisto.params().get('credentials').get('identifier')
organisation_id = demisto.params().get('organization_id')
# get the service API url
base_url = demisto.params().get("api_url")
# How much time before the first fetch to retrieve incidents
proxy = demisto.params().get('proxy', False)
LOG(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
organisation_id=organisation_id,
secret_key=secret_key,
client_key=client_key,
proxy=proxy)
commands = {
'test-module': test_module,
"umbrella-reporting-destination-list":
get_destinations_list,
"umbrella-reporting-category-list":
get_categories_list,
"umbrella-reporting-identity-list":
get_identities_list,
"umbrella-reporting-event-type-list":
get_event_types_list,
"umbrella-reporting-file-list":
get_file_list,
"umbrella-reporting-threat-list":
get_threat_list,
"umbrella-reporting-activity-list":
get_activity_list,
"umbrella-reporting-activity-get":
get_activity_by_traffic_type,
"umbrella-reporting-summary-list":
get_summary_list
}
args = demisto.args()
command = demisto.command()
if command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError
# Log exceptions
except Exception as e:
return_error(
f'Failed to execute {demisto.command()} command. Error: {str(e)}')
| def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
secret_key = demisto.params().get('credentials').get('password')
client_key = demisto.params().get('credentials').get('identifier')
organisation_id = demisto.params().get('organization_id')
# get the service API url
base_url = demisto.params().get("api_url")
# How much time before the first fetch to retrieve incidents
proxy = params.get('proxy', False)
handle_proxy()
LOG(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
organisation_id=organisation_id,
secret_key=secret_key,
client_key=client_key,
proxy=proxy)
commands = {
'test-module': test_module,
"umbrella-reporting-destination-list":
get_destinations_list,
"umbrella-reporting-category-list":
get_categories_list,
"umbrella-reporting-identity-list":
get_identities_list,
"umbrella-reporting-event-type-list":
get_event_types_list,
"umbrella-reporting-file-list":
get_file_list,
"umbrella-reporting-threat-list":
get_threat_list,
"umbrella-reporting-activity-list":
get_activity_list,
"umbrella-reporting-activity-get":
get_activity_by_traffic_type,
"umbrella-reporting-summary-list":
get_summary_list
}
args = demisto.args()
command = demisto.command()
if command in commands:
return_results(commands[command](client, args))
else:
raise NotImplementedError
# Log exceptions
except Exception as e:
return_error(
f'Failed to execute {demisto.command()} command. Error: {str(e)}')
|
14,420 | def get_mfa_state(email, env):
c = open_database(env)
c.execute('SELECT secret, mru_token FROM totp_credentials WHERE user_email=?', (email,))
credential_row = c.fetchone()
if (credential_row == None):
return { 'type': None }
return {
'type': 'totp',
'secret': credential_row[0],
'mru_token': credential_row[1]
}
| def get_mfa_state(email, env):
c = open_database(env)
c.execute('SELECT secret, mru_token FROM totp_credentials WHERE user_email=?', (email,))
credential_row = c.fetchone()
if credential_row is None:
return { 'type': None }
return {
'type': 'totp',
'secret': credential_row[0],
'mru_token': credential_row[1]
}
|
14,174 | def isyeildable(thing: Any) -> bool: # name?
"""
Returns ``True`` if thing can be used in a yield/await statement in cocotb
"""
return iscoroutine(thing) or isinstance(thing, (cocotb.triggers.Trigger, cocotb.triggers.Waitable))
| def isyieldable(thing: Any) -> bool:
"""
Returns ``True`` if thing can be used in a yield/await statement in cocotb
"""
return iscoroutine(thing) or isinstance(thing, (cocotb.triggers.Trigger, cocotb.triggers.Waitable))
|
18,992 | def generate_package_index(cache_prefix):
"""Create the build cache index page.
Creates (or replaces) the "index.json" page at the location given in
cache_prefix. This page contains a link for each binary package (.yaml)
under cache_prefix.
"""
try:
file_list = (
entry
for entry in web_util.list_url(cache_prefix)
if entry.endswith('.yaml'))
except KeyError as inst:
msg = 'No packages at {0}: {1}'.format(cache_prefix, inst)
tty.warn(msg)
return
except Exception as err:
# If we got some kind of S3 (access denied or other connection
# error), the first non boto-specific class in the exception
# hierarchy is Exception. Just print a warning and return
msg = 'Encountered problem listing packages at {0}: {1}'.format(
cache_prefix, err)
tty.warn(msg)
return
tty.debug('Retrieving spec.yaml files from {0} to build index'.format(
cache_prefix))
all_mirror_specs = {}
for file_path in file_list:
try:
yaml_url = url_util.join(cache_prefix, file_path)
tty.debug('fetching {0}'.format(yaml_url))
_, _, yaml_file = web_util.read_from_url(yaml_url)
yaml_contents = codecs.getreader('utf-8')(yaml_file).read()
spec_dict = syaml.load(yaml_contents)
s = Spec.from_yaml(yaml_contents)
num_deps = 0
for d in s.traverse(root=False):
num_deps += 1
all_mirror_specs[s.dag_hash()] = {
'yaml_url': yaml_url,
'spec': s,
'num_deps': num_deps,
'binary_cache_checksum': spec_dict['binary_cache_checksum'],
'buildinfo': spec_dict['buildinfo'],
}
except (URLError, web_util.SpackWebError) as url_err:
tty.error('Error reading spec.yaml: {0}'.format(file_path))
tty.error(url_err)
sorted_specs = sorted(all_mirror_specs.keys(),
key=lambda k: all_mirror_specs[k]['num_deps'])
tmpdir = tempfile.mkdtemp()
db_root_dir = os.path.join(tmpdir, 'db_root')
db = spack_db.Database(None, db_dir=db_root_dir,
enable_transaction_locking=False,
record_fields=['spec', 'ref_count', 'in_buildcache'])
try:
tty.debug('Specs sorted by number of dependencies:')
for dag_hash in sorted_specs:
spec_record = all_mirror_specs[dag_hash]
s = spec_record['spec']
num_deps = spec_record['num_deps']
tty.debug(' {0}/{1} -> {2}'.format(
s.name, dag_hash[:7], num_deps))
if num_deps > 0:
# Check each of this spec's dependencies (which we have already
# processed), as they are the source of truth for their own
# full hash. If the full hash we have for any deps does not
# match what those deps have themselves, then we need to splice
# this spec with those deps, and push this spliced spec
# (spec.yaml file) back to the mirror, as well as update the
# all_mirror_specs dictionary with this spliced spec.
to_splice = []
for dep in s.dependencies():
dep_dag_hash = dep.dag_hash()
if dep_dag_hash in all_mirror_specs:
true_dep = all_mirror_specs[dep_dag_hash]['spec']
if true_dep.full_hash() != dep.full_hash():
to_splice.append(true_dep)
if to_splice:
tty.debug(' needs the following deps spliced:')
for true_dep in to_splice:
tty.debug(' {0}/{1}'.format(
true_dep.name, true_dep.dag_hash()[:7]))
s = s.splice(true_dep, True)
# Push this spliced spec back to the mirror
spliced_yaml = s.to_dict(hash=ht.full_hash)
for key in ['binary_cache_checksum', 'buildinfo']:
spliced_yaml[key] = spec_record[key]
temp_yaml_path = os.path.join(tmpdir, 'spliced.spec.yaml')
with open(temp_yaml_path, 'w') as fd:
fd.write(syaml.dump(spliced_yaml))
spliced_yaml_url = spec_record['yaml_url']
web_util.push_to_url(
temp_yaml_path, spliced_yaml_url, keep_original=False)
tty.debug(' spliced and wrote {0}'.format(
spliced_yaml_url))
spec_record['spec'] = s
db.add(s, None)
db.mark(s, 'in_buildcache', True)
# Now that we have fixed any old spec yamls that might have had the wrong
# full hash for their dependencies, we can generate the index, compute
# the hash, and push those files to the mirror.
index_json_path = os.path.join(db_root_dir, 'index.json')
with open(index_json_path, 'w') as f:
db._write_to_file(f)
# Read the index back in and compute it's hash
with open(index_json_path) as f:
index_string = f.read()
index_hash = compute_hash(index_string)
# Write the hash out to a local file
index_hash_path = os.path.join(db_root_dir, 'index.json.hash')
with open(index_hash_path, 'w') as f:
f.write(index_hash)
# Push the index itself
web_util.push_to_url(
index_json_path,
url_util.join(cache_prefix, 'index.json'),
keep_original=False,
extra_args={'ContentType': 'application/json'})
# Push the hash
web_util.push_to_url(
index_hash_path,
url_util.join(cache_prefix, 'index.json.hash'),
keep_original=False,
extra_args={'ContentType': 'text/plain'})
except Exception as err:
msg = 'Encountered problem pushing package index to {0}: {1}'.format(
cache_prefix, err)
tty.warn(msg)
traceback.print_exc(file=sys.stdout)
finally:
shutil.rmtree(tmpdir)
| def generate_package_index(cache_prefix):
"""Create the build cache index page.
Creates (or replaces) the "index.json" page at the location given in
cache_prefix. This page contains a link for each binary package (.yaml)
under cache_prefix.
"""
try:
file_list = (
entry
for entry in web_util.list_url(cache_prefix)
if entry.endswith('.yaml'))
except KeyError as inst:
msg = 'No packages at {0}: {1}'.format(cache_prefix, inst)
tty.warn(msg)
return
except Exception as err:
# If we got some kind of S3 (access denied or other connection
# error), the first non boto-specific class in the exception
# hierarchy is Exception. Just print a warning and return
msg = 'Encountered problem listing packages at {0}: {1}'.format(
cache_prefix, err)
tty.warn(msg)
return
tty.debug('Retrieving spec.yaml files from {0} to build index'.format(
cache_prefix))
all_mirror_specs = {}
for file_path in file_list:
try:
yaml_url = url_util.join(cache_prefix, file_path)
tty.debug('fetching {0}'.format(yaml_url))
_, _, yaml_file = web_util.read_from_url(yaml_url)
yaml_contents = codecs.getreader('utf-8')(yaml_file).read()
spec_dict = syaml.load(yaml_contents)
s = Spec.from_yaml(yaml_contents)
num_deps = len(list(s.traverse(root=False)))
all_mirror_specs[s.dag_hash()] = {
'yaml_url': yaml_url,
'spec': s,
'num_deps': num_deps,
'binary_cache_checksum': spec_dict['binary_cache_checksum'],
'buildinfo': spec_dict['buildinfo'],
}
except (URLError, web_util.SpackWebError) as url_err:
tty.error('Error reading spec.yaml: {0}'.format(file_path))
tty.error(url_err)
sorted_specs = sorted(all_mirror_specs.keys(),
key=lambda k: all_mirror_specs[k]['num_deps'])
tmpdir = tempfile.mkdtemp()
db_root_dir = os.path.join(tmpdir, 'db_root')
db = spack_db.Database(None, db_dir=db_root_dir,
enable_transaction_locking=False,
record_fields=['spec', 'ref_count', 'in_buildcache'])
try:
tty.debug('Specs sorted by number of dependencies:')
for dag_hash in sorted_specs:
spec_record = all_mirror_specs[dag_hash]
s = spec_record['spec']
num_deps = spec_record['num_deps']
tty.debug(' {0}/{1} -> {2}'.format(
s.name, dag_hash[:7], num_deps))
if num_deps > 0:
# Check each of this spec's dependencies (which we have already
# processed), as they are the source of truth for their own
# full hash. If the full hash we have for any deps does not
# match what those deps have themselves, then we need to splice
# this spec with those deps, and push this spliced spec
# (spec.yaml file) back to the mirror, as well as update the
# all_mirror_specs dictionary with this spliced spec.
to_splice = []
for dep in s.dependencies():
dep_dag_hash = dep.dag_hash()
if dep_dag_hash in all_mirror_specs:
true_dep = all_mirror_specs[dep_dag_hash]['spec']
if true_dep.full_hash() != dep.full_hash():
to_splice.append(true_dep)
if to_splice:
tty.debug(' needs the following deps spliced:')
for true_dep in to_splice:
tty.debug(' {0}/{1}'.format(
true_dep.name, true_dep.dag_hash()[:7]))
s = s.splice(true_dep, True)
# Push this spliced spec back to the mirror
spliced_yaml = s.to_dict(hash=ht.full_hash)
for key in ['binary_cache_checksum', 'buildinfo']:
spliced_yaml[key] = spec_record[key]
temp_yaml_path = os.path.join(tmpdir, 'spliced.spec.yaml')
with open(temp_yaml_path, 'w') as fd:
fd.write(syaml.dump(spliced_yaml))
spliced_yaml_url = spec_record['yaml_url']
web_util.push_to_url(
temp_yaml_path, spliced_yaml_url, keep_original=False)
tty.debug(' spliced and wrote {0}'.format(
spliced_yaml_url))
spec_record['spec'] = s
db.add(s, None)
db.mark(s, 'in_buildcache', True)
# Now that we have fixed any old spec yamls that might have had the wrong
# full hash for their dependencies, we can generate the index, compute
# the hash, and push those files to the mirror.
index_json_path = os.path.join(db_root_dir, 'index.json')
with open(index_json_path, 'w') as f:
db._write_to_file(f)
# Read the index back in and compute it's hash
with open(index_json_path) as f:
index_string = f.read()
index_hash = compute_hash(index_string)
# Write the hash out to a local file
index_hash_path = os.path.join(db_root_dir, 'index.json.hash')
with open(index_hash_path, 'w') as f:
f.write(index_hash)
# Push the index itself
web_util.push_to_url(
index_json_path,
url_util.join(cache_prefix, 'index.json'),
keep_original=False,
extra_args={'ContentType': 'application/json'})
# Push the hash
web_util.push_to_url(
index_hash_path,
url_util.join(cache_prefix, 'index.json.hash'),
keep_original=False,
extra_args={'ContentType': 'text/plain'})
except Exception as err:
msg = 'Encountered problem pushing package index to {0}: {1}'.format(
cache_prefix, err)
tty.warn(msg)
traceback.print_exc(file=sys.stdout)
finally:
shutil.rmtree(tmpdir)
|
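The record above calls a compute_hash helper that is not shown in the snippet; assuming it is a plain content digest of the serialized index (which is how the surrounding code treats it), a minimal stand-in could look like this sketch. The helper name and behaviour are assumptions, not the verified upstream implementation.
import hashlib

def compute_hash(data):
    # Hypothetical stand-in for the helper used above: a deterministic
    # digest of the serialized index string.
    return hashlib.sha256(data.encode('utf-8')).hexdigest()

index_string = '{"database": {"installs": {}}}'
print(compute_hash(index_string))  # the same input always yields the same hex digest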
1,594 | def _incremental_weighted_mean_and_var(X, sample_weight,
last_weighted_mean,
last_weighted_variance,
last_weight_sum):
"""Calculate weighted mean and variance batch update
last_weighted_mean and last_weighted_variance are statistics
computed at the last step by the function. Both must be
initialized to 0.0. In case no scaling is required
last_weighted_variance can be None. The weighted_mean is
always required and returned because it is necessary for the
calculation of the weighted_variance. last_weight_sum is
the sum of weights encountered until now.
Derived from the paper "Incremental calculation of
weighted mean and variance",
by Tony Finch.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for statistics update
sample_weight : array-like, shape (n_samples,)
last_weighted_mean : array-like, shape: (n_features,)
last_weighted_variance : array-like, shape: (n_features,)
last_weight_sum : array-like, shape (n_features,)
Returns
-------
updated_weighted_mean : array, shape (n_features,)
updated_weighted_variance : array, shape (n_features,)
If None, only weighted_mean is computed
updated_weight_sum : array, shape (n_features,)
Notes
-----
NaNs in X are ignored.
References
----------
Tony Finch
"Incremental calculation of weighted mean and variance"
University of Cambridge Computing Service, February 2009
"""
# last = stats until now
# new = the current increment
# updated = the aggregated stats
M = np.isnan(X)
sample_weight_T = np.transpose(np.reshape(sample_weight, (-1, 1)))
new_weight_sum = _safe_accumulator_op(np.dot, sample_weight_T, ~M).ravel()
total_weight_sum = _safe_accumulator_op(np.sum, sample_weight, axis=0)
X_0 = np.where(np.isnan(X), 0, X)
new_weighted_mean = \
_safe_accumulator_op(np.average, X_0, weights=sample_weight, axis=0)
new_weighted_mean *= total_weight_sum / new_weight_sum
updated_weight_sum = last_weight_sum + new_weight_sum
updated_weighted_mean = (
(last_weight_sum * last_weighted_mean +
new_weight_sum * new_weighted_mean) / updated_weight_sum)
if last_weighted_variance is None:
updated_weighted_variance = None
else:
X_0 = np.where(np.isnan(X), 0, (X-new_weighted_mean)**2)
new_weighted_variance = \
_safe_accumulator_op(
np.average, X_0, weights=sample_weight, axis=0)
new_weighted_variance *= total_weight_sum / new_weight_sum
new_element = (
new_weight_sum *
(new_weighted_variance +
(new_weighted_mean - updated_weighted_mean) ** 2))
last_element = (
last_weight_sum *
(last_weighted_variance +
(last_weighted_mean - updated_weighted_mean) ** 2))
updated_weighted_variance = (
new_element + last_element) / updated_weight_sum
return updated_weighted_mean, updated_weighted_variance, updated_weight_sum
| def _incremental_weighted_mean_and_var(X, sample_weight,
last_weighted_mean,
last_weighted_variance,
last_weight_sum):
"""Calculate weighted mean and variance batch update
last_weighted_mean and last_weighted_variance are statistics
computed at the last step by the function. Both must be
initialized to 0.0. In case no scaling is required
last_weighted_variance can be None. The weighted_mean is
always required and returned because it is necessary for the
calculation of the weighted_variance. last_weight_sum is
the sum of weights encountered until now.
Derived from the paper "Incremental calculation of
weighted mean and variance",
by Tony Finch.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Data to use for statistics update
sample_weight : array-like, shape (n_samples,)
last_weighted_mean : array-like, shape: (n_features,)
last_weighted_variance : array-like, shape: (n_features,)
last_weight_sum : array-like, shape (n_features,)
Returns
-------
updated_weighted_mean : array, shape (n_features,)
updated_weighted_variance : array, shape (n_features,)
If None, only weighted_mean is computed
updated_weight_sum : array, shape (n_features,)
Notes
-----
NaNs in X are ignored.
References
----------
Tony Finch
"Incremental calculation of weighted mean and variance"
University of Cambridge Computing Service, February 2009
"""
# last = stats until now
# new = the current increment
# updated = the aggregated stats
nan_mask = np.isnan(X)
sample_weight_T = np.transpose(np.reshape(sample_weight, (-1, 1)))
new_weight_sum = _safe_accumulator_op(np.dot, sample_weight_T, ~nan_mask).ravel()
total_weight_sum = _safe_accumulator_op(np.sum, sample_weight, axis=0)
X_0 = np.where(nan_mask, 0, X)
new_weighted_mean = \
_safe_accumulator_op(np.average, X_0, weights=sample_weight, axis=0)
new_weighted_mean *= total_weight_sum / new_weight_sum
updated_weight_sum = last_weight_sum + new_weight_sum
updated_weighted_mean = (
(last_weight_sum * last_weighted_mean +
new_weight_sum * new_weighted_mean) / updated_weight_sum)
if last_weighted_variance is None:
updated_weighted_variance = None
else:
X_0 = np.where(np.isnan(X), 0, (X-new_weighted_mean)**2)
new_weighted_variance = \
_safe_accumulator_op(
np.average, X_0, weights=sample_weight, axis=0)
new_weighted_variance *= total_weight_sum / new_weight_sum
new_element = (
new_weight_sum *
(new_weighted_variance +
(new_weighted_mean - updated_weighted_mean) ** 2))
last_element = (
last_weight_sum *
(last_weighted_variance +
(last_weighted_mean - updated_weighted_mean) ** 2))
updated_weighted_variance = (
new_element + last_element) / updated_weight_sum
return updated_weighted_mean, updated_weighted_variance, updated_weight_sum
|
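The update rule above can be checked in isolation: combining two weighted batches through the updated_weighted_mean formula reproduces a single weighted pass over all the data. A minimal sketch, assuming 1-D data without NaNs and plain NumPy (independent of the scikit-learn helpers used above):
import numpy as np

x1, w1 = np.array([1.0, 2.0, 3.0]), np.array([1.0, 1.0, 2.0])
x2, w2 = np.array([4.0, 5.0]), np.array([3.0, 1.0])

# per-batch statistics
m1, s1 = np.average(x1, weights=w1), w1.sum()
m2, s2 = np.average(x2, weights=w2), w2.sum()

# incremental combination, mirroring updated_weighted_mean above
s12 = s1 + s2
m12 = (s1 * m1 + s2 * m2) / s12

# agrees with a single weighted pass over all of the data
expected = np.average(np.concatenate([x1, x2]), weights=np.concatenate([w1, w2]))
assert np.isclose(m12, expected)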
17,723 | def to_networkx(compound, names_only=False):
"""Create a NetworkX graph representing the hierarchy of a Compound.
Parameters
----------
compound : mb.Compound
The mbuild Compound that needs to be converted.
names_only : bool, optional, default=False
Store only the names of the compounds in the graph,
appended with their IDs, for distinction even if they
have the same name. When set to False, the default
behavior, the nodes are the compounds themselves.
Return
------
G : networkx.DiGraph
Notes
-----
This digraph is not the bondgraph of the compound.
See Also
--------
mbuild.bond_graph
"""
nx = import_('networkx')
nodes = list()
edges = list()
if names_only:
nodes.append(compound.name + '_' + str(id(compound)))
else:
nodes.append(compound)
nodes, edges = _iterate_children(compound, nodes,
edges, names_only=names_only)
graph = nx.DiGraph()
graph.add_nodes_from(nodes)
graph.add_edges_from(edges)
return graph
| def to_networkx(compound, names_only=False):
"""Create a NetworkX graph representing the hierarchy of a Compound.
Parameters
----------
compound : mb.Compound
The mbuild Compound that needs to be converted.
names_only : bool, optional, default=False
Store only the names of the compounds in the graph,
appended with their IDs, for distinction even if they
have the same name. When set to False, the default
behavior, the nodes are the compounds themselves.
Returns
------
G : networkx.DiGraph
Notes
-----
This digraph is not the bondgraph of the compound.
See Also
--------
mbuild.bond_graph
"""
nx = import_('networkx')
nodes = list()
edges = list()
if names_only:
nodes.append(compound.name + '_' + str(id(compound)))
else:
nodes.append(compound)
nodes, edges = _iterate_children(compound, nodes,
edges, names_only=names_only)
graph = nx.DiGraph()
graph.add_nodes_from(nodes)
graph.add_edges_from(edges)
return graph
|
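A short usage sketch for to_networkx, assuming mbuild and networkx are installed; the import path is an assumption and may differ between mbuild versions:
import mbuild as mb
from mbuild.conversion import to_networkx  # import path assumed

parent = mb.Compound(name='parent')
child = mb.Compound(name='child')
parent.add(child)

# With names_only=True the nodes are "<name>_<id>" strings rather than Compounds.
graph = to_networkx(parent, names_only=True)
print(graph.number_of_nodes(), graph.number_of_edges())  # expected: 2 nodes, 1 edge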
28,579 | def plot_ppc(
data,
kind="kde",
alpha=None,
mean=True,
observed=True,
color=None,
colors=None,
grid=None,
figsize=None,
textsize=None,
data_pairs=None,
var_names=None,
filter_vars=None,
coords=None,
flatten=None,
flatten_pp=None,
num_pp_samples=None,
random_seed=None,
jitter=None,
animated=False,
animation_kwargs=None,
legend=True,
labeller=None,
ax=None,
backend=None,
backend_kwargs=None,
group="posterior",
show=None,
):
"""
Plot for posterior/prior predictive checks.
Parameters
----------
data: :class:`arviz.InferenceData` object
InferenceData object containing the observed and posterior/prior predictive data.
kind: str
Type of plot to display ("kde", "cumulative", or "scatter"). Defaults to `kde`.
alpha: float
Opacity of posterior/prior predictive density curves.
Defaults to `0.2` for ``kind`` = kde and cumulative, for scatter defaults to `0.7`.
mean: bool
Whether or not to plot the mean posterior/prior predictive distribution.
Defaults to ``True``.
observed: bool, default ``True``
Whether or not to plot the observed data.
color: str
Valid matplotlib ``color``. Defaults to `C0`.
colors: list
List with valid matplotlib colors corresponding to the posterior/prior predictive
distribution, observed data and mean of the posterior/prior predictive distribution.
Defaults to ["C0", "k", "C1"].
grid : tuple
Number of rows and columns. Defaults to None, the rows and columns are
automatically inferred.
figsize: tuple
Figure size. If None, it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None, it will be
autoscaled based on ``figsize``.
data_pairs: dict
Dictionary containing relations between observed data and posterior/prior predictive data.
Dictionary structure:
- key = data var_name
- value = posterior/prior predictive var_name
For example, ``data_pairs = {'y' : 'y_hat'}``
If None, it will assume that the observed data and the posterior/prior
predictive data have the same variable name.
var_names: list of variable names
Variables to be plotted, if `None` all variables are plotted. Prefix the
variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
coords: dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten: list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the ``coords`` argument. Defaults to flattening all of the dimensions.
flatten_pp: list
List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens
across the coordinates specified in the ``coords`` argument. Defaults to flattening all
of the dimensions. Dimensions should match flatten excluding dimensions for ``data_pairs``
parameters. If ``flatten`` is defined and ``flatten_pp`` is None, then
``flatten_pp`` = `flatten`.
num_pp_samples: int
The number of posterior/prior predictive samples to plot. For ``kind`` = 'scatter' and
`animation` = ``False`` it defaults to a maximum of 5 samples and will set jitter to `0.7`
unless defined. Otherwise it defaults to all provided samples.
random_seed: int
Random number generator seed passed to ``numpy.random.seed`` to allow
reproducibility of the plot. By default, no seed will be provided
and the plot will change each call if a random sample is specified
by ``num_pp_samples``.
jitter: float
If ``kind`` is "scatter", jitter will add random uniform noise to the height
of the ppc samples and observed data. By default `0`.
animated: bool
Create an animation of one posterior/prior predictive sample per frame.
Defaults to ``False``. Only works with the matplotlib backend.
To run animations inside a notebook you have to use the `nbAgg` matplotlib backend.
Try with `%matplotlib notebook` or `%matplotlib nbAgg`. You can switch back to the
default matplotlib backend with `%matplotlib inline` or `%matplotlib auto`.
If switching back and forth between matplotlib backends, you may need to run the cell
with the animation twice.
If you experience problems rendering the animation try setting
`animation_kwargs({'blit':False})` or changing the matplotlib backend (e.g. to TkAgg).
If you run the animation from a script write `ax, ani = az.plot_ppc(.)`
animation_kwargs : dict
Keywords passed to ``animation.FuncAnimation``. Ignored when using the bokeh backend.
legend : bool
Add legend to figure. By default ``True``.
labeller : labeller instance, optional
Class providing the method ``make_pp_label`` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
ax: numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default to "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.
For additional documentation check the plotting method of the backend.
group: {"prior", "posterior"}, optional
Specifies which InferenceData group should be plotted. Defaults to `'posterior'`.
Other value can be `'prior'`.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
See Also
--------
plot_bpv: Plot Bayesian p-value for observed data and Posterior/Prior predictive.
Examples
--------
Plot the observed data KDE overlaid on posterior predictive KDEs.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('radon')
>>> az.plot_ppc(data, data_pairs={"y":"y"})
Plot the overlay with empirical CDFs.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='cumulative')
Use the ``coords`` and ``flatten`` parameters to plot selected variable dimensions
across multiple plots. We will now modify the dimension `obs_id` to
indicate the name of the county where the measure was taken. The change has to
be done on both ``posterior_predictive`` and ``observed_data`` groups, which is
why we will use :meth:`~arviz.InferenceData.map` to apply the same function to
both groups. Afterwards, we will select the counties to be plotted with the
``coords`` arg.
.. plot::
:context: close-figs
>>> obs_county = data.posterior["County"][data.constant_data["county_idx"]]
>>> data = data.assign_coords(obs_id=obs_county, groups="observed_vars")
>>> az.plot_ppc(data, coords={'obs_id': ['ANOKA', 'BELTRAMI']}, flatten=[])
Plot the overlay using a stacked scatter plot that is particularly useful
when the sample sizes are small.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='scatter', flatten=[],
>>> coords={'obs_id': ['AITKIN', 'BELTRAMI']})
Plot random posterior predictive sub-samples.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, num_pp_samples=30, random_seed=7)
"""
if group not in ("posterior", "prior"):
raise TypeError("`group` argument must be either `posterior` or `prior`")
for groups in (f"{group}_predictive", "observed_data"):
if not hasattr(data, groups):
raise TypeError(f'`data` argument must have the group "{groups}" for ppcplot')
if kind.lower() not in ("kde", "cumulative", "scatter"):
raise TypeError("`kind` argument must be either `kde`, `cumulative`, or `scatter`")
if colors is None:
colors = ["C0", "k", "C1"]
if isinstance(colors, str):
raise TypeError("colors should be a list with 3 items.")
if len(colors) != 3:
raise ValueError("colors should be a list with 3 items.")
if color is not None:
warnings.warn("color has been deprecated in favor of colors", FutureWarning)
colors[0] = color
if data_pairs is None:
data_pairs = {}
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
if backend == "bokeh":
if animated:
raise TypeError("Animation option is only supported with matplotlib backend.")
observed_data = data.observed_data
if group == "posterior":
predictive_dataset = data.posterior_predictive
elif group == "prior":
predictive_dataset = data.prior_predictive
if var_names is None:
var_names = list(observed_data.data_vars)
var_names = _var_names(var_names, observed_data, filter_vars)
pp_var_names = [data_pairs.get(var, var) for var in var_names]
pp_var_names = _var_names(pp_var_names, predictive_dataset, filter_vars)
if flatten_pp is None and flatten is None:
flatten_pp = list(predictive_dataset.dims.keys())
elif flatten_pp is None:
flatten_pp = flatten
if flatten is None:
flatten = list(observed_data.dims.keys())
if coords is None:
coords = {}
if labeller is None:
labeller = BaseLabeller()
if random_seed is not None:
np.random.seed(random_seed)
total_pp_samples = predictive_dataset.sizes["chain"] * predictive_dataset.sizes["draw"]
if num_pp_samples is None:
if kind == "scatter" and not animated:
num_pp_samples = min(5, total_pp_samples)
else:
num_pp_samples = total_pp_samples
if (
not isinstance(num_pp_samples, Integral)
or num_pp_samples < 1
or num_pp_samples > total_pp_samples
):
raise TypeError(
"`num_pp_samples` must be an integer between 1 and " + f"{total_pp_samples}."
)
pp_sample_ix = np.random.choice(total_pp_samples, size=num_pp_samples, replace=False)
for key in coords.keys():
coords[key] = np.where(np.in1d(observed_data[key], coords[key]))[0]
obs_plotters = filter_plotters_list(
list(
xarray_var_iter(
observed_data.isel(coords),
skip_dims=set(flatten),
var_names=var_names,
combined=True,
)
),
"plot_ppc",
)
length_plotters = len(obs_plotters)
pp_plotters = [
tup
for _, tup in zip(
range(length_plotters),
xarray_var_iter(
predictive_dataset.isel(coords),
var_names=pp_var_names,
skip_dims=set(flatten_pp),
combined=True,
),
)
]
rows, cols = default_grid(length_plotters, grid=grid)
ppcplot_kwargs = dict(
ax=ax,
length_plotters=length_plotters,
rows=rows,
cols=cols,
figsize=figsize,
animated=animated,
obs_plotters=obs_plotters,
pp_plotters=pp_plotters,
predictive_dataset=predictive_dataset,
pp_sample_ix=pp_sample_ix,
kind=kind,
alpha=alpha,
colors=colors,
jitter=jitter,
textsize=textsize,
mean=mean,
observed=observed,
total_pp_samples=total_pp_samples,
legend=legend,
labeller=labeller,
group=group,
animation_kwargs=animation_kwargs,
num_pp_samples=num_pp_samples,
backend_kwargs=backend_kwargs,
show=show,
)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_ppc", "ppcplot", backend)
axes = plot(**ppcplot_kwargs)
return axes
| def plot_ppc(
data,
kind="kde",
alpha=None,
mean=True,
observed=True,
color=None,
colors=None,
grid=None,
figsize=None,
textsize=None,
data_pairs=None,
var_names=None,
filter_vars=None,
coords=None,
flatten=None,
flatten_pp=None,
num_pp_samples=None,
random_seed=None,
jitter=None,
animated=False,
animation_kwargs=None,
legend=True,
labeller=None,
ax=None,
backend=None,
backend_kwargs=None,
group="posterior",
show=None,
):
"""
Plot for posterior/prior predictive checks.
Parameters
----------
data: :class:`arviz.InferenceData` object
InferenceData object containing the observed and posterior/prior predictive data.
kind: str
Type of plot to display ("kde", "cumulative", or "scatter"). Defaults to `kde`.
alpha: float
Opacity of posterior/prior predictive density curves.
Defaults to `0.2` for ``kind`` = kde and cumulative, for scatter defaults to `0.7`.
mean: bool
Whether or not to plot the mean posterior/prior predictive distribution.
Defaults to ``True``.
observed: bool, default ``True``
Whether or not to plot the observed data.
color: str
Valid matplotlib ``color``. Defaults to `C0`.
colors: list
List with valid matplotlib colors corresponding to the posterior/prior predictive
distribution, observed data and mean of the posterior/prior predictive distribution.
Defaults to ["C0", "k", "C1"].
grid : tuple
Number of rows and columns. Defaults to None, the rows and columns are
automatically inferred.
figsize: tuple
Figure size. If None, it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None, it will be
autoscaled based on ``figsize``.
data_pairs: dict
Dictionary containing relations between observed data and posterior/prior predictive data.
Dictionary structure:
- key = data var_name
- value = posterior/prior predictive var_name
For example, ``data_pairs = {'y' : 'y_hat'}``
If None, it will assume that the observed data and the posterior/prior
predictive data have the same variable name.
var_names: list of variable names
Variables to be plotted, if `None` all variables are plotted. Prefix the
variables by ``~`` when you want to exclude them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
coords: dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten: list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the ``coords`` argument. Defaults to flattening all of the dimensions.
flatten_pp: list
List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens
across the coordinates specified in the ``coords`` argument. Defaults to flattening all
of the dimensions. Dimensions should match flatten excluding dimensions for ``data_pairs``
parameters. If ``flatten`` is defined and ``flatten_pp`` is None, then
``flatten_pp = flatten``.
num_pp_samples: int
The number of posterior/prior predictive samples to plot. For ``kind`` = 'scatter' and
`animation` = ``False`` it defaults to a maximum of 5 samples and will set jitter to `0.7`
unless defined. Otherwise it defaults to all provided samples.
random_seed: int
Random number generator seed passed to ``numpy.random.seed`` to allow
reproducibility of the plot. By default, no seed will be provided
and the plot will change each call if a random sample is specified
by ``num_pp_samples``.
jitter: float
If ``kind`` is "scatter", jitter will add random uniform noise to the height
of the ppc samples and observed data. By default `0`.
animated: bool
Create an animation of one posterior/prior predictive sample per frame.
Defaults to ``False``. Only works with the matplotlib backend.
To run animations inside a notebook you have to use the `nbAgg` matplotlib backend.
Try with `%matplotlib notebook` or `%matplotlib nbAgg`. You can switch back to the
default matplotlib backend with `%matplotlib inline` or `%matplotlib auto`.
If switching back and forth between matplotlib backends, you may need to run the cell
with the animation twice.
If you experience problems rendering the animation try setting
`animation_kwargs({'blit':False})` or changing the matplotlib backend (e.g. to TkAgg).
If you run the animation from a script write `ax, ani = az.plot_ppc(.)`
animation_kwargs : dict
Keywords passed to ``animation.FuncAnimation``. Ignored when using the bokeh backend.
legend : bool
Add legend to figure. By default ``True``.
labeller : labeller instance, optional
Class providing the method ``make_pp_label`` to generate the labels in the plot titles.
Read the :ref:`label_guide` for more details and usage examples.
ax: numpy array-like of matplotlib axes or bokeh figures, optional
A 2D array of locations into which to plot the densities. If not supplied, Arviz will create
its own array of plot areas (and return it).
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default to "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`.
For additional documentation check the plotting method of the backend.
group: {"prior", "posterior"}, optional
Specifies which InferenceData group should be plotted. Defaults to `'posterior'`.
Other value can be `'prior'`.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
See Also
--------
plot_bpv: Plot Bayesian p-value for observed data and Posterior/Prior predictive.
Examples
--------
Plot the observed data KDE overlaid on posterior predictive KDEs.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('radon')
>>> az.plot_ppc(data, data_pairs={"y":"y"})
Plot the overlay with empirical CDFs.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='cumulative')
Use the ``coords`` and ``flatten`` parameters to plot selected variable dimensions
across multiple plots. We will now modify the dimension `obs_id` to
indicate the name of the county where the measure was taken. The change has to
be done on both ``posterior_predictive`` and ``observed_data`` groups, which is
why we will use :meth:`~arviz.InferenceData.map` to apply the same function to
both groups. Afterwards, we will select the counties to be plotted with the
``coords`` arg.
.. plot::
:context: close-figs
>>> obs_county = data.posterior["County"][data.constant_data["county_idx"]]
>>> data = data.assign_coords(obs_id=obs_county, groups="observed_vars")
>>> az.plot_ppc(data, coords={'obs_id': ['ANOKA', 'BELTRAMI']}, flatten=[])
Plot the overlay using a stacked scatter plot that is particularly useful
when the sample sizes are small.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='scatter', flatten=[],
>>> coords={'obs_id': ['AITKIN', 'BELTRAMI']})
Plot random posterior predictive sub-samples.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, num_pp_samples=30, random_seed=7)
"""
if group not in ("posterior", "prior"):
raise TypeError("`group` argument must be either `posterior` or `prior`")
for groups in (f"{group}_predictive", "observed_data"):
if not hasattr(data, groups):
raise TypeError(f'`data` argument must have the group "{groups}" for ppcplot')
if kind.lower() not in ("kde", "cumulative", "scatter"):
raise TypeError("`kind` argument must be either `kde`, `cumulative`, or `scatter`")
if colors is None:
colors = ["C0", "k", "C1"]
if isinstance(colors, str):
raise TypeError("colors should be a list with 3 items.")
if len(colors) != 3:
raise ValueError("colors should be a list with 3 items.")
if color is not None:
warnings.warn("color has been deprecated in favor of colors", FutureWarning)
colors[0] = color
if data_pairs is None:
data_pairs = {}
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
if backend == "bokeh":
if animated:
raise TypeError("Animation option is only supported with matplotlib backend.")
observed_data = data.observed_data
if group == "posterior":
predictive_dataset = data.posterior_predictive
elif group == "prior":
predictive_dataset = data.prior_predictive
if var_names is None:
var_names = list(observed_data.data_vars)
var_names = _var_names(var_names, observed_data, filter_vars)
pp_var_names = [data_pairs.get(var, var) for var in var_names]
pp_var_names = _var_names(pp_var_names, predictive_dataset, filter_vars)
if flatten_pp is None and flatten is None:
flatten_pp = list(predictive_dataset.dims.keys())
elif flatten_pp is None:
flatten_pp = flatten
if flatten is None:
flatten = list(observed_data.dims.keys())
if coords is None:
coords = {}
if labeller is None:
labeller = BaseLabeller()
if random_seed is not None:
np.random.seed(random_seed)
total_pp_samples = predictive_dataset.sizes["chain"] * predictive_dataset.sizes["draw"]
if num_pp_samples is None:
if kind == "scatter" and not animated:
num_pp_samples = min(5, total_pp_samples)
else:
num_pp_samples = total_pp_samples
if (
not isinstance(num_pp_samples, Integral)
or num_pp_samples < 1
or num_pp_samples > total_pp_samples
):
raise TypeError(
"`num_pp_samples` must be an integer between 1 and " + f"{total_pp_samples}."
)
pp_sample_ix = np.random.choice(total_pp_samples, size=num_pp_samples, replace=False)
for key in coords.keys():
coords[key] = np.where(np.in1d(observed_data[key], coords[key]))[0]
obs_plotters = filter_plotters_list(
list(
xarray_var_iter(
observed_data.isel(coords),
skip_dims=set(flatten),
var_names=var_names,
combined=True,
)
),
"plot_ppc",
)
length_plotters = len(obs_plotters)
pp_plotters = [
tup
for _, tup in zip(
range(length_plotters),
xarray_var_iter(
predictive_dataset.isel(coords),
var_names=pp_var_names,
skip_dims=set(flatten_pp),
combined=True,
),
)
]
rows, cols = default_grid(length_plotters, grid=grid)
ppcplot_kwargs = dict(
ax=ax,
length_plotters=length_plotters,
rows=rows,
cols=cols,
figsize=figsize,
animated=animated,
obs_plotters=obs_plotters,
pp_plotters=pp_plotters,
predictive_dataset=predictive_dataset,
pp_sample_ix=pp_sample_ix,
kind=kind,
alpha=alpha,
colors=colors,
jitter=jitter,
textsize=textsize,
mean=mean,
observed=observed,
total_pp_samples=total_pp_samples,
legend=legend,
labeller=labeller,
group=group,
animation_kwargs=animation_kwargs,
num_pp_samples=num_pp_samples,
backend_kwargs=backend_kwargs,
show=show,
)
# TODO: Add backend kwargs
plot = get_plotting_function("plot_ppc", "ppcplot", backend)
axes = plot(**ppcplot_kwargs)
return axes
|
10,496 | def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'build-dep', 'fixed', 'latest', 'present']),
update_cache=dict(type='bool', aliases=['update-cache']),
update_cache_retries=dict(type='int', default=5),
update_cache_retry_max_delay=dict(type='int', default=12),
cache_valid_time=dict(type='int', default=0),
purge=dict(type='bool', default=False),
package=dict(type='list', elements='str', aliases=['pkg', 'name']),
deb=dict(type='path'),
default_release=dict(type='str', aliases=['default-release']),
install_recommends=dict(type='bool', aliases=['install-recommends']),
force=dict(type='bool', default=False),
upgrade=dict(type='str', choices=['dist', 'full', 'no', 'safe', 'yes']),
dpkg_options=dict(type='str', default=DPKG_OPTIONS),
autoremove=dict(type='bool', default=False),
autoclean=dict(type='bool', default=False),
no_remove=dict(type='bool', default=False, aliases=['no-remove']),
policy_rc_d=dict(type='int', default=None),
only_upgrade=dict(type='bool', default=False),
force_apt_get=dict(type='bool', default=False),
allow_unauthenticated=dict(type='bool', default=False, aliases=['allow-unauthenticated']),
),
mutually_exclusive=[['deb', 'package', 'upgrade']],
required_one_of=[['autoremove', 'deb', 'package', 'update_cache', 'upgrade']],
supports_check_mode=True,
)
module.run_command_environ_update = APT_ENV_VARS
if not HAS_PYTHON_APT:
if module.check_mode:
module.fail_json(msg="%s must be installed to use check mode. "
"If run normally this module can auto-install it." % PYTHON_APT)
try:
# We skip the cache update when auto-installing the dependency if the
# user explicitly declared it with update_cache=no.
if module.params.get('update_cache') is False:
module.warn("Auto-installing missing dependency without updating cache: %s" % PYTHON_APT)
else:
module.warn("Updating cache and auto-installing missing dependency: %s" % PYTHON_APT)
module.run_command(['apt-get', 'update'], check_rc=True)
module.run_command(['apt-get', 'install', '--no-install-recommends', PYTHON_APT, '-y', '-q'], check_rc=True)
global apt, apt_pkg
import apt
import apt.debfile
import apt_pkg
except ImportError:
module.fail_json(msg="Could not import python modules: apt, apt_pkg. "
"Please install %s package." % PYTHON_APT)
global APTITUDE_CMD
APTITUDE_CMD = module.get_bin_path("aptitude", False)
global APT_GET_CMD
APT_GET_CMD = module.get_bin_path("apt-get")
p = module.params
if p['upgrade'] == 'no':
p['upgrade'] = None
use_apt_get = p['force_apt_get']
if not use_apt_get and not APTITUDE_CMD:
use_apt_get = True
updated_cache = False
updated_cache_time = 0
install_recommends = p['install_recommends']
allow_unauthenticated = p['allow_unauthenticated']
dpkg_options = expand_dpkg_options(p['dpkg_options'])
autoremove = p['autoremove']
no_remove = p['no_remove']
autoclean = p['autoclean']
# Get the cache object
cache = get_cache(module)
try:
if p['default_release']:
try:
apt_pkg.config['APT::Default-Release'] = p['default_release']
except AttributeError:
apt_pkg.Config['APT::Default-Release'] = p['default_release']
# reopen cache w/ modified config
cache.open(progress=None)
mtimestamp, updated_cache_time = get_updated_cache_time()
# Cache valid time is default 0, which will update the cache if
# needed and `update_cache` was set to true
updated_cache = False
if p['update_cache'] or p['cache_valid_time']:
now = datetime.datetime.now()
tdelta = datetime.timedelta(seconds=p['cache_valid_time'])
if not mtimestamp + tdelta >= now:
# Retry to update the cache with exponential backoff
err = ''
update_cache_retries = module.params.get('update_cache_retries')
update_cache_retry_max_delay = module.params.get('update_cache_retry_max_delay')
randomize = random.randint(0, 1000) / 1000.0
for retry in range(update_cache_retries):
try:
cache.update()
break
except apt.cache.FetchFailedException as e:
err = to_native(e)
# Use exponential backoff plus a little bit of randomness
delay = 2 ** retry + randomize
if delay > update_cache_retry_max_delay:
delay = update_cache_retry_max_delay + randomize
time.sleep(delay)
else:
module.fail_json(msg='Failed to update apt cache: %s' % (err if err else 'unknown reason'))
cache.open(progress=None)
mtimestamp, post_cache_update_time = get_updated_cache_time()
if updated_cache_time != post_cache_update_time:
updated_cache = True
updated_cache_time = post_cache_update_time
# If there is nothing else to do exit. This will set state as
# changed based on whether the cache was updated.
if not p['package'] and not p['upgrade'] and not p['deb']:
module.exit_json(
changed=updated_cache,
cache_updated=updated_cache,
cache_update_time=updated_cache_time
)
force_yes = p['force']
if p['upgrade']:
upgrade(module, p['upgrade'], force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, no_remove, allow_unauthenticated)
if p['deb']:
if p['state'] != 'present':
module.fail_json(msg="deb only supports state=present")
if '://' in p['deb']:
p['deb'] = fetch_file(module, p['deb'])
install_deb(module, p['deb'], cache,
install_recommends=install_recommends,
allow_unauthenticated=allow_unauthenticated,
force=force_yes, no_remove=no_remove, dpkg_options=p['dpkg_options'])
unfiltered_packages = p['package'] or ()
packages = [package.strip() for package in unfiltered_packages if package != '*']
all_installed = '*' in unfiltered_packages
latest = p['state'] == 'latest'
if latest and all_installed:
if packages:
module.fail_json(msg='unable to install additional packages when upgrading all installed packages')
upgrade(module, 'yes', force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, no_remove, allow_unauthenticated)
if packages:
for package in packages:
if package.count('=') > 1:
module.fail_json(msg="invalid package spec: %s" % package)
if latest and '=' in package:
module.fail_json(msg='version number inconsistent with state=latest: %s' % package)
if not packages:
if autoclean:
cleanup(module, p['purge'], force=force_yes, operation='autoclean', dpkg_options=dpkg_options)
if autoremove:
cleanup(module, p['purge'], force=force_yes, operation='autoremove', dpkg_options=dpkg_options)
if p['state'] in ('latest', 'present', 'build-dep', 'fixed'):
state_upgrade = False
state_builddep = False
state_fixed = False
if p['state'] == 'latest':
state_upgrade = True
if p['state'] == 'build-dep':
state_builddep = True
if p['state'] == 'fixed':
state_fixed = True
success, retvals = install(
module,
packages,
cache,
upgrade=state_upgrade,
default_release=p['default_release'],
install_recommends=install_recommends,
force=force_yes,
dpkg_options=dpkg_options,
build_dep=state_builddep,
fixed=state_fixed,
autoremove=autoremove,
no_remove=no_remove,
only_upgrade=p['only_upgrade'],
allow_unauthenticated=allow_unauthenticated
)
# Store if the cache has been updated
retvals['cache_updated'] = updated_cache
# Store when the update time was last
retvals['cache_update_time'] = updated_cache_time
if success:
module.exit_json(**retvals)
else:
module.fail_json(**retvals)
elif p['state'] == 'absent':
remove(module, packages, cache, p['purge'], force=force_yes, dpkg_options=dpkg_options, autoremove=autoremove)
except apt.cache.LockFailedException:
module.fail_json(msg="Failed to lock apt for exclusive operation")
except apt.cache.FetchFailedException:
module.fail_json(msg="Could not fetch updated apt files")
| def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'build-dep', 'fixed', 'latest', 'present']),
update_cache=dict(type='bool', aliases=['update-cache']),
update_cache_retries=dict(type='int', default=5),
update_cache_retry_max_delay=dict(type='int', default=12),
cache_valid_time=dict(type='int', default=0),
purge=dict(type='bool', default=False),
package=dict(type='list', elements='str', aliases=['pkg', 'name']),
deb=dict(type='path'),
default_release=dict(type='str', aliases=['default-release']),
install_recommends=dict(type='bool', aliases=['install-recommends']),
force=dict(type='bool', default=False),
upgrade=dict(type='str', choices=['dist', 'full', 'no', 'safe', 'yes']),
dpkg_options=dict(type='str', default=DPKG_OPTIONS),
autoremove=dict(type='bool', default=False),
autoclean=dict(type='bool', default=False),
no_remove=dict(type='bool', default=False, aliases=['no-remove']),
policy_rc_d=dict(type='int', default=None),
only_upgrade=dict(type='bool', default=False),
force_apt_get=dict(type='bool', default=False),
allow_unauthenticated=dict(type='bool', default=False, aliases=['allow-unauthenticated']),
),
mutually_exclusive=[['deb', 'package', 'upgrade']],
required_one_of=[['autoremove', 'deb', 'package', 'update_cache', 'upgrade']],
supports_check_mode=True,
)
module.run_command_environ_update = APT_ENV_VARS
if not HAS_PYTHON_APT:
if module.check_mode:
module.fail_json(msg="%s must be installed to use check mode. "
"If run normally this module can auto-install it." % PYTHON_APT)
try:
# We skip the cache update when auto-installing the dependency if the
# user explicitly declared it with update_cache=no.
if module.params.get('update_cache') is False:
module.warn("Auto-installing missing dependency without updating cache: %s" % PYTHON_APT)
else:
module.warn("Updating cache and auto-installing missing dependency: %s" % PYTHON_APT)
module.run_command(['apt-get', 'update'], check_rc=True)
module.run_command(['apt-get', 'install', '--no-install-recommends', PYTHON_APT, '-y', '-q'], check_rc=True)
global apt, apt_pkg
import apt
import apt.debfile
import apt_pkg
except ImportError:
module.fail_json(msg="Could not import python modules: apt, apt_pkg. "
"Please install %s package." % PYTHON_APT)
global APTITUDE_CMD
APTITUDE_CMD = module.get_bin_path("aptitude", False)
global APT_GET_CMD
APT_GET_CMD = module.get_bin_path("apt-get")
p = module.params
if p['upgrade'] == 'no':
p['upgrade'] = None
use_apt_get = p['force_apt_get']
if not use_apt_get and not APTITUDE_CMD:
use_apt_get = True
updated_cache = False
updated_cache_time = 0
install_recommends = p['install_recommends']
allow_unauthenticated = p['allow_unauthenticated']
dpkg_options = expand_dpkg_options(p['dpkg_options'])
autoremove = p['autoremove']
no_remove = p['no_remove']
fail_on_autoremove = no_remove  # alias for the newer name used at the upgrade() call below
autoclean = p['autoclean']
# Get the cache object
cache = get_cache(module)
try:
if p['default_release']:
try:
apt_pkg.config['APT::Default-Release'] = p['default_release']
except AttributeError:
apt_pkg.Config['APT::Default-Release'] = p['default_release']
# reopen cache w/ modified config
cache.open(progress=None)
mtimestamp, updated_cache_time = get_updated_cache_time()
# Cache valid time is default 0, which will update the cache if
# needed and `update_cache` was set to true
updated_cache = False
if p['update_cache'] or p['cache_valid_time']:
now = datetime.datetime.now()
tdelta = datetime.timedelta(seconds=p['cache_valid_time'])
if not mtimestamp + tdelta >= now:
# Retry to update the cache with exponential backoff
err = ''
update_cache_retries = module.params.get('update_cache_retries')
update_cache_retry_max_delay = module.params.get('update_cache_retry_max_delay')
randomize = random.randint(0, 1000) / 1000.0
for retry in range(update_cache_retries):
try:
cache.update()
break
except apt.cache.FetchFailedException as e:
err = to_native(e)
# Use exponential backoff plus a little bit of randomness
delay = 2 ** retry + randomize
if delay > update_cache_retry_max_delay:
delay = update_cache_retry_max_delay + randomize
time.sleep(delay)
else:
module.fail_json(msg='Failed to update apt cache: %s' % (err if err else 'unknown reason'))
cache.open(progress=None)
mtimestamp, post_cache_update_time = get_updated_cache_time()
if updated_cache_time != post_cache_update_time:
updated_cache = True
updated_cache_time = post_cache_update_time
# If there is nothing else to do exit. This will set state as
# changed based on whether the cache was updated.
if not p['package'] and not p['upgrade'] and not p['deb']:
module.exit_json(
changed=updated_cache,
cache_updated=updated_cache,
cache_update_time=updated_cache_time
)
force_yes = p['force']
if p['upgrade']:
upgrade(module, p['upgrade'], force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, fail_on_autoremove, allow_unauthenticated)
if p['deb']:
if p['state'] != 'present':
module.fail_json(msg="deb only supports state=present")
if '://' in p['deb']:
p['deb'] = fetch_file(module, p['deb'])
install_deb(module, p['deb'], cache,
install_recommends=install_recommends,
allow_unauthenticated=allow_unauthenticated,
force=force_yes, no_remove=no_remove, dpkg_options=p['dpkg_options'])
unfiltered_packages = p['package'] or ()
packages = [package.strip() for package in unfiltered_packages if package != '*']
all_installed = '*' in unfiltered_packages
latest = p['state'] == 'latest'
if latest and all_installed:
if packages:
module.fail_json(msg='unable to install additional packages when upgrading all installed packages')
upgrade(module, 'yes', force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, no_remove, allow_unauthenticated)
if packages:
for package in packages:
if package.count('=') > 1:
module.fail_json(msg="invalid package spec: %s" % package)
if latest and '=' in package:
module.fail_json(msg='version number inconsistent with state=latest: %s' % package)
if not packages:
if autoclean:
cleanup(module, p['purge'], force=force_yes, operation='autoclean', dpkg_options=dpkg_options)
if autoremove:
cleanup(module, p['purge'], force=force_yes, operation='autoremove', dpkg_options=dpkg_options)
if p['state'] in ('latest', 'present', 'build-dep', 'fixed'):
state_upgrade = False
state_builddep = False
state_fixed = False
if p['state'] == 'latest':
state_upgrade = True
if p['state'] == 'build-dep':
state_builddep = True
if p['state'] == 'fixed':
state_fixed = True
success, retvals = install(
module,
packages,
cache,
upgrade=state_upgrade,
default_release=p['default_release'],
install_recommends=install_recommends,
force=force_yes,
dpkg_options=dpkg_options,
build_dep=state_builddep,
fixed=state_fixed,
autoremove=autoremove,
no_remove=no_remove,
only_upgrade=p['only_upgrade'],
allow_unauthenticated=allow_unauthenticated
)
# Store if the cache has been updated
retvals['cache_updated'] = updated_cache
# Store when the update time was last
retvals['cache_update_time'] = updated_cache_time
if success:
module.exit_json(**retvals)
else:
module.fail_json(**retvals)
elif p['state'] == 'absent':
remove(module, packages, cache, p['purge'], force=force_yes, dpkg_options=dpkg_options, autoremove=autoremove)
except apt.cache.LockFailedException:
module.fail_json(msg="Failed to lock apt for exclusive operation")
except apt.cache.FetchFailedException:
module.fail_json(msg="Could not fetch updated apt files")
|
5,932 | def parse_editable(editable_req):
# type: (str) -> Tuple[Optional[str], str, Set[str]]
"""Parses an editable requirement into:
- a requirement name
- a URL
- extras
- editable options
Accepted requirements:
svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
.[some_extra]
"""
url = editable_req
# If a file path is specified with extras, strip off the extras.
url_no_extras, extras = _strip_extras(url)
if os.path.isdir(url_no_extras):
if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
msg = (
'File "setup.py" not found. Directory cannot be installed '
'in editable mode: {}'.format(os.path.abspath(url_no_extras))
)
pyproject_path = make_pyproject_path(url_no_extras)
if os.path.isfile(pyproject_path):
msg += (
'\n(A "pyproject.toml" file was found, but editable '
'mode currently requires a setup.py based build.)'
)
raise InstallationError(msg)
# Treating it as code that has already been checked out
url_no_extras = path_to_url(url_no_extras)
if url_no_extras.lower().startswith('file:'):
package_name = Link(url_no_extras).egg_fragment
if extras:
return (
package_name,
url_no_extras,
Requirement("placeholder" + extras.lower()).extras,
)
else:
return package_name, url_no_extras, set()
for version_control in vcs:
if url.lower().startswith(f'{version_control}:'):
url = f'{version_control}+{url}'
break
link = Link(url)
if not link.is_vcs:
backends = ", ".join([backend.name + '+' for backend in vcs.backends])
raise InstallationError(
f'{editable_req} is not a valid editable requirement. '
f'It should either be a path to a local project or a VCS URL '
f'(beginning with {backends}).'
)
package_name = link.egg_fragment
if not package_name:
raise InstallationError(
"Could not detect requirement name for '{}', please specify one "
"with #egg=your_package_name".format(editable_req)
)
return package_name, url, set()
| def parse_editable(editable_req):
# type: (str) -> Tuple[Optional[str], str, Set[str]]
"""Parses an editable requirement into:
- a requirement name
- a URL
- extras
- editable options
Accepted requirements:
svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
.[some_extra]
"""
url = editable_req
# If a file path is specified with extras, strip off the extras.
url_no_extras, extras = _strip_extras(url)
if os.path.isdir(url_no_extras):
if not os.path.exists(os.path.join(url_no_extras, 'setup.py')):
msg = (
'File "setup.py" not found. Directory cannot be installed '
'in editable mode: {}'.format(os.path.abspath(url_no_extras))
)
pyproject_path = make_pyproject_path(url_no_extras)
if os.path.isfile(pyproject_path):
msg += (
'\n(A "pyproject.toml" file was found, but editable '
'mode currently requires a setup.py based build.)'
)
raise InstallationError(msg)
# Treating it as code that has already been checked out
url_no_extras = path_to_url(url_no_extras)
if url_no_extras.lower().startswith('file:'):
package_name = Link(url_no_extras).egg_fragment
if extras:
return (
package_name,
url_no_extras,
Requirement("placeholder" + extras.lower()).extras,
)
else:
return package_name, url_no_extras, set()
for version_control in vcs:
if url.lower().startswith(f'{version_control}:'):
url = f'{version_control}+{url}'
break
link = Link(url)
if not link.is_vcs:
backends = ", ".join(f"{backend.name}+" for backend in vcs.backends)
raise InstallationError(
f'{editable_req} is not a valid editable requirement. '
f'It should either be a path to a local project or a VCS URL '
f'(beginning with {backends}).'
)
package_name = link.egg_fragment
if not package_name:
raise InstallationError(
"Could not detect requirement name for '{}', please specify one "
"with #egg=your_package_name".format(editable_req)
)
return package_name, url, set()
|
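A usage sketch for parse_editable (a pip-internal helper shown above, not a supported public API, so the call is for illustration only and assumes the function and its dependencies are importable):
# A VCS URL with an #egg fragment goes through the VCS branch of the code above.
name, url, extras = parse_editable("git+https://github.com/pypa/pip.git#egg=pip")
print(name)    # "pip"
print(url)     # unchanged, since it already carries a vcs+ prefix
print(extras)  # set(): extras are only extracted in the local-directory branch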
1,848 | def test_poisson_zero_nodes():
"""Test that sum(y)=0 and therefore y_pred=0 never happens on nodes."""
X = [[0, 0], [0, 1], [0, 2], [0, 3],
[1, 0], [1, 2], [1, 2], [1, 3]]
y = [0, 0, 0, 0, 1, 2, 3, 4]
reg = DecisionTreeRegressor(criterion="poisson", random_state=1)
reg.fit(X, y)
assert np.all(reg.predict(X) > 0)
| def test_poisson_zero_nodes():
"""Test that sum(y)=0 and therefore y_pred=0 is forbidden on nodes."""
X = [[0, 0], [0, 1], [0, 2], [0, 3],
[1, 0], [1, 2], [1, 2], [1, 3]]
y = [0, 0, 0, 0, 1, 2, 3, 4]
reg = DecisionTreeRegressor(criterion="poisson", random_state=1)
reg.fit(X, y)
assert np.all(reg.predict(X) > 0)
|
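The test above guards against zero-valued leaves because the Poisson deviance involves log(y_pred) and is undefined for non-positive predictions. A small side illustration with scikit-learn's metric, which rejects non-positive predictions outright:
import numpy as np
from sklearn.metrics import mean_poisson_deviance

y_true = np.array([0.0, 2.0, 4.0])
print(mean_poisson_deviance(y_true, np.array([0.5, 2.0, 4.0])))  # finite value

try:
    mean_poisson_deviance(y_true, np.array([0.0, 2.0, 4.0]))
except ValueError as err:
    print(err)  # strictly positive predictions are required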
22,042 | def upload_to_google_bigquery_table(df, dataset, table, job_config=None, client_project=None, credentials=None, chunk_size=None, progress=None):
'''Upload a Vaex DataFrame to a Google BigQuery Table.
Note that the upload creates a temporary parquet file on the local disk, which is then upload to
Google BigQuery.
:param DataFrame df: The Vaex DataFrame to be uploaded.
:param str dataset: The name of the dataset to which the table belongs
:param str table: The name of the table
:param job_config: Optional, an instance of google.cloud.bigquery.job.load.LoadJobConfig
:param str client_project: The ID of the project that executes the query. Will be passed when creating a job. If `None`, falls back to the default inferred from the environment.
:param credentials: The authorization credentials to attach to requests. See google.auth.credentials.Credentials for more details.
:param chunk_size: In case the local disk space is limited, export the dataset in chunks.
This is considerably slower than a single file upload and it should be avoided.
:param progress: Valid only if chunk_size is not None. A callable that takes one argument (a floating point value between 0 and 1) indicating the progress, calculations are cancelled when this callable returns False
Example:
>>> import os
>>> os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '../path/to/project_access_key.json'
>>> import vaex
>>> from vaex.contrib.io.gbq import upload_to_google_bigquery_table
>>> df = vaex.example()
>>> dataset = 'my_dataset'
>>> table = 'my_table'
>>> upload_to_google_bigquery_table(df=df, dataset=dataset, table=table)
'''
# Instantiate the BigQuery Client
client = google.cloud.bigquery.Client(project=client_project, credentials=credentials)
# Confirm configuration of the LoadJobConfig
if job_config is not None:
assert isinstance(job_config, google.cloud.bigquery.job.load.LoadJobConfig)
job_config.source_format = google.cloud.bigquery.SourceFormat.PARQUET
else:
job_config = google.cloud.bigquery.LoadJobConfig(source_format=google.cloud.bigquery.SourceFormat.PARQUET)
# Table to which to upload
table_bq = f"{dataset}.{table}"
if chunk_size is None:
with tempfile.NamedTemporaryFile(suffix='.parquet') as tmp:
df.export_parquet(tmp.name)
with open(tmp.name, "rb") as source_file:
job = client.load_table_from_file(source_file, table_bq, job_config=job_config)
job.result()
else:
progressbar = vaex.utils.progressbars(progress)
n_samples = len(df)
for i1, i2, table in df.to_arrow_table(chunk_size=chunk_size):
progressbar(i1 / n_samples)
with tempfile.NamedTemporaryFile(suffix='.parquet') as tmp:
pq.write_table(table, tmp.name)
with open(tmp.name, "rb") as source_file:
job = client.load_table_from_file(source_file, table_bq, job_config=job_config)
job.result()
progressbar(1.0)
| def to_table(df, dataset, table, job_config=None, client_project=None, credentials=None, chunk_size=None, progress=None):
'''Upload a Vaex DataFrame to a Google BigQuery Table.
Note that the upload creates a temporary parquet file on the local disk, which is then uploaded to
Google BigQuery.
:param DataFrame df: The Vaex DataFrame to be uploaded.
:param str dataset: The name of the dataset to which the table belongs
:param str table: The name of the table
:param job_config: Optional, an instance of google.cloud.bigquery.job.load.LoadJobConfig
:param str client_project: The ID of the project that executes the query. Will be passed when creating a job. If `None`, falls back to the default inferred from the environment.
:param credentials: The authorization credentials to attach to requests. See google.auth.credentials.Credentials for more details.
:param chunk_size: In case the local disk space is limited, export the dataset in chunks.
This is considerably slower than a single file upload and it should be avoided.
:param progress: Valid only if chunk_size is not None. A callable that takes one argument (a floating point value between 0 and 1) indicating the progress, calculations are cancelled when this callable returns False
Example:
>>> import os
>>> os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '../path/to/project_access_key.json'
>>> import vaex
>>> from vaex.contrib.io.gbq import to_table
>>> df = vaex.example()
>>> dataset = 'my_dataset'
>>> table = 'my_table'
>>> to_table(df=df, dataset=dataset, table=table)
'''
# Instantiate the BigQuery Client
client = google.cloud.bigquery.Client(project=client_project, credentials=credentials)
# Confirm configuration of the LoadJobConfig
if job_config is not None:
assert isinstance(job_config, google.cloud.bigquery.job.load.LoadJobConfig)
job_config.source_format = google.cloud.bigquery.SourceFormat.PARQUET
else:
job_config = google.cloud.bigquery.LoadJobConfig(source_format=google.cloud.bigquery.SourceFormat.PARQUET)
# Table to which to upload
table_bq = f"{dataset}.{table}"
if chunk_size is None:
with tempfile.NamedTemporaryFile(suffix='.parquet') as tmp:
df.export_parquet(tmp.name)
with open(tmp.name, "rb") as source_file:
job = client.load_table_from_file(source_file, table_bq, job_config=job_config)
job.result()
else:
progressbar = vaex.utils.progressbars(progress)
n_samples = len(df)
for i1, i2, table in df.to_arrow_table(chunk_size=chunk_size):
progressbar(i1 / n_samples)
with tempfile.NamedTemporaryFile(suffix='.parquet') as tmp:
pq.write_table(table, tmp.name)
with open(tmp.name, "rb") as source_file:
job = client.load_table_from_file(source_file, table_bq, job_config=job_config)
job.result()
progressbar(1.0)
|
58,763 | def sparsereshape(sparse_indices, sparse_values, prev_shape, new_shape):
"""
Reshape a Sparse Tensor
Parameters
----------
inputs : List[relay.Expr]
Input tensor and indices.
The first tensor is the input data and the rest are indices.
Returns
-------
result: relay.Expr
Output tensor.
Examples
--------
.. code-block:: python
sparse_indices = [[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[1, 2, 3]]
sparse_values = [7, 5, 6, 3, 9]
prev_shape = [2, 3, 4]
new_shape = [9, -1]
relay.sparsereshape(sparse_indices,
sparse_values,
prev_shape,
new_shape)
= [[0, 0],
[0, 1],
[1, 2],
[4, 2],
[8, 1]]
"""
return cpp.sparsereshape(sparse_indices, sparse_values, prev_shape, new_shape)
| def sparsereshape(sparse_indices, sparse_values, prev_shape, new_shape):
"""
Reshape a Sparse Tensor
Parameters
----------
inputs : List[relay.Expr]
Input tensor and indices.
The first tensor is the input data and the rest are indices.
Returns
-------
result: relay.Expr
Output tensor.
Examples
--------
.. code-block:: python
sparse_indices = [[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[1, 0, 0],
[1, 2, 3]]
sparse_values = [7, 5, 6, 3, 9]
prev_shape = [2, 3, 4]
new_shape = [9, -1]
relay.sparsereshape(sparse_indices,
sparse_values,
prev_shape,
new_shape)
= [[0, 0],
[0, 1],
[1, 2],
[4, 2],
[8, 1]]
"""
return cpp.sparsereshape(sparse_indices, sparse_values, prev_shape, new_shape)
|
35,587 | def densenet161(pretrained: bool = False, progress: bool = True, **kwargs) -> DenseNet:
r"""Densenet-161 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _densenet('densenet161', 48, (6, 12, 36, 24), 96, pretrained, progress,
**kwargs)
| def densenet161(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> DenseNet:
r"""Densenet-161 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _densenet('densenet161', 48, (6, 12, 36, 24), 96, pretrained, progress,
**kwargs)
|
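A quick usage sketch for the constructor above; pretrained=False avoids downloading weights, and the 1000-class output assumes the default ImageNet classification head:
import torch

model = densenet161(pretrained=False, memory_efficient=True)
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # torch.Size([1, 1000])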
57,825 | def http_request(method, url, body=None, headers=None, url_params=None):
"""
returns the http response body
uses the TOKEN global var to send requests to the RSA NetWitness endpoint (this enables reusing a token across multiple requests and avoiding
unnecessary creation of a new token)
catches and handles token expiration: in case of 'request timeout' the token will be renewed and the request
will be resent once more.
"""
if headers is None:
headers = {}
global TOKEN
# add token to headers
headers['NetWitness-Token'] = TOKEN
request_kwargs = {
'headers': headers,
'verify': USE_SSL
}
# add optional arguments if specified
if body is not None:
request_kwargs['data'] = body
if url_params is not None:
request_kwargs['params'] = url_params
LOG('Attempting {} request to {}\nWith params:{}\nWith body:\n{}'.format(method, url,
json.dumps(url_params, indent=4),
json.dumps(body, indent=4)))
response = requests.request(
method,
url,
**request_kwargs
)
# handle timeout (token expired): renew token and try again
if response.status_code == 408:
LOG('Timeout detected - renewing token')
TOKEN = get_token()
headers['NetWitness-Token'] = TOKEN
response = requests.request(
method,
url,
**request_kwargs
)
# successful request
if response.status_code == 200:
try:
return response.json()
except Exception as e:
demisto.debug('Could not parse response as a JSON.\nResponse is: {}.'
'\nError is:{}'.format(response.content, e.message))
return None
# bad request - NetWitness returns a common json structure for errors; a list of error objects
error_lst = response.json().get('errors')
raise ValueError('Request failed with status: {}\n{}'.format(response.status_code, dict_list_to_str(error_lst)))
| def http_request(method, url, body=None, headers=None, url_params=None):
"""
returns the http response body
uses TOKEN global var to send requests to RSA end (this enables using a token for multiple requests and avoiding
unnecessary creation of a new token)
catches and handles token expiration: in case of 'request timeout' the token will be renewed and the request
will be resent once more.
"""
if headers is None:
headers = {}
global TOKEN
# add token to headers
headers['NetWitness-Token'] = TOKEN
request_kwargs = {
'headers': headers,
'verify': USE_SSL
}
# add optional arguments if specified
if body is not None:
request_kwargs['data'] = body
if url_params is not None:
request_kwargs['params'] = url_params
LOG('Attempting {} request to {}\nWith params:{}\nWith body:\n{}'.format(method, url,
json.dumps(url_params, indent=4),
json.dumps(body, indent=4)))
response = requests.request(
method,
url,
**request_kwargs
)
# handle timeout (token expired): renew token and try again
if response.status_code == 408:
LOG('Timeout detected - renewing token')
TOKEN = get_token()
headers['NetWitness-Token'] = TOKEN
response = requests.request(
method,
url,
**request_kwargs
)
# successful request
if response.status_code == 200:
try:
return response.json()
except Exception as e:
demisto.debug('Could not parse response as a JSON.\nResponse is: {}.'
'\nError is: {}'.format(response.content, e.message))
return None
# bad request - NetWitness returns a common json structure for errors; a list of error objects
error_lst = response.json().get('errors')
raise ValueError('Request failed with status: {}\n{}'.format(response.status_code, dict_list_to_str(error_lst)))
|
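A generic sketch of the renew-token-on-408 pattern implemented above; `get_token` is a placeholder callable, not the integration's actual helper:

```python
import requests

def request_with_refresh(method, url, token, get_token, **kwargs):
    # Send the request with the current token; on 408 ("token expired"),
    # renew the token once and retry the same request.
    headers = kwargs.pop("headers", {})
    headers["NetWitness-Token"] = token
    response = requests.request(method, url, headers=headers, **kwargs)
    if response.status_code == 408:
        headers["NetWitness-Token"] = get_token()
        response = requests.request(method, url, headers=headers, **kwargs)
    return response
```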
10,775 | def push_call_vars(blocks, saved_globals, saved_getattrs, typemap, nested=False):
"""push call variables to right before their call site.
assuming one global/getattr is created for each call site and control flow
doesn't change it.
"""
for block in blocks.values():
new_body = []
# global/attr variables that are defined in this block already,
# no need to reassign them
block_defs = set()
# Some definitions are copied right before the call but then we
# need to rename that symbol in that block so that typing won't
# generate an error trying to lock the save var twice.
# In rename_dict, we collect the symbols that must be renamed in
# this block. We collect them then apply the renaming at the end.
rename_dict = {}
for stmt in block.body:
def process_assign(stmt):
if isinstance(stmt, ir.Assign):
rhs = stmt.value
lhs = stmt.target
if (isinstance(rhs, ir.Global)):
saved_globals[lhs.name] = stmt
block_defs.add(lhs.name)
elif isinstance(rhs, ir.Expr) and rhs.op == 'getattr':
if (rhs.value.name in saved_globals
or rhs.value.name in saved_getattrs):
saved_getattrs[lhs.name] = stmt
block_defs.add(lhs.name)
if not nested and isinstance(stmt, Parfor):
for s in stmt.init_block.body:
process_assign(s)
pblocks = stmt.loop_body.copy()
push_call_vars(pblocks, saved_globals, saved_getattrs, typemap, nested=True)
new_body.append(stmt)
continue
else:
process_assign(stmt)
for v in stmt.list_vars():
new_body += _get_saved_call_nodes(v.name, saved_globals,
saved_getattrs, block_defs, rename_dict)
new_body.append(stmt)
block.body = new_body
# If there is anything to rename then apply the renaming here.
if len(rename_dict) > 0:
# Fix-up the typing for the renamed vars.
for k,v in rename_dict.items():
typemap[v] = typemap[k]
# This is only to call replace_var_names which takes a dict.
temp_blocks = {0:block}
replace_var_names(temp_blocks, rename_dict)
return
| def push_call_vars(blocks, saved_globals, saved_getattrs, typemap, nested=False):
"""push call variables to right before their call site.
assuming one global/getattr is created for each call site and control flow
doesn't change it.
"""
for block in blocks.values():
new_body = []
# global/attr variables that are defined in this block already,
# no need to reassign them
block_defs = set()
# Some definitions are copied right before the call but then we
# need to rename that symbol in that block so that typing won't
# generate an error trying to lock the save var twice.
# In rename_dict, we collect the symbols that must be renamed in
# this block. We collect them then apply the renaming at the end.
rename_dict = {}
for stmt in block.body:
def process_assign(stmt):
if isinstance(stmt, ir.Assign):
rhs = stmt.value
lhs = stmt.target
if (isinstance(rhs, ir.Global)):
saved_globals[lhs.name] = stmt
block_defs.add(lhs.name)
elif isinstance(rhs, ir.Expr) and rhs.op == 'getattr':
if (rhs.value.name in saved_globals
or rhs.value.name in saved_getattrs):
saved_getattrs[lhs.name] = stmt
block_defs.add(lhs.name)
if not nested and isinstance(stmt, Parfor):
for s in stmt.init_block.body:
process_assign(s)
pblocks = stmt.loop_body.copy()
push_call_vars(pblocks, saved_globals, saved_getattrs, typemap, nested=True)
new_body.append(stmt)
continue
else:
process_assign(stmt)
for v in stmt.list_vars():
new_body += _get_saved_call_nodes(v.name, saved_globals,
saved_getattrs, block_defs, rename_dict)
new_body.append(stmt)
block.body = new_body
# If there is anything to rename then apply the renaming here.
if len(rename_dict) > 0:
# Fix-up the typing for the renamed vars.
for k,v in rename_dict.items():
typemap[v] = typemap[k]
# This is only to call replace_var_names which takes a dict.
temp_blocks = {0: block}
replace_var_names(temp_blocks, rename_dict)
return
|
30,482 | def get_layout_data(path):
data = OrderedDict()
json_data = get_json(path)
layout = json_data.get('layout')
name = layout.get('name', '-')
id = layout.get('id', '-')
typeID = json_data.get('typeId')
typeName = json_data.get('TypeName')
fromversion = json_data.get('fromVersion')
toversion = json_data.get('toVersion')
pack = get_pack_name(path)
if typeID:
data['typeID'] = typeID
if typeName:
data['typename'] = typeName
if name:
data['name'] = name
if not name:
data['name'] = '-'
if toversion:
data['toversion'] = toversion
if fromversion:
data['fromversion'] = fromversion
if pack:
data['pack'] = pack
return {id: data}
| def get_layout_data(path):
data = OrderedDict()
json_data = get_json(path)
layout = json_data.get('layout')
name = layout.get('name', '-')
id = layout.get('id', '-')
typeID = json_data.get('typeId')
typeName = json_data.get('TypeName')
fromversion = json_data.get('fromVersion')
toversion = json_data.get('toVersion')
pack = get_pack_name(path)
if typeID:
data['typeID'] = typeID
if typeName:
data['typename'] = typeName
if name:
data['name'] = name
else:
data['name'] = '-'
if toversion:
data['toversion'] = toversion
if fromversion:
data['fromversion'] = fromversion
if pack:
data['pack'] = pack
return {id: data}
|
16,663 | def run(runtime_config: RuntimeConfig) -> int:
"""Run Home Assistant."""
asyncio.set_event_loop_policy(HassEventLoopPolicy(runtime_config.debug))
# Backport of cpython 3.9 asyncio.run with a _cancel_all_tasks that times out
loop = asyncio.new_event_loop()
try:
asyncio.set_event_loop(loop)
return loop.run_until_complete(setup_and_run_hass(runtime_config))
finally:
try:
_cancel_all_tasks_with_timeout(loop, TASK_CANCELATION_TIMEOUT)
loop.run_until_complete(loop.shutdown_asyncgens())
loop.shutdown_default_executor()
finally:
asyncio.set_event_loop(None)
loop.close()
| def run(runtime_config: RuntimeConfig) -> int:
"""Run Home Assistant."""
asyncio.set_event_loop_policy(HassEventLoopPolicy(runtime_config.debug))
# Backport of cpython 3.9 asyncio.run with a _cancel_all_tasks that times out
loop = asyncio.new_event_loop()
try:
asyncio.set_event_loop(loop)
return loop.run_until_complete(setup_and_run_hass(runtime_config))
finally:
try:
_cancel_all_tasks_with_timeout(loop, TASK_CANCELATION_TIMEOUT)
loop.run_until_complete(loop.shutdown_asyncgens())
loop.run_until_complete(loop.shutdown_default_executor())
finally:
asyncio.set_event_loop(None)
loop.close()
|
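A minimal sketch of the run-then-tear-down pattern used above, assuming Python 3.9+ where `loop.shutdown_default_executor()` is a coroutine:

```python
import asyncio

async def main_coro():
    await asyncio.sleep(0)
    return 0

loop = asyncio.new_event_loop()
try:
    asyncio.set_event_loop(loop)
    result = loop.run_until_complete(main_coro())
finally:
    # Drain async generators and the default executor before closing the loop.
    loop.run_until_complete(loop.shutdown_asyncgens())
    loop.run_until_complete(loop.shutdown_default_executor())
    asyncio.set_event_loop(None)
    loop.close()
```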
32,173 | def main():
options = arguments_handler()
pr_number = options.pr_number
github_token = options.github_token
org_name = 'demisto'
repo_name = 'content'
github_client: Github = Github(github_token, verify=False)
content_repo: Repository = github_client.get_repo(f'{org_name}/{repo_name}')
pr: PullRequest = content_repo.get_pull(int(pr_number))
t = Terminal()
pr_label_names = [label.name for label in pr.labels]
is_contribution_form_filled_label_exist = CONTRIBUTION_FORM_FILLED_LABEL in pr_label_names
is_community_label_exist = COMMUNITY_LABEL in pr_label_names
is_partner_label_exist = PARTNER_LABEL in pr_label_names
is_internal_label_exist = INTERNAL_LABEL in pr_label_names
print(f'{t.cyan}Check that {CONTRIBUTION_FORM_FILLED_LABEL} label exist in PR {pr_number}')
if not is_contribution_form_filled_label_exist:
print(
f'{t.red}ERROR: Contribution form was not filled for PR: {pr_number}.\nMake sure to register your'
f' contribution by filling the contribution registration form in - https://forms.gle/XDfxU4E61ZwEESSMA'
)
sys.exit(1)
print(f'{t.cyan}Check that one of Community/Partner/Internal labels exist in PR {pr_number}')
if not (is_community_label_exist ^ is_partner_label_exist ^ is_internal_label_exist):
print(
f'{t.red}ERROR: PR labels {pr_label_names} '
f'must contain one of {COMMUNITY_LABEL}/{PARTNER_LABEL}/{INTERNAL_LABEL} labels'
)
sys.exit(1)
print(f'{t.cyan}PR labels {pr_label_names} are valid')
print(f'{t.cyan} Contribution form was filled successfully for PR: {pr_number}')
sys.exit(0)
| def main():
options = arguments_handler()
pr_number = options.pr_number
github_token = options.github_token
org_name = 'demisto'
repo_name = 'content'
github_client: Github = Github(github_token, verify=False)
content_repo: Repository = github_client.get_repo(f'{org_name}/{repo_name}')
pr: PullRequest = content_repo.get_pull(int(pr_number))
t = Terminal()
pr_label_names = [label.name for label in pr.labels]
is_contribution_form_filled_label_exist = CONTRIBUTION_FORM_FILLED_LABEL in pr_label_names
is_community_label_exist = COMMUNITY_LABEL in pr_label_names
is_partner_label_exist = PARTNER_LABEL in pr_label_names
is_internal_label_exist = INTERNAL_LABEL in pr_label_names
print(f'{t.cyan}Check that {CONTRIBUTION_FORM_FILLED_LABEL} label exist in PR {pr_number}')
if not is_contribution_form_filled_label_exist:
print(
f'{t.red}ERROR: Contribution form was not filled for PR: {pr_number}.\nMake sure to register your'
f' contribution by filling the contribution registration form in - https://forms.gle/XDfxU4E61ZwEESSMA'
)
sys.exit(1)
print(f'{t.cyan}Checking if one of Community/Partner/Internal labels exist in PR {pr_number}')
if not (is_community_label_exist ^ is_partner_label_exist ^ is_internal_label_exist):
print(
f'{t.red}ERROR: PR labels {pr_label_names} '
f'must contain one of {COMMUNITY_LABEL}/{PARTNER_LABEL}/{INTERNAL_LABEL} labels'
)
sys.exit(1)
print(f'{t.cyan}PR labels {pr_label_names} are valid')
print(f'{t.cyan} Contribution form was filled successfully for PR: {pr_number}')
sys.exit(0)
|
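One caveat worth illustrating for the label check above: a three-way `^` is also true when all three flags are set, so a count-based test expresses "exactly one label" more directly:

```python
flags = [True, True, True]
print(flags[0] ^ flags[1] ^ flags[2])  # True, even though all three labels are set
print(sum(flags) == 1)                 # False: the stricter "exactly one" check
```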
50,207 | def _preprocessed_interpreter_search_paths(
env_tgt: EnvironmentTarget,
_search_paths: Iterable[str],
is_default: bool,
) -> tuple[str, ...]:
"""Checks for special search path strings, and errors if any are invalid for the environment.
This will return:
* The search paths, unaltered, for local/undefined environments, OR
* The search paths, with invalid tokens removed, if the provided value was unaltered from the
default value in the options system
(see `PythonBootstrapSubsystem.EnvironmentAware.search_paths`)
* The search paths unaltered, if the search paths do not contain tokens invalid for this
environment
If the environment is non-local and there are invalid tokens for those environments, raise
`ValueError`.
"""
env = env_tgt.val
search_paths = tuple(_search_paths)
if isinstance(env, LocalEnvironmentTarget):
return search_paths
if env is None:
return search_paths
not_allowed = {"<PYENV>", "<PYENV_LOCAL>", "<ASDF>", "<ASDF_LOCAL>", "<PEXRC>"}
if is_default:
# Strip out the not-allowed special strings from search_paths.
# An error will occur on the off chance the non-local environment expects pyenv
# but there's nothing we can do here to detect it.
return tuple(path for path in search_paths if path not in not_allowed)
any_not_allowed = set(search_paths) & not_allowed
if any_not_allowed:
env_type = type(env)
raise ValueError(
f"`[python-bootstrap].search_paths` is configured to use local Python discovery "
f"tools, which do not work in {env_type.__name__} runtime environments. To fix this, "
f"set the value of `python_bootstrap_search_path` in the {env.alias} defined at "
f"`{env.address}` to contain only hardcoded paths or the `<PATH>` special string."
)
return search_paths
| def _preprocessed_interpreter_search_paths(
env_tgt: EnvironmentTarget,
_search_paths: Iterable[str],
is_default: bool,
) -> tuple[str, ...]:
"""Checks for special search path strings, and errors if any are invalid for the environment.
This will return:
* The search paths, unaltered, for local/undefined environments, OR
* The search paths, with invalid tokens removed, if the provided value was unaltered from the
default value in the options system
(see `PythonBootstrapSubsystem.EnvironmentAware.search_paths`)
* The search paths unaltered, if the search paths do not contain tokens invalid for this
environment
If the environment is non-local and there are invalid tokens for those environments, raise
`ValueError`.
"""
env = env_tgt.val
search_paths = tuple(_search_paths)
if env is None or isinstance(env, LocalEnvironmentTarget):
return search_paths
not_allowed = {"<PYENV>", "<PYENV_LOCAL>", "<ASDF>", "<ASDF_LOCAL>", "<PEXRC>"}
if is_default:
# Strip out the not-allowed special strings from search_paths.
# An error will occur on the off chance the non-local environment expects pyenv
# but there's nothing we can do here to detect it.
return tuple(path for path in search_paths if path not in not_allowed)
any_not_allowed = set(search_paths) & not_allowed
if any_not_allowed:
env_type = type(env)
raise ValueError(
f"`[python-bootstrap].search_paths` is configured to use local Python discovery "
f"tools, which do not work in {env_type.__name__} runtime environments. To fix this, "
f"set the value of `python_bootstrap_search_path` in the {env.alias} defined at "
f"`{env.address}` to contain only hardcoded paths or the `<PATH>` special string."
)
return search_paths
|
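A tiny illustration of the default-value branch above: disallowed discovery tokens are silently dropped, while hardcoded paths and `<PATH>` survive.

```python
NOT_ALLOWED = {"<PYENV>", "<PYENV_LOCAL>", "<ASDF>", "<ASDF_LOCAL>", "<PEXRC>"}
search_paths = ("<PATH>", "<PYENV>", "/usr/local/bin")
filtered = tuple(p for p in search_paths if p not in NOT_ALLOWED)
print(filtered)  # ('<PATH>', '/usr/local/bin')
```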
17,436 | def mean(array, axis=None, skipna=None, **kwargs):
"""inhouse mean that can handle np.datetime64 or cftime.datetime
dtypes"""
from .common import _contains_cftime_datetimes
# The mean over an empty axis shouldn't change the data
# See https://github.com/pydata/xarray/issues/4885
if axis == tuple():
return array
array = asarray(array)
if array.dtype.kind in "Mm":
offset = _datetime_nanmin(array)
# xarray always uses np.datetime64[ns] for np.datetime64 data
dtype = "timedelta64[ns]"
return (
_mean(
datetime_to_numeric(array, offset), axis=axis, skipna=skipna, **kwargs
).astype(dtype)
+ offset
)
elif _contains_cftime_datetimes(array):
if is_duck_dask_array(array):
raise NotImplementedError(
"Computing the mean of an array containing "
"cftime.datetime objects is not yet implemented on "
"dask arrays."
)
offset = min(array)
timedeltas = datetime_to_numeric(array, offset, datetime_unit="us")
mean_timedeltas = _mean(timedeltas, axis=axis, skipna=skipna, **kwargs)
return _to_pytimedelta(mean_timedeltas, unit="us") + offset
else:
return _mean(array, axis=axis, skipna=skipna, **kwargs)
| def mean(array, axis=None, skipna=None, **kwargs):
"""inhouse mean that can handle np.datetime64 or cftime.datetime
dtypes"""
from .common import _contains_cftime_datetimes
# The mean over an empty axis shouldn't change the data
# See https://github.com/pydata/xarray/issues/4885
if axis == ():
return array
array = asarray(array)
if array.dtype.kind in "Mm":
offset = _datetime_nanmin(array)
# xarray always uses np.datetime64[ns] for np.datetime64 data
dtype = "timedelta64[ns]"
return (
_mean(
datetime_to_numeric(array, offset), axis=axis, skipna=skipna, **kwargs
).astype(dtype)
+ offset
)
elif _contains_cftime_datetimes(array):
if is_duck_dask_array(array):
raise NotImplementedError(
"Computing the mean of an array containing "
"cftime.datetime objects is not yet implemented on "
"dask arrays."
)
offset = min(array)
timedeltas = datetime_to_numeric(array, offset, datetime_unit="us")
mean_timedeltas = _mean(timedeltas, axis=axis, skipna=skipna, **kwargs)
return _to_pytimedelta(mean_timedeltas, unit="us") + offset
else:
return _mean(array, axis=axis, skipna=skipna, **kwargs)
|
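A NumPy sketch of the offset trick used above for datetime64 data: subtract a reference time, average the resulting timedeltas, then add the offset back.

```python
import numpy as np

times = np.array(["2021-01-01", "2021-01-03", "2021-01-08"], dtype="datetime64[ns]")
offset = times.min()
mean_time = offset + (times - offset).mean()   # mean of timedeltas, shifted back
print(mean_time)                               # 2021-01-04T00:00:00.000000000
```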
32,075 | def main():
try:
demisto_params = demisto.params()
command = demisto.command()
params = {
'api_url': demisto_params['url'].rstrip('/'),
'use_ssl': not demisto_params.get('insecure', False),
'threshold': int(demisto_params.get('threshold', 1)),
'create_relationships': bool(demisto_params.get('create_relationships', True)),
'max_num_of_relationships': int(demisto_params.get('max_num_of_relationships', 1)) if int(
demisto_params.get('max_num_of_relationships', 1)) < 1000 else 1000,
}
reliability = demisto_params.get('integrationReliability', DBotScoreReliability.C)
if DBotScoreReliability.is_valid_type(reliability):
params['reliability'] = DBotScoreReliability.get_dbot_score_reliability_from_str(reliability)
else:
Exception('Please provide a valid value for the Source Reliability parameter.')
# Remove proxy if not set to true in params
handle_proxy()
if command == 'test-module':
# This is the call made when pressing the integration test button.
test_module(**params)
demisto.results('ok')
elif command == 'url':
return_results(results=url_command(**params))
elif command == 'domain':
return_results(results=domain_command(**params))
elif command == 'file':
return_results(results=file_command(**params))
elif command == 'urlhaus-download-sample':
urlhaus_download_sample_command(**params)
# Log exceptions
except Exception as exc:
return_error(f'Failed to execute command "{command}".\nError: {exc}', error=exc)
| def main():
try:
demisto_params = demisto.params()
command = demisto.command()
params = {
'api_url': demisto_params['url'].rstrip('/'),
'use_ssl': not demisto_params.get('insecure', False),
'threshold': int(demisto_params.get('threshold', 1)),
'create_relationships': demisto_params.get('create_relationships', True),
'max_num_of_relationships': int(demisto_params.get('max_num_of_relationships', 1)) if int(
demisto_params.get('max_num_of_relationships', 1)) < 1000 else 1000,
}
reliability = demisto_params.get('integrationReliability', DBotScoreReliability.C)
if DBotScoreReliability.is_valid_type(reliability):
params['reliability'] = DBotScoreReliability.get_dbot_score_reliability_from_str(reliability)
else:
            raise Exception('Please provide a valid value for the Source Reliability parameter.')
# Remove proxy if not set to true in params
handle_proxy()
if command == 'test-module':
# This is the call made when pressing the integration test button.
test_module(**params)
demisto.results('ok')
elif command == 'url':
return_results(results=url_command(**params))
elif command == 'domain':
return_results(results=domain_command(**params))
elif command == 'file':
return_results(results=file_command(**params))
elif command == 'urlhaus-download-sample':
urlhaus_download_sample_command(**params)
# Log exceptions
except Exception as exc:
return_error(f'Failed to execute command "{command}".\nError: {exc}', error=exc)
|
6,435 | def get_context(context):
context.no_cache = 1
s = frappe.get_doc("Support Settings", "Support Settings")
# Get Started sections
if s.get_started_sections:
sections = json.loads(s.get_started_sections)
context.get_started_sections = sections
# Forum posts
if s.show_latest_forum_posts:
topics_data, post_params = get_forum_posts(s)
context.post_params = post_params
context.forum_url = s.forum_url
context.topics = topics_data[:3]
# Issues
if frappe.session.user != "Guest":
context.issues = frappe.get_list("Issue", fields=["name", "status", "subject", "modified"])[:3]
else:
context.issues = []
| def get_context(context):
context.no_cache = 1
settings = frappe.get_doc("Support Settings")
# Get Started sections
    if settings.get_started_sections:
        sections = json.loads(settings.get_started_sections)
        context.get_started_sections = sections
    # Forum posts
    if settings.show_latest_forum_posts:
        topics_data, post_params = get_forum_posts(settings)
        context.post_params = post_params
        context.forum_url = settings.forum_url
context.topics = topics_data[:3]
# Issues
if frappe.session.user != "Guest":
context.issues = frappe.get_list("Issue", fields=["name", "status", "subject", "modified"])[:3]
else:
context.issues = []
|
21,530 | def _filter_results(
direction: str,
from_token: Optional[RoomStreamToken],
to_token: Optional[RoomStreamToken],
instance_name: str,
stream_ordering: int,
) -> bool:
"""Filter results from fetching events in the DB against the given tokens.
This is necessary to handle the case where the tokens include positions
maps, which we handle by fetching more than necessary from the DB and then
filtering (rather than attempting to construct a complicated SQL query).
"""
# We will have already filtered by the topological tokens, so we don't
# bother checking topological token bounds again.
if from_token and from_token.topological:
from_token = None
if to_token and to_token.topological:
to_token = None
lower_bound = None
if direction == "f" and from_token:
lower_bound = from_token.get_stream_pos_for_instance(instance_name)
elif direction == "b" and to_token:
lower_bound = to_token.get_stream_pos_for_instance(instance_name)
if lower_bound and stream_ordering <= lower_bound:
return False
upper_bound = None
if direction == "b" and from_token:
upper_bound = from_token.get_stream_pos_for_instance(instance_name)
elif direction == "f" and to_token:
upper_bound = to_token.get_stream_pos_for_instance(instance_name)
if upper_bound and upper_bound < stream_ordering:
return False
return True
| def _filter_results(
direction: str,
from_token: Optional[RoomStreamToken],
to_token: Optional[RoomStreamToken],
instance_name: str,
stream_ordering: int,
) -> bool:
"""Filter results from fetching events in the DB against the given tokens.
This is necessary to handle the case where the tokens include position
maps, which we handle by fetching more than necessary from the DB and then
filtering (rather than attempting to construct a complicated SQL query).
"""
# We will have already filtered by the topological tokens, so we don't
# bother checking topological token bounds again.
if from_token and from_token.topological:
from_token = None
if to_token and to_token.topological:
to_token = None
lower_bound = None
if direction == "f" and from_token:
lower_bound = from_token.get_stream_pos_for_instance(instance_name)
elif direction == "b" and to_token:
lower_bound = to_token.get_stream_pos_for_instance(instance_name)
if lower_bound and stream_ordering <= lower_bound:
return False
upper_bound = None
if direction == "b" and from_token:
upper_bound = from_token.get_stream_pos_for_instance(instance_name)
elif direction == "f" and to_token:
upper_bound = to_token.get_stream_pos_for_instance(instance_name)
if upper_bound and upper_bound < stream_ordering:
return False
return True
|
52,882 | def run_sniffers_raw(filename_or_file_prefix: Union[str, FilePrefix], sniff_order, is_binary=None):
"""Run through sniffers specified by sniff_order, return None of None match."""
file_prefix = _get_file_prefix(filename_or_file_prefix)
fname = file_prefix.filename
file_ext = None
for datatype in sniff_order:
"""
Some classes may not have a sniff function, which is ok. In fact,
Binary, Data, Tabular and Text are examples of classes that should never
have a sniff function. Since these classes are default classes, they contain
few rules to filter out data of other formats, so they should be called
from this function after all other datatypes in sniff_order have not been
successfully discovered.
"""
if is_binary is not None and is_binary != datatype.is_binary:
continue
try:
if hasattr(datatype, "sniff_prefix"):
datatype_compressed = getattr(datatype, "compressed", False)
if datatype_compressed and not file_prefix.compressed_format:
continue
if not datatype_compressed and file_prefix.compressed_format:
continue
if file_prefix.compressed_format and getattr(datatype, "compressed_format", None):
# In this case go a step further and compare the compressed format detected
# to the expected.
if file_prefix.compressed_format != datatype.compressed_format:
continue
if datatype.sniff_prefix(file_prefix):
file_ext = datatype.file_ext
break
elif datatype.sniff(fname):
file_ext = datatype.file_ext
break
except Exception:
pass
return file_ext
| def run_sniffers_raw(filename_or_file_prefix: Union[str, FilePrefix], sniff_order, is_binary=None):
"""Run through sniffers specified by sniff_order, return None of None match."""
file_prefix = _get_file_prefix(filename_or_file_prefix)
fname = file_prefix.filename
file_ext = None
for datatype in sniff_order:
"""
Some classes may not have a sniff function, which is ok. In fact,
Binary, Data, Tabular and Text are examples of classes that should never
have a sniff function. Since these classes are default classes, they contain
few rules to filter out data of other formats, so they should be called
from this function after all other datatypes in sniff_order have not been
successfully discovered.
"""
if is_binary and not datatype.is_binary:
continue
        try:
            if hasattr(datatype, "sniff_prefix"):
                datatype_compressed = getattr(datatype, "compressed", False)
                if datatype_compressed and not file_prefix.compressed_format:
                    continue
                if not datatype_compressed and file_prefix.compressed_format:
                    continue
                if file_prefix.compressed_format and getattr(datatype, "compressed_format", None):
                    # In this case go a step further and compare the compressed format detected
                    # to the expected.
                    if file_prefix.compressed_format != datatype.compressed_format:
                        continue
                if datatype.sniff_prefix(file_prefix):
                    file_ext = datatype.file_ext
                    break
            elif datatype.sniff(fname):
                file_ext = datatype.file_ext
                break
        except Exception:
            pass
    return file_ext
|
35,190 | def pinv(a, rcond=1e-15):
"""Compute the Moore-Penrose pseudoinverse of a matrix.
It computes a pseudoinverse of a matrix ``a``, which is a generalization
of the inverse matrix with Singular Value Decomposition (SVD).
Note that it automatically removes small singular values for stability.
Args:
a (cupy.ndarray): The matrix with dimension ``(..., M, N)``
rcond (float or cupy.ndarray): Cutoff parameter for small singular
values. For stability it computes the largest singular value
denoted by ``s``, and sets all singular values smaller than
``s`` to zero. Broadcasts against the stack of matrices.
Returns:
cupy.ndarray: The pseudoinverse of ``a`` with dimension
``(..., N, M)``.
.. warning::
This function calls one or more cuSOLVER routine(s) which may yield
invalid results if input conditions are not met.
To detect these invalid results, you can set the `linalg`
configuration to a value that is not `ignore` in
:func:`cupyx.errstate` or :func:`cupyx.seterr`.
.. seealso:: :func:`numpy.linalg.pinv`
"""
if a.size == 0:
m, n = a.shape[-2:]
return cupy.empty(a.shape[:-2] + (n, m), dtype=a.dtype)
u, s, vt = _decomposition.svd(a.conj(), full_matrices=False)
# discard small singular values
if cupy.isscalar(rcond):
rcond = cupy.asarray(rcond)
cutoff = rcond[..., None] * cupy.amax(s, axis=-1, keepdims=True)
leq = s <= cutoff
s = 1 / s
s[leq] = 0
return cupy.matmul(vt.swapaxes(-2, -1), s[..., None] * u.swapaxes(-2, -1))
| def pinv(a, rcond=1e-15):
"""Compute the Moore-Penrose pseudoinverse of a matrix.
It computes a pseudoinverse of a matrix ``a``, which is a generalization
of the inverse matrix with Singular Value Decomposition (SVD).
Note that it automatically removes small singular values for stability.
Args:
a (cupy.ndarray): The matrix with dimension ``(..., M, N)``
rcond (float or cupy.ndarray): Cutoff parameter for small singular
values. For stability it computes the largest singular value
denoted by ``s``, and sets all singular values smaller than
``rcond * s`` to zero. Broadcasts against the stack of matrices.
Returns:
cupy.ndarray: The pseudoinverse of ``a`` with dimension
``(..., N, M)``.
.. warning::
This function calls one or more cuSOLVER routine(s) which may yield
invalid results if input conditions are not met.
To detect these invalid results, you can set the `linalg`
configuration to a value that is not `ignore` in
:func:`cupyx.errstate` or :func:`cupyx.seterr`.
.. seealso:: :func:`numpy.linalg.pinv`
"""
if a.size == 0:
m, n = a.shape[-2:]
return cupy.empty(a.shape[:-2] + (n, m), dtype=a.dtype)
u, s, vt = _decomposition.svd(a.conj(), full_matrices=False)
# discard small singular values
if cupy.isscalar(rcond):
rcond = cupy.asarray(rcond)
cutoff = rcond[..., None] * cupy.amax(s, axis=-1, keepdims=True)
leq = s <= cutoff
s = 1 / s
s[leq] = 0
return cupy.matmul(vt.swapaxes(-2, -1), s[..., None] * u.swapaxes(-2, -1))
|
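A quick check of the defining property of the pseudoinverse computed above, using NumPy here so the sketch runs without a GPU: `A @ pinv(A) @ A` should reproduce `A`.

```python
import numpy as np

A = np.random.default_rng(0).standard_normal((4, 3))
A_pinv = np.linalg.pinv(A, rcond=1e-15)
print(np.allclose(A @ A_pinv @ A, A))  # True
```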
30,849 | def query_malops_command():
total_result_limit = demisto.args().get('totalResultLimit')
per_group_limit = demisto.args().get('perGroupLimit')
template_context = demisto.args().get('templasteContext')
filters = safe_load_json(demisto.args().get('filters', []))
within_last_days = demisto.args().get('withinLastDays')
guid_list = argToList(demisto.args().get('malopGuid'))
request_type = demisto.args().get('requestType', 'MalopProcess')
if within_last_days:
current_timestamp = time.time()
current_datetime = datetime.fromtimestamp(current_timestamp)
within_last_days_datetime = current_datetime - timedelta(days=int(within_last_days))
within_last_days_timestamp = time.mktime( # Converting datetime to time
within_last_days_datetime.timetuple()) + within_last_days_datetime.microsecond / 1E6
within_last_days_timestamp = within_last_days_timestamp * 1000
filters.append({
'facetName': 'malopLastUpdateTime',
'values': [within_last_days_timestamp],
'filterType': 'GreaterThan'
})
response = query_malops(total_result_limit, per_group_limit, template_context, filters, guid_list=guid_list,
request_type=request_type)
data = response['data']
malops_map = data.get('resultIdToElementDataMap')
if not data or not malops_map:
demisto.results('No malops found')
return
outputs = []
for malop_id in malops_map:
malop = malops_map[malop_id]
management_status = malop['simpleValues']['managementStatus']['values'][0]
if management_status and management_status.lower() == 'closed':
continue
creation_time = translate_timestamp(malop['simpleValues']['creationTime']['values'][0])
malop_last_update_time = translate_timestamp(malop['simpleValues']['malopLastUpdateTime']['values'][0])
decision_failure = malop['simpleValues']['decisionFeature']['values'][0].replace('Process.', '')
suspects_string = ''
raw_suspects = malop['elementValues'].get('suspects')
if raw_suspects:
suspects = raw_suspects['elementValues'][0]
suspects_string = '{}: {}'.format(suspects['elementType'], suspects['name'])
affected_machines = []
for machine in malop['elementValues']['affectedMachines']['elementValues']:
machine_name = machine.get('name', '')
affected_machines.append(machine_name)
involved_hashes = [] # type: List[str]
if 'rootCauseElementHashes' in malop['simpleValues']:
if malop['simpleValues']['rootCauseElementHashes']['totalValues'] != 0:
involved_hashes.extend(malop['simpleValues']['rootCauseElementHashes']['values'])
malop_output = {
'GUID': malop_id,
'Link': SERVER + '/#/malop/' + malop_id,
'CreationTime': creation_time,
'DecisionFailure': re.sub(r'\([^)]*\)', '', decision_failure),
'Suspects': suspects_string,
'LastUpdateTime': malop_last_update_time,
'Status': management_status,
'AffectedMachine': affected_machines,
'InvolvedHash': involved_hashes
}
outputs.append(malop_output)
ec = {
'Cybereason.Malops(val.GUID && val.GUID === obj.GUID)': outputs
}
demisto.results({
'Type': entryTypes['note'],
'Contents': data,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Cybereason Malops', outputs, ['GUID', 'Link', 'CreationTime', 'Status',
'LastUpdateTime', 'DecisionFailure', 'Suspects',
'AffectedMachine', 'InvolvedHash']),
'EntryContext': ec
})
| def query_malops_command():
total_result_limit = demisto.args().get('totalResultLimit')
per_group_limit = demisto.args().get('perGroupLimit')
template_context = demisto.args().get('templateContext')
filters = safe_load_json(demisto.args().get('filters', []))
within_last_days = demisto.args().get('withinLastDays')
guid_list = argToList(demisto.args().get('malopGuid'))
request_type = demisto.args().get('requestType', 'MalopProcess')
if within_last_days:
current_timestamp = time.time()
current_datetime = datetime.fromtimestamp(current_timestamp)
within_last_days_datetime = current_datetime - timedelta(days=int(within_last_days))
within_last_days_timestamp = time.mktime( # Converting datetime to time
within_last_days_datetime.timetuple()) + within_last_days_datetime.microsecond / 1E6
within_last_days_timestamp = within_last_days_timestamp * 1000
filters.append({
'facetName': 'malopLastUpdateTime',
'values': [within_last_days_timestamp],
'filterType': 'GreaterThan'
})
response = query_malops(total_result_limit, per_group_limit, template_context, filters, guid_list=guid_list,
request_type=request_type)
data = response['data']
malops_map = data.get('resultIdToElementDataMap')
if not data or not malops_map:
demisto.results('No malops found')
return
outputs = []
for malop_id in malops_map:
malop = malops_map[malop_id]
management_status = malop['simpleValues']['managementStatus']['values'][0]
if management_status and management_status.lower() == 'closed':
continue
creation_time = translate_timestamp(malop['simpleValues']['creationTime']['values'][0])
malop_last_update_time = translate_timestamp(malop['simpleValues']['malopLastUpdateTime']['values'][0])
decision_failure = malop['simpleValues']['decisionFeature']['values'][0].replace('Process.', '')
suspects_string = ''
raw_suspects = malop['elementValues'].get('suspects')
if raw_suspects:
suspects = raw_suspects['elementValues'][0]
suspects_string = '{}: {}'.format(suspects['elementType'], suspects['name'])
affected_machines = []
for machine in malop['elementValues']['affectedMachines']['elementValues']:
machine_name = machine.get('name', '')
affected_machines.append(machine_name)
involved_hashes = [] # type: List[str]
if 'rootCauseElementHashes' in malop['simpleValues']:
if malop['simpleValues']['rootCauseElementHashes']['totalValues'] != 0:
involved_hashes.extend(malop['simpleValues']['rootCauseElementHashes']['values'])
malop_output = {
'GUID': malop_id,
'Link': SERVER + '/#/malop/' + malop_id,
'CreationTime': creation_time,
'DecisionFailure': re.sub(r'\([^)]*\)', '', decision_failure),
'Suspects': suspects_string,
'LastUpdateTime': malop_last_update_time,
'Status': management_status,
'AffectedMachine': affected_machines,
'InvolvedHash': involved_hashes
}
outputs.append(malop_output)
ec = {
'Cybereason.Malops(val.GUID && val.GUID === obj.GUID)': outputs
}
demisto.results({
'Type': entryTypes['note'],
'Contents': data,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': tableToMarkdown('Cybereason Malops', outputs, ['GUID', 'Link', 'CreationTime', 'Status',
'LastUpdateTime', 'DecisionFailure', 'Suspects',
'AffectedMachine', 'InvolvedHash']),
'EntryContext': ec
})
|
43,513 | def rand_circuit(params, random_gate_sequence=None, num_qubits=None):
"""A random variational quantum circuit
Args:
params (array[float]): array of parameters
random_gate_sequence (dict): a dictionary of random gates
num_qubits (int): the number of qubits in the circuit
Returns:
float: the expectation value of the target observable
"""
for i in range(num_qubits):
qml.RY(np.pi / 4, wires=i)
for i in range(num_qubits):
random_gate_sequence[i](params[i], wires=i)
for i in range(num_qubits - 1):
qml.CZ(wires=[i, i + 1])
H = np.zeros((2 ** num_qubits, 2 ** num_qubits))
H[0, 0] = 1
wirelist = [i for i in range(num_qubits)]
return qml.expval(qml.Hermitian(H, wirelist))
| def rand_circuit(params, random_gate_sequence=None, num_qubits=None):
"""A random variational quantum circuit.
Args:
params (array[float]): array of parameters
random_gate_sequence (dict): a dictionary of random gates
num_qubits (int): the number of qubits in the circuit
Returns:
float: the expectation value of the target observable
"""
for i in range(num_qubits):
qml.RY(np.pi / 4, wires=i)
for i in range(num_qubits):
random_gate_sequence[i](params[i], wires=i)
for i in range(num_qubits - 1):
qml.CZ(wires=[i, i + 1])
H = np.zeros((2 ** num_qubits, 2 ** num_qubits))
H[0, 0] = 1
wirelist = [i for i in range(num_qubits)]
return qml.expval(qml.Hermitian(H, wirelist))
|
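A hedged sketch of wiring the circuit above into PennyLane, assuming `rand_circuit` from the entry is in scope; the gate pool and device name are illustrative choices, not taken from the snippet itself:

```python
import numpy as np
import pennylane as qml

num_qubits = 4
dev = qml.device("default.qubit", wires=num_qubits)
gate_set = [qml.RX, qml.RY, qml.RZ]
random_gate_sequence = {i: np.random.choice(gate_set) for i in range(num_qubits)}
params = np.random.uniform(0, 2 * np.pi, size=num_qubits)

qnode = qml.QNode(rand_circuit, dev)
print(qnode(params, random_gate_sequence=random_gate_sequence, num_qubits=num_qubits))
```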
14,469 | def main() -> int:
"""Linter CLI entry point."""
cwd = pathlib.Path.cwd()
options = cli.get_config(sys.argv[1:])
initialize_logger(options.verbosity)
_logger.debug("Options: %s", options)
formatter_factory = choose_formatter_factory(options)
formatter = formatter_factory(cwd, options.display_relative_path)
rulesdirs = get_rules_dirs([str(rdir) for rdir in options.rulesdir],
options.use_default_rules)
rules = RulesCollection(rulesdirs)
if options.listrules:
console.print(
_rule_format_map[options.format](rules),
highlight=False)
return 0
if options.listtags:
print(rules.listtags())
return 0
if isinstance(options.tags, str):
options.tags = options.tags.split(',')
skip = set()
for s in options.skip_list:
skip.update(str(s).split(','))
options.skip_list = frozenset(skip)
matches = _get_matches(rules, options)
# Assure we do not print duplicates and the order is consistent
matches = sorted(set(matches))
mark_as_success = False
if matches and options.progressive:
_logger.info(
"Matches found, running again on previos revision in order to detect regressions")
with _previous_revision():
old_matches = _get_matches(rules, options)
# remove old matches from current list
if len(old_matches) > len(matches):
_logger.warning(
"Total violation(s) reducted from %s to %s since previous "
"commit, will mark result as success",
len(old_matches), len(matches))
mark_as_success = True
current_len = len(matches)
matches = list(set(matches) - set(old_matches))
_logger.warning("Removed %s previously known violation(s)", current_len - len(matches))
for match in matches:
print(formatter.format(match, options.colored))
# If run under GitHub Actions we also want to emit output recognized by it.
if os.getenv('GITHUB_ACTIONS') == 'true' and os.getenv('GITHUB_WORKFLOW'):
formatter = formatters.AnnotationsFormatter(cwd, True)
for match in matches:
print(formatter.format(match))
if matches and not mark_as_success:
return report_outcome(matches, options=options)
else:
return 0
| def main() -> int:
"""Linter CLI entry point."""
cwd = pathlib.Path.cwd()
options = cli.get_config(sys.argv[1:])
initialize_logger(options.verbosity)
_logger.debug("Options: %s", options)
formatter_factory = choose_formatter_factory(options)
formatter = formatter_factory(cwd, options.display_relative_path)
rulesdirs = get_rules_dirs([str(rdir) for rdir in options.rulesdir],
options.use_default_rules)
rules = RulesCollection(rulesdirs)
if options.listrules:
console.print(
_rule_format_map[options.format](rules),
highlight=False)
return 0
if options.listtags:
print(rules.listtags())
return 0
if isinstance(options.tags, str):
options.tags = options.tags.split(',')
skip = set()
for s in options.skip_list:
skip.update(str(s).split(','))
options.skip_list = frozenset(skip)
matches = _get_matches(rules, options)
# Assure we do not print duplicates and the order is consistent
matches = sorted(set(matches))
mark_as_success = False
if matches and options.progressive:
_logger.info(
"Matches found, running again on previous revision in order to detect regressions")
with _previous_revision():
old_matches = _get_matches(rules, options)
# remove old matches from current list
if len(old_matches) > len(matches):
_logger.warning(
"Total violation(s) reducted from %s to %s since previous "
"commit, will mark result as success",
len(old_matches), len(matches))
mark_as_success = True
current_len = len(matches)
matches = list(set(matches) - set(old_matches))
_logger.warning("Removed %s previously known violation(s)", current_len - len(matches))
for match in matches:
print(formatter.format(match, options.colored))
# If run under GitHub Actions we also want to emit output recognized by it.
if os.getenv('GITHUB_ACTIONS') == 'true' and os.getenv('GITHUB_WORKFLOW'):
formatter = formatters.AnnotationsFormatter(cwd, True)
for match in matches:
print(formatter.format(match))
if matches and not mark_as_success:
return report_outcome(matches, options=options)
else:
return 0
|
3,221 | def test_retries():
"""
Tests that, even if I set up 5 retries, there is only one request
made since it times out.
"""
conneciton_mock = mock.Mock()
conneciton_mock.request.side_effect = ReadTimeoutError(None, "test.com", "Timeout")
snuba_pool = FakeConnectionPool(
connection=conneciton_mock,
host="www.test.com",
port=80,
retries=RetrySkipTimeout(total=5, method_whitelist={"GET", "POST"}),
timeout=30,
maxsize=10,
)
with pytest.raises(HTTPError):
snuba_pool.urlopen("POST", "/query", body="{}")
assert conneciton_mock.request.call_count == 1
| def test_retries():
"""
Tests that, even if I set up 5 retries, there is only one request
made since it times out.
"""
connection_mock = mock.Mock()
    connection_mock.request.side_effect = ReadTimeoutError(None, "test.com", "Timeout")
snuba_pool = FakeConnectionPool(
        connection=connection_mock,
host="www.test.com",
port=80,
retries=RetrySkipTimeout(total=5, method_whitelist={"GET", "POST"}),
timeout=30,
maxsize=10,
)
with pytest.raises(HTTPError):
snuba_pool.urlopen("POST", "/query", body="{}")
    assert connection_mock.request.call_count == 1
|
24,257 | def git_commit(targets, message, force=False, sign=False, update=False):
"""
Commit the changes for the given targets.
`targets` - be files or directiries
`message` - the commit message
`force` - (optional) force the commit
`sign` - sign with `-S` option
`update` - only commit updated files already tracked by git, via `-u`
"""
root = get_root()
target_paths = []
for t in targets:
target_paths.append(os.path.join(root, t))
with chdir(root):
if updated:
result = run_command(f"git add{' -f' if force else ''} -u {' '.join(target_paths)}")
else:
result = run_command(f"git add{' -f' if force else ''} {' '.join(target_paths)}")
if result.code != 0:
return result
return run_command('git commit{} -m "{}"'.format(' -S' if sign else '', message))
| def git_commit(targets, message, force=False, sign=False, update=False):
"""
Commit the changes for the given targets.
    `targets` - files or directories
`message` - the commit message
`force` - (optional) force the commit
`sign` - sign with `-S` option
`update` - only commit updated files already tracked by git, via `-u`
"""
root = get_root()
target_paths = []
for t in targets:
target_paths.append(os.path.join(root, t))
with chdir(root):
if update:
result = run_command(f"git add{' -f' if force else ''} -u {' '.join(target_paths)}")
else:
result = run_command(f"git add{' -f' if force else ''} {' '.join(target_paths)}")
if result.code != 0:
return result
return run_command('git commit{} -m "{}"'.format(' -S' if sign else '', message))
|
38,927 | def test_unsupported_field_type():
with pytest.raises(TypeError):
@pydantic.dataclasses.dataclass
class TestUnsupported:
unsupported: MutableSet[int]
| def test_unsupported_field_type():
with pytest.raises(TypeError) as exc_info:
@pydantic.dataclasses.dataclass
class TestUnsupported:
unsupported: MutableSet[int]
|
20,876 | def csv_from_excel(excel_content, delimiter, quote):
decoded_data = base64.decodestring(excel_content)
wb = xlrd.open_workbook(file_contents=decoded_data)
sh = wb.sheet_by_index(0)
content = StringIO()
quoting = csv.QUOTE_ALL
if not quote:
quoting = csv.QUOTE_NONE
if delimiter == " " and quoting == csv.QUOTE_NONE:
quoting = csv.QUOTE_MINIMAL
wr = csv.writer(content, delimiter=delimiter, quoting=quoting)
for rownum in xrange(sh.nrows):
row = []
for x in sh.row_values(rownum):
# if isinstance(x, str):
# x = x.strip()
if quoting == csv.QUOTE_NONE and delimiter in x:
raise ValidationError(
_('Template with CSV Quoting = False, data must not '
'containg same char with delimeter -> "%s"') % delimiter)
row.append(x)
wr.writerow(row)
content.seek(0) # Set index to 0, and start reading
out_file = base64.b64encode(content.getvalue().encode('utf-8'))
return out_file
| def csv_from_excel(excel_content, delimiter, quote):
decoded_data = base64.decodestring(excel_content)
wb = xlrd.open_workbook(file_contents=decoded_data)
sh = wb.sheet_by_index(0)
content = StringIO()
quoting = csv.QUOTE_ALL
if not quote:
quoting = csv.QUOTE_NONE
if delimiter == " " and quoting == csv.QUOTE_NONE:
quoting = csv.QUOTE_MINIMAL
wr = csv.writer(content, delimiter=delimiter, quoting=quoting)
for rownum in xrange(sh.nrows):
row = []
for x in sh.row_values(rownum):
# if isinstance(x, str):
# x = x.strip()
if quoting == csv.QUOTE_NONE and delimiter in x:
raise ValidationError(
_('Template with CSV Quoting = False, data must not '
'contain the same char as delimiter -> "%s"') % delimiter)
row.append(x)
wr.writerow(row)
content.seek(0) # Set index to 0, and start reading
out_file = base64.b64encode(content.getvalue().encode('utf-8'))
return out_file
|
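A small sketch of why the delimiter check above is needed: with `csv.QUOTE_NONE` and no escape character, the csv writer cannot represent a field that contains the delimiter.

```python
import csv
import io

buf = io.StringIO()
writer = csv.writer(buf, delimiter=",", quoting=csv.QUOTE_NONE)
try:
    writer.writerow(["a", "b,c"])
except csv.Error as exc:
    print(exc)  # need to escape, but no escapechar set
```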
45,745 | def forecast(
precip,
metadata,
velocity,
timesteps,
n_ens_members=24,
n_cascade_levels=6,
win_size=256,
overlap=0.1,
war_thr=0.1,
extrap_method="semilagrangian",
decomp_method="fft",
bandpass_filter_method="gaussian",
noise_method="ssft",
ar_order=2,
vel_pert_method=None,
probmatching_method="cdf",
mask_method="incremental",
callback=None,
fft_method="numpy",
return_output=True,
seed=None,
num_workers=1,
extrap_kwargs=None,
filter_kwargs=None,
noise_kwargs=None,
vel_pert_kwargs=None,
mask_kwargs=None,
measure_time=False,
):
"""
Generate a nowcast ensemble by using the Short-space ensemble prediction
system (SSEPS) method.
This is an experimental version of STEPS which allows for localization
by means of a window function.
Parameters
----------
precip: array-like
Array of shape (ar_order+1,m,n) containing the input precipitation fields
ordered by timestamp from oldest to newest. The time steps between the inputs
are assumed to be regular, and the inputs are required to have finite values.
metadata: dict
Metadata dictionary containing the accutime, xpixelsize, threshold and
zerovalue attributes as described in the documentation of
:py:mod:`pysteps.io.importers`. xpixelsize is assumed to be in meters.
velocity: array-like
Array of shape (2,m,n) containing the x- and y-components of the advection
field. The velocities are assumed to represent one time step between the
inputs. All values are required to be finite.
win_size: int or two-element sequence of ints
Size-length of the localization window.
overlap: float [0,1[
A float between 0 and 1 prescribing the level of overlap between
successive windows. If set to 0, no overlap is used.
war_thr: float
Threshold for the minimum fraction of rain in a given window.
timesteps: int or list of floats
Number of time steps to forecast or a list of time steps for which the
forecasts are computed (relative to the input time step). The elements
of the list are required to be in ascending order.
n_ens_members: int
The number of ensemble members to generate.
n_cascade_levels: int
The number of cascade levels to use.
extrap_method: {'semilagrangian'}
Name of the extrapolation method to use. See the documentation of
pysteps.extrapolation.interface.
decomp_method: {'fft'}
Name of the cascade decomposition method to use. See the documentation
of pysteps.cascade.interface.
bandpass_filter_method: {'gaussian', 'uniform'}
Name of the bandpass filter method to use with the cascade
decomposition.
noise_method: {'parametric','nonparametric','ssft','nested',None}
Name of the noise generator to use for perturbating the precipitation
field. See the documentation of pysteps.noise.interface. If set to None,
no noise is generated.
ar_order: int
The order of the autoregressive model to use. Must be >= 1.
vel_pert_method: {'bps',None}
Name of the noise generator to use for perturbing the advection field.
See the documentation of pysteps.noise.interface. If set to None,
the advection field is not perturbed.
mask_method: {'incremental', None}
The method to use for masking no precipitation areas in the forecast
field. The masked pixels are set to the minimum value of the
observations. 'incremental' = iteratively buffer the mask with a
certain rate (currently it is 1 km/min), None=no masking.
probmatching_method: {'cdf', None}
Method for matching the statistics of the forecast field with those of
the most recently observed one. 'cdf'=map the forecast CDF to the
observed one, None=no matching applied. Using 'mean' requires
that mask_method is not None.
callback: function
Optional function that is called after computation of each time step of
the nowcast. The function takes one argument: a three-dimensional array
of shape (n_ens_members,h,w), where h and w are the height and width
of the input field precip, respectively. This can be used, for instance,
writing the outputs into files.
return_output: bool
Set to False to disable returning the outputs as numpy arrays. This can
save memory if the intermediate results are written to output files
using the callback function.
seed: int
Optional seed number for the random generators.
num_workers: int
The number of workers to use for parallel computation. Applicable if
dask is enabled or pyFFTW is used for computing the FFT.
When num_workers>1, it is advisable to disable OpenMP by setting the
environment variable OMP_NUM_THREADS to 1.
This avoids slowdown caused by too many simultaneous threads.
fft_method: str
A string defining the FFT method to use (see utils.fft.get_method).
Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed,
the recommended method is 'pyfftw'.
extrap_kwargs: dict
Optional dictionary containing keyword arguments for the extrapolation
method. See the documentation of pysteps.extrapolation.
filter_kwargs: dict
Optional dictionary containing keyword arguments for the filter method.
See the documentation of pysteps.cascade.bandpass_filters.py.
noise_kwargs: dict
Optional dictionary containing keyword arguments for the initializer of
the noise generator. See the documentation of
pysteps.noise.fftgenerators.
vel_pert_kwargs: dict
Optional dictionary containing keyword arguments "p_pert_par" and
"p_pert_perp" for the initializer of the velocity perturbator.
See the documentation of pysteps.noise.motion.
mask_kwargs: dict
Optional dictionary containing mask keyword arguments 'mask_f' and
        'mask_rim', the factor defining the mask increment and the rim size,
respectively.
The mask increment is defined as mask_f*timestep/kmperpixel.
measure_time: bool
If set to True, measure, print and return the computation time.
Returns
-------
out: ndarray
If return_output is True, a four-dimensional array of shape
(n_ens_members,num_timesteps,m,n) containing a time series of forecast
precipitation fields for each ensemble member. Otherwise, a None value
is returned. The time series starts from t0+timestep, where timestep is
taken from the input precipitation fields R.
See also
--------
    pysteps.extrapolation.interface, pysteps.cascade.interface,
pysteps.noise.interface, pysteps.noise.utils.compute_noise_stddev_adjs
Notes
-----
Please be aware that this represents a (very) experimental implementation.
References
----------
:cite:`Seed2003`, :cite:`BPS2006`, :cite:`SPN2013`, :cite:`NBSG2017`
"""
_check_inputs(precip, velocity, timesteps, ar_order)
if extrap_kwargs is None:
extrap_kwargs = dict()
else:
extrap_kwargs = extrap_kwargs.copy()
if filter_kwargs is None:
filter_kwargs = dict()
if noise_kwargs is None:
noise_kwargs = dict()
if vel_pert_kwargs is None:
vel_pert_kwargs = dict()
if mask_kwargs is None:
mask_kwargs = dict()
if np.any(~np.isfinite(precip)):
raise ValueError("precip contains non-finite values")
if np.any(~np.isfinite(velocity)):
raise ValueError("velocity contains non-finite values")
if mask_method not in ["incremental", None]:
raise ValueError(
"unknown mask method %s: must be 'incremental' or None" % mask_method
)
if np.isscalar(win_size):
win_size = (int(win_size), int(win_size))
else:
win_size = tuple([int(win_size[i]) for i in range(2)])
timestep = metadata["accutime"]
kmperpixel = metadata["xpixelsize"] / 1000
print("Computing SSEPS nowcast")
print("-----------------------")
print("")
print("Inputs")
print("------")
print("input dimensions: %dx%d" % (precip.shape[1], precip.shape[2]))
print(f"km/pixel: {kmperpixel}")
print(f"time step: {timestep} minutes")
print("")
print("Methods")
print("-------")
print(f"extrapolation: {extrap_method}")
print(f"bandpass filter: {bandpass_filter_method}")
print(f"decomposition: {decomp_method}")
print(f"noise generator: {noise_method}")
print(f"velocity perturbator: {vel_pert_method}")
print(f"precip. mask method: {mask_method}")
print(f"probability matching: {probmatching_method}")
print(f"FFT method: {fft_method}")
print("")
print("Parameters")
print("----------")
print(f"localization window: {win_size[0]}x{win_size[1]}")
print(f"overlap: {overlap:.1f}")
print(f"war thr: {war_thr:.2f}")
if isinstance(timesteps, int):
print(f"number of time steps: {timesteps}")
else:
print(f"time steps: {timesteps}")
print(f"ensemble size: {n_ens_members}")
print(f"number of cascade levels: {n_cascade_levels}")
print(f"order of the AR(p) model: {ar_order}")
print("dask imported: {}".format(("yes" if dask_imported else "no")))
print(f"num workers: {num_workers}")
if vel_pert_method == "bps":
vp_par = vel_pert_kwargs.get(
"p_pert_par", noise.motion.get_default_params_bps_par()
)
vp_perp = vel_pert_kwargs.get(
"p_pert_perp", noise.motion.get_default_params_bps_perp()
)
print(
f"velocity perturbations, parallel: {vp_par[0]},{vp_par[1]},{vp_par[2]}"
)
print(
f"velocity perturbations, perpendicular: {vp_perp[0]},{vp_perp[1]},{vp_perp[2]}"
)
precip_thr = metadata["threshold"]
precip_min = metadata["zerovalue"]
num_ensemble_workers = n_ens_members if num_workers > n_ens_members else num_workers
if measure_time:
starttime_init = time.time()
# get methods
extrapolator_method = extrapolation.get_method(extrap_method)
x_values, y_values = np.meshgrid(
np.arange(precip.shape[2]), np.arange(precip.shape[1])
)
xy_coords = np.stack([x_values, y_values])
decomp_method, __ = cascade.get_method(decomp_method)
filter_method = cascade.get_method(bandpass_filter_method)
if noise_method is not None:
init_noise, generate_noise = noise.get_method(noise_method)
# advect the previous precipitation fields to the same position with the
# most recent one (i.e. transform them into the Lagrangian coordinates)
precip = precip[-(ar_order + 1) :, :, :].copy()
extrap_kwargs = extrap_kwargs.copy()
extrap_kwargs["xy_coords"] = xy_coords
extrap_kwargs["allow_nonfinite_values"] = (
True if np.any(~np.isfinite(precip)) else False
)
res = []
f = lambda precip, i: extrapolator_method(
precip[i, :, :], velocity, ar_order - i, "min", **extrap_kwargs
)[-1]
for i in range(ar_order):
if not dask_imported:
precip[i, :, :] = f(precip, i)
else:
res.append(dask.delayed(f)(precip, i))
if dask_imported:
num_workers_ = len(res) if num_workers > len(res) else num_workers
precip = np.stack(
list(dask.compute(*res, num_workers=num_workers_)) + [precip[-1, :, :]]
)
if mask_method == "incremental":
# get mask parameters
mask_rim = mask_kwargs.get("mask_rim", 10)
mask_f = mask_kwargs.get("mask_f", 1.0)
# initialize the structuring element
struct = scipy.ndimage.generate_binary_structure(2, 1)
# iterate it to expand it nxn
n = mask_f * timestep / kmperpixel
struct = scipy.ndimage.iterate_structure(struct, int((n - 1) / 2.0))
noise_kwargs.update(
{
"win_size": win_size,
"overlap": overlap,
"war_thr": war_thr,
"rm_rdisc": True,
"donorm": True,
}
)
print("Estimating nowcast parameters...", end="")
def estimator(precip, parsglob=None, idxm=None, idxn=None):
pars = {}
# initialize the perturbation generator for the precipitation field
if noise_method is not None and parsglob is None:
P = init_noise(precip, fft_method=fft_method, **noise_kwargs)
else:
P = None
pars["P"] = P
# initialize the band-pass filter
if parsglob is None:
filter = filter_method(precip.shape[1:], n_cascade_levels, **filter_kwargs)
pars["filter"] = filter
else:
pars["filter"] = None
# compute the cascade decompositions of the input precipitation fields
if parsglob is None:
R_d = []
for i in range(ar_order + 1):
R_d_ = decomp_method(
precip[i, :, :],
filter,
fft_method=fft_method,
normalize=True,
compute_stats=True,
)
R_d.append(R_d_)
R_d_ = None
# normalize the cascades and rearrange them into a four-dimensional array
# of shape (n_cascade_levels,ar_order+1,m,n) for the autoregressive model
if parsglob is None:
R_c = nowcast_utils.stack_cascades(R_d, n_cascade_levels)
mu = R_d[-1]["means"]
sigma = R_d[-1]["stds"]
R_d = None
else:
R_c = parsglob["R_c"][0][
:, :, idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)
].copy()
mu = np.mean(R_c, axis=(2, 3))
sigma = np.std(R_c, axis=(2, 3))
R_c = (R_c - mu[:, :, None, None]) / sigma[:, :, None, None]
mu = mu[:, -1]
sigma = sigma[:, -1]
pars["mu"] = mu
pars["sigma"] = sigma
# compute lag-l temporal autocorrelation coefficients for each cascade level
GAMMA = np.empty((n_cascade_levels, ar_order))
for i in range(n_cascade_levels):
R_c_ = np.stack([R_c[i, j, :, :] for j in range(ar_order + 1)])
GAMMA[i, :] = correlation.temporal_autocorrelation(R_c_)
R_c_ = None
if ar_order == 2:
# adjust the local lag-2 correlation coefficient to ensure that the AR(p)
# process is stationary
for i in range(n_cascade_levels):
GAMMA[i, 1] = autoregression.adjust_lag2_corrcoef2(
GAMMA[i, 0], GAMMA[i, 1]
)
# estimate the parameters of the AR(p) model from the autocorrelation
# coefficients
PHI = np.empty((n_cascade_levels, ar_order + 1))
for i in range(n_cascade_levels):
PHI[i, :] = autoregression.estimate_ar_params_yw(GAMMA[i, :])
pars["PHI"] = PHI
# stack the cascades into a five-dimensional array containing all ensemble
# members
R_c = [R_c.copy() for i in range(n_ens_members)]
pars["R_c"] = R_c
if mask_method is not None and parsglob is None:
MASK_prec = precip[-1, :, :] >= precip_thr
if mask_method == "incremental":
# initialize precip mask for each member
MASK_prec = nowcast_utils.compute_dilated_mask(
MASK_prec, struct, mask_rim
)
MASK_prec = [MASK_prec.copy() for j in range(n_ens_members)]
else:
MASK_prec = None
pars["MASK_prec"] = MASK_prec
return pars
# prepare windows
M, N = precip.shape[1:]
n_windows_M = np.ceil(1.0 * M / win_size[0]).astype(int)
n_windows_N = np.ceil(1.0 * N / win_size[1]).astype(int)
idxm = np.zeros((2, 1), dtype=int)
idxn = np.zeros((2, 1), dtype=int)
if measure_time:
starttime = time.time()
# compute global parameters to be used as defaults
parsglob = estimator(precip)
# loop windows
if n_windows_M > 1 or n_windows_N > 1:
war = np.empty((n_windows_M, n_windows_N))
PHI = np.empty((n_windows_M, n_windows_N, n_cascade_levels, ar_order + 1))
mu = np.empty((n_windows_M, n_windows_N, n_cascade_levels))
sigma = np.empty((n_windows_M, n_windows_N, n_cascade_levels))
ff = []
rc = []
pp = []
mm = []
for m in range(n_windows_M):
ff_ = []
pp_ = []
rc_ = []
mm_ = []
for n in range(n_windows_N):
# compute indices of local window
idxm[0] = int(np.max((m * win_size[0] - overlap * win_size[0], 0)))
idxm[1] = int(
np.min((idxm[0] + win_size[0] + overlap * win_size[0], M))
)
idxn[0] = int(np.max((n * win_size[1] - overlap * win_size[1], 0)))
idxn[1] = int(
np.min((idxn[0] + win_size[1] + overlap * win_size[1], N))
)
mask = np.zeros((M, N), dtype=bool)
mask[idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)] = True
precip_ = precip[
:, idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)
]
war[m, n] = (
np.sum(precip_[-1, :, :] >= precip_thr) / precip_[-1, :, :].size
)
if war[m, n] > war_thr:
# estimate local parameters
pars = estimator(precip, parsglob, idxm, idxn)
ff_.append(pars["filter"])
pp_.append(pars["P"])
rc_.append(pars["R_c"])
mm_.append(pars["MASK_prec"])
mu[m, n, :] = pars["mu"]
sigma[m, n, :] = pars["sigma"]
PHI[m, n, :, :] = pars["PHI"]
else:
# dry window
ff_.append(None)
pp_.append(None)
rc_.append(None)
mm_.append(None)
ff.append(ff_)
pp.append(pp_)
rc.append(rc_)
mm.append(mm_)
# remove unnecessary variables
ff_ = None
pp_ = None
rc_ = None
mm_ = None
pars = None
if measure_time:
print(f"{time.time() - starttime:.2f} seconds.")
else:
print(" done.")
# initialize the random generators
if noise_method is not None:
randgen_prec = []
randgen_motion = []
np.random.seed(seed)
for j in range(n_ens_members):
rs = np.random.RandomState(seed)
randgen_prec.append(rs)
seed = rs.randint(0, high=1e9)
rs = np.random.RandomState(seed)
randgen_motion.append(rs)
seed = rs.randint(0, high=1e9)
if vel_pert_method is not None:
init_vel_noise, generate_vel_noise = noise.get_method(vel_pert_method)
# initialize the perturbation generators for the motion field
vps = []
for j in range(n_ens_members):
kwargs = {
"randstate": randgen_motion[j],
"p_par": vp_par,
"p_perp": vp_perp,
}
vp_ = init_vel_noise(velocity, 1.0 / kmperpixel, timestep, **kwargs)
vps.append(vp_)
D = [None for j in range(n_ens_members)]
R_f = [[] for j in range(n_ens_members)]
if measure_time:
init_time = time.time() - starttime_init
precip = precip[-1, :, :]
print("Starting nowcast computation.")
if measure_time:
starttime_mainloop = time.time()
if isinstance(timesteps, int):
timesteps = range(timesteps + 1)
timestep_type = "int"
else:
original_timesteps = [0] + list(timesteps)
timesteps = nowcast_utils.binned_timesteps(original_timesteps)
timestep_type = "list"
extrap_kwargs["return_displacement"] = True
R_f_prev = [precip for i in range(n_ens_members)]
t_prev = [0.0 for j in range(n_ens_members)]
t_total = [0.0 for j in range(n_ens_members)]
# iterate each time step
for t, subtimestep_idx in enumerate(timesteps):
if timestep_type == "list":
subtimesteps = [original_timesteps[t_] for t_ in subtimestep_idx]
else:
subtimesteps = [t]
if (timestep_type == "list" and subtimesteps) or (
timestep_type == "int" and t > 0
):
is_nowcast_time_step = True
else:
is_nowcast_time_step = False
if is_nowcast_time_step:
print(
f"Computing nowcast for time step {t}... ",
end="",
flush=True,
)
if measure_time:
starttime = time.time()
# iterate each ensemble member
def worker(j):
# first the global step
if noise_method is not None:
# generate noise field
EPS = generate_noise(
parsglob["P"], randstate=randgen_prec[j], fft_method=fft_method
)
# decompose the noise field into a cascade
EPS_d = decomp_method(
EPS,
parsglob["filter"],
fft_method=fft_method,
normalize=True,
compute_stats=True,
)
else:
EPS_d = None
# iterate the AR(p) model for each cascade level
R_c = parsglob["R_c"][j].copy()
if R_c.shape[1] >= ar_order:
R_c = R_c[:, -ar_order:, :, :].copy()
for i in range(n_cascade_levels):
# normalize the noise cascade
if EPS_d is not None:
EPS_ = (
EPS_d["cascade_levels"][i, :, :] - EPS_d["means"][i]
) / EPS_d["stds"][i]
else:
EPS_ = None
# apply AR(p) process to cascade level
R_c[i, :, :, :] = autoregression.iterate_ar_model(
R_c[i, :, :, :], parsglob["PHI"][i, :], eps=EPS_
)
EPS_ = None
parsglob["R_c"][j] = R_c.copy()
EPS = None
# compute the recomposed precipitation field(s) from the cascades
# obtained from the AR(p) model(s)
R_f_new = _recompose_cascade(R_c, parsglob["mu"], parsglob["sigma"])
R_c = None
# then the local steps
if n_windows_M > 1 or n_windows_N > 1:
idxm = np.zeros((2, 1), dtype=int)
idxn = np.zeros((2, 1), dtype=int)
R_l = np.zeros((M, N), dtype=float)
M_s = np.zeros((M, N), dtype=float)
for m in range(n_windows_M):
for n in range(n_windows_N):
# compute indices of local window
idxm[0] = int(
np.max((m * win_size[0] - overlap * win_size[0], 0))
)
idxm[1] = int(
np.min((idxm[0] + win_size[0] + overlap * win_size[0], M))
)
idxn[0] = int(
np.max((n * win_size[1] - overlap * win_size[1], 0))
)
idxn[1] = int(
np.min((idxn[0] + win_size[1] + overlap * win_size[1], N))
)
# build localization mask
mask = _get_mask((M, N), idxm, idxn)
mask_l = mask[
idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)
]
M_s += mask
# skip if dry
if war[m, n] > war_thr:
R_c = rc[m][n][j].copy()
if R_c.shape[1] >= ar_order:
R_c = R_c[:, -ar_order:, :, :]
if noise_method is not None:
# extract noise field
EPS_d_l = EPS_d["cascade_levels"][
:,
idxm.item(0) : idxm.item(1),
idxn.item(0) : idxn.item(1),
].copy()
mu_ = np.mean(EPS_d_l, axis=(1, 2))
sigma_ = np.std(EPS_d_l, axis=(1, 2))
else:
EPS_d_l = None
# iterate the AR(p) model for each cascade level
for i in range(n_cascade_levels):
# normalize the noise cascade
if EPS_d_l is not None:
EPS_ = (
EPS_d_l[i, :, :] - mu_[i, None, None]
) / sigma_[i, None, None]
else:
EPS_ = None
# apply AR(p) process to cascade level
R_c[i, :, :, :] = autoregression.iterate_ar_model(
R_c[i, :, :, :], PHI[m, n, i, :], eps=EPS_
)
EPS_ = None
rc[m][n][j] = R_c.copy()
EPS_d_l = mu_ = sigma_ = None
# compute the recomposed precipitation field(s) from the cascades
# obtained from the AR(p) model(s)
mu_ = mu[m, n, :]
sigma_ = sigma[m, n, :]
R_c = [
((R_c[i, -1, :, :] * sigma_[i]) + mu_[i])
* parsglob["sigma"][i]
+ parsglob["mu"][i]
for i in range(len(mu_))
]
R_l_ = np.sum(np.stack(R_c), axis=0)
R_c = mu_ = sigma_ = None
# R_l_ = _recompose_cascade(R_c[:, :, :], mu[m, n, :], sigma[m, n, :])
else:
R_l_ = R_f_new[
idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)
].copy()
if probmatching_method == "cdf":
# adjust the CDF of the forecast to match the most recently
# observed precipitation field
R_ = precip[
idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)
].copy()
R_l_ = probmatching.nonparam_match_empirical_cdf(R_l_, R_)
R_ = None
R_l[
idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)
] += (R_l_ * mask_l)
R_l_ = None
ind = M_s > 0
R_l[ind] *= 1 / M_s[ind]
R_l[~ind] = precip_min
R_f_new = R_l.copy()
R_l = None
if probmatching_method == "cdf":
# adjust the CDF of the forecast to match the most recently
# observed precipitation field
R_f_new[R_f_new < precip_thr] = precip_min
R_f_new = probmatching.nonparam_match_empirical_cdf(R_f_new, precip)
if mask_method is not None:
# apply the precipitation mask to prevent generation of new
# precipitation into areas where it was not originally
# observed
if mask_method == "incremental":
MASK_prec = parsglob["MASK_prec"][j].copy()
R_f_new = R_f_new.min() + (R_f_new - R_f_new.min()) * MASK_prec
MASK_prec = None
if mask_method == "incremental":
parsglob["MASK_prec"][j] = nowcast_utils.compute_dilated_mask(
R_f_new >= precip_thr, struct, mask_rim
)
R_f_out = []
extrap_kwargs_ = extrap_kwargs.copy()
extrap_kwargs_["xy_coords"] = xy_coords
extrap_kwargs_["return_displacement"] = True
V_pert = velocity
# advect the recomposed precipitation field to obtain the forecast for
# the current time step (or subtimesteps if non-integer time steps are
# given)
for t_sub in subtimesteps:
if t_sub > 0:
t_diff_prev_int = t_sub - int(t_sub)
if t_diff_prev_int > 0.0:
R_f_ip = (1.0 - t_diff_prev_int) * R_f_prev[
j
] + t_diff_prev_int * R_f_new
else:
R_f_ip = R_f_prev[j]
t_diff_prev = t_sub - t_prev[j]
t_total[j] += t_diff_prev
# compute the perturbed motion field
if vel_pert_method is not None:
V_pert = velocity + generate_vel_noise(
vps[j], t_total[j] * timestep
)
extrap_kwargs_["displacement_prev"] = D[j]
R_f_ep, D[j] = extrapolator_method(
R_f_ip,
V_pert,
[t_diff_prev],
**extrap_kwargs_,
)
R_f_ep[0][R_f_ep[0] < precip_thr] = precip_min
R_f_out.append(R_f_ep[0])
t_prev[j] = t_sub
# advect the forecast field by one time step if no subtimesteps in the
# current interval were found
if not subtimesteps:
t_diff_prev = t + 1 - t_prev[j]
t_total[j] += t_diff_prev
# compute the perturbed motion field
if vel_pert_method is not None:
V_pert = velocity + generate_vel_noise(
vps[j], t_total[j] * timestep
)
extrap_kwargs_["displacement_prev"] = D[j]
_, D[j] = extrapolator_method(
None,
V_pert,
[t_diff_prev],
**extrap_kwargs_,
)
t_prev[j] = t + 1
R_f_prev[j] = R_f_new
return R_f_out
res = []
for j in range(n_ens_members):
if not dask_imported or n_ens_members == 1:
res.append(worker(j))
else:
res.append(dask.delayed(worker)(j))
R_f_ = (
dask.compute(*res, num_workers=num_ensemble_workers)
if dask_imported and n_ens_members > 1
else res
)
res = None
if is_nowcast_time_step:
if measure_time:
print(f"{time.time() - starttime:.2f} seconds.")
else:
print("done.")
if callback is not None:
R_f_stacked = np.stack(R_f_)
if R_f_stacked.shape[1] > 0:
callback(R_f_stacked.squeeze())
R_f_ = None
if return_output:
for j in range(n_ens_members):
R_f[j].extend(R_f_[j])
if measure_time:
mainloop_time = time.time() - starttime_mainloop
if return_output:
outarr = np.stack([np.stack(R_f[j]) for j in range(n_ens_members)])
if measure_time:
return outarr, init_time, mainloop_time
else:
return outarr
else:
return None
| def forecast(
precip,
metadata,
velocity,
timesteps,
n_ens_members=24,
n_cascade_levels=6,
win_size=256,
overlap=0.1,
war_thr=0.1,
extrap_method="semilagrangian",
decomp_method="fft",
bandpass_filter_method="gaussian",
noise_method="ssft",
ar_order=2,
vel_pert_method=None,
probmatching_method="cdf",
mask_method="incremental",
callback=None,
fft_method="numpy",
return_output=True,
seed=None,
num_workers=1,
extrap_kwargs=None,
filter_kwargs=None,
noise_kwargs=None,
vel_pert_kwargs=None,
mask_kwargs=None,
measure_time=False,
):
"""
Generate a nowcast ensemble by using the Short-space ensemble prediction
system (SSEPS) method.
This is an experimental version of STEPS which allows for localization
by means of a window function.
Parameters
----------
precip: array-like
Array of shape (ar_order+1,m,n) containing the input precipitation fields
ordered by timestamp from oldest to newest. The time steps between the inputs
are assumed to be regular, and the inputs are required to have finite values.
metadata: dict
Metadata dictionary containing the accutime, xpixelsize, threshold and
zerovalue attributes as described in the documentation of
:py:mod:`pysteps.io.importers`. xpixelsize is assumed to be in meters.
velocity: array-like
Array of shape (2,m,n) containing the x- and y-components of the advection
field. The velocities are assumed to represent one time step between the
inputs. All values are required to be finite.
win_size: int or two-element sequence of ints
Size-length of the localization window.
overlap: float [0,1[
A float between 0 and 1 prescribing the level of overlap between
successive windows. If set to 0, no overlap is used.
war_thr: float
Threshold for the minimum fraction of rain in a given window.
timesteps: int or list of floats
Number of time steps to forecast or a list of time steps for which the
forecasts are computed (relative to the input time step). The elements
of the list are required to be in ascending order.
n_ens_members: int
The number of ensemble members to generate.
n_cascade_levels: int
The number of cascade levels to use.
extrap_method: {'semilagrangian'}
Name of the extrapolation method to use. See the documentation of
pysteps.extrapolation.interface.
decomp_method: {'fft'}
Name of the cascade decomposition method to use. See the documentation
of pysteps.cascade.interface.
bandpass_filter_method: {'gaussian', 'uniform'}
Name of the bandpass filter method to use with the cascade
decomposition.
noise_method: {'parametric','nonparametric','ssft','nested',None}
Name of the noise generator to use for perturbing the precipitation
field. See the documentation of pysteps.noise.interface. If set to None,
no noise is generated.
ar_order: int
The order of the autoregressive model to use. Must be >= 1.
vel_pert_method: {'bps',None}
Name of the noise generator to use for perturbing the advection field.
See the documentation of pysteps.noise.interface. If set to None,
the advection field is not perturbed.
mask_method: {'incremental', None}
The method to use for masking no precipitation areas in the forecast
field. The masked pixels are set to the minimum value of the
observations. 'incremental' = iteratively buffer the mask with a
certain rate (currently it is 1 km/min), None=no masking.
probmatching_method: {'cdf', None}
Method for matching the statistics of the forecast field with those of
the most recently observed one. 'cdf'=map the forecast CDF to the
observed one, None=no matching applied.
callback: function
Optional function that is called after computation of each time step of
the nowcast. The function takes one argument: a three-dimensional array
of shape (n_ens_members,h,w), where h and w are the height and width
of the input field precip, respectively. This can be used, for instance,
for writing the outputs into files.
return_output: bool
Set to False to disable returning the outputs as numpy arrays. This can
save memory if the intermediate results are written to output files
using the callback function.
seed: int
Optional seed number for the random generators.
num_workers: int
The number of workers to use for parallel computation. Applicable if
dask is enabled or pyFFTW is used for computing the FFT.
When num_workers>1, it is advisable to disable OpenMP by setting the
environment variable OMP_NUM_THREADS to 1.
This avoids slowdown caused by too many simultaneous threads.
fft_method: str
A string defining the FFT method to use (see utils.fft.get_method).
Defaults to 'numpy' for compatibility reasons. If pyFFTW is installed,
the recommended method is 'pyfftw'.
extrap_kwargs: dict
Optional dictionary containing keyword arguments for the extrapolation
method. See the documentation of pysteps.extrapolation.
filter_kwargs: dict
Optional dictionary containing keyword arguments for the filter method.
See the documentation of pysteps.cascade.bandpass_filters.py.
noise_kwargs: dict
Optional dictionary containing keyword arguments for the initializer of
the noise generator. See the documentation of
pysteps.noise.fftgenerators.
vel_pert_kwargs: dict
Optional dictionary containing keyword arguments "p_pert_par" and
"p_pert_perp" for the initializer of the velocity perturbator.
See the documentation of pysteps.noise.motion.
mask_kwargs: dict
Optional dictionary containing mask keyword arguments 'mask_f' and
'mask_rim', the factor defining the mask increment and the rim size,
respectively.
The mask increment is defined as mask_f*timestep/kmperpixel.
measure_time: bool
If set to True, measure, print and return the computation time.
Returns
-------
out: ndarray
If return_output is True, a four-dimensional array of shape
(n_ens_members,num_timesteps,m,n) containing a time series of forecast
precipitation fields for each ensemble member. Otherwise, a None value
is returned. The time series starts from t0+timestep, where timestep is
taken from the input precipitation fields.
See also
--------
pysteps.extrapolation.interface, pysteps.cascade.interface,
pysteps.noise.interface, pysteps.noise.utils.compute_noise_stddev_adjs
Notes
-----
Please be aware that this represents a (very) experimental implementation.
References
----------
:cite:`Seed2003`, :cite:`BPS2006`, :cite:`SPN2013`, :cite:`NBSG2017`
"""
_check_inputs(precip, velocity, timesteps, ar_order)
if extrap_kwargs is None:
extrap_kwargs = dict()
else:
extrap_kwargs = extrap_kwargs.copy()
if filter_kwargs is None:
filter_kwargs = dict()
if noise_kwargs is None:
noise_kwargs = dict()
if vel_pert_kwargs is None:
vel_pert_kwargs = dict()
if mask_kwargs is None:
mask_kwargs = dict()
if np.any(~np.isfinite(precip)):
raise ValueError("precip contains non-finite values")
if np.any(~np.isfinite(velocity)):
raise ValueError("velocity contains non-finite values")
if mask_method not in ["incremental", None]:
raise ValueError(
"unknown mask method %s: must be 'incremental' or None" % mask_method
)
if np.isscalar(win_size):
win_size = (int(win_size), int(win_size))
else:
win_size = tuple([int(win_size[i]) for i in range(2)])
timestep = metadata["accutime"]
kmperpixel = metadata["xpixelsize"] / 1000
print("Computing SSEPS nowcast")
print("-----------------------")
print("")
print("Inputs")
print("------")
print("input dimensions: %dx%d" % (precip.shape[1], precip.shape[2]))
print(f"km/pixel: {kmperpixel}")
print(f"time step: {timestep} minutes")
print("")
print("Methods")
print("-------")
print(f"extrapolation: {extrap_method}")
print(f"bandpass filter: {bandpass_filter_method}")
print(f"decomposition: {decomp_method}")
print(f"noise generator: {noise_method}")
print(f"velocity perturbator: {vel_pert_method}")
print(f"precip. mask method: {mask_method}")
print(f"probability matching: {probmatching_method}")
print(f"FFT method: {fft_method}")
print("")
print("Parameters")
print("----------")
print(f"localization window: {win_size[0]}x{win_size[1]}")
print(f"overlap: {overlap:.1f}")
print(f"war thr: {war_thr:.2f}")
if isinstance(timesteps, int):
print(f"number of time steps: {timesteps}")
else:
print(f"time steps: {timesteps}")
print(f"ensemble size: {n_ens_members}")
print(f"number of cascade levels: {n_cascade_levels}")
print(f"order of the AR(p) model: {ar_order}")
print("dask imported: {}".format(("yes" if dask_imported else "no")))
print(f"num workers: {num_workers}")
if vel_pert_method == "bps":
vp_par = vel_pert_kwargs.get(
"p_pert_par", noise.motion.get_default_params_bps_par()
)
vp_perp = vel_pert_kwargs.get(
"p_pert_perp", noise.motion.get_default_params_bps_perp()
)
print(
f"velocity perturbations, parallel: {vp_par[0]},{vp_par[1]},{vp_par[2]}"
)
print(
f"velocity perturbations, perpendicular: {vp_perp[0]},{vp_perp[1]},{vp_perp[2]}"
)
precip_thr = metadata["threshold"]
precip_min = metadata["zerovalue"]
num_ensemble_workers = n_ens_members if num_workers > n_ens_members else num_workers
if measure_time:
starttime_init = time.time()
# get methods
extrapolator_method = extrapolation.get_method(extrap_method)
x_values, y_values = np.meshgrid(
np.arange(precip.shape[2]), np.arange(precip.shape[1])
)
xy_coords = np.stack([x_values, y_values])
decomp_method, __ = cascade.get_method(decomp_method)
filter_method = cascade.get_method(bandpass_filter_method)
if noise_method is not None:
init_noise, generate_noise = noise.get_method(noise_method)
# advect the previous precipitation fields to the same position with the
# most recent one (i.e. transform them into the Lagrangian coordinates)
precip = precip[-(ar_order + 1) :, :, :].copy()
extrap_kwargs = extrap_kwargs.copy()
extrap_kwargs["xy_coords"] = xy_coords
extrap_kwargs["allow_nonfinite_values"] = (
True if np.any(~np.isfinite(precip)) else False
)
res = []
f = lambda precip, i: extrapolator_method(
precip[i, :, :], velocity, ar_order - i, "min", **extrap_kwargs
)[-1]
for i in range(ar_order):
if not dask_imported:
precip[i, :, :] = f(precip, i)
else:
res.append(dask.delayed(f)(precip, i))
if dask_imported:
num_workers_ = len(res) if num_workers > len(res) else num_workers
precip = np.stack(
list(dask.compute(*res, num_workers=num_workers_)) + [precip[-1, :, :]]
)
if mask_method == "incremental":
# get mask parameters
mask_rim = mask_kwargs.get("mask_rim", 10)
mask_f = mask_kwargs.get("mask_f", 1.0)
# initialize the structuring element
struct = scipy.ndimage.generate_binary_structure(2, 1)
# iterate it to expand it nxn
n = mask_f * timestep / kmperpixel
struct = scipy.ndimage.iterate_structure(struct, int((n - 1) / 2.0))
noise_kwargs.update(
{
"win_size": win_size,
"overlap": overlap,
"war_thr": war_thr,
"rm_rdisc": True,
"donorm": True,
}
)
print("Estimating nowcast parameters...", end="")
def estimator(precip, parsglob=None, idxm=None, idxn=None):
pars = {}
# initialize the perturbation generator for the precipitation field
if noise_method is not None and parsglob is None:
P = init_noise(precip, fft_method=fft_method, **noise_kwargs)
else:
P = None
pars["P"] = P
# initialize the band-pass filter
if parsglob is None:
filter = filter_method(precip.shape[1:], n_cascade_levels, **filter_kwargs)
pars["filter"] = filter
else:
pars["filter"] = None
# compute the cascade decompositions of the input precipitation fields
if parsglob is None:
R_d = []
for i in range(ar_order + 1):
R_d_ = decomp_method(
precip[i, :, :],
filter,
fft_method=fft_method,
normalize=True,
compute_stats=True,
)
R_d.append(R_d_)
R_d_ = None
# normalize the cascades and rearrange them into a four-dimensional array
# of shape (n_cascade_levels,ar_order+1,m,n) for the autoregressive model
if parsglob is None:
R_c = nowcast_utils.stack_cascades(R_d, n_cascade_levels)
mu = R_d[-1]["means"]
sigma = R_d[-1]["stds"]
R_d = None
else:
R_c = parsglob["R_c"][0][
:, :, idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)
].copy()
mu = np.mean(R_c, axis=(2, 3))
sigma = np.std(R_c, axis=(2, 3))
R_c = (R_c - mu[:, :, None, None]) / sigma[:, :, None, None]
mu = mu[:, -1]
sigma = sigma[:, -1]
pars["mu"] = mu
pars["sigma"] = sigma
# compute lag-l temporal autocorrelation coefficients for each cascade level
GAMMA = np.empty((n_cascade_levels, ar_order))
for i in range(n_cascade_levels):
R_c_ = np.stack([R_c[i, j, :, :] for j in range(ar_order + 1)])
GAMMA[i, :] = correlation.temporal_autocorrelation(R_c_)
R_c_ = None
if ar_order == 2:
# adjust the local lag-2 correlation coefficient to ensure that the AR(p)
# process is stationary
for i in range(n_cascade_levels):
GAMMA[i, 1] = autoregression.adjust_lag2_corrcoef2(
GAMMA[i, 0], GAMMA[i, 1]
)
# estimate the parameters of the AR(p) model from the autocorrelation
# coefficients
PHI = np.empty((n_cascade_levels, ar_order + 1))
for i in range(n_cascade_levels):
PHI[i, :] = autoregression.estimate_ar_params_yw(GAMMA[i, :])
pars["PHI"] = PHI
# stack the cascades into a five-dimensional array containing all ensemble
# members
R_c = [R_c.copy() for i in range(n_ens_members)]
pars["R_c"] = R_c
if mask_method is not None and parsglob is None:
MASK_prec = precip[-1, :, :] >= precip_thr
if mask_method == "incremental":
# initialize precip mask for each member
MASK_prec = nowcast_utils.compute_dilated_mask(
MASK_prec, struct, mask_rim
)
MASK_prec = [MASK_prec.copy() for j in range(n_ens_members)]
else:
MASK_prec = None
pars["MASK_prec"] = MASK_prec
return pars
# prepare windows
M, N = precip.shape[1:]
n_windows_M = np.ceil(1.0 * M / win_size[0]).astype(int)
n_windows_N = np.ceil(1.0 * N / win_size[1]).astype(int)
idxm = np.zeros((2, 1), dtype=int)
idxn = np.zeros((2, 1), dtype=int)
if measure_time:
starttime = time.time()
# compute global parameters to be used as defaults
parsglob = estimator(precip)
# loop windows
if n_windows_M > 1 or n_windows_N > 1:
war = np.empty((n_windows_M, n_windows_N))
PHI = np.empty((n_windows_M, n_windows_N, n_cascade_levels, ar_order + 1))
mu = np.empty((n_windows_M, n_windows_N, n_cascade_levels))
sigma = np.empty((n_windows_M, n_windows_N, n_cascade_levels))
ff = []
rc = []
pp = []
mm = []
for m in range(n_windows_M):
ff_ = []
pp_ = []
rc_ = []
mm_ = []
for n in range(n_windows_N):
# compute indices of local window
idxm[0] = int(np.max((m * win_size[0] - overlap * win_size[0], 0)))
idxm[1] = int(
np.min((idxm[0] + win_size[0] + overlap * win_size[0], M))
)
idxn[0] = int(np.max((n * win_size[1] - overlap * win_size[1], 0)))
idxn[1] = int(
np.min((idxn[0] + win_size[1] + overlap * win_size[1], N))
)
mask = np.zeros((M, N), dtype=bool)
mask[idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)] = True
precip_ = precip[
:, idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)
]
war[m, n] = (
np.sum(precip_[-1, :, :] >= precip_thr) / precip_[-1, :, :].size
)
if war[m, n] > war_thr:
# estimate local parameters
pars = estimator(precip, parsglob, idxm, idxn)
ff_.append(pars["filter"])
pp_.append(pars["P"])
rc_.append(pars["R_c"])
mm_.append(pars["MASK_prec"])
mu[m, n, :] = pars["mu"]
sigma[m, n, :] = pars["sigma"]
PHI[m, n, :, :] = pars["PHI"]
else:
# dry window
ff_.append(None)
pp_.append(None)
rc_.append(None)
mm_.append(None)
ff.append(ff_)
pp.append(pp_)
rc.append(rc_)
mm.append(mm_)
# remove unnecessary variables
ff_ = None
pp_ = None
rc_ = None
mm_ = None
pars = None
if measure_time:
print(f"{time.time() - starttime:.2f} seconds.")
else:
print(" done.")
# initialize the random generators
if noise_method is not None:
randgen_prec = []
randgen_motion = []
np.random.seed(seed)
for j in range(n_ens_members):
rs = np.random.RandomState(seed)
randgen_prec.append(rs)
seed = rs.randint(0, high=1e9)
rs = np.random.RandomState(seed)
randgen_motion.append(rs)
seed = rs.randint(0, high=1e9)
if vel_pert_method is not None:
init_vel_noise, generate_vel_noise = noise.get_method(vel_pert_method)
# initialize the perturbation generators for the motion field
vps = []
for j in range(n_ens_members):
kwargs = {
"randstate": randgen_motion[j],
"p_par": vp_par,
"p_perp": vp_perp,
}
vp_ = init_vel_noise(velocity, 1.0 / kmperpixel, timestep, **kwargs)
vps.append(vp_)
D = [None for j in range(n_ens_members)]
R_f = [[] for j in range(n_ens_members)]
if measure_time:
init_time = time.time() - starttime_init
precip = precip[-1, :, :]
print("Starting nowcast computation.")
if measure_time:
starttime_mainloop = time.time()
if isinstance(timesteps, int):
timesteps = range(timesteps + 1)
timestep_type = "int"
else:
original_timesteps = [0] + list(timesteps)
timesteps = nowcast_utils.binned_timesteps(original_timesteps)
timestep_type = "list"
extrap_kwargs["return_displacement"] = True
R_f_prev = [precip for i in range(n_ens_members)]
t_prev = [0.0 for j in range(n_ens_members)]
t_total = [0.0 for j in range(n_ens_members)]
# iterate each time step
for t, subtimestep_idx in enumerate(timesteps):
if timestep_type == "list":
subtimesteps = [original_timesteps[t_] for t_ in subtimestep_idx]
else:
subtimesteps = [t]
if (timestep_type == "list" and subtimesteps) or (
timestep_type == "int" and t > 0
):
is_nowcast_time_step = True
else:
is_nowcast_time_step = False
if is_nowcast_time_step:
print(
f"Computing nowcast for time step {t}... ",
end="",
flush=True,
)
if measure_time:
starttime = time.time()
# iterate each ensemble member
def worker(j):
# first the global step
if noise_method is not None:
# generate noise field
EPS = generate_noise(
parsglob["P"], randstate=randgen_prec[j], fft_method=fft_method
)
# decompose the noise field into a cascade
EPS_d = decomp_method(
EPS,
parsglob["filter"],
fft_method=fft_method,
normalize=True,
compute_stats=True,
)
else:
EPS_d = None
# iterate the AR(p) model for each cascade level
R_c = parsglob["R_c"][j].copy()
if R_c.shape[1] >= ar_order:
R_c = R_c[:, -ar_order:, :, :].copy()
for i in range(n_cascade_levels):
# normalize the noise cascade
if EPS_d is not None:
EPS_ = (
EPS_d["cascade_levels"][i, :, :] - EPS_d["means"][i]
) / EPS_d["stds"][i]
else:
EPS_ = None
# apply AR(p) process to cascade level
R_c[i, :, :, :] = autoregression.iterate_ar_model(
R_c[i, :, :, :], parsglob["PHI"][i, :], eps=EPS_
)
EPS_ = None
parsglob["R_c"][j] = R_c.copy()
EPS = None
# compute the recomposed precipitation field(s) from the cascades
# obtained from the AR(p) model(s)
R_f_new = _recompose_cascade(R_c, parsglob["mu"], parsglob["sigma"])
R_c = None
# then the local steps
if n_windows_M > 1 or n_windows_N > 1:
idxm = np.zeros((2, 1), dtype=int)
idxn = np.zeros((2, 1), dtype=int)
R_l = np.zeros((M, N), dtype=float)
M_s = np.zeros((M, N), dtype=float)
for m in range(n_windows_M):
for n in range(n_windows_N):
# compute indices of local window
idxm[0] = int(
np.max((m * win_size[0] - overlap * win_size[0], 0))
)
idxm[1] = int(
np.min((idxm[0] + win_size[0] + overlap * win_size[0], M))
)
idxn[0] = int(
np.max((n * win_size[1] - overlap * win_size[1], 0))
)
idxn[1] = int(
np.min((idxn[0] + win_size[1] + overlap * win_size[1], N))
)
# build localization mask
mask = _get_mask((M, N), idxm, idxn)
mask_l = mask[
idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)
]
M_s += mask
# skip if dry
if war[m, n] > war_thr:
R_c = rc[m][n][j].copy()
if R_c.shape[1] >= ar_order:
R_c = R_c[:, -ar_order:, :, :]
if noise_method is not None:
# extract noise field
EPS_d_l = EPS_d["cascade_levels"][
:,
idxm.item(0) : idxm.item(1),
idxn.item(0) : idxn.item(1),
].copy()
mu_ = np.mean(EPS_d_l, axis=(1, 2))
sigma_ = np.std(EPS_d_l, axis=(1, 2))
else:
EPS_d_l = None
# iterate the AR(p) model for each cascade level
for i in range(n_cascade_levels):
# normalize the noise cascade
if EPS_d_l is not None:
EPS_ = (
EPS_d_l[i, :, :] - mu_[i, None, None]
) / sigma_[i, None, None]
else:
EPS_ = None
# apply AR(p) process to cascade level
R_c[i, :, :, :] = autoregression.iterate_ar_model(
R_c[i, :, :, :], PHI[m, n, i, :], eps=EPS_
)
EPS_ = None
rc[m][n][j] = R_c.copy()
EPS_d_l = mu_ = sigma_ = None
# compute the recomposed precipitation field(s) from the cascades
# obtained from the AR(p) model(s)
mu_ = mu[m, n, :]
sigma_ = sigma[m, n, :]
R_c = [
((R_c[i, -1, :, :] * sigma_[i]) + mu_[i])
* parsglob["sigma"][i]
+ parsglob["mu"][i]
for i in range(len(mu_))
]
R_l_ = np.sum(np.stack(R_c), axis=0)
R_c = mu_ = sigma_ = None
# R_l_ = _recompose_cascade(R_c[:, :, :], mu[m, n, :], sigma[m, n, :])
else:
R_l_ = R_f_new[
idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)
].copy()
if probmatching_method == "cdf":
# adjust the CDF of the forecast to match the most recently
# observed precipitation field
R_ = precip[
idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)
].copy()
R_l_ = probmatching.nonparam_match_empirical_cdf(R_l_, R_)
R_ = None
R_l[
idxm.item(0) : idxm.item(1), idxn.item(0) : idxn.item(1)
] += (R_l_ * mask_l)
R_l_ = None
ind = M_s > 0
R_l[ind] *= 1 / M_s[ind]
R_l[~ind] = precip_min
R_f_new = R_l.copy()
R_l = None
if probmatching_method == "cdf":
# adjust the CDF of the forecast to match the most recently
# observed precipitation field
R_f_new[R_f_new < precip_thr] = precip_min
R_f_new = probmatching.nonparam_match_empirical_cdf(R_f_new, precip)
if mask_method is not None:
# apply the precipitation mask to prevent generation of new
# precipitation into areas where it was not originally
# observed
if mask_method == "incremental":
MASK_prec = parsglob["MASK_prec"][j].copy()
R_f_new = R_f_new.min() + (R_f_new - R_f_new.min()) * MASK_prec
MASK_prec = None
if mask_method == "incremental":
parsglob["MASK_prec"][j] = nowcast_utils.compute_dilated_mask(
R_f_new >= precip_thr, struct, mask_rim
)
R_f_out = []
extrap_kwargs_ = extrap_kwargs.copy()
extrap_kwargs_["xy_coords"] = xy_coords
extrap_kwargs_["return_displacement"] = True
V_pert = velocity
# advect the recomposed precipitation field to obtain the forecast for
# the current time step (or subtimesteps if non-integer time steps are
# given)
for t_sub in subtimesteps:
if t_sub > 0:
t_diff_prev_int = t_sub - int(t_sub)
if t_diff_prev_int > 0.0:
R_f_ip = (1.0 - t_diff_prev_int) * R_f_prev[
j
] + t_diff_prev_int * R_f_new
else:
R_f_ip = R_f_prev[j]
t_diff_prev = t_sub - t_prev[j]
t_total[j] += t_diff_prev
# compute the perturbed motion field
if vel_pert_method is not None:
V_pert = velocity + generate_vel_noise(
vps[j], t_total[j] * timestep
)
extrap_kwargs_["displacement_prev"] = D[j]
R_f_ep, D[j] = extrapolator_method(
R_f_ip,
V_pert,
[t_diff_prev],
**extrap_kwargs_,
)
R_f_ep[0][R_f_ep[0] < precip_thr] = precip_min
R_f_out.append(R_f_ep[0])
t_prev[j] = t_sub
# advect the forecast field by one time step if no subtimesteps in the
# current interval were found
if not subtimesteps:
t_diff_prev = t + 1 - t_prev[j]
t_total[j] += t_diff_prev
# compute the perturbed motion field
if vel_pert_method is not None:
V_pert = velocity + generate_vel_noise(
vps[j], t_total[j] * timestep
)
extrap_kwargs_["displacement_prev"] = D[j]
_, D[j] = extrapolator_method(
None,
V_pert,
[t_diff_prev],
**extrap_kwargs_,
)
t_prev[j] = t + 1
R_f_prev[j] = R_f_new
return R_f_out
res = []
for j in range(n_ens_members):
if not dask_imported or n_ens_members == 1:
res.append(worker(j))
else:
res.append(dask.delayed(worker)(j))
R_f_ = (
dask.compute(*res, num_workers=num_ensemble_workers)
if dask_imported and n_ens_members > 1
else res
)
res = None
if is_nowcast_time_step:
if measure_time:
print(f"{time.time() - starttime:.2f} seconds.")
else:
print("done.")
if callback is not None:
R_f_stacked = np.stack(R_f_)
if R_f_stacked.shape[1] > 0:
callback(R_f_stacked.squeeze())
R_f_ = None
if return_output:
for j in range(n_ens_members):
R_f[j].extend(R_f_[j])
if measure_time:
mainloop_time = time.time() - starttime_mainloop
if return_output:
outarr = np.stack([np.stack(R_f[j]) for j in range(n_ens_members)])
if measure_time:
return outarr, init_time, mainloop_time
else:
return outarr
else:
return None
|
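
The pair above advances every cascade level with an autoregressive AR(p) recursion (the iterate_ar_model calls). A rough, self-contained sketch of that update on a 2-D field, with hypothetical coefficients and no claim about the actual pysteps API:

import numpy as np

rng = np.random.default_rng(42)
phi1, phi2 = 1.2, -0.4                       # assumed stationary AR(2) coefficients
x_tm2 = rng.normal(size=(64, 64))            # cascade level at t-2
x_tm1 = rng.normal(size=(64, 64))            # cascade level at t-1
for _ in range(3):                           # three forecast steps
    eps = rng.normal(size=x_tm1.shape)       # stochastic innovation field
    x_t = phi1 * x_tm1 + phi2 * x_tm2 + eps  # AR(2) update
    x_tm2, x_tm1 = x_tm1, x_t
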
35,789 | def read_pfm(file_name: str, slice_channels: int = 2) -> np.ndarray:
"""Read file in .pfm format. Might contain
Args:
file_name (str): Path to the file.
slice_channels (int): Number of channels to slice out of the file.
Useful for reading different data formats stored in .pfm files: Optical Flows, Stereo Disparity Maps, etc.
"""
with open(file_name, "rb") as f:
header = f.readline().rstrip()
if header not in [b"PF", b"Pf"]:
raise ValueError("Invalid PFM file")
dim_match = re.match(rb"^(\d+)\s(\d+)\s$", f.readline())
if not dim_match:
raise Exception("Malformed PFM header.")
w, h = (int(dim) for dim in dim_match.groups())
scale = float(f.readline().rstrip())
if scale < 0: # little-endian
endian = "<"
scale = -scale
else:
endian = ">" # big-endian
data = np.fromfile(f, dtype=endian + "f")
pfm_channels = 3 if header == b"PF" else 1
data = data.reshape(h, w, pfm_channels).transpose(2, 0, 1)
data = np.flip(data, axis=1) # flip on h dimension
data = data[:slice_channels, :, :]
return data.astype(np.float32)
| def _read_pfm(file_name: str, slice_channels: int = 2) -> np.ndarray:
"""Read file in .pfm format. Might contain
Args:
file_name (str): Path to the file.
slice_channels (int): Number of channels to slice out of the file.
Useful for reading different data formats stored in .pfm files: Optical Flows, Stereo Disparity Maps, etc.
"""
with open(file_name, "rb") as f:
header = f.readline().rstrip()
if header not in [b"PF", b"Pf"]:
raise ValueError("Invalid PFM file")
dim_match = re.match(rb"^(\d+)\s(\d+)\s$", f.readline())
if not dim_match:
raise Exception("Malformed PFM header.")
w, h = (int(dim) for dim in dim_match.groups())
scale = float(f.readline().rstrip())
if scale < 0: # little-endian
endian = "<"
scale = -scale
else:
endian = ">" # big-endian
data = np.fromfile(f, dtype=endian + "f")
pfm_channels = 3 if header == b"PF" else 1
data = data.reshape(h, w, pfm_channels).transpose(2, 0, 1)
data = np.flip(data, axis=1) # flip on h dimension
data = data[:slice_channels, :, :]
return data.astype(np.float32)
|
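
Both readers in the row above parse the same PFM layout: a magic line (b"PF" for 3 channels, b"Pf" for 1), a width/height line, a scale whose sign encodes endianness, then raw float32 samples (stored bottom-to-top, hence the row flip). A minimal in-memory round trip of that layout, with hypothetical values and the flip omitted:

import io
import numpy as np

w, h = 4, 3
samples = np.arange(w * h, dtype="<f4").reshape(h, w)  # little-endian float32
buf = io.BytesIO()
buf.write(b"Pf\n")                  # grayscale PFM
buf.write(f"{w} {h}\n".encode())    # dimensions
buf.write(b"-1.0\n")                # negative scale -> little-endian data
buf.write(samples.tobytes())
buf.seek(0)

header = buf.readline().rstrip()                        # b"Pf"
w_, h_ = (int(v) for v in buf.readline().split())
endian = "<" if float(buf.readline()) < 0 else ">"
data = np.frombuffer(buf.read(), dtype=endian + "f4").reshape(h_, w_)
assert np.allclose(data, samples)
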
1,512 | def get_package_status(package, min_version):
"""
Returns a dictionary containing a boolean specifying whether the given package
is up-to-date, along with the version string (empty string if
not installed).
"""
package_status = {}
try:
p = importlib.import_module(package)
package_version = p.__version__
package_status['up_to_date'] = parse_version(
package_version) >= parse_version(min_version)
package_status['version'] = package_version
except ImportError:
traceback.print_exc()
package_status['up_to_date'] = False
package_status['version'] = ""
return package_status
| def get_package_status(package, min_version):
"""
Returns a dictionary containing a boolean specifying whether the given package
is up-to-date, along with the version string (empty string if
not installed).
"""
package_status = {}
try:
module = importlib.import_module(package)
package_version = p.__version__
package_status['up_to_date'] = parse_version(
package_version) >= parse_version(min_version)
package_status['version'] = package_version
except ImportError:
traceback.print_exc()
package_status['up_to_date'] = False
package_status['version'] = ""
return package_status
|
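
The pair above boils down to: import the module, read its __version__, and compare against a minimum. A standalone sketch of the same check, assuming packaging's version parser (the parse_version used in the row is imported outside this excerpt) and a module that actually defines __version__:

import importlib
from packaging.version import parse as parse_version

def check_package(package: str, min_version: str) -> dict:
    status = {"up_to_date": False, "version": ""}
    try:
        module = importlib.import_module(package)
        status["version"] = module.__version__  # assumes the module exposes __version__
        status["up_to_date"] = parse_version(status["version"]) >= parse_version(min_version)
    except ImportError:
        pass
    return status

print(check_package("numpy", "1.17"))
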
13,656 | def _post_processing(samples, svs, itpl_part, d_nsp, L_rk_tol):
"""Compute coefficients/partition to construct minimal interpolant."""
num_vars = len(svs)
max_idx = np.argmax([*(len(ip) for ip in itpl_part)])
max_rks = []
for i in range(num_vars):
max_rk = 0
# we don't need to compute this max rank since we exploit nullspace structure
if i == max_idx:
max_rks.append(len(itpl_part[max_idx])-1)
continue
shapes = []
for j in range(num_vars):
if i != j:
shapes.append(samples.shape[j])
# compute max ranks of all possible 1-D Loewner matrices
for idc in itertools.product(*(range(s) for s in shapes)):
l_idc = list(idc)
l_idc.insert(i, slice(None))
L = nd_loewner(samples[tuple(l_idc)], [svs[i]], [itpl_part[i]])
rk = np.linalg.matrix_rank(L, tol=L_rk_tol)
if rk > max_rk:
max_rk = rk
max_rks.append(max_rk)
# exploit nullspace structure to obtain final max rank
denom = np.prod([*(len(itpl_part[k])-max_rks[k] for k in range(len(itpl_part)))])
if denom == 0 or d_nsp % denom != 0:
return None, None
max_rks[max_idx] = len(itpl_part[max_idx]) - d_nsp / denom
max_rks[max_idx] = round(max_rks[max_idx])
for i in range(len(max_rks)):
itpl_part[i] = itpl_part[i][0:max_rks[i]+1]
# solve LS problem
L = full_nd_loewner(samples, svs, itpl_part)
_, S, V = np.linalg.svd(L)
VH = np.conj(V.T)
coefs = VH[:, -1:]
return coefs, itpl_part
| def _post_processing(samples, svs, itpl_part, d_nsp, L_rk_tol):
"""Compute coefficients/partition to construct minimal interpolant."""
num_vars = len(svs)
max_idx = np.argmax([len(ip) for ip in itpl_part])
max_rks = []
for i in range(num_vars):
max_rk = 0
# we don't need to compute this max rank since we exploit nullspace structure
if i == max_idx:
max_rks.append(len(itpl_part[max_idx])-1)
continue
shapes = []
for j in range(num_vars):
if i != j:
shapes.append(samples.shape[j])
# compute max ranks of all possible 1-D Loewner matrices
for idc in itertools.product(*(range(s) for s in shapes)):
l_idc = list(idc)
l_idc.insert(i, slice(None))
L = nd_loewner(samples[tuple(l_idc)], [svs[i]], [itpl_part[i]])
rk = np.linalg.matrix_rank(L, tol=L_rk_tol)
if rk > max_rk:
max_rk = rk
max_rks.append(max_rk)
# exploit nullspace structure to obtain final max rank
denom = np.prod([*(len(itpl_part[k])-max_rks[k] for k in range(len(itpl_part)))])
if denom == 0 or d_nsp % denom != 0:
return None, None
max_rks[max_idx] = len(itpl_part[max_idx]) - d_nsp / denom
max_rks[max_idx] = round(max_rks[max_idx])
for i in range(len(max_rks)):
itpl_part[i] = itpl_part[i][0:max_rks[i]+1]
# solve LS problem
L = full_nd_loewner(samples, svs, itpl_part)
_, S, V = np.linalg.svd(L)
VH = np.conj(V.T)
coefs = VH[:, -1:]
return coefs, itpl_part
|
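
Two numerical steps recur in the pair above: the rank of a Loewner matrix taken with an explicit tolerance, and the least-squares solution of L c ≈ 0 read off as the right-singular vector of the smallest singular value. A small sketch on a synthetic matrix (the Loewner structure itself is not reproduced here):

import numpy as np

rng = np.random.default_rng(0)
L = rng.normal(size=(8, 3)) @ rng.normal(size=(3, 5))  # exactly rank 3
L += 1e-12 * rng.normal(size=L.shape)                  # tiny perturbation

rank = np.linalg.matrix_rank(L, tol=1e-8)              # singular values below tol are ignored
U, S, Vh = np.linalg.svd(L)
coefs = np.conj(Vh).T[:, -1:]                          # direction minimizing ||L @ coefs||
print(rank, np.linalg.norm(L @ coefs))                 # 3 and a residual of ~1e-12
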
7,358 | def quickshift(image, ratio=1.0, kernel_size=5, max_dist=10,
return_tree=False, sigma=0, convert2lab=True, random_seed=42,
channel_axis=-1):
"""Segments image using quickshift clustering in Color-(x,y) space.
Produces an oversegmentation of the image using the quickshift mode-seeking
algorithm.
Parameters
----------
image : (width, height, channels) ndarray
Input image. The axis corresponding to color channels can be specified
via the `channel_axis` argument.
ratio : float, optional, between 0 and 1
Balances color-space proximity and image-space proximity.
Higher values give more weight to color-space.
kernel_size : float, optional
Width of Gaussian kernel used in smoothing the
sample density. Higher means fewer clusters.
max_dist : float, optional
Cut-off point for data distances.
Higher means fewer clusters.
return_tree : bool, optional
Whether to return the full segmentation hierarchy tree and distances.
sigma : float, optional
Width for Gaussian smoothing as preprocessing. Zero means no smoothing.
convert2lab : bool, optional
Whether the input should be converted to Lab colorspace prior to
segmentation. For this purpose, the input is assumed to be RGB.
random_seed : int, optional
Random seed used for breaking ties.
channel_axis : int, optional
The axis of `image` corresponding to color channels. Defaults to the
last axis.
Returns
-------
segment_mask : (width, height) ndarray
Integer mask indicating segment labels.
Notes
-----
The authors advocate to convert the image to Lab color space prior to
segmentation, though this is not strictly necessary. For this to work, the
image must be given in RGB format.
References
----------
.. [1] Quick shift and kernel methods for mode seeking,
Vedaldi, A. and Soatto, S.
European Conference on Computer Vision, 2008
"""
image = img_as_float(np.atleast_3d(image))
float_dtype = _supported_float_type(image.dtype)
image = image.astype(float_dtype, copy=False)
if image.ndim > 3:
raise ValueError("only 2D color images are supported")
# move channels to last position as expected by the Cython code
image = np.moveaxis(image, source=channel_axis, destination=-1)
if convert2lab:
if image.shape[-1] != 3:
ValueError("Only RGB images can be converted to Lab space.")
image = rgb2lab(image)
if kernel_size < 1:
raise ValueError("`kernel_size` should be >= 1.")
image = ndi.gaussian_filter(image, [sigma, sigma, 0])
image = np.ascontiguousarray(image * ratio)
segment_mask = _quickshift_cython(
image, kernel_size=kernel_size, max_dist=max_dist,
return_tree=return_tree, random_seed=random_seed)
return segment_mask
| def quickshift(image, ratio=1.0, kernel_size=5, max_dist=10,
return_tree=False, sigma=0, convert2lab=True, random_seed=42,
*, channel_axis=-1):
"""Segments image using quickshift clustering in Color-(x,y) space.
Produces an oversegmentation of the image using the quickshift mode-seeking
algorithm.
Parameters
----------
image : (width, height, channels) ndarray
Input image. The axis corresponding to color channels can be specified
via the `channel_axis` argument.
ratio : float, optional, between 0 and 1
Balances color-space proximity and image-space proximity.
Higher values give more weight to color-space.
kernel_size : float, optional
Width of Gaussian kernel used in smoothing the
sample density. Higher means fewer clusters.
max_dist : float, optional
Cut-off point for data distances.
Higher means fewer clusters.
return_tree : bool, optional
Whether to return the full segmentation hierarchy tree and distances.
sigma : float, optional
Width for Gaussian smoothing as preprocessing. Zero means no smoothing.
convert2lab : bool, optional
Whether the input should be converted to Lab colorspace prior to
segmentation. For this purpose, the input is assumed to be RGB.
random_seed : int, optional
Random seed used for breaking ties.
channel_axis : int, optional
The axis of `image` corresponding to color channels. Defaults to the
last axis.
Returns
-------
segment_mask : (width, height) ndarray
Integer mask indicating segment labels.
Notes
-----
The authors advocate to convert the image to Lab color space prior to
segmentation, though this is not strictly necessary. For this to work, the
image must be given in RGB format.
References
----------
.. [1] Quick shift and kernel methods for mode seeking,
Vedaldi, A. and Soatto, S.
European Conference on Computer Vision, 2008
"""
image = img_as_float(np.atleast_3d(image))
float_dtype = _supported_float_type(image.dtype)
image = image.astype(float_dtype, copy=False)
if image.ndim > 3:
raise ValueError("only 2D color images are supported")
# move channels to last position as expected by the Cython code
image = np.moveaxis(image, source=channel_axis, destination=-1)
if convert2lab:
if image.shape[-1] != 3:
ValueError("Only RGB images can be converted to Lab space.")
image = rgb2lab(image)
if kernel_size < 1:
raise ValueError("`kernel_size` should be >= 1.")
image = ndi.gaussian_filter(image, [sigma, sigma, 0])
image = np.ascontiguousarray(image * ratio)
segment_mask = _quickshift_cython(
image, kernel_size=kernel_size, max_dist=max_dist,
return_tree=return_tree, random_seed=random_seed)
return segment_mask
|
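
A short usage sketch for the function in the row above, assuming scikit-image's public quickshift interface and its bundled astronaut() test image; the parameter values are illustrative only:

import numpy as np
from skimage import data
from skimage.segmentation import quickshift

img = data.astronaut()                           # RGB test image, shape (512, 512, 3)
segments = quickshift(img, kernel_size=3, max_dist=6, ratio=0.5)
print(segments.shape, np.unique(segments).size)  # label map and number of segments
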
58,668 | def _is_same_entity_annotation(entity, other) -> Any:
return (
entity["value"] == other["value"]
and entity["entity"] == other["entity"]
and entity.get("group", None) == other.get("group", None)
and entity.get("role", None) == other.get("group", None)
)
| def _is_same_entity_annotation(entity, other) -> Any:
return (
entity["value"] == other["value"]
and entity["entity"] == other["entity"]
and entity.get("group") == other.get("group")
and entity.get("role") == other.get("group")
)
|
59,805 | def create_agent_from_model_file(model_file, opt_overides=None):
"""
Load agent from model file if it exists.
An optional dict of option overrides can also be provided.
"""
opt = {}
opt['model_file'] = model_file
if opt_overides is None:
opt_overides = {}
opt['overrride'] = opt_overides
return create_agent_from_opt_file(opt)
| def create_agent_from_model_file(model_file, opt_overides=None):
"""
Load agent from model file if it exists.
:param opt_overrides:
An optional dict of option overrides can also be provided.
:return:
The agent
"""
opt = {}
opt['model_file'] = model_file
if opt_overides is None:
opt_overides = {}
opt['overrride'] = opt_overides
return create_agent_from_opt_file(opt)
|
52,694 | def connect(sys, Q, inputv, outputv):
"""Index-based interconnection of an LTI system.
The system `sys` is a system typically constructed with `append`, with
multiple inputs and outputs. The inputs and outputs are connected
according to the interconnection matrix `Q`, and then the final inputs and
outputs are trimmed according to the inputs and outputs listed in `inputv`
and `outputv`.
NOTE: Inputs and outputs are indexed starting at 1 and negative values
correspond to a negative feedback interconnection.
Parameters
----------
sys : StateSpace or TransferFunction
System to be connected
Q : 2D array
Interconnection matrix. First column gives the input to be connected.
The second column gives the index of an output that is to be fed into
that input. Each additional column gives the index of an additional
input that may be optionally added to that input. Negative
values mean the feedback is negative. A zero value is ignored. Inputs
and outputs are indexed starting at 1 to communicate sign information.
inputv : 1D array
list of final external inputs, indexed starting at 1
outputv : 1D array
list of final external outputs, indexed starting at 1
Returns
-------
sys: LTI system
Connected and trimmed LTI system
Examples
--------
>>> sys1 = ss([[1., -2], [3., -4]], [[5.], [7]], [[6, 8]], [[9.]])
>>> sys2 = ss([[-1.]], [[1.]], [[1.]], [[0.]])
>>> sys = append(sys1, sys2)
>>> Q = [[1, 2], [2, -1]] # negative feedback interconnection
>>> sysc = connect(sys, Q, [2], [1, 2])
"""
inputv, outputv, Q = np.asarray(inputv), np.asarray(outputv), np.asarray(Q)
# check indices
index_errors = (inputv - 1 > sys.inputs) | (inputv < 1)
if np.any(index_errors):
raise IndexError(
"inputv index %s out of bounds"%inputv[np.where(index_errors)])
index_errors = (outputv - 1 > sys.outputs) | (outputv < 1)
if np.any(index_errors):
raise IndexError(
"outputv index %s out of bounds"%outputv[np.where(index_errors)])
index_errors = (Q[:,0:1] - 1 > sys.inputs) | (Q[:,0:1] < 1)
if np.any(index_errors):
raise IndexError(
"Q input index %s out of bounds"%Q[np.where(index_errors)])
index_errors = (np.abs(Q[:,1:]) - 1 > sys.outputs)
if np.any(index_errors):
raise IndexError(
"Q output index %s out of bounds"%Q[np.where(index_errors)])
# first connect
K = np.zeros((sys.inputs, sys.outputs))
for r in np.array(Q).astype(int):
inp = r[0]-1
for outp in r[1:]:
if outp < 0:
K[inp,-outp-1] = -1.
elif outp > 0:
K[inp,outp-1] = 1.
sys = sys.feedback(np.array(K), sign=1)
# now trim
Ytrim = np.zeros((len(outputv), sys.outputs))
Utrim = np.zeros((sys.inputs, len(inputv)))
for i,u in enumerate(inputv):
Utrim[u-1,i] = 1.
for i,y in enumerate(outputv):
Ytrim[i,y-1] = 1.
return Ytrim * sys * Utrim
| def connect(sys, Q, inputv, outputv):
"""Index-based interconnection of an LTI system.
The system `sys` is a system typically constructed with `append`, with
multiple inputs and outputs. The inputs and outputs are connected
according to the interconnection matrix `Q`, and then the final inputs and
outputs are trimmed according to the inputs and outputs listed in `inputv`
and `outputv`.
NOTE: Inputs and outputs are indexed starting at 1 and negative values
correspond to a negative feedback interconnection.
Parameters
----------
sys : StateSpace or TransferFunction
System to be connected
Q : 2D array
Interconnection matrix. First column gives the input to be connected.
The second column gives the index of an output that is to be fed into
that input. Each additional column gives the index of an additional
input that may be optionally added to that input. Negative
values mean the feedback is negative. A zero value is ignored. Inputs
and outputs are indexed starting at 1 to communicate sign information.
inputv : 1D array
list of final external inputs, indexed starting at 1
outputv : 1D array
list of final external outputs, indexed starting at 1
Returns
-------
sys: LTI system
Connected and trimmed LTI system
Examples
--------
>>> sys1 = ss([[1., -2], [3., -4]], [[5.], [7]], [[6, 8]], [[9.]])
>>> sys2 = ss([[-1.]], [[1.]], [[1.]], [[0.]])
>>> sys = append(sys1, sys2)
>>> Q = [[1, 2], [2, -1]] # negative feedback interconnection
>>> sysc = connect(sys, Q, [2], [1, 2])
"""
inputv, outputv, Q = np.asarray(inputv), np.asarray(outputv), np.asarray(Q)
# check indices
index_errors = (inputv - 1 > sys.inputs) | (inputv < 1)
if np.any(index_errors):
raise IndexError(
"inputv index %s out of bounds"%inputv[np.where(index_errors)])
index_errors = (outputv - 1 > sys.outputs) | (outputv < 1)
if np.any(index_errors):
raise IndexError(
"outputv index %s out of bounds"%outputv[np.where(index_errors)])
index_errors = (Q[:,0:1] - 1 > sys.inputs) | (Q[:,0:1] < 1)
if np.any(index_errors):
raise IndexError(
"Q input index %s out of bounds"%Q[np.where(index_errors)])
index_errors = (np.abs(Q[:,1:]) - 1 > sys.outputs)
if np.any(index_errors):
raise IndexError(
"Q output index %s out of bounds"%Q[np.where(index_errors)])
# first connect
K = np.zeros((sys.inputs, sys.outputs))
for r in np.array(Q).astype(int):
inp = r[0]-1
for outp in r[1:]:
if outp < 0:
K[inp,-outp-1] = -1.
elif outp > 0:
K[inp,outp-1] = 1.
sys = sys.feedback(np.array(K), sign=1)
# now trim
Ytrim = np.zeros((len(outputv), sys.outputs))
Utrim = np.zeros((sys.inputs, len(inputv)))
for i,u in enumerate(inputv):
Utrim[u-1,i] = 1.
for i,y in enumerate(outputv):
Ytrim[i,y-1] = 1.
return Ytrim * sys * Utrim
|
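
The heart of connect() above is turning the interconnection matrix Q into a static feedback gain K: each row [input, output, ...] puts +1 or -1 at K[input-1, |output|-1], with the sign taken from the output index. A standalone sketch of just that step, reusing the docstring's 2-input, 2-output negative-feedback example:

import numpy as np

n_inputs, n_outputs = 2, 2
Q = np.array([[1, 2], [2, -1]])   # output 2 feeds input 1; output 1 feeds back negatively into input 2

K = np.zeros((n_inputs, n_outputs))
for r in Q.astype(int):
    inp = r[0] - 1
    for outp in r[1:]:
        if outp < 0:
            K[inp, -outp - 1] = -1.0
        elif outp > 0:
            K[inp, outp - 1] = 1.0
print(K)                          # [[ 0.  1.] [-1.  0.]]
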
52,877 | def test_import_export_history_hidden_false_with_hidden_dataset():
app = _mock_app()
u, h, d1, d2, j = _setup_simple_cat_job(app)
d2.visible = False
app.model.session.flush()
imported_history = _import_export_history(app, h, export_files="copy", include_hidden=False)
assert d2.dataset.get_size() > 0
assert imported_history.datasets[-1].get_size() == 0
| def test_import_export_history_hidden_false_with_hidden_dataset():
app = _mock_app()
u, h, d1, d2, j = _setup_simple_cat_job(app)
d2.visible = False
app.model.session.flush()
imported_history = _import_export_history(app, h, export_files="copy", include_hidden=False)
assert d1.dataset.get_size() == imported_history.datasets[0].get_size()
assert imported_history.datasets[1].get_size() == 0
|
45,137 | def flag_is_enabled(
flag_name: str,
default=False,
client: FeatureFlagClient = None,
**conditions: Optional[Any]
):
"""
Check if a feature flag is enabled.
This function always returns False if the setting
PREFECT_CLOUD_ENABLE_FEATURE_FLAGGING is false.
NOTE: If `flag_is_enabled()` is called for a feature that has conditions,
but the caller does not give any conditions, the current state of the flag
is returned.
Args:
flag_name: the name of the feature flag
default: the default return value to use if no feature flag with
the given name exists. Defaults to False.
client: The FeatureFlagClient instance to use. Defaults to a client
configured to look at an in-memory feature store.
conditions: keyword arguments, e.g. is_admin=True, to check
against any Conditions on the flag
Returns:
bool: whether the flag is enabled
"""
if not settings.PREFECT_FEATURE_FLAGGING_ENABLED.value():
return False
if not client:
client = get_features_client()
return client.is_enabled(flag_name, default=default, **conditions)
| def flag_is_enabled(
flag_name: str,
default=False,
client: FeatureFlagClient = None,
**conditions: Optional[Any]
):
"""
Check if a feature flag is enabled.
This function always returns False if the setting
PREFECT_CLOUD_ENABLE_FEATURE_FLAGGING is false.
NOTE: If `flag_is_enabled()` is called for a feature that has conditions,
but the caller does not give any conditions, the current state of the flag
is returned.
Args:
flag_name: the name of the feature flag
default: the default return value to use if no feature flag with
the given name exists. Defaults to False.
client: The FeatureFlagClient instance to use. Defaults to a client
configured to look at an in-memory feature store.
conditions: keyword arguments, e.g. `is_admin=True`, to check
against any Conditions on the flag
Returns:
bool: whether the flag is enabled
"""
if not settings.PREFECT_FEATURE_FLAGGING_ENABLED.value():
return False
if not client:
client = get_features_client()
return client.is_enabled(flag_name, default=default, **conditions)
|
21,180 | def forward(model, docs_moves: Tuple[List[Doc], TransitionSystem], is_train: bool):
nF = model.get_dim("nF")
tok2vec = model.get_ref("tok2vec")
lower_pad = model.get_param("lower_pad")
lower_W = model.get_param("lower_W")
lower_b = model.get_param("lower_b")
upper_W = model.get_param("upper_W")
upper_b = model.get_param("upper_b")
nH = model.get_dim("nH")
nP = model.get_dim("nP")
nO = model.get_dim("nO")
nI = model.get_dim("nI")
beam_width = model.attrs.get("beam_width", 1)
beam_density = model.attrs.get("beam_density", 0.0)
ops = model.ops
docs, moves = docs_moves
states = moves.init_batch(docs)
tokvecs, backprop_tok2vec = tok2vec(docs, is_train)
tokvecs = model.ops.xp.vstack((tokvecs, lower_pad))
feats, backprop_feats = _forward_precomputable_affine(model, tokvecs, is_train)
all_ids = []
all_which = []
all_statevecs = []
all_scores = []
if beam_width == 1:
batch = GreedyBatch(moves, states, None)
else:
batch = _beam_utils.BeamBatch(
moves, states, None, beam_width, density=beam_density
)
seen_mask = _get_seen_mask(model)
arange = model.ops.xp.arange(nF)
while not batch.is_done:
ids = numpy.zeros((len(batch.get_unfinished_states()), nF), dtype="i")
for i, state in enumerate(batch.get_unfinished_states()):
state.set_context_tokens(ids, i, nF)
# Sum the state features, add the bias and apply the activation (maxout)
# to create the state vectors.
preacts2f = feats[ids, arange].sum(axis=1) # type: ignore
preacts2f += lower_b
preacts = model.ops.reshape3f(preacts2f, preacts2f.shape[0], nH, nP)
assert preacts.shape[0] == len(batch.get_unfinished_states()), preacts.shape
statevecs, which = ops.maxout(preacts)
# Multiply the state-vector by the scores weights and add the bias,
# to get the logits.
scores = model.ops.gemm(statevecs, upper_W, trans2=True)
scores += upper_b
scores[:, seen_mask] = model.ops.xp.nanmin(scores)
# Transition the states, filtering out any that are finished.
cpu_scores = model.ops.to_numpy(scores)
batch.advance(cpu_scores)
all_scores.append(scores)
if is_train:
# Remember intermediate results for the backprop.
all_ids.append(ids)
all_statevecs.append(statevecs)
all_which.append(which)
def backprop_parser(d_states_d_scores):
d_tokvecs = model.ops.alloc2f(tokvecs.shape[0], tokvecs.shape[1])
ids = model.ops.xp.vstack(all_ids)
which = ops.xp.vstack(all_which)
statevecs = model.ops.xp.vstack(all_statevecs)
_, d_scores = d_states_d_scores
if model.attrs.get("unseen_classes"):
# If we have a negative gradient (i.e. the probability should
# increase) on any classes we filtered out as unseen, mark
# them as seen.
for clas in set(model.attrs["unseen_classes"]):
if (d_scores[:, clas] < 0).any():
model.attrs["unseen_classes"].remove(clas)
d_scores *= seen_mask == False
# Calculate the gradients for the parameters of the upper layer.
# The weight gemm is (nS, nO) @ (nS, nH).T
model.inc_grad("upper_b", d_scores.sum(axis=0))
model.inc_grad("upper_W", model.ops.gemm(d_scores, statevecs, trans1=True))
# Now calculate d_statevecs, by backproping through the upper linear layer.
# This gemm is (nS, nO) @ (nO, nH)
d_statevecs = model.ops.gemm(d_scores, upper_W)
# Backprop through the maxout activation
d_preacts = model.ops.backprop_maxout(d_statevecs, which, nP)
d_preacts2f = model.ops.reshape2f(d_preacts, d_preacts.shape[0], nH * nP)
model.inc_grad("lower_b", d_preacts2f.sum(axis=0))
# We don't need to backprop the summation, because we pass back the IDs instead
d_state_features = backprop_feats((d_preacts2f, ids))
d_tokvecs = model.ops.alloc2f(tokvecs.shape[0], tokvecs.shape[1])
model.ops.scatter_add(d_tokvecs, ids, d_state_features)
model.inc_grad("lower_pad", d_tokvecs[-1])
return (backprop_tok2vec(d_tokvecs[:-1]), None)
return (list(batch), all_scores), backprop_parser
| def forward(model, docs_moves: Tuple[List[Doc], TransitionSystem], is_train: bool):
nF = model.get_dim("nF")
tok2vec = model.get_ref("tok2vec")
lower_pad = model.get_param("lower_pad")
lower_W = model.get_param("lower_W")
lower_b = model.get_param("lower_b")
upper_W = model.get_param("upper_W")
upper_b = model.get_param("upper_b")
nH = model.get_dim("nH")
nP = model.get_dim("nP")
nO = model.get_dim("nO")
nI = model.get_dim("nI")
beam_width = model.attrs.get("beam_width", 1)
beam_density = model.attrs.get("beam_density", 0.0)
ops = model.ops
docs, moves = docs_moves
states = moves.init_batch(docs)
tokvecs, backprop_tok2vec = tok2vec(docs, is_train)
tokvecs = model.ops.xp.vstack((tokvecs, lower_pad))
feats, backprop_feats = _forward_precomputable_affine(model, tokvecs, is_train)
all_ids = []
all_which = []
all_statevecs = []
all_scores = []
if beam_width == 1:
batch = GreedyBatch(moves, states, None)
else:
batch = _beam_utils.BeamBatch(
moves, states, None, width=beam_width, density=beam_density
)
seen_mask = _get_seen_mask(model)
arange = model.ops.xp.arange(nF)
while not batch.is_done:
ids = numpy.zeros((len(batch.get_unfinished_states()), nF), dtype="i")
for i, state in enumerate(batch.get_unfinished_states()):
state.set_context_tokens(ids, i, nF)
# Sum the state features, add the bias and apply the activation (maxout)
# to create the state vectors.
preacts2f = feats[ids, arange].sum(axis=1) # type: ignore
preacts2f += lower_b
preacts = model.ops.reshape3f(preacts2f, preacts2f.shape[0], nH, nP)
assert preacts.shape[0] == len(batch.get_unfinished_states()), preacts.shape
statevecs, which = ops.maxout(preacts)
# Multiply the state-vector by the scores weights and add the bias,
# to get the logits.
scores = model.ops.gemm(statevecs, upper_W, trans2=True)
scores += upper_b
scores[:, seen_mask] = model.ops.xp.nanmin(scores)
# Transition the states, filtering out any that are finished.
cpu_scores = model.ops.to_numpy(scores)
batch.advance(cpu_scores)
all_scores.append(scores)
if is_train:
# Remember intermediate results for the backprop.
all_ids.append(ids)
all_statevecs.append(statevecs)
all_which.append(which)
def backprop_parser(d_states_d_scores):
d_tokvecs = model.ops.alloc2f(tokvecs.shape[0], tokvecs.shape[1])
ids = model.ops.xp.vstack(all_ids)
which = ops.xp.vstack(all_which)
statevecs = model.ops.xp.vstack(all_statevecs)
_, d_scores = d_states_d_scores
if model.attrs.get("unseen_classes"):
# If we have a negative gradient (i.e. the probability should
# increase) on any classes we filtered out as unseen, mark
# them as seen.
for clas in set(model.attrs["unseen_classes"]):
if (d_scores[:, clas] < 0).any():
model.attrs["unseen_classes"].remove(clas)
d_scores *= seen_mask == False
# Calculate the gradients for the parameters of the upper layer.
# The weight gemm is (nS, nO) @ (nS, nH).T
model.inc_grad("upper_b", d_scores.sum(axis=0))
model.inc_grad("upper_W", model.ops.gemm(d_scores, statevecs, trans1=True))
# Now calculate d_statevecs, by backproping through the upper linear layer.
# This gemm is (nS, nO) @ (nO, nH)
d_statevecs = model.ops.gemm(d_scores, upper_W)
# Backprop through the maxout activation
d_preacts = model.ops.backprop_maxout(d_statevecs, which, nP)
d_preacts2f = model.ops.reshape2f(d_preacts, d_preacts.shape[0], nH * nP)
model.inc_grad("lower_b", d_preacts2f.sum(axis=0))
# We don't need to backprop the summation, because we pass back the IDs instead
d_state_features = backprop_feats((d_preacts2f, ids))
d_tokvecs = model.ops.alloc2f(tokvecs.shape[0], tokvecs.shape[1])
model.ops.scatter_add(d_tokvecs, ids, d_state_features)
model.inc_grad("lower_pad", d_tokvecs[-1])
return (backprop_tok2vec(d_tokvecs[:-1]), None)
return (list(batch), all_scores), backprop_parser
|
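In the record above, each parser step sums the state features, adds lower_b, reshapes the pre-activations to (n_states, nH, nP) and applies a maxout activation, keeping the argmax indices ("which") so the backward pass can route gradients through the winning pieces. A standalone numpy sketch of that activation step, with made-up shapes and assuming ops.maxout behaves like max/argmax over the piece axis:

import numpy as np

n_states, nH, nP = 4, 3, 2                      # hypothetical sizes
preacts2f = np.random.randn(n_states, nH * nP)  # summed features plus bias
preacts = preacts2f.reshape(n_states, nH, nP)
statevecs = preacts.max(axis=-1)                # (n_states, nH) state vectors
which = preacts.argmax(axis=-1)                 # winning piece per unit, kept for backprop
assert statevecs.shape == (n_states, nH) and which.shape == (n_states, nH)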
5,339 | def installed(name, channel=None):
"""
Ensure that the named snap package is installed
name
The snap package
channel
Optional. The channel to install the package from.
"""
ret = {"name": name, "changes": {}, "pchanges": {}, "result": None, "comment": ""}
old = __salt__["snap.versions_installed"](name)
if not old:
if __opts__["test"]:
ret["comment"] = 'Package "{0}" would have been installed'.format(name)
ret["pchanges"]["new"] = name
ret["pchanges"]["old"] = None
ret["result"] = None
return ret
install = __salt__["snap.install"](name, channel=channel)
if install["result"]:
ret["comment"] = 'Package "{0}" was installed'.format(name)
ret["changes"]["new"] = name
ret["changes"]["old"] = None
ret["result"] = True
return ret
ret["comment"] = 'Package "{0}" failed to install'.format(name)
ret["comment"] += "\noutput:\n" + install["output"]
ret["result"] = False
return ret
# Currently snap always returns only one line?
old_channel = old[0]["tracking"]
if old_channel != channel and channel is not None:
if __opts__["test"]:
ret[
"comment"
] = 'Package "{0}" would have been switched to channel {1}'.format(
name, channel
)
ret["pchanges"]["old_channel"] = old_channel
ret["pchanges"]["new_channel"] = channel
ret["result"] = None
return ret
refresh = __salt__["snap.install"](name, channel=channel, refresh=True)
if refresh["result"]:
ret["comment"] = 'Package "{0}" was switched to channel {1}'.format(
name, channel
)
ret["pchanges"]["old_channel"] = old_channel
ret["pchanges"]["new_channel"] = channel
ret["result"] = True
return ret
ret["comment"] = 'Failed to switch Package "{0}" to channel {1}'.format(
name, channel
)
ret["comment"] += "\noutput:\n" + install["output"]
ret["result"] = False
return ret
ret["comment"] = 'Package "{0}" is already installed'.format(name)
if __opts__["test"]:
ret["result"] = None
return ret
ret["result"] = True
return ret
| def installed(name, channel=None):
"""
Ensure that the named snap package is installed
name
The snap package
channel
Optional. The channel to install the package from.
"""
ret = {"name": name, "changes": {}, "pchanges": {}, "result": None, "comment": ""}
old = __salt__["snap.versions_installed"](name)
if not old:
if __opts__["test"]:
ret["comment"] = 'Package "{0}" would have been installed'.format(name)
ret["pchanges"]["new"] = name
ret["pchanges"]["old"] = None
ret["result"] = None
return ret
install = __salt__["snap.install"](name, channel=channel)
if install["result"]:
ret["comment"] = 'Package "{0}" was installed'.format(name)
ret["changes"]["new"] = name
ret["changes"]["old"] = None
ret["result"] = True
return ret
ret["comment"] = 'Package "{0}" failed to install'.format(name)
ret["comment"] += "\noutput:\n" + install["output"]
ret["result"] = False
return ret
# Currently snap always returns only one line?
old_channel = old[0]["tracking"]
if old_channel != channel and channel is not None:
if __opts__["test"]:
ret[
"comment"
] = 'Package "{0}" would have been switched to channel {1}'.format(
name, channel
)
ret["pchanges"]["old_channel"] = old_channel
ret["pchanges"]["new_channel"] = channel
ret["result"] = None
return ret
refresh = __salt__["snap.install"](name, channel=channel, refresh=True)
if refresh["result"]:
ret["comment"] = 'Package "{0}" was switched to channel {1}'.format(
name, channel
)
ret["pchanges"]["old_channel"] = old_channel
ret["pchanges"]["new_channel"] = channel
ret["result"] = True
return ret
ret["comment"] = 'Failed to switch Package "{0}" to channel {1}'.format(
name, channel
)
ret["comment"] += "\noutput:\n" + refresh["output"]
ret["result"] = False
return ret
ret["comment"] = 'Package "{0}" is already installed'.format(name)
if __opts__["test"]:
ret["result"] = None
return ret
ret["result"] = True
return ret
|
57,697 | def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
# get the service API url
base_url = API_PATH
commands = {
'url': url_command,
'openphish-reload': reload_command,
'openphish-status': status_command,
}
hours_to_refresh = demisto.params().get('fetchIntervalHours', '1')
try:
hours_to_refresh = float(hours_to_refresh)
use_ssl = not demisto.params().get('insecure', False)
use_proxy = demisto.params().get('proxy', False)
client = Client(
url=base_url,
use_ssl=use_ssl,
use_proxy=use_proxy,
fetch_interval_hours=hours_to_refresh)
command = demisto.command()
if command == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
return_results(result)
elif command in commands:
return_results(commands[command](client, **demisto.args()))
# Log exceptions
except ValueError:
return_error('Invalid parameter was given as database refresh interval.')
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)} \n '
                     f'traceback: {traceback.format_exc()}')
| def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
# get the service API url
base_url = API_PATH
commands = {
'url': url_command,
'openphish-reload': reload_command,
'openphish-status': status_command,
}
params = demisto.params()
hours_to_refresh = params.get('fetchIntervalHours', '1')
try:
hours_to_refresh = float(hours_to_refresh)
use_ssl = not demisto.params().get('insecure', False)
use_proxy = demisto.params().get('proxy', False)
client = Client(
url=base_url,
use_ssl=use_ssl,
use_proxy=use_proxy,
fetch_interval_hours=hours_to_refresh)
command = demisto.command()
if command == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client)
return_results(result)
elif command in commands:
return_results(commands[command](client, **demisto.args()))
# Log exceptions
except ValueError:
return_error('Invalid parameter was given as database refresh interval.')
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)} \n '
                     f'traceback: {traceback.format_exc()}')
|
43,998 | def edges_to_wires(graph) -> Dict[Tuple, int]:
r"""Maps the edges of a graph to corresponding wires.
**Example**
>>> g = nx.complete_graph(4).to_directed()
>>> edges_to_wires(g)
{(0, 1): 0,
(0, 2): 1,
(0, 3): 2,
(1, 0): 3,
(1, 2): 4,
(1, 3): 5,
(2, 0): 6,
(2, 1): 7,
(2, 3): 8,
(3, 0): 9,
(3, 1): 10,
(3, 2): 11}
>>> g = rx.generators.directed_mesh_graph(4, [0,1,2,3])
>>> edges_to_wires(g)
{(0, 1): 0,
(0, 2): 1,
(0, 3): 2,
(1, 0): 3,
(1, 2): 4,
(1, 3): 5,
(2, 0): 6,
(2, 1): 7,
(2, 3): 8,
(3, 0): 9,
(3, 1): 10,
(3, 2): 11}
Args:
graph (nx.Graph or rx.Py(Di)Graph): the graph specifying possible edges
Returns:
Dict[Tuple, int]: a mapping from graph edges to wires
"""
if isinstance(graph, nx.Graph):
return {edge: i for i, edge in enumerate(graph.edges)}
elif isinstance(graph, (rx.PyGraph, rx.PyDiGraph)):
gnodes = graph.nodes()
return {
(gnodes.index(e[0]), gnodes.index(e[1])): i
for i, e in enumerate(sorted(graph.edge_list()))
}
raise ValueError(f"Input graph must be a nx.Graph, rx.Py(Di)Graph, got {type(graph).__name__}")
| def edges_to_wires(graph) -> Dict[Tuple, int]:
r"""Maps the edges of a graph to corresponding wires.
**Example**
>>> g = nx.complete_graph(4).to_directed()
>>> edges_to_wires(g)
{(0, 1): 0,
(0, 2): 1,
(0, 3): 2,
(1, 0): 3,
(1, 2): 4,
(1, 3): 5,
(2, 0): 6,
(2, 1): 7,
(2, 3): 8,
(3, 0): 9,
(3, 1): 10,
(3, 2): 11}
>>> g = rx.generators.directed_mesh_graph(4, [0,1,2,3])
>>> edges_to_wires(g)
{(0, 1): 0,
(0, 2): 1,
(0, 3): 2,
(1, 0): 3,
(1, 2): 4,
(1, 3): 5,
(2, 0): 6,
(2, 1): 7,
(2, 3): 8,
(3, 0): 9,
(3, 1): 10,
(3, 2): 11}
Args:
graph (nx.Graph or rx.Py(Di)Graph): the graph specifying possible edges
Returns:
Dict[Tuple, int]: a mapping from graph edges to wires
"""
if isinstance(graph, nx.Graph):
return {edge: i for i, edge in enumerate(graph.edges)}
if isinstance(graph, (rx.PyGraph, rx.PyDiGraph)):
gnodes = graph.nodes()
return {
(gnodes.index(e[0]), gnodes.index(e[1])): i
for i, e in enumerate(sorted(graph.edge_list()))
}
raise ValueError(f"Input graph must be a nx.Graph, rx.Py(Di)Graph, got {type(graph).__name__}")
|
23,246 | def babel_format_date(date: datetime, format: str, locale: str,
formatter: Callable = babel.dates.format_date) -> str:
if locale is None:
warnings.warn('The locale argument for babel_format_date() becomes required.',
RemovedInSphinx60Warning)
locale = 'en'
# Check if we have the tzinfo attribute. If not we cannot do any time
# related formats.
if not hasattr(date, 'tzinfo'):
formatter = babel.dates.format_date
try:
return formatter(date, format, locale=locale)
except (ValueError, babel.core.UnknownLocaleError):
# fallback to English
return formatter(date, format, locale='en')
except AttributeError:
logger.warning(__('Invalid date format. Quote the string by single quote '
'if you want to output it directly: %s'), format)
return format
| def babel_format_date(date: datetime, format: str, locale: str,
formatter: Callable = babel.dates.format_date) -> str:
if locale is None:
warnings.warn('The locale argument for babel_format_date() becomes required.',
RemovedInSphinx70Warning)
locale = 'en'
# Check if we have the tzinfo attribute. If not we cannot do any time
# related formats.
if not hasattr(date, 'tzinfo'):
formatter = babel.dates.format_date
try:
return formatter(date, format, locale=locale)
except (ValueError, babel.core.UnknownLocaleError):
# fallback to English
return formatter(date, format, locale='en')
except AttributeError:
logger.warning(__('Invalid date format. Quote the string by single quote '
'if you want to output it directly: %s'), format)
return format
|
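For context, the formatter wrapped in the record above is babel's own date API. A quick illustration with an arbitrary date and CLDR pattern (not taken from Sphinx), assuming babel is installed:

import datetime
import babel.dates

# Custom CLDR pattern with explicit locales; the locale drives the month name.
print(babel.dates.format_date(datetime.date(2021, 3, 1), "MMMM yyyy", locale="en"))  # March 2021
print(babel.dates.format_date(datetime.date(2021, 3, 1), "MMMM yyyy", locale="de"))  # März 2021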
31,367 | def is_there_private_packs_to_upload(public_index_json, private_index_path):
""" Checks if there are private packs that should be uploaded.
The check compares the private index with the public one to verify if Content commit hash of each private pack in
those files (private and public index files) are equal. If there is one private pack that has a different
content commit hash, it tells us that this pack was updated and should be uploaded. So, an upload flow should NOT
be skipped.
Args:
public_index_json (dict) : The public index file.
private_index_path : Path to where the private index is located.
Returns:
(bool) True is there is at least one private pack that should be upload.
False otherwise (i.e there are no private packs that should upload)
"""
logging.debug("Checking if there are private packs to upload")
with open(os.path.join(private_index_path, f"{GCPConfig.INDEX_NAME}.json")) as private_index_file:
private_index_json = json.load(private_index_file)
if was_private_pack_updated(private_index_json, public_index_json):
logging.debug(f"There is at least one private pack that was updated, upload should not be skipped")
return True
return False
| def is_private_packs_updated(public_index_json, private_index_path):
""" Checks if there are private packs that should be uploaded.
The check compares the private index with the public one to verify if Content commit hash of each private pack in
those files (private and public index files) are equal. If there is one private pack that has a different
content commit hash, it tells us that this pack was updated and should be uploaded. So, an upload flow should NOT
be skipped.
Args:
public_index_json (dict) : The public index file.
private_index_path : Path to where the private index is located.
Returns:
(bool) True is there is at least one private pack that should be upload.
False otherwise (i.e there are no private packs that should upload)
"""
logging.debug("Checking if there are private packs to upload")
with open(os.path.join(private_index_path, f"{GCPConfig.INDEX_NAME}.json")) as private_index_file:
private_index_json = json.load(private_index_file)
if was_private_pack_updated(private_index_json, public_index_json):
logging.debug(f"There is at least one private pack that was updated, upload should not be skipped")
return True
return False
|
29,659 | def test_nprocs_deprecation():
runner = CliRunner()
with pytest.warns(UserWarning, match="nprocs"):
try:
runner.invoke(distributed.cli.dask_worker.main, ["--nprocs=2"])
except ValueError:
# didn't pass scheduler
pass
| def test_nprocs_deprecation():
runner = CliRunner()
with pytest.warns(FutureWarning, match="renamed to --num-workers"):
try:
runner.invoke(distributed.cli.dask_worker.main, ["--nprocs=2"])
except ValueError:
# didn't pass scheduler
pass
|
25,212 | def _get_namedtuple_fields(node: nodes.Call) -> str:
"""Get and return fields of a NamedTuple in code-as-a-string.
Because the fields are represented in their code form we can
extract a node from them later on.
"""
names = []
container = None
try:
container = next(node.args[1].infer())
except (InferenceError, StopIteration) as exc:
raise UseInferenceDefault from exc
# We pass on IndexError as well try to infer 'field_names' from the keywords
except IndexError:
pass
if not container:
for keyword_node in node.keywords:
if keyword_node.arg == "field_names":
try:
container = next(keyword_node.value.infer())
except (InferenceError, StopIteration) as exc:
raise UseInferenceDefault from exc
break
if not isinstance(container, nodes.BaseContainer):
raise UseInferenceDefault
for elt in container.elts:
if isinstance(elt, nodes.Const):
names.append(elt.as_string())
continue
if not isinstance(elt, (nodes.List, nodes.Tuple)):
raise UseInferenceDefault
if len(elt.elts) != 2:
raise UseInferenceDefault
names.append(elt.elts[0].as_string())
if names:
field_names = f"({','.join(names)},)"
else:
field_names = ""
return field_names
| def _get_namedtuple_fields(node: nodes.Call) -> str:
"""Get and return fields of a NamedTuple in code-as-a-string.
Because the fields are represented in their code form we can
extract a node from them later on.
"""
names = []
container = None
try:
container = next(node.args[1].infer())
except (InferenceError, StopIteration) as exc:
raise UseInferenceDefault from exc
# We pass on IndexError as we'll try to infer 'field_names' from the keywords
except IndexError:
pass
if not container:
for keyword_node in node.keywords:
if keyword_node.arg == "field_names":
try:
container = next(keyword_node.value.infer())
except (InferenceError, StopIteration) as exc:
raise UseInferenceDefault from exc
break
if not isinstance(container, nodes.BaseContainer):
raise UseInferenceDefault
for elt in container.elts:
if isinstance(elt, nodes.Const):
names.append(elt.as_string())
continue
if not isinstance(elt, (nodes.List, nodes.Tuple)):
raise UseInferenceDefault
if len(elt.elts) != 2:
raise UseInferenceDefault
names.append(elt.elts[0].as_string())
if names:
field_names = f"({','.join(names)},)"
else:
field_names = ""
return field_names
|
35,911 | def validate_serenity_proposer_signature(state: BeaconState,
block: BaseBeaconBlock,
beacon_chain_shard_number: int,
epoch_length: int) -> None:
block_without_signature_root = block.block_without_signature_root
# TODO: Replace this root with tree hash root
proposal_root = ProposalSignedData(
state.slot,
beacon_chain_shard_number,
block_without_signature_root
).root
# Get the public key of proposer
beacon_proposer_index = get_beacon_proposer_index(state, state.slot, epoch_length)
proposer_pubkey = state.validator_registry[beacon_proposer_index].pubkey
is_valid_signature = bls.verify(
pubkey=proposer_pubkey,
message=proposal_root,
signature=block.signature,
domain=get_domain(state.fork_data, state.slot, SignatureDomain.DOMAIN_PROPOSAL)
)
if not is_valid_signature:
raise ValidationError("Invalid Proposer Signature on block")
| def validate_serenity_proposer_signature(state: BeaconState,
block: BaseBeaconBlock,
beacon_chain_shard_number: int,
epoch_length: int) -> None:
block_without_signature_root = block.block_without_signature_root
# TODO: Replace this root with tree hash root
proposal_root = ProposalSignedData(
state.slot,
beacon_chain_shard_number,
block_without_signature_root
).root
# Get the public key of proposer
beacon_proposer_index = get_beacon_proposer_index(state, state.slot, epoch_length)
proposer_pubkey = state.validator_registry[beacon_proposer_index].pubkey
is_valid_signature = bls.verify(
pubkey=proposer_pubkey,
message=proposal_root,
signature=block.signature,
domain=get_domain(state.fork_data, state.slot, SignatureDomain.DOMAIN_PROPOSAL),
)
if not is_valid_signature:
raise ValidationError("Invalid Proposer Signature on block")
|
39,662 | def main():
module = ForemanEntityApypieAnsibleModule(
argument_spec=dict(
name=dict(required=True),
operatingsystems=dict(type='list'),
state=dict(default='present', choices=['present', 'absent']),
),
supports_check_mode=True,
)
(entity_dict, state) = module.parse_params()
module.connect()
if 'operatingsystems' in entity_dict:
entity_dict['operatingsystems'] = module.find_resources('operatingsystems', entity_dict['operatingsystems'], thin=True)
entity = module.find_resource_by_name('architectures', name=entity_dict['name'], failsafe=True)
changed = module.ensure_resource_state('architectures', entity_dict, entity, state, name_map)
module.exit_json(changed=changed)
| def main():
module = ForemanEntityApypieAnsibleModule(
argument_spec=dict(
name=dict(required=True),
operatingsystems=dict(type='list'),
state=dict(default='present', choices=['present', 'absent']),
),
supports_check_mode=True,
)
(entity_dict, state) = module.parse_params()
module.connect()
if 'operatingsystems' in entity_dict:
entity_dict['operatingsystems'] = module.find_resources('operatingsystems', entity_dict['operatingsystems'], thin=True)
entity = module.find_resource_by_name('architectures', name=entity_dict['name'], failsafe=True)
changed = module.ensure_resource_state('architectures', entity_dict, entity, module.state, name_map)
module.exit_json(changed=changed)
|
59,153 | def test_qc_metrics():
adata = AnnData(X=sparse.csr_matrix(np.random.binomial(100, 0.005, (1000, 1000))))
adata.var["mito"] = np.concatenate(
(np.ones(100, dtype=bool), np.zeros(900, dtype=bool))
)
adata.var["negative"] = False
sc.pp.calculate_qc_metrics(adata, qc_vars=["mito", "negative"], inplace=True)
assert (adata.obs["n_genes_by_counts"] < adata.shape[1]).all()
assert (
adata.obs["n_genes_by_counts"] >= adata.obs["log1p_n_genes_by_counts"]
).all()
assert (adata.obs["total_counts"] == np.ravel(adata.X.sum(axis=1))).all()
assert (adata.obs["total_counts"] >= adata.obs["log1p_total_counts"]).all()
assert (
adata.obs["total_counts_mito"] >= adata.obs["log1p_total_counts_mito"]
).all()
assert (adata.obs["total_counts_negative"] == 0).all()
assert (
adata.obs["pct_counts_in_top_50_genes"]
<= adata.obs["pct_counts_in_top_100_genes"]
).all()
for col in filter(lambda x: "negative" not in x, adata.obs.columns):
assert (adata.obs[col] >= 0).all() # Values should be positive or zero
assert (adata.obs[col] != 0).any().all() # Nothing should be all zeros
if col.startswith("pct_counts_in_top"):
assert (adata.obs[col] <= 100).all()
assert (adata.obs[col] >= 0).all()
for col in adata.var.columns:
assert (adata.var[col] >= 0).all()
assert (adata.var["mean_counts"] < np.ravel(adata.X.max(axis=0).todense())).all()
assert (adata.var["mean_counts"] >= adata.var["log1p_mean_counts"]).all()
assert (adata.var["total_counts"] >= adata.var["log1p_total_counts"]).all()
# Should return the same thing if run again
old_obs, old_var = adata.obs.copy(), adata.var.copy()
sc.pp.calculate_qc_metrics(adata, qc_vars=["mito", "negative"], inplace=True)
assert set(adata.obs.columns) == set(old_obs.columns)
assert set(adata.var.columns) == set(old_var.columns)
for col in adata.obs:
assert np.allclose(adata.obs[col], old_obs[col])
for col in adata.var:
assert np.allclose(adata.var[col], old_var[col])
# with log1p=False
adata = AnnData(X=sparse.csr_matrix(np.random.binomial(100, 0.005, (1000, 1000))))
adata.var["mito"] = np.concatenate(
(np.ones(100, dtype=bool), np.zeros(900, dtype=bool))
)
adata.var["negative"] = False
sc.pp.calculate_qc_metrics(adata, qc_vars=["mito", "negative"], log1p=False, inplace=True)
assert "log1p_total_counts" not in adata.obs.keys()
| def test_qc_metrics():
adata = AnnData(X=sparse.csr_matrix(np.random.binomial(100, 0.005, (1000, 1000))))
adata.var["mito"] = np.concatenate(
(np.ones(100, dtype=bool), np.zeros(900, dtype=bool))
)
adata.var["negative"] = False
sc.pp.calculate_qc_metrics(adata, qc_vars=["mito", "negative"], inplace=True)
assert (adata.obs["n_genes_by_counts"] < adata.shape[1]).all()
assert (
adata.obs["n_genes_by_counts"] >= adata.obs["log1p_n_genes_by_counts"]
).all()
assert (adata.obs["total_counts"] == np.ravel(adata.X.sum(axis=1))).all()
assert (adata.obs["total_counts"] >= adata.obs["log1p_total_counts"]).all()
assert (
adata.obs["total_counts_mito"] >= adata.obs["log1p_total_counts_mito"]
).all()
assert (adata.obs["total_counts_negative"] == 0).all()
assert (
adata.obs["pct_counts_in_top_50_genes"]
<= adata.obs["pct_counts_in_top_100_genes"]
).all()
for col in filter(lambda x: "negative" not in x, adata.obs.columns):
assert (adata.obs[col] >= 0).all() # Values should be positive or zero
assert (adata.obs[col] != 0).any().all() # Nothing should be all zeros
if col.startswith("pct_counts_in_top"):
assert (adata.obs[col] <= 100).all()
assert (adata.obs[col] >= 0).all()
for col in adata.var.columns:
assert (adata.var[col] >= 0).all()
assert (adata.var["mean_counts"] < np.ravel(adata.X.max(axis=0).todense())).all()
assert (adata.var["mean_counts"] >= adata.var["log1p_mean_counts"]).all()
assert (adata.var["total_counts"] >= adata.var["log1p_total_counts"]).all()
# Should return the same thing if run again
old_obs, old_var = adata.obs.copy(), adata.var.copy()
sc.pp.calculate_qc_metrics(adata, qc_vars=["mito", "negative"], inplace=True)
assert set(adata.obs.columns) == set(old_obs.columns)
assert set(adata.var.columns) == set(old_var.columns)
for col in adata.obs:
assert np.allclose(adata.obs[col], old_obs[col])
for col in adata.var:
assert np.allclose(adata.var[col], old_var[col])
# with log1p=False
adata = AnnData(X=sparse.csr_matrix(np.random.binomial(100, 0.005, (1000, 1000))))
adata.var["mito"] = np.concatenate(
(np.ones(100, dtype=bool), np.zeros(900, dtype=bool))
)
adata.var["negative"] = False
sc.pp.calculate_qc_metrics(adata, qc_vars=["mito", "negative"], log1p=False, inplace=True)
assert not np.any(adata.obs.columns.str.startswith("log1p_"))
assert not np.any(adata.var.columns.str.startswith("log1p_"))
|
41,100 | def _get_real_path(path, frozen_with):
if getattr(sys, 'frozen', False):
if frozen_with == 'cx_freeze':
return os.path.join(sys.executable, path)
elif frozen_with == 'PyInstaller':
return os.path.join(sys._MEIPASS, path)
else:
return os.path.abspath(path)
| def _get_real_path(path, frozen_with):
if getattr(sys, 'frozen', False):
return os.path.join(getattr(sys, '_MEIPASS', sys.executable), path)
else:
return os.path.abspath(path)
|
40,023 | def binary_encoding(v: Variable, upper_bound: int) -> BinaryQuadraticModel:
"""Return a binary quadratic model encoding an integer.
Args:
v: The integer variable label.
upper_bound: The upper bound on the integer value (inclusive).
Returns:
A binary quadratic model. The variables in the BQM will be labelled
with tuples of length two or three. The first value of the tuple will
be the variable label ``v`` provided. The second value will the the
coefficient in the integer encoding. One of the variables will
have a third value in the tuple, ``'msb'``. This is the variable
occupying the position of the most significant bit. Though it may
actually be a smaller number in order to enforce the ``upper_bound``.
Example:
>>> bqm = dimod.generators.binary_encoding('i', 6)
>>> bqm
BinaryQuadraticModel({('i', 1): 1.0, ('i', 2): 2.0, ('i', 3, 'msb'): 3.0}, {}, 0.0, 'BINARY')
We can use a sample to restore the original integer value.
>>> sample = {('i', 1): 1, ('i', 2): 0, ('i', 3, 'msb'): 1}
>>> bqm.energy(sample)
4.0
>>> sum(v[1]*val for v, val in sample.items()) + bqm.offset
4.0
If you wish to encode integers with a lower bound, you can use the
binary quadratic model's :attr:`~BinaryQuadraticModel.offset` attribute.
>>> i = dimod.generators.binary_encoding('i', 10) + 5 # integer in [5, 15]
References:
[1]: Sahar Karimi, Pooya Ronagh (2017), Practical Integer-to-Binary
Mapping for Quantum Annealers. arxiv.org:1706.01945.
"""
# note: the paper above also gives a nice way to handle bounded coefficients
# if we want to do that in the future.
if upper_bound <= 1:
raise ValueError("upper_bound must be a greater than or equal to 1, "
f"received {upper_bound}")
upper_bound = math.floor(upper_bound)
bqm = BinaryQuadraticModel(Vartype.BINARY)
max_pow = math.floor(math.log2(upper_bound))
for exp in range(max_pow):
val = 1 << exp
bqm.set_linear((v, val), val)
else:
val = upper_bound - ((1 << max_pow) - 1)
bqm.set_linear((v, val, 'msb'), val)
return bqm
| def binary_encoding(v: Variable, upper_bound: int) -> BinaryQuadraticModel:
"""Return a binary quadratic model encoding an integer.
Args:
v: The integer variable label.
upper_bound: The upper bound on the integer value (inclusive).
Returns:
A binary quadratic model. The variables in the BQM will be labelled
with tuples of length two or three. The first value of the tuple will
be the variable label ``v`` provided. The second value will be the
coefficient in the integer encoding. One of the variables will
have a third value in the tuple, ``'msb'``. This is the variable
occupying the position of the most significant bit. Though it may
actually be a smaller number in order to enforce the ``upper_bound``.
Example:
>>> bqm = dimod.generators.binary_encoding('i', 6)
>>> bqm
BinaryQuadraticModel({('i', 1): 1.0, ('i', 2): 2.0, ('i', 3, 'msb'): 3.0}, {}, 0.0, 'BINARY')
We can use a sample to restore the original integer value.
>>> sample = {('i', 1): 1, ('i', 2): 0, ('i', 3, 'msb'): 1}
>>> bqm.energy(sample)
4.0
>>> sum(v[1]*val for v, val in sample.items()) + bqm.offset
4.0
If you wish to encode integers with a lower bound, you can use the
binary quadratic model's :attr:`~BinaryQuadraticModel.offset` attribute.
>>> i = dimod.generators.binary_encoding('i', 10) + 5 # integer in [5, 15]
References:
[1]: Sahar Karimi, Pooya Ronagh (2017), Practical Integer-to-Binary
Mapping for Quantum Annealers. arxiv.org:1706.01945.
"""
# note: the paper above also gives a nice way to handle bounded coefficients
# if we want to do that in the future.
if upper_bound <= 1:
raise ValueError("upper_bound must be a greater than or equal to 1, "
f"received {upper_bound}")
upper_bound = math.floor(upper_bound)
bqm = BinaryQuadraticModel(Vartype.BINARY)
max_pow = math.floor(math.log2(upper_bound))
for exp in range(max_pow):
val = 1 << exp
bqm.set_linear((v, val), val)
else:
val = upper_bound - ((1 << max_pow) - 1)
bqm.set_linear((v, val, 'msb'), val)
return bqm
|
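A small standalone check (no dimod required) of the coefficient pattern built by the loop above: powers of two plus one capped 'msb' weight, chosen so the switchable bits cover exactly 0..upper_bound. The upper_bound used here is arbitrary:

import itertools
import math

upper_bound = 6                                    # hypothetical bound
max_pow = math.floor(math.log2(upper_bound))
coeffs = [1 << exp for exp in range(max_pow)]      # 1, 2, 4, ... below the cap
coeffs.append(upper_bound - ((1 << max_pow) - 1))  # the capped 'msb' weight
assert coeffs == [1, 2, 3]
# Every integer in [0, upper_bound] is reachable by switching bits on and off.
reachable = {
    sum(c * b for c, b in zip(coeffs, bits))
    for bits in itertools.product((0, 1), repeat=len(coeffs))
}
assert reachable == set(range(upper_bound + 1))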
24,655 | def null_point_find(
x_arr=None,
y_arr=None,
z_arr=None,
u_arr=None,
v_arr=None,
w_arr=None,
maxiter=500,
err=1e-10,
):
r"""
Returns an array of ``~plasmapy.analysis.nullpoint.NullPoint` object, representing
the null points of the given vector space.
.. note::
Please note that this functionality is still under development
and the API may change in future releases.
Parameters
----------
x_arr: array_like
The array representing the coordinates in the x-dimension.
If not given, then range values are used to construct a
uniform array on that interval.
y_arr: array_like
The array representing the coordinates in the y-dimension.
If not given, then range values are used to construct a
uniform array on that interval.
z_arr: array_like
The array representing the coordinates in the z-dimension.
If not given, then range values are used to construct a
uniform array on that interval.
u_arr: array_like
A 3D array containing the x-component of the vector values for the vector
space. If not given, the vector values are generated over the vector space
using the function func.
v_arr: array_like
A 3D array containing the y-component of the vector values for the vector
space. If not given, the vector values are generated over the vector space
using the function func.
w_arr: array_like
A 3D array containing the z-component of the vector values for the vector
space. If not given, the vector values are generated over the vector space
using the function func.
maxiter: int
The maximum iterations of the Newton-Raphson method.
The default value is 500.
err: float
The threshold/error that determines if convergence has occurred
using the Newton-Raphson method.
The default value is ``1e-10``.
Returns
-------
array_like of `~plasmapy.analysis.nullpoint.NullPoint`
An array of `~plasmapy.analysis.nullpoint.NullPoint` objects
representing the null points of the given vector space.
Notes
-----
This method is described by :cite:t:`haynes:2007`.
"""
# Constructing the vspace
vspace = _vector_space(
x_arr,
y_arr,
z_arr,
None,
None,
None,
u_arr,
v_arr,
w_arr,
None,
None,
)
return _vspace_iterator(vspace, maxiter, err)
| def null_point_find(
x_arr=None,
y_arr=None,
z_arr=None,
u_arr=None,
v_arr=None,
w_arr=None,
maxiter=500,
err=1e-10,
):
r"""
Returns an array of `~plasmapy.analysis.nullpoint.NullPoint` object, representing
the null points of the given vector space.
.. note::
Please note that this functionality is still under development
and the API may change in future releases.
Parameters
----------
x_arr: array_like
The array representing the coordinates in the x-dimension.
If not given, then range values are used to construct a
uniform array on that interval.
y_arr: array_like
The array representing the coordinates in the y-dimension.
If not given, then range values are used to construct a
uniform array on that interval.
z_arr: array_like
The array representing the coordinates in the z-dimension.
If not given, then range values are used to construct a
uniform array on that interval.
u_arr: array_like
A 3D array containing the x-component of the vector values for the vector
space. If not given, the vector values are generated over the vector space
using the function func.
v_arr: array_like
A 3D array containing the y-component of the vector values for the vector
space. If not given, the vector values are generated over the vector space
using the function func.
w_arr: array_like
A 3D array containing the z-component of the vector values for the vector
space. If not given, the vector values are generated over the vector space
using the function func.
maxiter: int
The maximum iterations of the Newton-Raphson method.
The default value is 500.
err: float
The threshold/error that determines if convergence has occurred
using the Newton-Raphson method.
The default value is ``1e-10``.
Returns
-------
array_like of `~plasmapy.analysis.nullpoint.NullPoint`
An array of `~plasmapy.analysis.nullpoint.NullPoint` objects
representing the null points of the given vector space.
Notes
-----
This method is described by :cite:t:`haynes:2007`.
"""
# Constructing the vspace
vspace = _vector_space(
x_arr,
y_arr,
z_arr,
None,
None,
None,
u_arr,
v_arr,
w_arr,
None,
None,
)
return _vspace_iterator(vspace, maxiter, err)
|
58,096 | def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
demisto.debug("Enter MAIN method")
args = demisto.args()
command = demisto.command()
username = demisto.params().get('auth', {}).get('identifier')
password = demisto.params().get('auth', {}).get('password')
account_settings_id = demisto.params().get('account_settings_id')
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
demisto.debug(f'Command being called is {demisto.command()}')
try:
client = Client(
username=username,
password=password,
account_settings_id=account_settings_id,
verify=verify_certificate,
proxy=proxy)
if command == 'test-module':
return_results(test_module(client))
elif command == 'ip':
return_results(ip_lookup(client, args))
elif command == 'url':
return_results(url_lookup(client, args))
elif command == 'domain':
return_results(domain_lookup(client, args))
elif command == 'iboss-add-entity-to-block-list':
return_results(add_entity_to_block_list_command(client, args))
elif command == 'iboss-remove-entity-from-block-list':
return_results(remove_entity_from_block_list_command(client, args))
elif command == 'iboss-add-entity-to-allow-list':
return_results(add_entity_to_allow_list_command(client, args))
elif command == 'iboss-remove-entity-from-allow-list':
return_results(remove_entity_from_allow_list_command(client, args))
else:
raise NotImplementedError(f"Command {command} is not implemented.")
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {command} command.\nError:\n{str(e)}')
| def main() -> None:
"""main function, parses params and runs command functions
:return:
:rtype:
"""
demisto.debug("Enter MAIN method")
args = demisto.args()
params = demisto.params()
command = demisto.command()
username = params.get('auth', {}).get('identifier')
password = params.get('auth', {}).get('password')
account_settings_id = params.get('account_settings_id')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
demisto.debug(f'Command being called is {command}')
try:
client = Client(
username=username,
password=password,
account_settings_id=account_settings_id,
verify=verify_certificate,
proxy=proxy)
if command == 'test-module':
return_results(test_module(client))
elif command == 'ip':
return_results(ip_lookup(client, args))
elif command == 'url':
return_results(url_lookup(client, args))
elif command == 'domain':
return_results(domain_lookup(client, args))
elif command == 'iboss-add-entity-to-block-list':
return_results(add_entity_to_block_list_command(client, args))
elif command == 'iboss-remove-entity-from-block-list':
return_results(remove_entity_from_block_list_command(client, args))
elif command == 'iboss-add-entity-to-allow-list':
return_results(add_entity_to_allow_list_command(client, args))
elif command == 'iboss-remove-entity-from-allow-list':
return_results(remove_entity_from_allow_list_command(client, args))
else:
raise NotImplementedError(f"Command {command} is not implemented.")
# Log exceptions and return errors
except Exception as e:
demisto.error(traceback.format_exc()) # print the traceback
return_error(f'Failed to execute {command} command.\nError:\n{str(e)}')
|
21,205 | def test_requires_dists_none(pygoogleearth_zip_sdist):
# type: (str) -> None
assert [] == list(requires_dists(pygoogleearth_zip_sdist))
with example_distribution("MarkupSafe-1.0-cp27-cp27mu-linux_x86_64.whl") as (wheel_path, dist):
assert [] == list(requires_dists(wheel_path))
assert [] == list(requires_dists(dist))
# This tests a strange case detailed here:
# https://github.com/pantsbuild/pex/issues/1201#issuecomment-791715585
with downloaded_sdist("et-xmlfile==1.0.1") as sdist, warnings.catch_warnings(
record=True
) as events:
assert [] == list(requires_dists(sdist))
assert len(events) == 1
warning = events[0]
assert PEXWarning == warning.category
assert (
dedent(
"""\
Ignoring 1 `Requires` field in {sdist} metadata:
1.) Requires: python (>=2.6.0)
You may have issues using using the 'et_xmlfile' distribution as a result.
More information on this workaround can be found here:
https://github.com/pantsbuild/pex/issues/1201#issuecomment-791715585
"""
).format(sdist=sdist)
== str(warning.message)
)
| def test_requires_dists_none(pygoogleearth_zip_sdist):
# type: (str) -> None
assert [] == list(requires_dists(pygoogleearth_zip_sdist))
with example_distribution("MarkupSafe-1.0-cp27-cp27mu-linux_x86_64.whl") as (wheel_path, dist):
assert [] == list(requires_dists(wheel_path))
assert [] == list(requires_dists(dist))
# This tests a strange case detailed here:
# https://github.com/pantsbuild/pex/issues/1201#issuecomment-791715585
with downloaded_sdist("et-xmlfile==1.0.1") as sdist, warnings.catch_warnings(
record=True
) as events:
assert [] == list(requires_dists(sdist))
assert len(events) == 1
warning = events[0]
assert PEXWarning == warning.category
assert (
dedent(
"""\
Ignoring 1 `Requires` field in {sdist} metadata:
1.) Requires: python (>=2.6.0)
You may have issues using the 'et_xmlfile' distribution as a result.
More information on this workaround can be found here:
https://github.com/pantsbuild/pex/issues/1201#issuecomment-791715585
"""
).format(sdist=sdist)
== str(warning.message)
)
|
55,383 | def test_list_artifacts_with_absolute_path(ftp_mock):
artifact_root_path = "/experiment_id/run_id/"
repo = FTPArtifactRepository("ftp://test_ftp"+artifact_root_path)
repo.get_ftp_client = MagicMock()
call_mock = MagicMock(return_value=ftp_mock)
repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
# mocked file structure
# |- file
# |- model
# |- model.pb
file_path = "/file"
file_name = os.path.basename(file_path)
dir_path = "/model"
dir_name = os.path.basename(dir_path)
file_size = 678
ftp_mock.cwd = MagicMock(side_effect=[None, ftplib.error_perm, None])
ftp_mock.nlst = MagicMock(return_value=[file_path, dir_path])
ftp_mock.size = MagicMock(return_value=file_size)
artifacts = repo.list_artifacts(path=None)
ftp_mock.nlst.assert_called_once_with(artifact_root_path)
ftp_mock.size.assert_called_once_with(artifact_root_path + file_name)
assert len(artifacts) == 2
assert artifacts[0].path == file_name
assert artifacts[0].is_dir is False
assert artifacts[0].file_size == file_size
assert artifacts[1].path == dir_name
assert artifacts[1].is_dir is True
assert artifacts[1].file_size is None
| def test_list_artifacts_with_absolute_path(ftp_mock):
artifact_root_path = "/experiment_id/run_id/"
repo = FTPArtifactRepository("ftp://test_ftp"+artifact_root_path)
repo.get_ftp_client = MagicMock()
call_mock = MagicMock(return_value=ftp_mock)
repo.get_ftp_client.return_value = MagicMock(__enter__=call_mock)
# mocked file structure
# |- file
# |- model
# |- model.pb
file_path = "file"
dir_path = "model"
file_size = 678
ftp_mock.cwd = MagicMock(side_effect=[None, ftplib.error_perm, None])
ftp_mock.nlst = MagicMock(return_value=[
posixpath.join(artifact_root_path, file_path),
posixpath.join(artifact_root_path, dir_path)
])
ftp_mock.size = MagicMock(return_value=file_size)
artifacts = repo.list_artifacts(path=None)
ftp_mock.nlst.assert_called_once_with(artifact_root_path)
    ftp_mock.size.assert_called_once_with(artifact_root_path + file_path)
    assert len(artifacts) == 2
    assert artifacts[0].path == file_path
    assert artifacts[0].is_dir is False
    assert artifacts[0].file_size == file_size
    assert artifacts[1].path == dir_path
assert artifacts[1].is_dir is True
assert artifacts[1].file_size is None
|
44,807 | def _is_available_on_pypi(package, version=None):
"""
Returns True if the specified package version is available on PyPI.
"""
resp = requests.get("https://pypi.python.org/pypi/{}/json".format(package))
if not resp.ok:
return False
dist_files = resp.json()["releases"].get(version)
return (
dist_files is not None # specified version exists
and (len(dist_files) > 0) # at least one distribution file exists
        and not dist_files[0].get("yanked", False)  # specified version is not yanked
)
| def _is_available_on_pypi(package, version):
"""
Returns True if the specified package version is available on PyPI.
"""
resp = requests.get("https://pypi.python.org/pypi/{}/json".format(package))
if not resp.ok:
return False
dist_files = resp.json()["releases"].get(version)
return (
dist_files is not None # specified version exists
and (len(dist_files) > 0) # at least one distribution file exists
        and not dist_files[0].get("yanked", False)  # specified version is not yanked
)
|
50,572 | def _clip_gdf_with_mask(gdf, mask):
"""Clip geometry to the polygon/rectangle extent.
Clip an input GeoDataFrame to the polygon extent of the polygon
parameter.
Parameters
----------
gdf : GeoDataFrame, GeoSeries
Dataframe to clip.
mask : (Multi)Polygon, tuple
Reference polygon/rectangle for clipping.
Returns
-------
GeoDataFrame
The returned GeoDataFrame is a clipped subset of gdf
that intersects with polygon/rectangle.
"""
clipping_by_rectangle = isinstance(mask, tuple)
intersection_polygon = mask
if clipping_by_rectangle:
intersection_polygon = box(*mask)
gdf_sub = gdf.iloc[gdf.sindex.query(intersection_polygon, predicate="intersects")]
# For performance reasons points don't need to be intersected with poly
non_point_mask = gdf_sub.geom_type != "Point"
if not non_point_mask.any():
# only points, directly return
return gdf_sub
# Clip the data with the polygon
if isinstance(gdf_sub, GeoDataFrame):
clipped = gdf_sub.copy()
if clipping_by_rectangle:
clipped.loc[
non_point_mask, clipped._geometry_column_name
] = gdf_sub.geometry.values[non_point_mask].clip_by_rect(*mask)
else:
clipped.loc[
non_point_mask, clipped._geometry_column_name
] = gdf_sub.geometry.values[non_point_mask].intersection(mask)
else:
# GeoSeries
clipped = gdf_sub.copy()
if clipping_by_rectangle:
clipped[non_point_mask] = gdf_sub.values[non_point_mask].clip_by_rect(*mask)
else:
clipped[non_point_mask] = gdf_sub.values[non_point_mask].intersection(mask)
if clipping_by_rectangle:
# clip_by_rect might return empty geometry collections in edge cases
clipped = clipped[~clipped.is_empty]
return clipped
| def _clip_gdf_with_mask(gdf, mask):
"""Clip geometry to the polygon/rectangle extent.
Clip an input GeoDataFrame to the polygon extent of the polygon
parameter.
Parameters
----------
gdf : GeoDataFrame, GeoSeries
Dataframe to clip.
mask : (Multi)Polygon, tuple
Reference polygon/rectangle for clipping.
Returns
-------
GeoDataFrame
The returned GeoDataFrame is a clipped subset of gdf
that intersects with polygon/rectangle.
"""
clipping_by_rectangle = isinstance(mask, tuple)
if clipping_by_rectangle:
intersection_polygon = box(*mask)
else:
intersection_polygon = mask
gdf_sub = gdf.iloc[gdf.sindex.query(intersection_polygon, predicate="intersects")]
# For performance reasons points don't need to be intersected with poly
non_point_mask = gdf_sub.geom_type != "Point"
if not non_point_mask.any():
# only points, directly return
return gdf_sub
# Clip the data with the polygon
if isinstance(gdf_sub, GeoDataFrame):
clipped = gdf_sub.copy()
if clipping_by_rectangle:
clipped.loc[
non_point_mask, clipped._geometry_column_name
] = gdf_sub.geometry.values[non_point_mask].clip_by_rect(*mask)
else:
clipped.loc[
non_point_mask, clipped._geometry_column_name
] = gdf_sub.geometry.values[non_point_mask].intersection(mask)
else:
# GeoSeries
clipped = gdf_sub.copy()
if clipping_by_rectangle:
clipped[non_point_mask] = gdf_sub.values[non_point_mask].clip_by_rect(*mask)
else:
clipped[non_point_mask] = gdf_sub.values[non_point_mask].intersection(mask)
if clipping_by_rectangle:
# clip_by_rect might return empty geometry collections in edge cases
clipped = clipped[~clipped.is_empty]
return clipped
|
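A usage sketch of the public entry point that reaches this helper, geopandas.clip; the geometries and column values are invented, and the bounding-box-tuple form assumes a geopandas version recent enough to route it through clip_by_rect as the helper does:

import geopandas
from shapely.geometry import Point, Polygon, box

gdf = geopandas.GeoDataFrame(
    {"name": ["pt", "square"]},
    geometry=[Point(0.5, 0.5), Polygon([(0, 0), (3, 0), (3, 3), (0, 3)])],
)
clipped_poly = geopandas.clip(gdf, box(0, 0, 1, 1))  # polygon mask
clipped_rect = geopandas.clip(gdf, (0, 0, 1, 1))     # (minx, miny, maxx, maxy) rectangle
print(clipped_poly.geometry)
print(clipped_rect.geometry)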
8,025 | def write_map(
filename,
m,
nest=False,
dtype=None,
fits_IDL=True,
coord=None,
partial=False,
column_names=None,
column_units=None,
extra_header=(),
overwrite=False,
):
"""Writes a healpix map into a healpix FITS file.
WARNING: starting from healpy 1.15.0, if you do not specify `dtype`,
the map will be written to disk with the same precision it is stored in memory.
Previously, by default `healpy` wrote maps in `float32`.
To reproduce the same behaviour of `healpy` 1.14.0 and below, set `dtype=np.float32`.
Parameters
----------
filename : str
the fits file name
m : array or sequence of 3 arrays
the map to write. Possibly a sequence of 3 maps of same size.
They will be considered as I, Q, U maps.
Supports masked maps, see the `ma` function.
nest : bool, optional
If True, ordering scheme is assumed to be NESTED, otherwise, RING. Default: RING.
The map ordering is not modified by this function, the input map array
should already be in the desired ordering (run `ud_grade` beforehand).
fits_IDL : bool, optional
If True, reshapes columns in rows of 1024, otherwise all the data will
go in one column. Default: True
coord : str
The coordinate system, typically 'E' for Ecliptic, 'G' for Galactic or 'C' for
Celestial (equatorial)
partial : bool, optional
If True, fits file is written as a partial-sky file with explicit indexing.
Otherwise, implicit indexing is used. Default: False.
column_names : str or list
Column name or list of column names, if None here the default column names based on
the number of columns:
1 : "TEMPERATURE",
2 : ["Q_POLARISATION", "U_POLARISATION"],
3 : ["TEMPERATURE", "Q_POLARISATION", "U_POLARISATION"],
6 : ["II", "IQ", "IU", "QQ", "QU", "UU"]
COLUMN_1, COLUMN_2... otherwise (FITS is 1-based)
column_units : str or list
Units for each column, or same units for all columns.
extra_header : list
Extra records to add to FITS header.
dtype: np.dtype or list of np.dtypes, optional
The datatype in which the columns will be stored. Will be converted
internally from the numpy datatype to the fits convention. If a list,
the length must correspond to the number of map arrays.
Default: use the data type of the input array(s)
(WARNING: this changed in 1.15.0, previous versions saved in float32
by default)
overwrite : bool, optional
If True, existing file is silently overwritten. Otherwise trying to write
an existing file raises an OSError (IOError for Python 2).
"""
if not hasattr(m, "__len__"):
raise TypeError("The map must be a sequence")
m = pixelfunc.ma_to_array(m)
if pixelfunc.maptype(m) == 0: # a single map is converted to a list
m = [m]
# check the dtype and convert it
if dtype is None:
dtype = [x.dtype for x in m]
logging.info("setting the output map dtype to %s", str(dtype))
try:
fitsformat = []
for curr_dtype in dtype:
fitsformat.append(getformat(curr_dtype))
except TypeError:
# dtype is not iterable
fitsformat = [getformat(dtype)] * len(m)
if column_names is None:
column_names = standard_column_names.get(
len(m), ["COLUMN_%d" % n for n in range(1, len(m) + 1)]
)
else:
assert len(column_names) == len(m), "Length column_names != number of maps"
if column_units is None or isinstance(column_units, str):
column_units = [column_units] * len(m)
# maps must have same length
assert len(set(map(len, m))) == 1, "Maps must have same length"
nside = pixelfunc.npix2nside(len(m[0]))
if nside < 0:
raise ValueError("Invalid healpix map : wrong number of pixel")
cols = []
if partial:
fits_IDL = False
mask = pixelfunc.mask_good(m[0])
pix = np.where(mask)[0]
if len(pix) == 0:
raise ValueError("Invalid healpix map : empty partial map")
m = [mm[mask] for mm in m]
ff = getformat(np.min_scalar_type(-pix.max()))
if ff is None:
ff = "I"
cols.append(pf.Column(name="PIXEL", format=ff, array=pix, unit=None))
for cn, cu, mm, curr_fitsformat in zip(column_names, column_units, m, fitsformat):
if len(mm) > 1024 and fits_IDL:
# I need an ndarray, for reshape:
mm2 = np.asarray(mm)
cols.append(
pf.Column(
name=cn,
format="1024%s" % curr_fitsformat,
array=mm2.reshape(mm2.size // 1024, 1024),
unit=cu,
)
)
else:
cols.append(
pf.Column(name=cn, format="%s" % curr_fitsformat, array=mm, unit=cu)
)
tbhdu = pf.BinTableHDU.from_columns(cols)
# add needed keywords
tbhdu.header["PIXTYPE"] = ("HEALPIX", "HEALPIX pixelisation")
if nest:
ordering = "NESTED"
else:
ordering = "RING"
tbhdu.header["ORDERING"] = (
ordering,
"Pixel ordering scheme, either RING or NESTED",
)
if coord:
tbhdu.header["COORDSYS"] = (
coord,
"Ecliptic, Galactic or Celestial (equatorial)",
)
tbhdu.header["EXTNAME"] = ("xtension", "name of this binary table extension")
tbhdu.header["NSIDE"] = (nside, "Resolution parameter of HEALPIX")
if not partial:
tbhdu.header["FIRSTPIX"] = (0, "First pixel # (0 based)")
tbhdu.header["LASTPIX"] = (
pixelfunc.nside2npix(nside) - 1,
"Last pixel # (0 based)",
)
tbhdu.header["INDXSCHM"] = (
"EXPLICIT" if partial else "IMPLICIT",
"Indexing: IMPLICIT or EXPLICIT",
)
tbhdu.header["OBJECT"] = (
"PARTIAL" if partial else "FULLSKY",
"Sky coverage, either FULLSKY or PARTIAL",
)
# FIXME: In modern versions of Pyfits, header.update() understands a
# header as an argument, and headers can be concatenated with the `+'
# operator.
for args in extra_header:
tbhdu.header[args[0]] = args[1:]
# Add str to convert pathlib.Path into str
# Due to https://github.com/astropy/astropy/issues/10594
tbhdu.writeto(str(filename), overwrite=overwrite)
| def write_map(
filename,
m,
nest=False,
dtype=None,
fits_IDL=True,
coord=None,
partial=False,
column_names=None,
column_units=None,
extra_header=(),
overwrite=False,
):
"""Writes a healpix map into a healpix FITS file.
WARNING: starting from healpy 1.15.0, if you do not specify `dtype`,
the map will be written to disk with the same precision it is stored in memory.
Previously, by default `healpy` wrote maps in `float32`.
To reproduce the same behaviour of `healpy` 1.14.0 and below, set `dtype=np.float32`.
Parameters
----------
filename : str
the fits file name
m : array or sequence of 3 arrays
the map to write. Possibly a sequence of 3 maps of same size.
They will be considered as I, Q, U maps.
Supports masked maps, see the `ma` function.
nest : bool, optional
If True, ordering scheme is assumed to be NESTED, otherwise, RING. Default: RING.
The map ordering is not modified by this function, the input map array
should already be in the desired ordering (run `ud_grade` beforehand).
fits_IDL : bool, optional
If True, reshapes columns in rows of 1024, otherwise all the data will
go in one column. Default: True
coord : str
The coordinate system, typically 'E' for Ecliptic, 'G' for Galactic or 'C' for
Celestial (equatorial)
partial : bool, optional
If True, fits file is written as a partial-sky file with explicit indexing.
Otherwise, implicit indexing is used. Default: False.
column_names : str or list
        Column name or list of column names; if None, default column names are used based on
        the number of columns:
1 : "TEMPERATURE",
2 : ["Q_POLARISATION", "U_POLARISATION"],
3 : ["TEMPERATURE", "Q_POLARISATION", "U_POLARISATION"],
6 : ["II", "IQ", "IU", "QQ", "QU", "UU"]
COLUMN_1, COLUMN_2... otherwise (FITS is 1-based)
column_units : str or list
Units for each column, or same units for all columns.
extra_header : list
Extra records to add to FITS header.
dtype: np.dtype or list of np.dtypes, optional
The datatype in which the columns will be stored. Will be converted
internally from the numpy datatype to the fits convention. If a list,
the length must correspond to the number of map arrays.
Default: use the data type of the input array(s)
(WARNING: this changed in 1.15.0, previous versions saved in float32
by default)
overwrite : bool, optional
If True, existing file is silently overwritten. Otherwise trying to write
an existing file raises an OSError (IOError for Python 2).
"""
if not hasattr(m, "__len__"):
raise TypeError("The map must be a sequence")
m = pixelfunc.ma_to_array(m)
if pixelfunc.maptype(m) == 0: # a single map is converted to a list
m = [m]
# check the dtype and convert it
if dtype is None:
dtype = [x.dtype for x in m]
logging.info("setting the output map dtype to %s", dtype)
try:
fitsformat = []
for curr_dtype in dtype:
fitsformat.append(getformat(curr_dtype))
except TypeError:
# dtype is not iterable
fitsformat = [getformat(dtype)] * len(m)
if column_names is None:
column_names = standard_column_names.get(
len(m), ["COLUMN_%d" % n for n in range(1, len(m) + 1)]
)
else:
assert len(column_names) == len(m), "Length column_names != number of maps"
if column_units is None or isinstance(column_units, str):
column_units = [column_units] * len(m)
# maps must have same length
assert len(set(map(len, m))) == 1, "Maps must have same length"
nside = pixelfunc.npix2nside(len(m[0]))
if nside < 0:
raise ValueError("Invalid healpix map : wrong number of pixel")
cols = []
if partial:
fits_IDL = False
mask = pixelfunc.mask_good(m[0])
pix = np.where(mask)[0]
if len(pix) == 0:
raise ValueError("Invalid healpix map : empty partial map")
m = [mm[mask] for mm in m]
ff = getformat(np.min_scalar_type(-pix.max()))
if ff is None:
ff = "I"
cols.append(pf.Column(name="PIXEL", format=ff, array=pix, unit=None))
for cn, cu, mm, curr_fitsformat in zip(column_names, column_units, m, fitsformat):
if len(mm) > 1024 and fits_IDL:
# I need an ndarray, for reshape:
mm2 = np.asarray(mm)
cols.append(
pf.Column(
name=cn,
format="1024%s" % curr_fitsformat,
array=mm2.reshape(mm2.size // 1024, 1024),
unit=cu,
)
)
else:
cols.append(
pf.Column(name=cn, format="%s" % curr_fitsformat, array=mm, unit=cu)
)
tbhdu = pf.BinTableHDU.from_columns(cols)
# add needed keywords
tbhdu.header["PIXTYPE"] = ("HEALPIX", "HEALPIX pixelisation")
if nest:
ordering = "NESTED"
else:
ordering = "RING"
tbhdu.header["ORDERING"] = (
ordering,
"Pixel ordering scheme, either RING or NESTED",
)
if coord:
tbhdu.header["COORDSYS"] = (
coord,
"Ecliptic, Galactic or Celestial (equatorial)",
)
tbhdu.header["EXTNAME"] = ("xtension", "name of this binary table extension")
tbhdu.header["NSIDE"] = (nside, "Resolution parameter of HEALPIX")
if not partial:
tbhdu.header["FIRSTPIX"] = (0, "First pixel # (0 based)")
tbhdu.header["LASTPIX"] = (
pixelfunc.nside2npix(nside) - 1,
"Last pixel # (0 based)",
)
tbhdu.header["INDXSCHM"] = (
"EXPLICIT" if partial else "IMPLICIT",
"Indexing: IMPLICIT or EXPLICIT",
)
tbhdu.header["OBJECT"] = (
"PARTIAL" if partial else "FULLSKY",
"Sky coverage, either FULLSKY or PARTIAL",
)
# FIXME: In modern versions of Pyfits, header.update() understands a
# header as an argument, and headers can be concatenated with the `+'
# operator.
for args in extra_header:
tbhdu.header[args[0]] = args[1:]
# Add str to convert pathlib.Path into str
# Due to https://github.com/astropy/astropy/issues/10594
tbhdu.writeto(str(filename), overwrite=overwrite)
|
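# A standalone sketch of the EXPLICIT partial-sky indexing that write_map
# produces above: only observed pixels are stored, next to a PIXEL column
# holding their indices. Plain numpy only; the UNSEEN sentinel and the
# mask behaviour are assumptions standing in for the healpy helpers.
import numpy as np

UNSEEN = -1.6375e30            # assumed sentinel for unobserved pixels
m = np.full(48, UNSEEN)        # a tiny nside=2 map (48 pixels), mostly empty
m[[3, 7, 11]] = [1.0, 2.5, -0.3]

mask = m > -1e30               # stand-in for pixelfunc.mask_good(m)
pix = np.where(mask)[0]        # the PIXEL column: indices of observed pixels
values = m[mask]               # the data column written alongside it
print(pix, values)             # indices [3 7 11] and their three values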
15,006 | def _serial_from_status(status):
"""Find the best serialvalue from the status."""
serial = status.get("device.serial") or status.get("ups.serial")
if serial and (serial.lower() == "unknown" or re.search(r"^0+$", serial)):
return None
return serial
| def _serial_from_status(status):
"""Find the best serialvalue from the status."""
serial = status.get("device.serial") or status.get("ups.serial")
if serial and (serial.lower() == "unknown" or serial.count("0") == len(serial)):
return None
return serial
|
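# A small sketch of the inputs the serial check above is meant to reject;
# for non-empty strings the regex form and the count() form agree.
import re

for serial in ("ABC123", "unknown", "Unknown", "0000000000", ""):
    rejected_regex = bool(serial) and (serial.lower() == "unknown" or bool(re.search(r"^0+$", serial)))
    rejected_count = bool(serial) and (serial.lower() == "unknown" or serial.count("0") == len(serial))
    print(repr(serial), rejected_regex, rejected_count)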
57,864 | def add_notes(incident_id, comment):
body = {'text': {'format': 'text', 'content': comment}}
CLIENT.post('/incidents/' + str(incident_id) + '/comments', body) # type: ignore
return 'The note was added successfully.'
| def add_notes(incident_id, comment):
body = {
'text': {
'format': 'text',
'content': comment
}
}
CLIENT.post('/incidents/' + str(incident_id) + '/comments', body) # type: ignore
return 'The note was added successfully.'
|
42,416 | def crop_image(raster, geoms, all_touched=True):
"""Crop a single file using geometry objects.
Parameters
----------
raster : rasterio.io.DatasetReader object
The rasterio object to be cropped.
geoms : geopandas geodataframe or list of polygons
The spatial polygon boundaries in GeoJSON-like dict format
to be used to crop the image. All data outside of the polygon
boundaries will be set to nodata and/or removed from the image.
all_touched : bool (default=True)
Include a pixel in the mask if it touches any of the
shapes. If False, include a pixel only if its center is within one of
the shapes, or if it is selected by Bresenham's line algorithm.
(from rasterio)
Returns
----------
tuple
out_image: cropped numpy array
A numpy array that is cropped to the geoms object
extent with shape (bands, rows, columns)
out_meta: dict
A dictionary containing updated metadata for the cropped raster,
including extent (shape elements) and transform properties.
Example
-------
>>> import geopandas as gpd
>>> import rasterio as rio
>>> import earthpy.spatial as es
>>> from earthpy.io import path_to_example
>>> # Clip an RGB image to the extent of Rocky Mountain National Park
>>> rmnp = gpd.read_file(path_to_example("rmnp.shp"))
>>> with rio.open(path_to_example("rmnp-rgb.tif")) as src:
... in_image = src.read()
... out_image, out_meta = es.crop_image(src, rmnp)
>>> in_image.shape
(3, 373, 485)
>>> out_image.shape
(3, 265, 281)
"""
if isinstance(geoms, gpd.geodataframe.GeoDataFrame):
clip_extent = [extent_to_json(geoms)]
else:
clip_extent = geoms
out_image, out_transform = mask(
raster, clip_extent, crop=True, all_touched=all_touched
)
out_meta = raster.meta.copy()
out_meta.update(
{
"driver": "GTiff",
"height": out_image.shape[1],
"width": out_image.shape[2],
"transform": out_transform,
}
)
return out_image, out_meta
| def crop_image(raster, geoms, all_touched=True):
"""Crop a single file using geometry objects.
Parameters
----------
raster : rasterio.io.DatasetReader object
The rasterio object to be cropped.
geoms : geopandas geodataframe or list of polygons
The spatial polygon boundaries in GeoJSON-like dict format
to be used to crop the image. All data outside of the polygon
boundaries will be set to nodata and/or removed from the image.
all_touched : bool (default=True)
Include a pixel in the mask if it touches any of the
shapes. If False, include a pixel only if its center is within one of
the shapes, or if it is selected by Bresenham's line algorithm.
(from rasterio)
Returns
----------
tuple
out_image: cropped numpy array
A numpy array that is cropped to the geoms object
extent with shape (bands, rows, columns)
out_meta: dict
A dictionary containing updated metadata for the cropped raster,
including extent (shape elements) and transform properties.
Example
-------
>>> import geopandas as gpd
>>> import rasterio as rio
>>> import earthpy.spatial as es
>>> from earthpy.io import path_to_example
>>> # Clip an RGB image to the extent of Rocky Mountain National Park
>>> rmnp = gpd.read_file(path_to_example("rmnp.shp"))
>>> with rio.open(path_to_example("rmnp-rgb.tif")) as src:
... in_image = src.read()
... out_image, out_meta = es.crop_image(src, rmnp)
>>> src_raster.shape[1:3]
(3, 373, 485)
>>> out_image.shape
(3, 265, 281)
"""
if isinstance(geoms, gpd.geodataframe.GeoDataFrame):
clip_extent = [extent_to_json(geoms)]
else:
clip_extent = geoms
out_image, out_transform = mask(
raster, clip_extent, crop=True, all_touched=all_touched
)
out_meta = raster.meta.copy()
out_meta.update(
{
"driver": "GTiff",
"height": out_image.shape[1],
"width": out_image.shape[2],
"transform": out_transform,
}
)
return out_image, out_meta
|
27,556 | def parse_new_changelog(changelog_path):
"""Parse changelog data from specified new-format file."""
if not os.path.exists(changelog_path):
return None
changelog_list = None
for encoding_name in ('utf_8', 'utf_16'):
try:
with codecs.open(changelog_path, 'r', encoding=encoding_name) as changelog_file:
# Generate match object with fields from all matching lines
matches = re.findall(
r"^-\s?([0-9a-f]{40})\s(\d{4,4}-\d{2,2}-\d{2,2})\s(.*)\s\[(.*)\]\s*$",
changelog_file.read(), re.MULTILINE)
log.debug("Parsed {} changelog lines from {}".format(len(matches), changelog_file))
changelog_list = [{
"hash": entry[0],
"date": entry[1],
"subject": entry[2],
"author": entry[3],
} for entry in matches]
except UnicodeError:
log.debug('Failed to parse log file %s with encoding %s' % (changelog_path, encoding_name))
continue
except Exception:
log.warning("Parse error reading {}".format(changelog_path), exc_info=1)
return None
return changelog_list
| def parse_new_changelog(changelog_path):
"""Parse changelog data from specified new-format file."""
if not os.path.exists(changelog_path):
return None
changelog_list = None
for encoding_name in ('utf_8', 'utf_16'):
try:
with codecs.open(changelog_path, 'r', encoding=encoding_name) as changelog_file:
# Generate match object with fields from all matching lines
matches = re.findall(
r"^-\s?([0-9a-f]{40})\s(\d{4,4}-\d{2,2}-\d{2,2})\s(.*)\s\[(.*)\]\s*$",
changelog_file.read(), re.MULTILINE)
log.debug("Parsed {} changelog lines from {}".format(len(matches), changelog_path))
changelog_list = [{
"hash": entry[0],
"date": entry[1],
"subject": entry[2],
"author": entry[3],
} for entry in matches]
except UnicodeError:
log.debug('Failed to parse log file %s with encoding %s' % (changelog_path, encoding_name))
continue
except Exception:
log.warning("Parse error reading {}".format(changelog_path), exc_info=1)
return None
return changelog_list
|
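# A hedged example of what the changelog regex above extracts from a single
# new-format line; the hash, date, subject and author below are made up.
import re

sample = "- " + "a1b2c3d4e5" * 4 + " 2021-03-04 Fix crash when loading project [Jane Doe]"
pattern = r"^-\s?([0-9a-f]{40})\s(\d{4,4}-\d{2,2}-\d{2,2})\s(.*)\s\[(.*)\]\s*$"
print(re.findall(pattern, sample, re.MULTILINE))
# [('a1b2c3d4e5a1b2c3d4e5a1b2c3d4e5a1b2c3d4e5', '2021-03-04',
#   'Fix crash when loading project', 'Jane Doe')]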
43,791 | def pauli_mult_with_phase(pauli_1, pauli_2, wire_map=None):
r"""Multiply two Pauli words together including the global phase.
Two Pauli operations can be multiplied together by taking the additive
OR of their binary symplectic representations. The phase is computed by
looking at the number of times we have the products XY, YZ, or ZX (adds a
phase of :math:`i`), or YX, ZY, XZ (adds a phase of :math:`-i`).
Args:
pauli_1 (.Operation): A Pauli word.
pauli_2 (.Operation): A Pauli word to multiply with the first one.
wire_map (dict[Union[str, int], int]): dictionary containing all wire labels used in the Pauli
word as keys, and unique integer labels as their values. If no wire map is
provided, the map will be constructed from the set of wires acted on
by the input Pauli words.
Returns:
tuple[.Operation, complex]: The product of ``pauli_1`` and ``pauli_2``, and the
global phase.
**Example**
This function works the same as ``pauli_mult`` but also returns the global
phase accumulated as a result of the Pauli product rules
:math:`\sigma_i \sigma_j = i \sigma_k`.
>>> from pennylane.pauli import pauli_mult_with_phase
>>> pauli_1 = qml.PauliX(0) @ qml.PauliZ(1)
>>> pauli_2 = qml.PauliY(0) @ qml.PauliZ(1)
>>> product, phase = pauli_mult_with_phase(pauli_1, pauli_2)
>>> product
PauliZ(wires=[0])
>>> phase
1j
"""
# If no wire map is specified, generate one from the union of wires
# in both Paulis.
if wire_map is None:
wire_labels = set(pauli_1.wires.labels + pauli_2.wires.labels)
wire_map = {label: i for i, label in enumerate(wire_labels)}
# Get the product; use our earlier function
pauli_product = pauli_mult(pauli_1, pauli_2, wire_map)
pauli_1_names = [pauli_1.name] if isinstance(pauli_1.name, str) else pauli_1.name
pauli_2_names = [pauli_2.name] if isinstance(pauli_2.name, str) else pauli_2.name
pauli_1_placeholder = 0
pauli_2_placeholder = 0
phase = 1
for wire in wire_map.keys():
if wire in pauli_1.wires:
pauli_1_op_name = pauli_1_names[pauli_1_placeholder]
pauli_1_placeholder += 1
else:
pauli_1_op_name = "Identity"
if wire in pauli_2.wires:
pauli_2_op_name = pauli_2_names[pauli_2_placeholder]
pauli_2_placeholder += 1
else:
pauli_2_op_name = "Identity"
# If we have identities anywhere we don't pick up a phase
if pauli_1_op_name == "Identity" or pauli_2_op_name == "Identity":
continue
# Likewise, no additional phase if the Paulis are the same
if pauli_1_op_name == pauli_2_op_name:
continue
# Use Pauli commutation rules to determine the phase
pauli_ordering = (pauli_1_op_name, pauli_2_op_name)
pos_phases = [("PauliX", "PauliY"), ("PauliY", "PauliZ"), ("PauliZ", "PauliX")]
if pauli_ordering in pos_phases:
phase *= 1j
else:
phase *= -1j
return pauli_product, phase
| def pauli_mult_with_phase(pauli_1, pauli_2, wire_map=None):
r"""Multiply two Pauli words together including the global phase.
Two Pauli operations can be multiplied together by taking the additive
OR of their binary symplectic representations. The phase is computed by
looking at the number of times we have the products XY, YZ, or ZX (adds a
phase of :math:`i`), or YX, ZY, XZ (adds a phase of :math:`-i`).
Args:
pauli_1 (.Operation): A Pauli word.
pauli_2 (.Operation): A Pauli word to multiply with the first one.
wire_map (dict[Union[str, int], int]): dictionary containing all wire labels used in the Pauli
word as keys, and unique integer labels as their values. If no wire map is
provided, the map will be constructed from the set of wires acted on
by the input Pauli words.
Returns:
tuple[.Operation, complex]: The product of ``pauli_1`` and ``pauli_2``, and the
global phase.
**Example**
This function works the same as ``pauli_mult`` but also returns the global
phase accumulated as a result of the Pauli product rules
:math:`\sigma_i \sigma_j = i \sigma_k`.
>>> from pennylane.grouping import pauli_mult_with_phase
>>> pauli_1 = qml.PauliX(0) @ qml.PauliZ(1)
>>> pauli_2 = qml.PauliY(0) @ qml.PauliZ(1)
>>> product, phase = pauli_mult_with_phase(pauli_1, pauli_2)
>>> product
PauliZ(wires=[0])
>>> phase
1j
"""
# If no wire map is specified, generate one from the union of wires
# in both Paulis.
if wire_map is None:
wire_labels = set(pauli_1.wires.labels + pauli_2.wires.labels)
wire_map = {label: i for i, label in enumerate(wire_labels)}
# Get the product; use our earlier function
pauli_product = pauli_mult(pauli_1, pauli_2, wire_map)
pauli_1_names = [pauli_1.name] if isinstance(pauli_1.name, str) else pauli_1.name
pauli_2_names = [pauli_2.name] if isinstance(pauli_2.name, str) else pauli_2.name
pauli_1_placeholder = 0
pauli_2_placeholder = 0
phase = 1
for wire in wire_map.keys():
if wire in pauli_1.wires:
pauli_1_op_name = pauli_1_names[pauli_1_placeholder]
pauli_1_placeholder += 1
else:
pauli_1_op_name = "Identity"
if wire in pauli_2.wires:
pauli_2_op_name = pauli_2_names[pauli_2_placeholder]
pauli_2_placeholder += 1
else:
pauli_2_op_name = "Identity"
# If we have identities anywhere we don't pick up a phase
if pauli_1_op_name == "Identity" or pauli_2_op_name == "Identity":
continue
# Likewise, no additional phase if the Paulis are the same
if pauli_1_op_name == pauli_2_op_name:
continue
# Use Pauli commutation rules to determine the phase
pauli_ordering = (pauli_1_op_name, pauli_2_op_name)
pos_phases = [("PauliX", "PauliY"), ("PauliY", "PauliZ"), ("PauliZ", "PauliX")]
if pauli_ordering in pos_phases:
phase *= 1j
else:
phase *= -1j
return pauli_product, phase
|
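# A minimal sketch of the single-wire phase bookkeeping described in the
# docstring above: XY, YZ and ZX contribute +i, the reversed orderings -i,
# and identical or identity factors contribute no phase. Plain Python,
# independent of PennyLane.
def single_wire_phase(op1, op2):
    if "Identity" in (op1, op2) or op1 == op2:
        return 1
    pos = [("PauliX", "PauliY"), ("PauliY", "PauliZ"), ("PauliZ", "PauliX")]
    return 1j if (op1, op2) in pos else -1j

# X(0)Z(1) times Y(0)Z(1): wire 0 gives +i (XY), wire 1 gives 1 (ZZ) -> total +i
print(single_wire_phase("PauliX", "PauliY") * single_wire_phase("PauliZ", "PauliZ"))  # 1j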
36,185 | def custom404_view(request, exception):
"""
    This view handles which 404 template to render, based on
which host the request was a 404 for. We do this, because
wagtail does not allow us to (currently) specify 404 pages
using the site admin UI, so we need to rely on the Django
methodology for handling 404 responses.
It would be great if we could pull the "which domain uses
which 404 template" information from the wagtail "sites"
configuration, but there is no way to know which template
belongs to which site, as "a site" is not tied to "a django
app" in the wagtail way of things.
"""
if request.site.hostname == 'mozillafestival.org':
html = render(request, 'mozfest/404.html')
return HttpResponseNotFound(html)
else:
html = render(request, '404.html')
return HttpResponseNotFound(html)
| def custom404_view(request, exception):
"""
    This view handles which 404 template to render, based on
which host the request was a 404 for. We do this, because
wagtail does not allow us to (currently) specify 404 pages
using the site admin UI, so we need to rely on the Django
methodology for handling 404 responses.
It would be great if we could pull the "which domain uses
which 404 template" information from the wagtail "sites"
configuration, but there is no way to know which template
belongs to which site, as "a site" is not tied to "a django
app" in the wagtail way of things.
"""
if request.site.hostname == 'mozillafestival.org':
html = render(request, 'mozfest/404.html')
return HttpResponseNotFound(html)
else:
html = render(request, '404.html')
return HttpResponseNotFound(html.content)
|
32,055 | def qradar_offenses_list_command(client: Client, args: Dict) -> CommandResults:
"""
Retrieves list of offenses from QRadar service.
possible arguments:
- offense_id: Retrieves details of the specific offense that corresponds to the ID given.
- range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3).
- filter: Query filter to filter results returned by QRadar service. see
https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html
for more details.
- fields: If used, will filter all fields except for the specified ones.
Use this parameter to specify which fields you would like to get back in the
response. Fields that are not explicitly named are excluded.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
offense_id = args.get('offense_id')
range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}'''
filter_ = args.get('filter')
fields = args.get('fields')
ip_enrich, asset_enrich = get_offense_enrichment(args.get('enrichment', 'None'))
# if this call fails raise an error and stop command execution
response = client.offenses_list(range_, offense_id, filter_, fields)
enriched_outputs = enrich_offenses_result(client, response, ip_enrich, asset_enrich)
final_outputs = sanitize_outputs(enriched_outputs, OFFENSE_OLD_NEW_NAMES_MAP)
headers = build_headers(['ID', 'Description', 'OffenseType', 'Status', 'Severity'],
set(OFFENSE_OLD_NEW_NAMES_MAP.values()))
return CommandResults(
readable_output=tableToMarkdown('Offenses List', final_outputs, headers=headers, removeNull=True),
outputs_prefix='QRadar.Offense',
outputs_key_field='ID',
outputs=final_outputs,
raw_response=response
)
| def qradar_offenses_list_command(client: Client, args: Dict) -> CommandResults:
"""
Retrieves list of offenses from QRadar service.
possible arguments:
- offense_id: Retrieves details of the specific offense that corresponds to the ID given.
- range: Range of offenses to return (e.g.: 0-20, 3-5, 3-3).
- filter: Query filter to filter results returned by QRadar service. see
https://www.ibm.com/support/knowledgecenter/SS42VS_SHR/com.ibm.qradarapi.doc/c_rest_api_filtering.html
for more details.
- fields: If used, will filter all fields except for the specified ones.
Use this parameter to specify which fields you would like to get back in the
response. Fields that are not explicitly named are excluded.
Args:
client (Client): QRadar client to perform the API call.
args (Dict): Demisto args.
Returns:
CommandResults.
"""
offense_id = args.get('offense_id')
range_ = f'''items={args.get('range', DEFAULT_RANGE_VALUE)}'''
filter_ = args.get('filter')
fields = args.get('fields')
ip_enrich, asset_enrich = get_offense_enrichment(args.get('enrichment', 'None'))
# if this call fails, raise an error and stop command execution
response = client.offenses_list(range_, offense_id, filter_, fields)
enriched_outputs = enrich_offenses_result(client, response, ip_enrich, asset_enrich)
final_outputs = sanitize_outputs(enriched_outputs, OFFENSE_OLD_NEW_NAMES_MAP)
headers = build_headers(['ID', 'Description', 'OffenseType', 'Status', 'Severity'],
set(OFFENSE_OLD_NEW_NAMES_MAP.values()))
return CommandResults(
readable_output=tableToMarkdown('Offenses List', final_outputs, headers=headers, removeNull=True),
outputs_prefix='QRadar.Offense',
outputs_key_field='ID',
outputs=final_outputs,
raw_response=response
)
|
31,699 | def fetch_emails_as_incidents(account_email, folder_name):
last_run = get_last_run()
excluded_ids = set(last_run.get(LAST_RUN_IDS))
try:
account = get_account(account_email)
last_emails = fetch_last_emails(account, folder_name, last_run.get(LAST_RUN_TIME), last_run.get(LAST_RUN_IDS))
incidents = []
incident = {} # type: Dict[Any, Any]
current_fetch_ids = set()
for item in last_emails:
if item.message_id:
current_fetch_ids.add(item.message_id)
incident = parse_incident_from_item(item, True)
if incident:
incidents.append(incident)
if len(incidents) >= MAX_FETCH:
break
demisto.debug(f'EWS V2 - ending fetch - got {len(incidents)} incidents.')
last_run_time = incident.get("occurred", last_run.get(LAST_RUN_TIME))
if isinstance(last_run_time, EWSDateTime):
last_run_time = last_run_time.ewsformat()
if last_run_time > LAST_RUN_TIME:
ids = current_fetch_ids
else:
ids = current_fetch_ids | excluded_ids
new_last_run = {
LAST_RUN_TIME: last_run_time,
LAST_RUN_FOLDER: folder_name,
LAST_RUN_IDS: list(ids),
ERROR_COUNTER: 0
}
demisto.setLastRun(new_last_run)
return incidents
except RateLimitError:
if LAST_RUN_TIME in last_run:
last_run[LAST_RUN_TIME] = last_run[LAST_RUN_TIME].ewsformat()
if ERROR_COUNTER not in last_run:
last_run[ERROR_COUNTER] = 0
last_run[ERROR_COUNTER] += 1
demisto.setLastRun(last_run)
if last_run[ERROR_COUNTER] > 2:
raise
return []
| def fetch_emails_as_incidents(account_email, folder_name):
last_run = get_last_run()
excluded_ids = set(last_run.get(LAST_RUN_IDS, []))
try:
account = get_account(account_email)
last_emails = fetch_last_emails(account, folder_name, last_run.get(LAST_RUN_TIME), last_run.get(LAST_RUN_IDS))
incidents = []
incident = {} # type: Dict[Any, Any]
current_fetch_ids = set()
for item in last_emails:
if item.message_id:
current_fetch_ids.add(item.message_id)
incident = parse_incident_from_item(item, True)
if incident:
incidents.append(incident)
if len(incidents) >= MAX_FETCH:
break
demisto.debug(f'EWS V2 - ending fetch - got {len(incidents)} incidents.')
last_run_time = incident.get("occurred", last_run.get(LAST_RUN_TIME))
if isinstance(last_run_time, EWSDateTime):
last_run_time = last_run_time.ewsformat()
if last_run_time > LAST_RUN_TIME:
ids = current_fetch_ids
else:
ids = current_fetch_ids | excluded_ids
new_last_run = {
LAST_RUN_TIME: last_run_time,
LAST_RUN_FOLDER: folder_name,
LAST_RUN_IDS: list(ids),
ERROR_COUNTER: 0
}
demisto.setLastRun(new_last_run)
return incidents
except RateLimitError:
if LAST_RUN_TIME in last_run:
last_run[LAST_RUN_TIME] = last_run[LAST_RUN_TIME].ewsformat()
if ERROR_COUNTER not in last_run:
last_run[ERROR_COUNTER] = 0
last_run[ERROR_COUNTER] += 1
demisto.setLastRun(last_run)
if last_run[ERROR_COUNTER] > 2:
raise
return []
|
45,698 | def extrapolate(
precip,
velocity,
timesteps,
outval=np.nan,
xy_coords=None,
allow_nonfinite_values=False,
vel_timestep=1,
**kwargs,
):
"""Apply semi-Lagrangian backward extrapolation to a two-dimensional
precipitation field.
Parameters
----------
precip: array-like or None
Array of shape (m,n) containing the input precipitation field. All
values are required to be finite by default. If set to None, only the
displacement field is returned without interpolating the inputs. This
requires that return_displacement is set to True.
velocity: array-like
Array of shape (2,m,n) containing the x- and y-components of the m*n
advection field. All values are required to be finite by default.
timesteps: int or list
If timesteps is integer, it specifies the number of time steps to
extrapolate. If a list is given, each element is the desired
extrapolation time step from the current time. The elements of the list
are required to be in ascending order.
outval: float, optional
Optional argument for specifying the value for pixels advected from
outside the domain. If outval is set to 'min', the value is taken as
the minimum value of precip.
Default: np.nan
xy_coords: ndarray, optional
Array with the coordinates of the grid dimension (2, m, n ).
* xy_coords[0]: x coordinates
* xy_coords[1]: y coordinates
By default, the *xy_coords* are computed for each extrapolation.
allow_nonfinite_values: bool, optional
If True, allow non-finite values in the precipitation and advection
fields. This option is useful if the input fields contain a radar mask
(i.e. pixels with no observations are set to nan).
Other Parameters
----------------
displacement_prev: array-like
Optional initial displacement vector field of shape (2,m,n) for the
extrapolation.
Default: None
n_iter: int
Number of inner iterations in the semi-Lagrangian scheme. If n_iter > 0,
the integration is done using the midpoint rule. Otherwise, the advection
vectors are taken from the starting point of each interval.
Default: 1
return_displacement: bool
If True, return the displacement between the initial input field and
the one obtained by integrating along the advection field.
Default: False
vel_timestep: float
The time step of the velocity field. It is assumed to have the same
        unit as the timesteps argument. Applicable if timesteps is a list.
Default: 1.
interp_order: int
The order of interpolation to use. Default: 1 (linear). Setting this
to 0 (nearest neighbor) gives the best computational performance but
may produce visible artefacts. Setting this to 3 (cubic) gives the best
ability to reproduce small-scale variability but may significantly
increase the computation time.
Returns
-------
out: array or tuple
If return_displacement=False, return a time series extrapolated fields
of shape (num_timesteps,m,n). Otherwise, return a tuple containing the
extrapolated fields and the integrated trajectory (displacement) along
the advection field.
References
----------
:cite:`GZ2002`
"""
if precip is not None and len(precip.shape) != 2:
raise ValueError("precip must be a two-dimensional array")
if len(velocity.shape) != 3:
raise ValueError("velocity must be a three-dimensional array")
if precip is not None and not allow_nonfinite_values:
if np.any(~np.isfinite(precip)):
raise ValueError("precip contains non-finite values")
if np.any(~np.isfinite(velocity)):
raise ValueError("velocity contains non-finite values")
if isinstance(timesteps, list) and not sorted(timesteps) == timesteps:
raise ValueError("timesteps is not in ascending order")
# defaults
verbose = kwargs.get("verbose", False)
displacement_prev = kwargs.get("displacement_prev", None)
n_iter = kwargs.get("n_iter", 1)
return_displacement = kwargs.get("return_displacement", False)
interp_order = kwargs.get("interp_order", 1)
if precip is None and not return_displacement:
raise ValueError("precip is None but return_displacement is False")
if "D_prev" in kwargs.keys():
warnings.warn(
"deprecated argument D_prev is ignored, use displacement_prev instead",
)
# if interp_order > 1, apply separate masking to preserve nan and
# non-precipitation values
if precip is not None and interp_order > 1:
minval = np.nanmin(precip)
mask_min = (precip > minval).astype(float)
if allow_nonfinite_values:
mask_finite = np.isfinite(precip)
precip = precip.copy()
precip[~mask_finite] = 0.0
mask_finite = mask_finite.astype(float)
prefilter = True if interp_order > 1 else False
if isinstance(timesteps, int):
timesteps = np.arange(1, timesteps + 1)
vel_timestep = 1.0
elif np.any(np.diff(timesteps) <= 0.0):
raise ValueError("the given timestep sequence is not monotonously increasing")
timestep_diff = np.hstack([[timesteps[0]], np.diff(timesteps)])
if verbose:
print("Computing the advection with the semi-lagrangian scheme.")
t0 = time.time()
if precip is not None and outval == "min":
outval = np.nanmin(precip)
if xy_coords is None:
x_values, y_values = np.meshgrid(
np.arange(velocity.shape[2]), np.arange(velocity.shape[1])
)
xy_coords = np.stack([x_values, y_values])
def interpolate_motion(displacement, velocity_inc, td):
coords_warped = xy_coords + displacement
coords_warped = [coords_warped[1, :, :], coords_warped[0, :, :]]
velocity_inc_x = ip.map_coordinates(
velocity[0, :, :],
coords_warped,
mode="nearest",
order=1,
prefilter=False,
)
velocity_inc_y = ip.map_coordinates(
velocity[1, :, :],
coords_warped,
mode="nearest",
order=1,
prefilter=False,
)
velocity_inc[0, :, :] = velocity_inc_x
velocity_inc[1, :, :] = velocity_inc_y
if n_iter > 1:
velocity_inc /= n_iter
velocity_inc *= td / vel_timestep
precip_extrap = []
if displacement_prev is None:
displacement = np.zeros((2, velocity.shape[1], velocity.shape[2]))
velocity_inc = velocity.copy() * timestep_diff[0] / vel_timestep
else:
displacement = displacement_prev.copy()
velocity_inc = np.empty(velocity.shape)
interpolate_motion(displacement, velocity_inc, timestep_diff[0])
for ti, td in enumerate(timestep_diff):
if n_iter > 0:
for k in range(n_iter):
interpolate_motion(displacement - velocity_inc / 2.0, velocity_inc, td)
displacement -= velocity_inc
interpolate_motion(displacement, velocity_inc, td)
else:
if ti > 0 or displacement_prev is not None:
interpolate_motion(displacement, velocity_inc, td)
displacement -= velocity_inc
coords_warped = xy_coords + displacement
coords_warped = [coords_warped[1, :, :], coords_warped[0, :, :]]
if precip is not None:
precip_warped = ip.map_coordinates(
precip,
coords_warped,
mode="constant",
cval=outval,
order=interp_order,
prefilter=prefilter,
)
if interp_order > 1:
mask_warped = ip.map_coordinates(
mask_min,
coords_warped,
mode="constant",
cval=0,
order=1,
prefilter=False,
)
precip_warped[mask_warped < 0.5] = minval
if allow_nonfinite_values:
mask_warped = ip.map_coordinates(
mask_finite,
coords_warped,
mode="constant",
cval=0,
order=1,
prefilter=False,
)
precip_warped[mask_warped < 0.5] = np.nan
precip_extrap.append(np.reshape(precip_warped, precip.shape))
if verbose:
print("--- %s seconds ---" % (time.time() - t0))
if precip is not None:
if not return_displacement:
return np.stack(precip_extrap)
else:
return np.stack(precip_extrap), displacement
else:
return None, displacement
| def extrapolate(
precip,
velocity,
timesteps,
outval=np.nan,
xy_coords=None,
allow_nonfinite_values=False,
vel_timestep=1,
**kwargs,
):
"""Apply semi-Lagrangian backward extrapolation to a two-dimensional
precipitation field.
Parameters
----------
precip: array-like or None
Array of shape (m,n) containing the input precipitation field. All
values are required to be finite by default. If set to None, only the
displacement field is returned without interpolating the inputs. This
requires that return_displacement is set to True.
velocity: array-like
Array of shape (2,m,n) containing the x- and y-components of the m*n
advection field. All values are required to be finite by default.
timesteps: int or list
If timesteps is integer, it specifies the number of time steps to
extrapolate. If a list is given, each element is the desired
extrapolation time step from the current time. The elements of the list
are required to be in ascending order.
outval: float, optional
Optional argument for specifying the value for pixels advected from
outside the domain. If outval is set to 'min', the value is taken as
the minimum value of precip.
Default: np.nan
xy_coords: ndarray, optional
Array with the coordinates of the grid dimension (2, m, n ).
* xy_coords[0]: x coordinates
* xy_coords[1]: y coordinates
By default, the *xy_coords* are computed for each extrapolation.
allow_nonfinite_values: bool, optional
If True, allow non-finite values in the precipitation and advection
fields. This option is useful if the input fields contain a radar mask
(i.e. pixels with no observations are set to nan).
Other Parameters
----------------
displacement_prev: array-like
Optional initial displacement vector field of shape (2,m,n) for the
extrapolation.
Default: None
n_iter: int
Number of inner iterations in the semi-Lagrangian scheme. If n_iter > 0,
the integration is done using the midpoint rule. Otherwise, the advection
vectors are taken from the starting point of each interval.
Default: 1
return_displacement: bool
If True, return the displacement between the initial input field and
the one obtained by integrating along the advection field.
Default: False
vel_timestep: float
The time step of the velocity field. It is assumed to have the same
        unit as the timesteps argument. Applicable if timesteps is a list.
Default: 1.
interp_order: int
The order of interpolation to use. Default: 1 (linear). Setting this
to 0 (nearest neighbor) gives the best computational performance but
may produce visible artefacts. Setting this to 3 (cubic) gives the best
ability to reproduce small-scale variability but may significantly
increase the computation time.
Returns
-------
out: array or tuple
If return_displacement=False, return a time series extrapolated fields
of shape (num_timesteps,m,n). Otherwise, return a tuple containing the
extrapolated fields and the integrated trajectory (displacement) along
the advection field.
References
----------
:cite:`GZ2002`
"""
if precip is not None and precip.ndim != 2:
raise ValueError("precip must be a two-dimensional array")
if len(velocity.shape) != 3:
raise ValueError("velocity must be a three-dimensional array")
if precip is not None and not allow_nonfinite_values:
if np.any(~np.isfinite(precip)):
raise ValueError("precip contains non-finite values")
if np.any(~np.isfinite(velocity)):
raise ValueError("velocity contains non-finite values")
if isinstance(timesteps, list) and not sorted(timesteps) == timesteps:
raise ValueError("timesteps is not in ascending order")
# defaults
verbose = kwargs.get("verbose", False)
displacement_prev = kwargs.get("displacement_prev", None)
n_iter = kwargs.get("n_iter", 1)
return_displacement = kwargs.get("return_displacement", False)
interp_order = kwargs.get("interp_order", 1)
if precip is None and not return_displacement:
raise ValueError("precip is None but return_displacement is False")
if "D_prev" in kwargs.keys():
warnings.warn(
"deprecated argument D_prev is ignored, use displacement_prev instead",
)
# if interp_order > 1, apply separate masking to preserve nan and
# non-precipitation values
if precip is not None and interp_order > 1:
minval = np.nanmin(precip)
mask_min = (precip > minval).astype(float)
if allow_nonfinite_values:
mask_finite = np.isfinite(precip)
precip = precip.copy()
precip[~mask_finite] = 0.0
mask_finite = mask_finite.astype(float)
prefilter = True if interp_order > 1 else False
if isinstance(timesteps, int):
timesteps = np.arange(1, timesteps + 1)
vel_timestep = 1.0
elif np.any(np.diff(timesteps) <= 0.0):
raise ValueError("the given timestep sequence is not monotonously increasing")
timestep_diff = np.hstack([[timesteps[0]], np.diff(timesteps)])
if verbose:
print("Computing the advection with the semi-lagrangian scheme.")
t0 = time.time()
if precip is not None and outval == "min":
outval = np.nanmin(precip)
if xy_coords is None:
x_values, y_values = np.meshgrid(
np.arange(velocity.shape[2]), np.arange(velocity.shape[1])
)
xy_coords = np.stack([x_values, y_values])
def interpolate_motion(displacement, velocity_inc, td):
coords_warped = xy_coords + displacement
coords_warped = [coords_warped[1, :, :], coords_warped[0, :, :]]
velocity_inc_x = ip.map_coordinates(
velocity[0, :, :],
coords_warped,
mode="nearest",
order=1,
prefilter=False,
)
velocity_inc_y = ip.map_coordinates(
velocity[1, :, :],
coords_warped,
mode="nearest",
order=1,
prefilter=False,
)
velocity_inc[0, :, :] = velocity_inc_x
velocity_inc[1, :, :] = velocity_inc_y
if n_iter > 1:
velocity_inc /= n_iter
velocity_inc *= td / vel_timestep
precip_extrap = []
if displacement_prev is None:
displacement = np.zeros((2, velocity.shape[1], velocity.shape[2]))
velocity_inc = velocity.copy() * timestep_diff[0] / vel_timestep
else:
displacement = displacement_prev.copy()
velocity_inc = np.empty(velocity.shape)
interpolate_motion(displacement, velocity_inc, timestep_diff[0])
for ti, td in enumerate(timestep_diff):
if n_iter > 0:
for k in range(n_iter):
interpolate_motion(displacement - velocity_inc / 2.0, velocity_inc, td)
displacement -= velocity_inc
interpolate_motion(displacement, velocity_inc, td)
else:
if ti > 0 or displacement_prev is not None:
interpolate_motion(displacement, velocity_inc, td)
displacement -= velocity_inc
coords_warped = xy_coords + displacement
coords_warped = [coords_warped[1, :, :], coords_warped[0, :, :]]
if precip is not None:
precip_warped = ip.map_coordinates(
precip,
coords_warped,
mode="constant",
cval=outval,
order=interp_order,
prefilter=prefilter,
)
if interp_order > 1:
mask_warped = ip.map_coordinates(
mask_min,
coords_warped,
mode="constant",
cval=0,
order=1,
prefilter=False,
)
precip_warped[mask_warped < 0.5] = minval
if allow_nonfinite_values:
mask_warped = ip.map_coordinates(
mask_finite,
coords_warped,
mode="constant",
cval=0,
order=1,
prefilter=False,
)
precip_warped[mask_warped < 0.5] = np.nan
precip_extrap.append(np.reshape(precip_warped, precip.shape))
if verbose:
print("--- %s seconds ---" % (time.time() - t0))
if precip is not None:
if not return_displacement:
return np.stack(precip_extrap)
else:
return np.stack(precip_extrap), displacement
else:
return None, displacement
|
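# A sketch of the core backward semi-Lagrangian step used above, reduced to
# one constant-velocity displacement. scipy.ndimage is imported directly here
# instead of through the module's `ip` alias; the tiny field and velocity are
# illustrative.
import numpy as np
from scipy import ndimage

precip = np.zeros((5, 5))
precip[2, 2] = 1.0                          # a single "rain" pixel
velocity = np.array([1.0, 0.0])             # (x, y) motion in pixels per step

x, y = np.meshgrid(np.arange(5), np.arange(5))
xy_coords = np.stack([x, y])

displacement = -velocity[:, None, None]     # backward step: sample upstream
coords_warped = xy_coords + displacement
coords_warped = [coords_warped[1], coords_warped[0]]   # (row, col) order

warped = ndimage.map_coordinates(precip, coords_warped, mode="constant", cval=0.0, order=1)
print(np.argwhere(warped > 0.5))            # [[2 3]]: the pixel advected +1 in x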
39,033 | def test_secretstr_is_hashable():
assert hash(SecretStr('secret'))
| def test_secretstr_is_hashable():
assert type(hash(SecretStr('secret'))) is int
|
30,620 | def get_request_args(params):
limit = try_parse_integer(request.args.get('n', params.get('edl_size', 10000)), EDL_LIMIT_ERR_MSG)
offset = try_parse_integer(request.args.get('s', 0), EDL_OFFSET_ERR_MSG)
query = request.args.get('q', params.get('indicators_query'))
strip_port = request.args.get('sp', params.get('url_port_stripping', False))
drop_invalids = request.args.get('di', params.get('drop_invalids', False))
collapse_ips = request.args.get('tr', params.get('collapse_ips', DONT_COLLAPSE))
# handle flags
if drop_invalids is not None and drop_invalids == '':
drop_invalids = True
if strip_port is not None and strip_port == '':
strip_port = True
if collapse_ips is not None and collapse_ips not in [DONT_COLLAPSE, COLLAPSE_TO_CIDR, COLLAPSE_TO_RANGES]:
collapse_ips = try_parse_integer(collapse_ips, EDL_COLLAPSE_ERR_MSG)
if collapse_ips not in [0, 1, 2]:
raise DemistoException(EDL_COLLAPSE_ERR_MSG)
collapse_options = {
0: DONT_COLLAPSE,
1: COLLAPSE_TO_RANGES,
2: COLLAPSE_TO_CIDR
}
collapse_ips = collapse_options[collapse_ips]
return RequestArguments(query, limit, offset, strip_port, drop_invalids, collapse_ips)
| def get_request_args(params):
limit = try_parse_integer(request.args.get('n', params.get('edl_size', 10000)), EDL_LIMIT_ERR_MSG)
offset = try_parse_integer(request.args.get('s', 0), EDL_OFFSET_ERR_MSG)
query = request.args.get('q', params.get('indicators_query'))
strip_port = request.args.get('sp', params.get('url_port_stripping', False))
drop_invalids = request.args.get('di', params.get('drop_invalids', False))
collapse_ips = request.args.get('tr', params.get('collapse_ips', DONT_COLLAPSE))
# handle flags
if drop_invalids is not None and drop_invalids == '':
drop_invalids = True
if strip_port == '':
strip_port = True
if collapse_ips is not None and collapse_ips not in [DONT_COLLAPSE, COLLAPSE_TO_CIDR, COLLAPSE_TO_RANGES]:
collapse_ips = try_parse_integer(collapse_ips, EDL_COLLAPSE_ERR_MSG)
if collapse_ips not in [0, 1, 2]:
raise DemistoException(EDL_COLLAPSE_ERR_MSG)
collapse_options = {
0: DONT_COLLAPSE,
1: COLLAPSE_TO_RANGES,
2: COLLAPSE_TO_CIDR
}
collapse_ips = collapse_options[collapse_ips]
return RequestArguments(query, limit, offset, strip_port, drop_invalids, collapse_ips)
|
42,970 | def gbs_params(
w: np.ndarray, wp: np.ndarray, Ud: np.ndarray, d: np.ndarray, T: float
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
r"""Converts molecular information to GBS gate parameters.
**Example usage:**
>>> w = np.array([3765.2386, 3088.1826, 1825.1799, 1416.9512, 1326.4684, 1137.0490, 629.7144])
>>> wp = np.array([3629.9472, 3064.9143, 1566.4602, 1399.6554, 1215.3421, 1190.9077, 496.2845])
>>> Ud = np.array(
>>> [
>>> [0.9934, 0.0144, 0.0153, 0.0268, 0.0638, 0.0751, -0.0428],
>>> [-0.0149, 0.9931, 0.0742, 0.0769, -0.0361, -0.0025, 0.0173],
>>> [-0.0119, -0.0916, 0.8423, 0.1799, -0.3857, 0.3074, 0.0801],
>>> [0.0381, 0.0409, -0.3403, -0.5231, -0.6679, 0.3848, 0.1142],
>>> [-0.0413, -0.0342, -0.4004, 0.7636, -0.1036, 0.4838, 0.0941],
>>> [0.0908, -0.0418, -0.0907, 0.3151, -0.5900, -0.7193, 0.1304],
>>> [-0.0325, 0.0050, -0.0206, 0.0694, -0.2018, 0.0173, -0.9759],
>>> ]
>>> )
>>> d = np.array([0.2254, 0.1469, 1.5599, -0.3784, 0.4553, -0.3439, 0.0618])
>>> T = 0.0
>>> p = gbs_params(w, wp, Ud, d, T)
Args:
w (array): normal mode frequencies of the electronic ground state (:math:`\text{cm}^{-1}`)
wp (array): normal mode frequencies of the electronic excited state (:math:`\text{cm}^{-1}`)
Ud (array): Duschinsky matrix
d (array): Duschinsky displacement vector corrected with wp
T (float): temperature
Returns:
tuple[array, array, array, array, array]: the first interferometer unitary matrix
:math:`U_{1}`, the squeezing parameters :math:`r`, the second interferometer unitary
matrix :math:`U_{2}`, the displacement parameters :math:`\alpha`, and finally the
two-mode squeezing parameters :math:`t`
"""
c = 299792458
h = 6.62607015e-34
k = 1.380649e-23
if T < 0:
raise ValueError("Temperature must be zero or positive")
elif T > 0:
t = np.arctanh(np.exp(-0.5 * h * (w * c * 100) / k / T))
else:
t = np.zeros(len(w))
U2, s, U1 = np.linalg.svd(np.diag(wp ** 0.5) @ Ud @ np.diag(w ** -0.5))
alpha = d / np.sqrt(2)
return U1, np.log(s), U2, alpha, t
| def gbs_params(
w: np.ndarray, wp: np.ndarray, Ud: np.ndarray, d: np.ndarray, T: float
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
r"""Converts molecular information to GBS gate parameters.
**Example usage:**
>>> w = np.array([3765.2386, 3088.1826, 1825.1799, 1416.9512, 1326.4684, 1137.0490, 629.7144])
>>> wp = np.array([3629.9472, 3064.9143, 1566.4602, 1399.6554, 1215.3421, 1190.9077, 496.2845])
>>> Ud = np.array(
>>> [
>>> [0.9934, 0.0144, 0.0153, 0.0268, 0.0638, 0.0751, -0.0428],
>>> [-0.0149, 0.9931, 0.0742, 0.0769, -0.0361, -0.0025, 0.0173],
>>> [-0.0119, -0.0916, 0.8423, 0.1799, -0.3857, 0.3074, 0.0801],
>>> [0.0381, 0.0409, -0.3403, -0.5231, -0.6679, 0.3848, 0.1142],
>>> [-0.0413, -0.0342, -0.4004, 0.7636, -0.1036, 0.4838, 0.0941],
>>> [0.0908, -0.0418, -0.0907, 0.3151, -0.5900, -0.7193, 0.1304],
>>> [-0.0325, 0.0050, -0.0206, 0.0694, -0.2018, 0.0173, -0.9759],
>>> ]
>>> )
>>> d = np.array([0.2254, 0.1469, 1.5599, -0.3784, 0.4553, -0.3439, 0.0618])
>>> T = 0.0
>>> p = gbs_params(w, wp, Ud, d, T)
Args:
w (array): normal mode frequencies of the electronic ground state (:math:`\text{cm}^{-1}`)
wp (array): normal mode frequencies of the electronic excited state (:math:`\text{cm}^{-1}`)
Ud (array): Duschinsky matrix
d (array): Duschinsky displacement vector corrected with wp
T (float): temperature
Returns:
tuple[array, array, array, array, array]: the first interferometer unitary matrix
:math:`U_{1}`, the squeezing parameters :math:`r`, the second interferometer unitary
matrix :math:`U_{2}`, the displacement parameters :math:`\alpha`, and finally the
two-mode squeezing parameters :math:`t`
"""
c = 299792458
h = 6.62607015e-34
k = 1.380649e-23
if T < 0:
raise ValueError("Temperature must be zero or positive")
if T > 0:
t = np.arctanh(np.exp(-0.5 * h * (w * c * 100) / k / T))
else:
t = np.zeros(len(w))
U2, s, U1 = np.linalg.svd(np.diag(wp ** 0.5) @ Ud @ np.diag(w ** -0.5))
alpha = d / np.sqrt(2)
return U1, np.log(s), U2, alpha, t
|
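# A worked example of the thermal two-mode squeezing parameters computed above,
# t = arctanh(exp(-h*nu / (2*k*T))) with nu = 100*c*w for wavenumbers in cm^-1;
# the frequencies below are illustrative, not taken from the docstring data.
import numpy as np

c = 299792458           # speed of light, m/s
h = 6.62607015e-34      # Planck constant, J*s
k = 1.380649e-23        # Boltzmann constant, J/K

w = np.array([500.0, 1500.0, 3000.0])   # normal-mode wavenumbers, cm^-1
T = 300.0                                # temperature, K
t = np.arctanh(np.exp(-0.5 * h * (w * c * 100) / (k * T)))
print(np.round(t, 4))   # higher-frequency modes get almost no thermal squeezing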
31,026 | def panorama_zone_lookup_command():
"""
    Gets the outgoing interface from the Palo Alto Firewall route table and the list of
    interfaces, then compares the two to find the zone of the destination IP
"""
dest_ip = demisto.args().get("dest_ip")
vr = demisto.args().get("virtual_router", None)
route = panorama_route_lookup(dest_ip, vr)
if not route:
demisto.results(f"Could find a matching route to {dest_ip}.")
return
interface = route["interface"]
interfaces = panorama_get_interfaces()
r = {}
if "ifnet" in interfaces["response"]["result"]:
for entry in interfaces["response"]["result"]["ifnet"]["entry"]:
if entry["name"] == interface:
if "zone" in entry:
r = {**entry, **route}
if r:
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': r,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': f'The IP {dest_ip} is in zone {r["zone"]}',
'EntryContext': {"Panorama.ZoneLookup(val.Name == obj.Name)": r} # add key -> deleted: true
})
return r
else:
demisto.results(f"Could not map {dest_ip} to zone.")
return {}
| def panorama_zone_lookup_command():
"""
    Gets the outgoing interface from the Palo Alto Firewall route table and the list of
    interfaces, then compares the two to find the zone of the destination IP
"""
dest_ip = demisto.args().get("dest_ip")
vr = demisto.args().get("virtual_router", None)
route = panorama_route_lookup(dest_ip, vr)
if not route:
demisto.results(f"Could find a matching route to {dest_ip}.")
return
interface = route.get("interface")
interfaces = panorama_get_interfaces()
r = {}
if "ifnet" in interfaces["response"]["result"]:
for entry in interfaces["response"]["result"]["ifnet"]["entry"]:
if entry["name"] == interface:
if "zone" in entry:
r = {**entry, **route}
if r:
demisto.results({
'Type': entryTypes['note'],
'ContentsFormat': formats['json'],
'Contents': r,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': f'The IP {dest_ip} is in zone {r["zone"]}',
'EntryContext': {"Panorama.ZoneLookup(val.Name == obj.Name)": r} # add key -> deleted: true
})
return r
else:
demisto.results(f"Could not map {dest_ip} to zone.")
return {}
|
6,429 | def get_partywise_advanced_payment_amount(party_type, posting_date = None, future_payment=0, company=None):
cond = "1=1"
if posting_date:
if future_payment:
cond = "DATE(creation) <= '{0}'".format(posting_date)
else:
cond = "posting_date <= '{0}'".format(posting_date)
if company:
cond += "and company = '{0}'".format(company)
data = frappe.db.sql(""" SELECT party, sum({0}) as amount
FROM `tabGL Entry`
WHERE
party_type = %s and against_voucher is null
and {1} GROUP BY party"""
.format(("credit") if party_type == "Customer" else "debit", cond) , party_type)
if data:
return frappe._dict(data)
| def get_partywise_advanced_payment_amount(party_type, posting_date = None, future_payment=0, company=None):
cond = "1=1"
if posting_date:
if future_payment:
cond = "posting_date <= '{0}' or DATE(creation) <= '{0}'".format(posting_date, posting_date)
else:
cond = "posting_date <= '{0}'".format(posting_date)
if company:
cond += "and company = '{0}'".format(company)
data = frappe.db.sql(""" SELECT party, sum({0}) as amount
FROM `tabGL Entry`
WHERE
party_type = %s and against_voucher is null
and {1} GROUP BY party"""
.format(("credit") if party_type == "Customer" else "debit", cond) , party_type)
if data:
return frappe._dict(data)
|
35,491 | def main(sm=None, pm=None):
gc.disable()
set_realtime_priority(5)
if sm is None:
sm = messaging.SubMaster(['liveLocationKalman', 'carState'], poll=['liveLocationKalman'])
if pm is None:
pm = messaging.PubMaster(['liveParameters'])
params_reader = Params()
# wait for stats about the car to come in from controls
cloudlog.info("paramsd is waiting for CarParams")
CP = car.CarParams.from_bytes(params_reader.get("CarParams", block=True))
cloudlog.info("paramsd got CarParams")
min_sr, max_sr = 0.5 * CP.steerRatio, 2.0 * CP.steerRatio
params = params_reader.get("LiveParameters")
# Check if car model matches
if params is not None:
params = json.loads(params)
if params.get('carFingerprint', None) != CP.carFingerprint:
cloudlog.info("Parameter learner found parameters for wrong car.")
params = None
# Check if starting values are sane
if params is not None:
try:
angle_offset_sane = abs(params.get('angleOffsetAverageDeg')) < 10.0
steer_ratio_sane = min_sr <= params['steerRatio'] <= max_sr
params_sane = angle_offset_sane and steer_ratio_sane
if not params_sane:
cloudlog.info(f"Invalid starting values found {params}")
params = None
except Exception as e:
cloudlog.info(f"Error reading params {params}: {str(e)}")
params = None
# TODO: cache the params with the capnp struct
if params is None:
params = {
'carFingerprint': CP.carFingerprint,
'steerRatio': CP.steerRatio,
'stiffnessFactor': 1.0,
'angleOffsetAverageDeg': 0.0,
}
cloudlog.info("Parameter learner resetting to default values")
# When driving in wet conditions the stiffness can go down, and then be too low on the next drive
# Without a way to detect this we have to reset the stiffness every drive
params['stiffnessFactor'] = 1.0
learner = ParamsLearner(CP, params['steerRatio'], params['stiffnessFactor'], math.radians(params['angleOffsetAverageDeg']))
angle_offset_average = params['angleOffsetAverageDeg']
angle_offset = angle_offset_average
while True:
sm.update()
for which in sorted(sm.updated.keys(), key=lambda x: sm.logMonoTime[x]):
if sm.updated[which] and sm.all_alive_and_valid():
t = sm.logMonoTime[which] * 1e-9
learner.handle_log(t, which, sm[which])
if sm.updated['liveLocationKalman']:
x = learner.kf.x
P = np.sqrt(learner.kf.P.diagonal())
if not all(map(math.isfinite, x)):
cloudlog.error("NaN in liveParameters estimate. Resetting to default values")
learner = ParamsLearner(CP, CP.steerRatio, 1.0, 0.0)
x = learner.kf.x
angle_offset_average = clip(math.degrees(x[States.ANGLE_OFFSET]), angle_offset_average - MAX_ANGLE_OFFSET_DELTA, angle_offset_average + MAX_ANGLE_OFFSET_DELTA)
angle_offset = clip(math.degrees(x[States.ANGLE_OFFSET] + x[States.ANGLE_OFFSET_FAST]), angle_offset - MAX_ANGLE_OFFSET_DELTA, angle_offset + MAX_ANGLE_OFFSET_DELTA)
msg = messaging.new_message('liveParameters')
msg.logMonoTime = sm.logMonoTime['carState']
liveParameters = msg.liveParameters
liveParameters.posenetValid = True
liveParameters.sensorValid = True
liveParameters.steerRatio = float(x[States.STEER_RATIO])
liveParameters.stiffnessFactor = float(x[States.STIFFNESS])
liveParameters.roll = float(x[States.ROAD_ROLL])
liveParameters.angleOffsetAverageDeg = angle_offset_average
liveParameters.angleOffsetDeg = angle_offset
liveParameters.valid = all((
abs(liveParameters.angleOffsetAverageDeg) < 10.0,
abs(liveParameters.angleOffsetDeg) < 10.0,
0.2 <= liveParameters.stiffnessFactor <= 5.0,
min_sr <= liveParameters.steerRatio <= max_sr,
))
liveParameters.steerRatioStd = float(P[States.STEER_RATIO])
liveParameters.stiffnessFactorStd = float(P[States.STIFFNESS])
liveParameters.angleOffsetAverageStd = float(P[States.ANGLE_OFFSET])
liveParameters.angleOffsetFastStd = float(P[States.ANGLE_OFFSET_FAST])
msg.valid = liveParameters.valid and sm.all_alive_and_valid()
if sm.frame % 1200 == 0: # once a minute
params = {
'carFingerprint': CP.carFingerprint,
'steerRatio': liveParameters.steerRatio,
'stiffnessFactor': liveParameters.stiffnessFactor,
'angleOffsetAverageDeg': liveParameters.angleOffsetAverageDeg,
}
put_nonblocking("LiveParameters", json.dumps(params))
pm.send('liveParameters', msg)
| def main(sm=None, pm=None):
gc.disable()
set_realtime_priority(5)
if sm is None:
sm = messaging.SubMaster(['liveLocationKalman', 'carState'], poll=['liveLocationKalman'])
if pm is None:
pm = messaging.PubMaster(['liveParameters'])
params_reader = Params()
# wait for stats about the car to come in from controls
cloudlog.info("paramsd is waiting for CarParams")
CP = car.CarParams.from_bytes(params_reader.get("CarParams", block=True))
cloudlog.info("paramsd got CarParams")
min_sr, max_sr = 0.5 * CP.steerRatio, 2.0 * CP.steerRatio
params = params_reader.get("LiveParameters")
# Check if car model matches
if params is not None:
params = json.loads(params)
if params.get('carFingerprint', None) != CP.carFingerprint:
cloudlog.info("Parameter learner found parameters for wrong car.")
params = None
# Check if starting values are sane
if params is not None:
try:
angle_offset_sane = abs(params.get('angleOffsetAverageDeg')) < 10.0
steer_ratio_sane = min_sr <= params['steerRatio'] <= max_sr
params_sane = angle_offset_sane and steer_ratio_sane
if not params_sane:
cloudlog.info(f"Invalid starting values found {params}")
params = None
except Exception as e:
cloudlog.info(f"Error reading params {params}: {str(e)}")
params = None
# TODO: cache the params with the capnp struct
if params is None:
params = {
'carFingerprint': CP.carFingerprint,
'steerRatio': CP.steerRatio,
'stiffnessFactor': 1.0,
'angleOffsetAverageDeg': 0.0,
}
cloudlog.info("Parameter learner resetting to default values")
# When driving in wet conditions the stiffness can go down, and then be too low on the next drive
# Without a way to detect this we have to reset the stiffness every drive
params['stiffnessFactor'] = 1.0
learner = ParamsLearner(CP, params['steerRatio'], params['stiffnessFactor'], math.radians(params['angleOffsetAverageDeg']))
angle_offset_average = params['angleOffsetAverageDeg']
angle_offset = angle_offset_average
while True:
sm.update()
for which in sorted(sm.updated.keys(), key=lambda x: sm.logMonoTime[x]):
if sm.updated[which] and sm.all_alive_and_valid([which]):
t = sm.logMonoTime[which] * 1e-9
learner.handle_log(t, which, sm[which])
if sm.updated['liveLocationKalman']:
x = learner.kf.x
P = np.sqrt(learner.kf.P.diagonal())
if not all(map(math.isfinite, x)):
cloudlog.error("NaN in liveParameters estimate. Resetting to default values")
learner = ParamsLearner(CP, CP.steerRatio, 1.0, 0.0)
x = learner.kf.x
angle_offset_average = clip(math.degrees(x[States.ANGLE_OFFSET]), angle_offset_average - MAX_ANGLE_OFFSET_DELTA, angle_offset_average + MAX_ANGLE_OFFSET_DELTA)
angle_offset = clip(math.degrees(x[States.ANGLE_OFFSET] + x[States.ANGLE_OFFSET_FAST]), angle_offset - MAX_ANGLE_OFFSET_DELTA, angle_offset + MAX_ANGLE_OFFSET_DELTA)
msg = messaging.new_message('liveParameters')
msg.logMonoTime = sm.logMonoTime['carState']
liveParameters = msg.liveParameters
liveParameters.posenetValid = True
liveParameters.sensorValid = True
liveParameters.steerRatio = float(x[States.STEER_RATIO])
liveParameters.stiffnessFactor = float(x[States.STIFFNESS])
liveParameters.roll = float(x[States.ROAD_ROLL])
liveParameters.angleOffsetAverageDeg = angle_offset_average
liveParameters.angleOffsetDeg = angle_offset
liveParameters.valid = all((
abs(liveParameters.angleOffsetAverageDeg) < 10.0,
abs(liveParameters.angleOffsetDeg) < 10.0,
0.2 <= liveParameters.stiffnessFactor <= 5.0,
min_sr <= liveParameters.steerRatio <= max_sr,
))
liveParameters.steerRatioStd = float(P[States.STEER_RATIO])
liveParameters.stiffnessFactorStd = float(P[States.STIFFNESS])
liveParameters.angleOffsetAverageStd = float(P[States.ANGLE_OFFSET])
liveParameters.angleOffsetFastStd = float(P[States.ANGLE_OFFSET_FAST])
msg.valid = liveParameters.valid and sm.all_alive_and_valid()
if sm.frame % 1200 == 0: # once a minute
params = {
'carFingerprint': CP.carFingerprint,
'steerRatio': liveParameters.steerRatio,
'stiffnessFactor': liveParameters.stiffnessFactor,
'angleOffsetAverageDeg': liveParameters.angleOffsetAverageDeg,
}
put_nonblocking("LiveParameters", json.dumps(params))
pm.send('liveParameters', msg)
|
45,466 | def test_additional_headers(test_client_factory):
def app(scope):
async def asgi(receive, send):
websocket = WebSocket(scope, receive=receive, send=send)
await websocket.accept(headers=[(b"additional", b"header")])
await websocket.close()
return asgi
client = test_client_factory(app)
with client.websocket_connect("/") as websocket:
websocket.additional_headers = [(b"additional", b"header")]
| def test_additional_headers(test_client_factory):
    def app(scope):
        async def asgi(receive, send):
            websocket = WebSocket(scope, receive=receive, send=send)
            await websocket.accept(headers=[(b"additional", b"header")])
            await websocket.close()
        return asgi
    client = test_client_factory(app)
    with client.websocket_connect("/") as websocket:
        assert websocket.additional_headers == [(b"additional", b"header")]
|
16,369 | def _schema_with_defaults(
username="", host="", port=80, path="/", ssl=False, verifiy_ssl=True
):
return vol.Schema(
{
vol.Required(CONF_USERNAME, default=username): str,
vol.Required(CONF_HOST, default=host): str,
vol.Required(CONF_PORT, default=port): cv.port,
vol.Required(CONF_PATH, default=path): str,
vol.Required(CONF_SSL, default=ssl): bool,
vol.Required(CONF_VERIFY_SSL, default=verifiy_ssl): bool,
},
extra=vol.ALLOW_EXTRA,
)
| def _schema_with_defaults(
username="", host="", port=80, path="/", ssl=False, verify_ssl=True
):
return vol.Schema(
{
vol.Required(CONF_USERNAME, default=username): str,
vol.Required(CONF_HOST, default=host): str,
vol.Required(CONF_PORT, default=port): cv.port,
vol.Required(CONF_PATH, default=path): str,
vol.Required(CONF_SSL, default=ssl): bool,
vol.Required(CONF_VERIFY_SSL, default=verifiy_ssl): bool,
},
extra=vol.ALLOW_EXTRA,
)
|
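Illustrative note: the modified schema above renames the misspelled verifiy_ssl parameter to verify_ssl (the default reference inside the schema must follow the rename). A hedged, standalone sketch of the same voluptuous pattern, with defaults filled in on validation; the keys and values here are placeholders, not the integration's CONF_* constants.

# Hedged sketch of a voluptuous schema with defaults; keys are placeholders.
import voluptuous as vol

schema = vol.Schema(
    {
        vol.Required("host", default="localhost"): str,
        vol.Required("port", default=80): int,
        vol.Required("verify_ssl", default=True): bool,
    },
    extra=vol.ALLOW_EXTRA,
)

print(schema({"host": "nzbget.local"}))
# {'host': 'nzbget.local', 'port': 80, 'verify_ssl': True}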
34,952 | def make_search_policies(
    search_policy, tasks, num_measures_per_round, load_model_file=None, load_log_file=None
):
    """Make a list of search policies for a list of search tasks.
    It creates one policy per task.
    Parameters
    ----------
    search_policy: Union[str, List[SearchPolicy]]
        The name of search policy.
    tasks: List[SearchTask]
        The list of all tasks
    num_measures_per_round: int
        The number of schedules to be measured at each search round.
        This should be the same as `TuningOptions.num_measures_per_round`
    load_model_file: Optional[str]
        Load pre-trained model from this file
    load_log_file: Optional[str]
        Load measurement records from this file
    Returns
    -------
    policies: List[SearchPolicy]
        The list of search policies
    """
    if search_policy == "default":
        search_policy = "sketch.xgb"
    if isinstance(search_policy, str):
        policy_type, model_type = search_policy.split(".")
        if model_type == "xgb":
            cost_model = XGBModel(num_warmup_sample=len(tasks) * num_measures_per_round)
            if load_model_file:
                logger.info("Load pretrained model...")
                cost_model.load(load_model_file)
            elif load_log_file:
                cost_model.load_log_file(load_log_file)
        elif model_type == "random":
            cost_model = RandomModel()
        else:
            raise ValueError("Invalid search policy: " + search_policy)
        if policy_type == "sketch":
            search_policies = [SketchPolicy(task, cost_model) for task in tasks]
        else:
            raise ValueError("Invalid search policy: " + search_policy)
    else:
        # check type
        assert isinstance(search_policy, (tuple, list))
        for item in search_policy:
            assert isinstance(item, SearchPolicy)
        search_policies = search_policy
    return search_policies
| def make_search_policies(
    search_policy, tasks, num_measures_per_round, load_model_file=None, load_log_file=None
):
    """Make a list of search policies for a list of search tasks.
    It creates one policy per task.
    Parameters
    ----------
    search_policy: Union[str, List[SearchPolicy]]
        The name of search policy.
    tasks: List[SearchTask]
        The list of all tasks
    num_measures_per_round: int
        The number of schedules to be measured at each search round.
        This should be the same as `TuningOptions.num_measures_per_round`
    load_model_file: Optional[str]
        Load pre-trained model from this file. If not present, the cost model will be trained from scratch.
    load_log_file: Optional[str]
        Load measurement records from this file
    Returns
    -------
    policies: List[SearchPolicy]
        The list of search policies
    """
    if search_policy == "default":
        search_policy = "sketch.xgb"
    if isinstance(search_policy, str):
        policy_type, model_type = search_policy.split(".")
        if model_type == "xgb":
            cost_model = XGBModel(num_warmup_sample=len(tasks) * num_measures_per_round)
            if load_model_file:
                logger.info("Load pretrained model...")
                cost_model.load(load_model_file)
            elif load_log_file:
                cost_model.load_log_file(load_log_file)
        elif model_type == "random":
            cost_model = RandomModel()
        else:
            raise ValueError("Invalid search policy: " + search_policy)
        if policy_type == "sketch":
            search_policies = [SketchPolicy(task, cost_model) for task in tasks]
        else:
            raise ValueError("Invalid search policy: " + search_policy)
    else:
        # check type
        assert isinstance(search_policy, (tuple, list))
        for item in search_policy:
            assert isinstance(item, SearchPolicy)
        search_policies = search_policy
    return search_policies
|
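Illustrative note: per the docstring above, the policy string is split into a policy type and a cost-model type, so "sketch.xgb" yields one SketchPolicy per task backed by a shared XGBModel. A hedged sketch of calling the helper directly; it assumes `tasks` already holds auto_scheduler SearchTask objects and that "tuning.json" is an existing record file.

# Hedged sketch: `tasks` and "tuning.json" are assumed to exist already.
num_measures_per_round = 64

# One SketchPolicy per task, all sharing a single XGBModel cost model.
policies = make_search_policies("sketch.xgb", tasks, num_measures_per_round)

# Warm-start the cost model from earlier measurement records instead.
policies = make_search_policies(
    "sketch.xgb", tasks, num_measures_per_round, load_log_file="tuning.json"
)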
50,532 | def read_postgis(
    sql,
    con,
    geom_col="geom",
    crs=None,
    index_col=None,
    coerce_float=True,
    parse_dates=None,
    params=None,
):
    """
    Returns a GeoDataFrame corresponding to the result of the query
    string, which must contain a geometry column in WKB representation.
    Parameters
    ----------
    sql : string
        SQL query to execute in selecting entries from database, or name
        of the table to read from the database.
    con : DB connection object or SQLAlchemy engine
        Active connection to the database to query.
    geom_col : string, default 'geom'
        column name to convert to shapely geometries
    crs : multiple types allowed, optional
        Coordinate Reference System to use for the returned GeoDataFrame;
        can accept anything accepted by pyproj.CRS.from_user_input()
        The following types are accepted:
        CRS WKT string
        An authority string (i.e. "epsg:4326")
        An EPSG integer code (i.e. 4326)
        A pyproj.CRS
        An object with a to_wkt method.
        PROJ string
        Dictionary of PROJ parameters
        PROJ keyword arguments for parameters
        JSON string with PROJ parameters
        For reference, a few very common projections and their EPSG codes:
        WGS84 Latitude/Longitude: "EPSG:4326"
        UTM Zones (North): "EPSG:32633"
        UTM Zones (South): "EPSG:32733"
        See geopandas/doc/source/projections.rst for more info on CRS
    See the documentation for pandas.read_sql for further explanation
    of the following parameters:
    index_col, coerce_float, parse_dates, params
    Returns
    -------
    GeoDataFrame
    Example
    -------
    PostGIS
    >>> sql = "SELECT geom, kind FROM polygons"
    SpatiaLite
    >>> sql = "SELECT ST_AsBinary(geom) AS geom, kind FROM polygons"
    >>> df = geopandas.read_postgis(sql, con)
    """
    df = pd.read_sql(
        sql,
        con,
        index_col=index_col,
        coerce_float=coerce_float,
        parse_dates=parse_dates,
        params=params,
    )
    if geom_col not in df:
        raise ValueError("Query missing geometry column '{}'".format(geom_col))
    geoms = df[geom_col].dropna()
    if not geoms.empty:
        load_geom_bytes = shapely.wkb.loads
        """Load from Python 3 binary."""
        def load_geom_buffer(x):
            """Load from Python 2 binary."""
            return shapely.wkb.loads(str(x))
        def load_geom_text(x):
            """Load from binary encoded as text."""
            return shapely.wkb.loads(str(x), hex=True)
        if sys.version_info.major < 3:
            if isinstance(geoms.iat[0], buffer):
                load_geom = load_geom_buffer
            else:
                load_geom = load_geom_text
        elif isinstance(geoms.iat[0], bytes):
            load_geom = load_geom_bytes
        else:
            load_geom = load_geom_text
        df[geom_col] = geoms = geoms.apply(load_geom)
        if crs is None:
            srid = shapely.geos.lgeos.GEOSGetSRID(geoms.iat[0]._geom)
            # if no defined SRID in geodatabase, returns SRID of 0
            if srid != 0:
                crs = "epsg:{}".format(srid)
    return GeoDataFrame(df, crs=crs, geometry=geom_col)
| def read_postgis(
    sql,
    con,
    geom_col="geom",
    crs=None,
    index_col=None,
    coerce_float=True,
    parse_dates=None,
    params=None,
):
    """
    Returns a GeoDataFrame corresponding to the result of the query
    string, which must contain a geometry column in WKB representation.
    Parameters
    ----------
    sql : string
        SQL query to execute in selecting entries from database, or name
        of the table to read from the database.
    con : DB connection object or SQLAlchemy engine
        Active connection to the database to query.
    geom_col : string, default 'geom'
        column name to convert to shapely geometries
    crs : pyproj.CRS, optional
        CRS to use for the returned GeoDataFrame. The value can be anything accepted
        by :meth:`pyproj.CRS.from_user_input() <pyproj.crs.CRS.from_user_input>`,
        such as an authority string (eg "EPSG:4326") or a WKT string.
        If not set, tries to determine CRS from the SRID associated with the
        first geometry in the database, and assigns that to all geometries.
    See the documentation for pandas.read_sql for further explanation
    of the following parameters:
    index_col, coerce_float, parse_dates, params
    Returns
    -------
    GeoDataFrame
    Example
    -------
    PostGIS
    >>> sql = "SELECT geom, kind FROM polygons"
    SpatiaLite
    >>> sql = "SELECT ST_AsBinary(geom) AS geom, kind FROM polygons"
    >>> df = geopandas.read_postgis(sql, con)
    """
    df = pd.read_sql(
        sql,
        con,
        index_col=index_col,
        coerce_float=coerce_float,
        parse_dates=parse_dates,
        params=params,
    )
    if geom_col not in df:
        raise ValueError("Query missing geometry column '{}'".format(geom_col))
    geoms = df[geom_col].dropna()
    if not geoms.empty:
        load_geom_bytes = shapely.wkb.loads
        """Load from Python 3 binary."""
        def load_geom_buffer(x):
            """Load from Python 2 binary."""
            return shapely.wkb.loads(str(x))
        def load_geom_text(x):
            """Load from binary encoded as text."""
            return shapely.wkb.loads(str(x), hex=True)
        if sys.version_info.major < 3:
            if isinstance(geoms.iat[0], buffer):
                load_geom = load_geom_buffer
            else:
                load_geom = load_geom_text
        elif isinstance(geoms.iat[0], bytes):
            load_geom = load_geom_bytes
        else:
            load_geom = load_geom_text
        df[geom_col] = geoms = geoms.apply(load_geom)
        if crs is None:
            srid = shapely.geos.lgeos.GEOSGetSRID(geoms.iat[0]._geom)
            # if no defined SRID in geodatabase, returns SRID of 0
            if srid != 0:
                crs = "epsg:{}".format(srid)
    return GeoDataFrame(df, crs=crs, geometry=geom_col)
|
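Illustrative note: a hedged usage sketch expanding the docstring's example, connecting through SQLAlchemy and letting the SRID stored in the database supply the CRS. The connection URL and table name are placeholders.

# Hedged sketch; the connection URL and table name are placeholders.
import geopandas
from sqlalchemy import create_engine

engine = create_engine("postgresql://user:password@localhost:5432/gis")
sql = "SELECT geom, kind FROM polygons"

# crs is left as None, so it is derived from the SRID of the first geometry.
df = geopandas.read_postgis(sql, engine, geom_col="geom")
print(df.crs)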
7,210 | def map_histogram(hist, min_val, max_val, n_pixels):
"""Calculate the equalized lookup table (mapping).
It does so by cumulating the input histogram.
Parameters
----------
hist : ndarray
Clipped histogram.
min_val : int
Minimum value for mapping.
max_val : int
Maximum value for mapping.
n_pixels : int
Number of pixels in the region.
Returns
-------
out : ndarray
Mapped intensity LUT.
"""
warnings.warn("map_histogram is deprecated and will be removed in version "
"0.19. Please use the rivate function _map_histogram "
"instead.", category=FutureWarning, stacklevel=2)
return _map_histogram(hist, min_val, max_val, n_pixels)
| def map_histogram(hist, min_val, max_val, n_pixels):
"""Calculate the equalized lookup table (mapping).
It does so by cumulating the input histogram.
Parameters
----------
hist : ndarray
Clipped histogram.
min_val : int
Minimum value for mapping.
max_val : int
Maximum value for mapping.
n_pixels : int
Number of pixels in the region.
Returns
-------
out : ndarray
Mapped intensity LUT.
"""
warnings.warn("map_histogram is deprecated and will be removed in version "
"0.19. Please use the private function _map_histogram "
"instead.", category=FutureWarning, stacklevel=2)
return _map_histogram(hist, min_val, max_val, n_pixels)
|
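Illustrative note: the deprecation above only renames the helper; the mapping itself is a cumulative sum of the clipped histogram scaled onto [min_val, max_val]. A hedged numpy sketch of that idea (not skimage's exact implementation):

# Hedged sketch of the equalization mapping, not skimage's exact code.
import numpy as np

def sketch_map_histogram(hist, min_val, max_val, n_pixels):
    cdf = np.cumsum(hist)  # cumulate the clipped histogram
    out = cdf * (max_val - min_val) / n_pixels + min_val
    return np.clip(out, min_val, max_val).astype(int)

hist = np.array([10, 20, 30, 40])  # clipped histogram covering 100 pixels
print(sketch_map_histogram(hist, 0, 255, hist.sum()))  # [ 25  76 153 255]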
26,350 | def run_stubtest(dist: Path) -> None:
with open(dist / "METADATA.toml") as f:
metadata = dict(tomli.loads(f.read()))
# Ignore stubs that don't support Python 3
if not has_py3_stubs(dist):
return
with tempfile.TemporaryDirectory() as tmp:
venv_dir = Path(tmp)
venv.create(venv_dir, with_pip=True, clear=True)
pip_exe = str(venv_dir / "bin" / "pip")
python_exe = str(venv_dir / "bin" / "python")
dist_version = metadata.get("version")
assert isinstance(dist_version, str)
if dist_version == "0.1":
dist_req = dist.name
elif dist_version.endswith(".*"):
dist_req = f"{dist.name}=={dist_version}"
else:
dist_req = f"{dist.name}=={dist_version}.*"
# If @tests/requirements-stubtest.txt exists, run "pip install" on it.
req_path = dist / "@tests" / "requirements-stubtest.txt"
if req_path.exists():
try:
pip_cmd = [pip_exe, "install", "-r", str(req_path)]
subprocess.run(pip_cmd, check=True, capture_output=True)
except subprocess.CalledProcessError as e:
print(f"Failed to install requirements for {dist.name}", file=sys.stderr)
print(e.stdout.decode(), file=sys.stderr)
print(e.stderr.decode(), file=sys.stderr)
raise
# We need stubtest to be able to import the package, so install mypy into the venv
# Hopefully mypy continues to not need too many dependencies
# TODO: Maybe find a way to cache these in CI
dists_to_install = [dist_req, get_mypy_req()]
dists_to_install.extend(metadata.get("requires", []))
pip_cmd = [pip_exe, "install"] + dists_to_install
print(" ".join(pip_cmd), file=sys.stderr)
try:
subprocess.run(pip_cmd, check=True, capture_output=True)
except subprocess.CalledProcessError as e:
print(f"Failed to install {dist.name}", file=sys.stderr)
print(e.stdout.decode(), file=sys.stderr)
print(e.stderr.decode(), file=sys.stderr)
raise
packages_to_check = [d.name for d in dist.iterdir() if d.is_dir() and d.name.isidentifier()]
modules_to_check = [d.stem for d in dist.iterdir() if d.is_file() and d.suffix == ".pyi"]
cmd = [
python_exe,
"-m",
"mypy.stubtest",
# Use --ignore-missing-stub, because if someone makes a correct addition, they'll need to
# also make a allowlist change and if someone makes an incorrect addition, they'll run into
# false negatives.
"--ignore-missing-stub",
# Use --custom-typeshed-dir in case we make linked changes to stdlib or _typeshed
"--custom-typeshed-dir",
str(dist.parent.parent),
*packages_to_check,
*modules_to_check,
]
allowlist_path = dist / "@tests/stubtest_allowlist.txt"
if allowlist_path.exists():
cmd.extend(["--allowlist", str(allowlist_path)])
try:
print(f"MYPYPATH={dist}", " ".join(cmd), file=sys.stderr)
subprocess.run(cmd, env={"MYPYPATH": str(dist), "MYPY_FORCE_COLOR": "1"}, check=True)
except subprocess.CalledProcessError:
print(f"stubtest failed for {dist.name}", file=sys.stderr)
print("\n\n", file=sys.stderr)
if not allowlist_path.exists():
print(
"Re-running stubtest with --generate-allowlist.\n"
f"Add the following to {allowlist_path}:"
)
subprocess.run(cmd + ["--generate-allowlist"], env={"MYPYPATH": str(dist)})
print("\n\n")
raise StubtestFailed from None
else:
print(f"stubtest succeeded for {dist.name}", file=sys.stderr)
print("\n\n", file=sys.stderr)
| def run_stubtest(dist: Path) -> None:
with open(dist / "METADATA.toml") as f:
metadata = dict(tomli.loads(f.read()))
# Ignore stubs that don't support Python 3
if not has_py3_stubs(dist):
return
with tempfile.TemporaryDirectory() as tmp:
venv_dir = Path(tmp)
venv.create(venv_dir, with_pip=True, clear=True)
pip_exe = str(venv_dir / "bin" / "pip")
python_exe = str(venv_dir / "bin" / "python")
dist_version = metadata["version"]
assert isinstance(dist_version, str)
if dist_version == "0.1":
dist_req = dist.name
elif dist_version.endswith(".*"):
dist_req = f"{dist.name}=={dist_version}"
else:
dist_req = f"{dist.name}=={dist_version}.*"
# If @tests/requirements-stubtest.txt exists, run "pip install" on it.
req_path = dist / "@tests" / "requirements-stubtest.txt"
if req_path.exists():
try:
pip_cmd = [pip_exe, "install", "-r", str(req_path)]
subprocess.run(pip_cmd, check=True, capture_output=True)
except subprocess.CalledProcessError as e:
print(f"Failed to install requirements for {dist.name}", file=sys.stderr)
print(e.stdout.decode(), file=sys.stderr)
print(e.stderr.decode(), file=sys.stderr)
raise
# We need stubtest to be able to import the package, so install mypy into the venv
# Hopefully mypy continues to not need too many dependencies
# TODO: Maybe find a way to cache these in CI
dists_to_install = [dist_req, get_mypy_req()]
dists_to_install.extend(metadata.get("requires", []))
pip_cmd = [pip_exe, "install"] + dists_to_install
print(" ".join(pip_cmd), file=sys.stderr)
try:
subprocess.run(pip_cmd, check=True, capture_output=True)
except subprocess.CalledProcessError as e:
print(f"Failed to install {dist.name}", file=sys.stderr)
print(e.stdout.decode(), file=sys.stderr)
print(e.stderr.decode(), file=sys.stderr)
raise
packages_to_check = [d.name for d in dist.iterdir() if d.is_dir() and d.name.isidentifier()]
modules_to_check = [d.stem for d in dist.iterdir() if d.is_file() and d.suffix == ".pyi"]
cmd = [
python_exe,
"-m",
"mypy.stubtest",
# Use --ignore-missing-stub, because if someone makes a correct addition, they'll need to
# also make a allowlist change and if someone makes an incorrect addition, they'll run into
# false negatives.
"--ignore-missing-stub",
# Use --custom-typeshed-dir in case we make linked changes to stdlib or _typeshed
"--custom-typeshed-dir",
str(dist.parent.parent),
*packages_to_check,
*modules_to_check,
]
allowlist_path = dist / "@tests/stubtest_allowlist.txt"
if allowlist_path.exists():
cmd.extend(["--allowlist", str(allowlist_path)])
try:
print(f"MYPYPATH={dist}", " ".join(cmd), file=sys.stderr)
subprocess.run(cmd, env={"MYPYPATH": str(dist), "MYPY_FORCE_COLOR": "1"}, check=True)
except subprocess.CalledProcessError:
print(f"stubtest failed for {dist.name}", file=sys.stderr)
print("\n\n", file=sys.stderr)
if not allowlist_path.exists():
print(
"Re-running stubtest with --generate-allowlist.\n"
f"Add the following to {allowlist_path}:"
)
subprocess.run(cmd + ["--generate-allowlist"], env={"MYPYPATH": str(dist)})
print("\n\n")
raise StubtestFailed from None
else:
print(f"stubtest succeeded for {dist.name}", file=sys.stderr)
print("\n\n", file=sys.stderr)
|
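Illustrative note: the check above can be reproduced by hand for a single stubs directory by building the same mypy.stubtest command and pointing MYPYPATH at the stubs. A hedged Python sketch mirroring the command constructed in run_stubtest; the stubs path is a placeholder, and the runtime package plus mypy are assumed to be installed already.

# Hedged sketch mirroring the stubtest invocation above; the path is a
# placeholder and the runtime package + mypy are assumed to be installed.
import subprocess
import sys
from pathlib import Path

dist = Path("stubs/requests")  # hypothetical distribution directory
cmd = [
    sys.executable, "-m", "mypy.stubtest",
    "--ignore-missing-stub",
    "--custom-typeshed-dir", str(dist.parent.parent),
    *[d.name for d in dist.iterdir() if d.is_dir() and d.name.isidentifier()],
    *[d.stem for d in dist.iterdir() if d.is_file() and d.suffix == ".pyi"],
]
subprocess.run(cmd, env={"MYPYPATH": str(dist)}, check=True)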