Dataset columns:

| column | type |
|---|---|
| identifier | string (length 1 to 155) |
| parameters | string (length 2 to 6.09k) |
| docstring | string (length 11 to 63.4k) |
| docstring_summary | string (length 0 to 63.4k) |
| function | string (length 29 to 99.8k) |
| function_tokens | sequence |
| start_point | sequence |
| end_point | sequence |
| language | string (1 class) |
| docstring_language | string (length 2 to 7) |
| docstring_language_predictions | string (length 18 to 23) |
| is_langid_reliable | string (2 classes) |

Records:
identifier: create_testcase06 | parameters: ()
docstring / docstring_summary: Conversion of the physical parameter to the internally defined parameter to be passed to george
function:

def create_testcase06():
    import george

    bjd0 = photometry['phot_bjd'] - Tref
    err = bjd0 * 0 + photometry['phot_precision']

    """ Conversion of the physical parameter to the internally defined parameter
    to be passed to george
    """
    gp_pams = np.zeros(4)
    gp_pams[0] = np.log(activity['Hamp_PH']) * 2
    gp_pams[1] = np.log(activity['Pdec']) * 2
    gp_pams[2] = 1. / (2 * activity['Oamp'] ** 2)
    gp_pams[3] = np.log(activity['Prot'])

    kernel = np.exp(gp_pams[0]) * \
        george.kernels.ExpSquaredKernel(metric=np.exp(gp_pams[1])) * \
        george.kernels.ExpSine2Kernel(gamma=gp_pams[2], log_period=gp_pams[3])

    gp = george.GP(kernel)
    gp.compute(bjd0, err)

    prediction = gp.sample(bjd0)
    obs_photometry = np.random.normal(prediction, photometry['phot_precision'])

    fileout = open('TestCase06_photometry.dat', 'w')
    for b, p in zip(photometry['phot_bjd'], obs_photometry):
        fileout.write('{0:14f} {1:14f} {2:14f} {3:5d} {4:5d} {5:5d} \n'.format(
            b, p, photometry['phot_precision'], 0, 0, -1))
    fileout.close()

    bjd0 = bjd_obs - Tref
    err = bjd0 * 0 + instrument['RV_precision']

    gp_pams[0] = np.log(activity['Hamp_RV1']) * 2
    kernel = np.exp(gp_pams[0]) * \
        george.kernels.ExpSquaredKernel(metric=np.exp(gp_pams[1])) * \
        george.kernels.ExpSine2Kernel(gamma=gp_pams[2], log_period=gp_pams[3])

    gp = george.GP(kernel)
    gp.compute(bjd0, err)
    prediction = gp.sample(bjd0)

    y_pla = kp.kepler_RV_T0P(bjd0,
                             planet_b['f'],
                             planet_b['P'],
                             planet_b['K'],
                             planet_b['e'],
                             planet_b['o']) + instrument['RV_offset1']

    mod_pl = np.random.normal(y_pla + prediction, instrument['RV_precision'])

    Tcent_b = np.random.normal(
        np.arange(0, 1) * planet_b['P'] + kp.kepler_phase2Tc_Tref(planet_b['P'], planet_b['f'], planet_b['e'], planet_b['o']) + Tref,
        instrument['T0_precision'])

    fileout = open('TestCase06_Tcent_b.dat', 'w')
    for i_Tc, v_Tc in enumerate(Tcent_b):
        fileout.write('{0:d} {1:.4f} {2:.4f} {3:d}\n'.format(i_Tc, v_Tc, instrument['T0_precision'], 0))
    fileout.close()

    fileout = open('TestCase06_RV.dat', 'w')
    for b, r in zip(bjd_obs, mod_pl):
        fileout.write('{0:f} {1:.2f} {2:.2f} {3:d} {4:d} {5:d}\n'.format(b, r, instrument['RV_precision'], 0, 0, -1))
    fileout.close()
"def",
"create_testcase06",
"(",
")",
":",
"import",
"george",
"bjd0",
"=",
"photometry",
"[",
"'phot_bjd'",
"]",
"-",
"Tref",
"err",
"=",
"bjd0",
"*",
"0",
"+",
"photometry",
"[",
"'phot_precision'",
"]",
"gp_pams",
"=",
"np",
".",
"zeros",
"(",
"4",
")",
"gp_pams",
"[",
"0",
"]",
"=",
"np",
".",
"log",
"(",
"activity",
"[",
"'Hamp_PH'",
"]",
")",
"*",
"2",
"gp_pams",
"[",
"1",
"]",
"=",
"np",
".",
"log",
"(",
"activity",
"[",
"'Pdec'",
"]",
")",
"*",
"2",
"gp_pams",
"[",
"2",
"]",
"=",
"1.",
"/",
"(",
"2",
"*",
"activity",
"[",
"'Oamp'",
"]",
"**",
"2",
")",
"gp_pams",
"[",
"3",
"]",
"=",
"np",
".",
"log",
"(",
"activity",
"[",
"'Prot'",
"]",
")",
"kernel",
"=",
"np",
".",
"exp",
"(",
"gp_pams",
"[",
"0",
"]",
")",
"*",
"george",
".",
"kernels",
".",
"ExpSquaredKernel",
"(",
"metric",
"=",
"np",
".",
"exp",
"(",
"gp_pams",
"[",
"1",
"]",
")",
")",
"*",
"george",
".",
"kernels",
".",
"ExpSine2Kernel",
"(",
"gamma",
"=",
"gp_pams",
"[",
"2",
"]",
",",
"log_period",
"=",
"gp_pams",
"[",
"3",
"]",
")",
"gp",
"=",
"george",
".",
"GP",
"(",
"kernel",
")",
"gp",
".",
"compute",
"(",
"bjd0",
",",
"err",
")",
"prediction",
"=",
"gp",
".",
"sample",
"(",
"bjd0",
")",
"obs_photometry",
"=",
"np",
".",
"random",
".",
"normal",
"(",
"prediction",
",",
"photometry",
"[",
"'phot_precision'",
"]",
")",
"fileout",
"=",
"open",
"(",
"'TestCase06_photometry.dat'",
",",
"'w'",
")",
"for",
"b",
",",
"p",
"in",
"zip",
"(",
"photometry",
"[",
"'phot_bjd'",
"]",
",",
"obs_photometry",
")",
":",
"fileout",
".",
"write",
"(",
"'{0:14f} {1:14f} {2:14f} {3:5d} {4:5d} {5:5d} \\n'",
".",
"format",
"(",
"b",
",",
"p",
",",
"photometry",
"[",
"'phot_precision'",
"]",
",",
"0",
",",
"0",
",",
"-",
"1",
")",
")",
"fileout",
".",
"close",
"(",
")",
"bjd0",
"=",
"bjd_obs",
"-",
"Tref",
"err",
"=",
"bjd0",
"*",
"0",
"+",
"instrument",
"[",
"'RV_precision'",
"]",
"gp_pams",
"[",
"0",
"]",
"=",
"np",
".",
"log",
"(",
"activity",
"[",
"'Hamp_RV1'",
"]",
")",
"*",
"2",
"kernel",
"=",
"np",
".",
"exp",
"(",
"gp_pams",
"[",
"0",
"]",
")",
"*",
"george",
".",
"kernels",
".",
"ExpSquaredKernel",
"(",
"metric",
"=",
"np",
".",
"exp",
"(",
"gp_pams",
"[",
"1",
"]",
")",
")",
"*",
"george",
".",
"kernels",
".",
"ExpSine2Kernel",
"(",
"gamma",
"=",
"gp_pams",
"[",
"2",
"]",
",",
"log_period",
"=",
"gp_pams",
"[",
"3",
"]",
")",
"gp",
"=",
"george",
".",
"GP",
"(",
"kernel",
")",
"gp",
".",
"compute",
"(",
"bjd0",
",",
"err",
")",
"prediction",
"=",
"gp",
".",
"sample",
"(",
"bjd0",
")",
"y_pla",
"=",
"kp",
".",
"kepler_RV_T0P",
"(",
"bjd0",
",",
"planet_b",
"[",
"'f'",
"]",
",",
"planet_b",
"[",
"'P'",
"]",
",",
"planet_b",
"[",
"'K'",
"]",
",",
"planet_b",
"[",
"'e'",
"]",
",",
"planet_b",
"[",
"'o'",
"]",
")",
"+",
"instrument",
"[",
"'RV_offset1'",
"]",
"mod_pl",
"=",
"np",
".",
"random",
".",
"normal",
"(",
"y_pla",
"+",
"prediction",
",",
"instrument",
"[",
"'RV_precision'",
"]",
")",
"Tcent_b",
"=",
"np",
".",
"random",
".",
"normal",
"(",
"np",
".",
"arange",
"(",
"0",
",",
"1",
")",
"*",
"planet_b",
"[",
"'P'",
"]",
"+",
"kp",
".",
"kepler_phase2Tc_Tref",
"(",
"planet_b",
"[",
"'P'",
"]",
",",
"planet_b",
"[",
"'f'",
"]",
",",
"planet_b",
"[",
"'e'",
"]",
",",
"planet_b",
"[",
"'o'",
"]",
")",
"+",
"Tref",
",",
"instrument",
"[",
"'T0_precision'",
"]",
")",
"fileout",
"=",
"open",
"(",
"'TestCase06_Tcent_b.dat'",
",",
"'w'",
")",
"for",
"i_Tc",
",",
"v_Tc",
"in",
"enumerate",
"(",
"Tcent_b",
")",
":",
"fileout",
".",
"write",
"(",
"'{0:d} {1:.4f} {2:.4f} {3:d}\\n'",
".",
"format",
"(",
"i_Tc",
",",
"v_Tc",
",",
"instrument",
"[",
"'T0_precision'",
"]",
",",
"0",
")",
")",
"fileout",
".",
"close",
"(",
")",
"fileout",
"=",
"open",
"(",
"'TestCase06_RV.dat'",
",",
"'w'",
")",
"for",
"b",
",",
"r",
"in",
"zip",
"(",
"bjd_obs",
",",
"mod_pl",
")",
":",
"fileout",
".",
"write",
"(",
"'{0:f} {1:.2f} {2:.2f} {3:d} {4:d} {5:d}\\n'",
".",
"format",
"(",
"b",
",",
"r",
",",
"instrument",
"[",
"'RV_precision'",
"]",
",",
"0",
",",
"0",
",",
"-",
"1",
")",
")",
"fileout",
".",
"close",
"(",
")"
] | [
268,
0
] | [
334,
19
] | python | en | ['en', 'en', 'en'] | True |
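The docstring above describes mapping physical activity parameters onto george's internal kernel parameters. A minimal, runnable sketch of that mapping with made-up values (Hamp, Pdec, Oamp and Prot are assumptions for illustration, not values from this record):

import numpy as np

# Hypothetical physical hyperparameters of the quasi-periodic kernel;
# the numbers are example assumptions, not dataset values.
Hamp, Pdec, Oamp, Prot = 2.0, 30.0, 0.35, 11.0

gp_pams = np.zeros(4)
gp_pams[0] = np.log(Hamp) * 2        # log of the squared amplitude
gp_pams[1] = np.log(Pdec) * 2        # log of the squared decay timescale (george's metric)
gp_pams[2] = 1.0 / (2 * Oamp ** 2)   # gamma of ExpSine2Kernel
gp_pams[3] = np.log(Prot)            # log of the rotation period

print(gp_pams)  # the internal parameter vector that would be passed to george
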
identifier: async_setup | parameters: (hass, config)
docstring / docstring_summary: Set up Coolmaster components.
function:

async def async_setup(hass, config):
    """Set up Coolmaster components."""
    hass.data.setdefault(DOMAIN, {})
    return True

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [15, 0] | end_point: [18, 15] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'da', 'en'] | is_langid_reliable: True

identifier: async_setup_entry | parameters: (hass, entry)
docstring / docstring_summary: Set up Coolmaster from a config entry.
function:

async def async_setup_entry(hass, entry):
    """Set up Coolmaster from a config entry."""
    host = entry.data[CONF_HOST]
    port = entry.data[CONF_PORT]
    coolmaster = CoolMasterNet(host, port)
    try:
        info = await coolmaster.info()
        if not info:
            raise ConfigEntryNotReady
    except (OSError, ConnectionRefusedError, TimeoutError) as error:
        raise ConfigEntryNotReady() from error
    coordinator = CoolmasterDataUpdateCoordinator(hass, coolmaster)
    await coordinator.async_refresh()
    hass.data[DOMAIN][entry.entry_id] = {
        DATA_INFO: info,
        DATA_COORDINATOR: coordinator,
    }
    hass.async_create_task(
        hass.config_entries.async_forward_entry_setup(entry, "climate")
    )
    return True

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [21, 0] | end_point: [42, 15] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: async_unload_entry | parameters: (hass, entry)
docstring / docstring_summary: Unload a Coolmaster config entry.
function:

async def async_unload_entry(hass, entry):
    """Unload a Coolmaster config entry."""
    unload_ok = await hass.config_entries.async_forward_entry_unload(entry, "climate")
    if unload_ok:
        hass.data[DOMAIN].pop(entry.entry_id)
    return unload_ok

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [45, 0] | end_point: [52, 20] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: CoolmasterDataUpdateCoordinator.__init__ | parameters: (self, hass, coolmaster)
docstring / docstring_summary: Initialize global Coolmaster data updater.
function:

def __init__(self, hass, coolmaster):
    """Initialize global Coolmaster data updater."""
    self._coolmaster = coolmaster
    super().__init__(
        hass,
        _LOGGER,
        name=DOMAIN,
        update_interval=SCAN_INTERVAL,
    )

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [58, 4] | end_point: [67, 9] | language: python | docstring_language: en | docstring_language_predictions: ['tr', 'en', 'en'] | is_langid_reliable: True

identifier: CoolmasterDataUpdateCoordinator._async_update_data | parameters: (self)
docstring / docstring_summary: Fetch data from Coolmaster.
function:

async def _async_update_data(self):
    """Fetch data from Coolmaster."""
    try:
        return await self._coolmaster.status()
    except (OSError, ConnectionRefusedError, TimeoutError) as error:
        raise UpdateFailed from error

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [69, 4] | end_point: [74, 41] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

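For context, a minimal sketch of how a DataUpdateCoordinator like the one above is typically consumed by an entity in Home Assistant; CoolmasterUnit and the unit_id key are hypothetical names used only for illustration:

from homeassistant.helpers.update_coordinator import CoordinatorEntity

class CoolmasterUnit(CoordinatorEntity):
    """Hypothetical entity that reads from the coordinator sketched above."""

    def __init__(self, coordinator, unit_id):
        super().__init__(coordinator)
        self._unit_id = unit_id

    @property
    def state(self):
        # coordinator.data holds whatever _async_update_data() last returned.
        return self.coordinator.data[self._unit_id]
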
identifier: prepare_backend | parameters: (model_name, backend_name, im_size: List[int] = None, max_batch_size: int = 1, force_fp16: bool = False, download_model: bool = True, config: Configs = None)
docstring_summary: Check if ONNX, MXNet and TensorRT models exist and download/create them otherwise.
docstring: as in the function body below, including the :param/:return details.
function:

def prepare_backend(model_name, backend_name, im_size: List[int] = None,
                    max_batch_size: int = 1,
                    force_fp16: bool = False,
                    download_model: bool = True,
                    config: Configs = None):
    """
    Check if ONNX, MXNet and TensorRT models exist and download/create them otherwise.

    :param model_name: Name of required model. Must be one of keys in `models` dict.
    :param backend_name: Name of inference backend. (onnx, trt)
    :param im_size: Desired maximum size of image in W,H form. Will be overridden if model doesn't support reshaping.
    :param max_batch_size: Maximum batch size for inference, currently supported for ArcFace model only.
    :param force_fp16: Force use of FP16 precision, even if device doesn't support it. Be careful. TensorRT specific.
    :param download_model: Download MXNet or ONNX model if it does not exist.
    :param config: Configs class instance
    :return: ONNX model serialized to string, or path to TensorRT engine
    """
    prepare_folders([config.mxnet_models_dir, config.onnx_models_dir, config.trt_engines_dir])

    in_package = config.in_official_package(model_name)
    reshape_allowed = config.mxnet_models[model_name].get('reshape')
    shape = config.get_shape(model_name)
    if reshape_allowed is True and im_size is not None:
        shape = (1, 3) + tuple(im_size)[::-1]

    mxnet_symbol, mxnet_params = config.get_mxnet_model_paths(model_name)
    onnx_dir, onnx_path = config.build_model_paths(model_name, 'onnx')
    trt_dir, trt_path = config.build_model_paths(model_name, 'plan')

    if not os.path.exists(onnx_path) and download_model is True:
        prepare_folders([onnx_dir])
        if in_package:
            print(f"Downloading model: {model_name}...")
            get_model_file(model_name, root=config.mxnet_models_dir)
            convert_insight_model(mxnet_symbol, mxnet_params, onnx_path, shape)
        else:
            dl_link = config.get_dl_link(model_name)
            if dl_link:
                download(config.get_dl_link(model_name), onnx_path)
                remove_initializer_from_input(onnx_path, onnx_path)
            elif os.path.exists(mxnet_symbol) and os.path.exists(mxnet_params):
                convert_insight_model(mxnet_symbol, mxnet_params, onnx_path, shape)
            else:
                logging.error("You have requested non standard model, but haven't provided download link or "
                              "MXNet model. Place model to proper folder and change configs.py accordingly.")

    if backend_name == 'onnx':
        model = onnx.load(onnx_path)
        if reshape_allowed is True:
            logging.info(f'Reshaping ONNX inputs to: {shape}')
            model = reshape(model, h=im_size[1], w=im_size[0])
        return model.SerializeToString()

    if backend_name == "trt":
        if reshape_allowed is True:
            trt_path = trt_path.replace('.plan', f'_{shape[3]}_{shape[2]}.plan')
        if max_batch_size > 1:
            trt_path = trt_path.replace('.plan', f'_batch{max_batch_size}.plan')
        if force_fp16 is True:
            trt_path = trt_path.replace('.plan', '_fp16.plan')
        if not os.path.exists(trt_path):
            prepare_folders([trt_dir])
            if reshape_allowed is True or max_batch_size != 1:
                logging.info(f'Reshaping ONNX inputs to: {shape}')
                model = onnx.load(onnx_path)
                onnx_batch_size = 1
                if max_batch_size != 1:
                    onnx_batch_size = -1
                reshaped = reshape(model, n=onnx_batch_size, h=shape[2], w=shape[3])
                temp_onnx_model = reshaped.SerializeToString()
            else:
                temp_onnx_model = onnx_path
            logging.info(f"Building TRT engine for {model_name}...")
            convert_onnx(temp_onnx_model,
                         engine_file_path=trt_path,
                         max_batch_size=max_batch_size,
                         force_fp16=force_fp16)
            logging.info('Building TRT engine complete!')
        return trt_path

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [48, 0] | end_point: [131, 23] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False

identifier: get_model | parameters: (model_name: str, backend_name: str, im_size: List[int] = None, max_batch_size: int = 1, force_fp16: bool = False, root_dir: str = "/models", download_model: bool = True, **kwargs)
docstring_summary: Returns inference backend instance with loaded model.
docstring: as in the function body below, including the :param/:return details.
function:

def get_model(model_name: str, backend_name: str, im_size: List[int] = None, max_batch_size: int = 1, force_fp16: bool = False,
              root_dir: str = "/models", download_model: bool = True, **kwargs):
    """
    Returns inference backend instance with loaded model.

    :param model_name: Name of required model. Must be one of keys in `models` dict.
    :param backend_name: Name of inference backend. (onnx, mxnet, trt)
    :param im_size: Desired maximum size of image in W,H form. Will be overridden if model doesn't support reshaping.
    :param max_batch_size: Maximum batch size for inference, currently supported for ArcFace model only.
    :param force_fp16: Force use of FP16 precision, even if device doesn't support it. Be careful. TensorRT specific.
    :param root_dir: Root directory where models will be stored.
    :param download_model: Download MXNet or ONNX model. Might be disabled if TRT model was already created.
    :param kwargs: Placeholder.
    :return: Inference backend with loaded model.
    """
    config = Configs(models_dir=root_dir)

    backends = {
        'onnx': onnx_backend,
        'trt': trt_backend,
        'mxnet': 'mxnet',
        'triton': triton_backend
    }

    if backend_name not in backends:
        logging.error(f"Unknown backend '{backend_name}' specified. Exiting.")
        exit(1)

    if model_name not in models:
        logging.error(f"Unknown model {model_name} specified."
                      f" Please select one of the following:\n"
                      f"{', '.join(list(models.keys()))}")
        exit(1)

    # Keep original InsightFace package available for a while for testing purposes.
    if backend_name == 'mxnet':
        return get_model_orig(model_name, root=config.mxnet_models_dir)

    backend = backends[backend_name]
    model_path = prepare_backend(model_name, backend_name, im_size=im_size, max_batch_size=max_batch_size, config=config, force_fp16=force_fp16,
                                 download_model=download_model)

    outputs = config.get_outputs_order(model_name)
    model = models[model_name](model_path=model_path, backend=backend, outputs=outputs)
    return model

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [134, 0] | end_point: [180, 16] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False

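A hedged usage sketch of get_model, based only on the signature above; the model name 'retinaface_r50_v1' is an assumption about the keys of the models registry, not a value confirmed by this record:

# Assumes get_model and its model registry are importable from the package above.
model = get_model('retinaface_r50_v1', 'onnx',       # model name is a hypothetical registry key
                  im_size=[640, 480],                # W, H; ignored if the model cannot reshape
                  root_dir='/models',
                  download_model=True)
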
identifier: async_setup_platform | parameters: (hass, config, async_add_entities, discovery_info=None)
docstring / docstring_summary: Set up DSMR Reader sensors.
function:

async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up DSMR Reader sensors."""
    sensors = []
    for topic in DEFINITIONS:
        sensors.append(DSMRSensor(topic))
    async_add_entities(sensors)

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [11, 0] | end_point: [18, 31] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'da', 'en'] | is_langid_reliable: True

identifier: DSMRSensor.__init__ | parameters: (self, topic)
docstring / docstring_summary: Initialize the sensor.
function:

def __init__(self, topic):
    """Initialize the sensor."""
    self._definition = DEFINITIONS[topic]
    self._entity_id = slugify(topic.replace("/", "_"))
    self._topic = topic
    self._name = self._definition.get("name", topic.split("/")[-1])
    self._device_class = self._definition.get("device_class")
    self._enable_default = self._definition.get("enable_default")
    self._unit_of_measurement = self._definition.get("unit")
    self._icon = self._definition.get("icon")
    self._transform = self._definition.get("transform")
    self._state = None

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [24, 4] | end_point: [38, 26] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: DSMRSensor.async_added_to_hass | parameters: (self)
docstring / docstring_summary: Subscribe to MQTT events.
function:

async def async_added_to_hass(self):
    """Subscribe to MQTT events."""

    @callback
    def message_received(message):
        """Handle new MQTT messages."""
        if self._transform is not None:
            self._state = self._transform(message.payload)
        else:
            self._state = message.payload
        self.async_write_ha_state()

    await mqtt.async_subscribe(self.hass, self._topic, message_received, 1)

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [40, 4] | end_point: [54, 79] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: DSMRSensor.name | parameters: (self)
docstring / docstring_summary: Return the name of the sensor supplied in constructor.
function:

def name(self):
    """Return the name of the sensor supplied in constructor."""
    return self._name

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [57, 4] | end_point: [59, 25] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: DSMRSensor.entity_id | parameters: (self)
docstring / docstring_summary: Return the entity ID for this sensor.
function:

def entity_id(self):
    """Return the entity ID for this sensor."""
    return f"sensor.{self._entity_id}"

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [62, 4] | end_point: [64, 42] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: DSMRSensor.state | parameters: (self)
docstring / docstring_summary: Return the current state of the entity.
function:

def state(self):
    """Return the current state of the entity."""
    return self._state

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [67, 4] | end_point: [69, 26] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: DSMRSensor.device_class | parameters: (self)
docstring / docstring_summary: Return the device_class of this sensor.
function:

def device_class(self):
    """Return the device_class of this sensor."""
    return self._device_class

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [72, 4] | end_point: [74, 33] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: DSMRSensor.unit_of_measurement | parameters: (self)
docstring / docstring_summary: Return the unit_of_measurement of this sensor.
function:

def unit_of_measurement(self):
    """Return the unit_of_measurement of this sensor."""
    return self._unit_of_measurement

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [77, 4] | end_point: [79, 40] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'id', 'en'] | is_langid_reliable: True

identifier: DSMRSensor.entity_registry_enabled_default | parameters: (self)
docstring / docstring_summary: Return if the entity should be enabled when first added to the entity registry.
function:

def entity_registry_enabled_default(self) -> bool:
    """Return if the entity should be enabled when first added to the entity registry."""
    return self._enable_default

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [82, 4] | end_point: [84, 35] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: DSMRSensor.icon | parameters: (self)
docstring / docstring_summary: Return the icon of this sensor.
function:

def icon(self):
    """Return the icon of this sensor."""
    return self._icon

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [87, 4] | end_point: [89, 25] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: test_better_snakecase | parameters: (value, expected)
docstring / docstring_summary: Test that better snakecase works better.
function:

def test_better_snakecase(value, expected):
    """Test that better snakecase works better."""
    assert device_tracker._better_snakecase(value) == expected

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [17, 0] | end_point: [19, 62] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'no', 'en'] | is_langid_reliable: True

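The test above takes (value, expected) pairs, which suggests it is driven by a parametrize decorator in the surrounding file. A sketch of how such a test is typically declared; the decorator and the example pairs are assumptions, not taken from the source:

import pytest

@pytest.mark.parametrize(
    "value, expected",
    [
        ("WLANConfig", "wlan_config"),  # assumed example pair
        ("SSID", "ssid"),               # assumed example pair
    ],
)
def test_better_snakecase(value, expected):
    """Test that better snakecase works better."""
    assert device_tracker._better_snakecase(value) == expected
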
identifier: async_setup_platform | parameters: (hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None)
docstring / docstring_summary: Set up MQTT fan through configuration.yaml.
function:

async def async_setup_platform(
    hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
):
    """Set up MQTT fan through configuration.yaml."""
    await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
    await _async_setup_entity(hass, config, async_add_entities)

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [112, 0] | end_point: [117, 63] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'ny', 'en'] | is_langid_reliable: True

identifier: async_setup_entry | parameters: (hass, config_entry, async_add_entities)
docstring / docstring_summary: Set up MQTT fan dynamically through MQTT discovery.
function:

async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up MQTT fan dynamically through MQTT discovery."""

    async def async_discover(discovery_payload):
        """Discover and add a MQTT fan."""
        discovery_data = discovery_payload.discovery_data
        try:
            config = PLATFORM_SCHEMA(discovery_payload)
            await _async_setup_entity(
                hass, config, async_add_entities, config_entry, discovery_data
            )
        except Exception:
            clear_discovery_hash(hass, discovery_data[ATTR_DISCOVERY_HASH])
            raise

    async_dispatcher_connect(
        hass, MQTT_DISCOVERY_NEW.format(fan.DOMAIN, "mqtt"), async_discover
    )

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [120, 0] | end_point: [137, 5] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'lb', 'en'] | is_langid_reliable: True

identifier: _async_setup_entity | parameters: (hass, config, async_add_entities, config_entry=None, discovery_data=None)
docstring / docstring_summary: Set up the MQTT fan.
function:

async def _async_setup_entity(
    hass, config, async_add_entities, config_entry=None, discovery_data=None
):
    """Set up the MQTT fan."""
    async_add_entities([MqttFan(hass, config, config_entry, discovery_data)])

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [140, 0] | end_point: [144, 77] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'fy', 'en'] | is_langid_reliable: True

identifier: MqttFan.__init__ | parameters: (self, hass, config, config_entry, discovery_data)
docstring / docstring_summary: Initialize the MQTT fan.
function:

def __init__(self, hass, config, config_entry, discovery_data):
    """Initialize the MQTT fan."""
    self.hass = hass
    self._unique_id = config.get(CONF_UNIQUE_ID)
    self._state = False
    self._speed = None
    self._oscillation = None
    self._supported_features = 0
    self._sub_state = None

    self._topic = None
    self._payload = None
    self._templates = None
    self._optimistic = None
    self._optimistic_oscillation = None
    self._optimistic_speed = None

    # Load config
    self._setup_from_config(config)

    device_config = config.get(CONF_DEVICE)

    MqttAttributes.__init__(self, config)
    MqttAvailability.__init__(self, config)
    MqttDiscoveryUpdate.__init__(self, discovery_data, self.discovery_update)
    MqttEntityDeviceInfo.__init__(self, device_config, config_entry)

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [156, 4] | end_point: [181, 72] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'fy', 'en'] | is_langid_reliable: True

identifier: MqttFan.async_added_to_hass | parameters: (self)
docstring / docstring_summary: Subscribe to MQTT events.
function:

async def async_added_to_hass(self):
    """Subscribe to MQTT events."""
    await super().async_added_to_hass()
    await self._subscribe_topics()

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [183, 4] | end_point: [186, 38] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: MqttFan.discovery_update | parameters: (self, discovery_payload)
docstring / docstring_summary: Handle updated discovery message.
function:

async def discovery_update(self, discovery_payload):
    """Handle updated discovery message."""
    config = PLATFORM_SCHEMA(discovery_payload)
    self._setup_from_config(config)
    await self.attributes_discovery_update(config)
    await self.availability_discovery_update(config)
    await self.device_info_discovery_update(config)
    await self._subscribe_topics()
    self.async_write_ha_state()

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [188, 4] | end_point: [196, 35] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: MqttFan._setup_from_config | parameters: (self, config)
docstring / docstring_summary: (Re)Setup the entity.
function:

def _setup_from_config(self, config):
    """(Re)Setup the entity."""
    self._config = config
    self._topic = {
        key: config.get(key)
        for key in (
            CONF_STATE_TOPIC,
            CONF_COMMAND_TOPIC,
            CONF_SPEED_STATE_TOPIC,
            CONF_SPEED_COMMAND_TOPIC,
            CONF_OSCILLATION_STATE_TOPIC,
            CONF_OSCILLATION_COMMAND_TOPIC,
        )
    }
    self._templates = {
        CONF_STATE: config.get(CONF_STATE_VALUE_TEMPLATE),
        ATTR_SPEED: config.get(CONF_SPEED_VALUE_TEMPLATE),
        OSCILLATION: config.get(CONF_OSCILLATION_VALUE_TEMPLATE),
    }
    self._payload = {
        "STATE_ON": config[CONF_PAYLOAD_ON],
        "STATE_OFF": config[CONF_PAYLOAD_OFF],
        "OSCILLATE_ON_PAYLOAD": config[CONF_PAYLOAD_OSCILLATION_ON],
        "OSCILLATE_OFF_PAYLOAD": config[CONF_PAYLOAD_OSCILLATION_OFF],
        "SPEED_LOW": config[CONF_PAYLOAD_LOW_SPEED],
        "SPEED_MEDIUM": config[CONF_PAYLOAD_MEDIUM_SPEED],
        "SPEED_HIGH": config[CONF_PAYLOAD_HIGH_SPEED],
        "SPEED_OFF": config[CONF_PAYLOAD_OFF_SPEED],
    }

    optimistic = config[CONF_OPTIMISTIC]
    self._optimistic = optimistic or self._topic[CONF_STATE_TOPIC] is None
    self._optimistic_oscillation = (
        optimistic or self._topic[CONF_OSCILLATION_STATE_TOPIC] is None
    )
    self._optimistic_speed = (
        optimistic or self._topic[CONF_SPEED_STATE_TOPIC] is None
    )

    self._supported_features = 0
    self._supported_features |= (
        self._topic[CONF_OSCILLATION_COMMAND_TOPIC] is not None
        and SUPPORT_OSCILLATE
    )
    self._supported_features |= (
        self._topic[CONF_SPEED_COMMAND_TOPIC] is not None and SUPPORT_SET_SPEED
    )

    for key, tpl in list(self._templates.items()):
        if tpl is None:
            self._templates[key] = lambda value: value
        else:
            tpl.hass = self.hass
            self._templates[key] = tpl.async_render_with_possible_json_value

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [198, 4] | end_point: [250, 80] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'haw', 'en'] | is_langid_reliable: True

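The loop at the end of _setup_from_config swaps each missing template for an identity function, so downstream code can always call templates[key](payload) without branching. A standalone sketch of that idiom with illustrative names:

# Default missing render functions to identity; runnable as-is.
templates = {"state": None, "speed": str.upper}

for key, tpl in list(templates.items()):
    if tpl is None:
        templates[key] = lambda value: value  # identity fallback

assert templates["state"]("ON") == "ON"
assert templates["speed"]("on") == "ON"
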
identifier: MqttFan._subscribe_topics | parameters: (self)
docstring / docstring_summary: (Re)Subscribe to topics.
function:

async def _subscribe_topics(self):
    """(Re)Subscribe to topics."""
    topics = {}

    @callback
    @log_messages(self.hass, self.entity_id)
    def state_received(msg):
        """Handle new received MQTT message."""
        payload = self._templates[CONF_STATE](msg.payload)
        if payload == self._payload["STATE_ON"]:
            self._state = True
        elif payload == self._payload["STATE_OFF"]:
            self._state = False
        self.async_write_ha_state()

    if self._topic[CONF_STATE_TOPIC] is not None:
        topics[CONF_STATE_TOPIC] = {
            "topic": self._topic[CONF_STATE_TOPIC],
            "msg_callback": state_received,
            "qos": self._config[CONF_QOS],
        }

    @callback
    @log_messages(self.hass, self.entity_id)
    def speed_received(msg):
        """Handle new received MQTT message for the speed."""
        payload = self._templates[ATTR_SPEED](msg.payload)
        if payload == self._payload["SPEED_LOW"]:
            self._speed = SPEED_LOW
        elif payload == self._payload["SPEED_MEDIUM"]:
            self._speed = SPEED_MEDIUM
        elif payload == self._payload["SPEED_HIGH"]:
            self._speed = SPEED_HIGH
        elif payload == self._payload["SPEED_OFF"]:
            self._speed = SPEED_OFF
        self.async_write_ha_state()

    if self._topic[CONF_SPEED_STATE_TOPIC] is not None:
        topics[CONF_SPEED_STATE_TOPIC] = {
            "topic": self._topic[CONF_SPEED_STATE_TOPIC],
            "msg_callback": speed_received,
            "qos": self._config[CONF_QOS],
        }
        self._speed = SPEED_OFF

    @callback
    @log_messages(self.hass, self.entity_id)
    def oscillation_received(msg):
        """Handle new received MQTT message for the oscillation."""
        payload = self._templates[OSCILLATION](msg.payload)
        if payload == self._payload["OSCILLATE_ON_PAYLOAD"]:
            self._oscillation = True
        elif payload == self._payload["OSCILLATE_OFF_PAYLOAD"]:
            self._oscillation = False
        self.async_write_ha_state()

    if self._topic[CONF_OSCILLATION_STATE_TOPIC] is not None:
        topics[CONF_OSCILLATION_STATE_TOPIC] = {
            "topic": self._topic[CONF_OSCILLATION_STATE_TOPIC],
            "msg_callback": oscillation_received,
            "qos": self._config[CONF_QOS],
        }
        self._oscillation = False

    self._sub_state = await subscription.async_subscribe_topics(
        self.hass, self._sub_state, topics
    )

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [252, 4] | end_point: [318, 9] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: MqttFan.async_will_remove_from_hass | parameters: (self)
docstring / docstring_summary: Unsubscribe when removed.
function:

async def async_will_remove_from_hass(self):
    """Unsubscribe when removed."""
    self._sub_state = await subscription.async_unsubscribe_topics(
        self.hass, self._sub_state
    )
    await MqttAttributes.async_will_remove_from_hass(self)
    await MqttAvailability.async_will_remove_from_hass(self)
    await MqttDiscoveryUpdate.async_will_remove_from_hass(self)

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [320, 4] | end_point: [327, 67] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: MqttFan.should_poll | parameters: (self)
docstring / docstring_summary: No polling needed for a MQTT fan.
function:

def should_poll(self):
    """No polling needed for a MQTT fan."""
    return False

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [330, 4] | end_point: [332, 20] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: MqttFan.assumed_state | parameters: (self)
docstring / docstring_summary: Return true if we do optimistic updates.
function:

def assumed_state(self):
    """Return true if we do optimistic updates."""
    return self._optimistic

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [335, 4] | end_point: [337, 31] | language: python | docstring_language: pt | docstring_language_predictions: ['pt', 'la', 'en'] | is_langid_reliable: False

identifier: MqttFan.is_on | parameters: (self)
docstring / docstring_summary: Return true if device is on.
function:

def is_on(self):
    """Return true if device is on."""
    return self._state

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [340, 4] | end_point: [342, 26] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'fy', 'en'] | is_langid_reliable: True

identifier: MqttFan.name | parameters: (self)
docstring / docstring_summary: Get entity name.
function:

def name(self) -> str:
    """Get entity name."""
    return self._config[CONF_NAME]

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [345, 4] | end_point: [347, 38] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: MqttFan.speed_list | parameters: (self)
docstring / docstring_summary: Get the list of available speeds.
function:

def speed_list(self) -> list:
    """Get the list of available speeds."""
    return self._config[CONF_SPEED_LIST]

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [350, 4] | end_point: [352, 44] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: MqttFan.supported_features | parameters: (self)
docstring / docstring_summary: Flag supported features.
function:

def supported_features(self) -> int:
    """Flag supported features."""
    return self._supported_features

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [355, 4] | end_point: [357, 39] | language: python | docstring_language: da | docstring_language_predictions: ['da', 'en', 'en'] | is_langid_reliable: True

identifier: MqttFan.speed | parameters: (self)
docstring / docstring_summary: Return the current speed.
function:

def speed(self):
    """Return the current speed."""
    return self._speed

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [360, 4] | end_point: [362, 26] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: MqttFan.oscillating | parameters: (self)
docstring / docstring_summary: Return the oscillation state.
function:

def oscillating(self):
    """Return the oscillation state."""
    return self._oscillation

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [365, 4] | end_point: [367, 32] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: MqttFan.async_turn_on | parameters: (self, speed: str = None, **kwargs)
docstring: Turn on the entity. This method is a coroutine.
docstring_summary: Turn on the entity.
function:

async def async_turn_on(self, speed: str = None, **kwargs) -> None:
    """Turn on the entity.

    This method is a coroutine.
    """
    mqtt.async_publish(
        self.hass,
        self._topic[CONF_COMMAND_TOPIC],
        self._payload["STATE_ON"],
        self._config[CONF_QOS],
        self._config[CONF_RETAIN],
    )
    if speed:
        await self.async_set_speed(speed)
    if self._optimistic:
        self._state = True
        self.async_write_ha_state()

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [369, 4] | end_point: [385, 39] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: MqttFan.async_turn_off | parameters: (self, **kwargs)
docstring: Turn off the entity. This method is a coroutine.
docstring_summary: Turn off the entity.
function:

async def async_turn_off(self, **kwargs) -> None:
    """Turn off the entity.

    This method is a coroutine.
    """
    mqtt.async_publish(
        self.hass,
        self._topic[CONF_COMMAND_TOPIC],
        self._payload["STATE_OFF"],
        self._config[CONF_QOS],
        self._config[CONF_RETAIN],
    )
    if self._optimistic:
        self._state = False
        self.async_write_ha_state()

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [387, 4] | end_point: [401, 39] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: MqttFan.async_set_speed | parameters: (self, speed: str)
docstring: Set the speed of the fan. This method is a coroutine.
docstring_summary: Set the speed of the fan.
function:

async def async_set_speed(self, speed: str) -> None:
    """Set the speed of the fan.

    This method is a coroutine.
    """
    if speed == SPEED_LOW:
        mqtt_payload = self._payload["SPEED_LOW"]
    elif speed == SPEED_MEDIUM:
        mqtt_payload = self._payload["SPEED_MEDIUM"]
    elif speed == SPEED_HIGH:
        mqtt_payload = self._payload["SPEED_HIGH"]
    elif speed == SPEED_OFF:
        mqtt_payload = self._payload["SPEED_OFF"]
    else:
        mqtt_payload = speed

    mqtt.async_publish(
        self.hass,
        self._topic[CONF_SPEED_COMMAND_TOPIC],
        mqtt_payload,
        self._config[CONF_QOS],
        self._config[CONF_RETAIN],
    )

    if self._optimistic_speed:
        self._speed = speed
        self.async_write_ha_state()

function_tokens: (token-by-token sequence of the function above; omitted)
start_point: [403, 4] | end_point: [429, 39] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

MqttFan.async_oscillate | (self, oscillating: bool) | Set oscillation.
This method is a coroutine.
| Set oscillation. | async def async_oscillate(self, oscillating: bool) -> None:
"""Set oscillation.
This method is a coroutine.
"""
if oscillating is False:
payload = self._payload["OSCILLATE_OFF_PAYLOAD"]
else:
payload = self._payload["OSCILLATE_ON_PAYLOAD"]
mqtt.async_publish(
self.hass,
self._topic[CONF_OSCILLATION_COMMAND_TOPIC],
payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
if self._optimistic_oscillation:
self._oscillation = oscillating
self.async_write_ha_state() | [
"async",
"def",
"async_oscillate",
"(",
"self",
",",
"oscillating",
":",
"bool",
")",
"->",
"None",
":",
"if",
"oscillating",
"is",
"False",
":",
"payload",
"=",
"self",
".",
"_payload",
"[",
"\"OSCILLATE_OFF_PAYLOAD\"",
"]",
"else",
":",
"payload",
"=",
"self",
".",
"_payload",
"[",
"\"OSCILLATE_ON_PAYLOAD\"",
"]",
"mqtt",
".",
"async_publish",
"(",
"self",
".",
"hass",
",",
"self",
".",
"_topic",
"[",
"CONF_OSCILLATION_COMMAND_TOPIC",
"]",
",",
"payload",
",",
"self",
".",
"_config",
"[",
"CONF_QOS",
"]",
",",
"self",
".",
"_config",
"[",
"CONF_RETAIN",
"]",
",",
")",
"if",
"self",
".",
"_optimistic_oscillation",
":",
"self",
".",
"_oscillation",
"=",
"oscillating",
"self",
".",
"async_write_ha_state",
"(",
")"
] | [
431,
4
] | [
451,
39
] | python | en | ['it', 'ru', 'en'] | False |
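
The oscillation command reduces to a boolean-to-payload choice; a conditional expression states the same mapping compactly. The payload strings here are placeholders chosen only to make the mapping explicit.

def oscillation_payload(oscillating: bool) -> str:
    return "oscillate_on" if oscillating else "oscillate_off"

assert oscillation_payload(True) == "oscillate_on"
assert oscillation_payload(False) == "oscillate_off"
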
MqttFan.unique_id | (self) | Return a unique ID. | Return a unique ID. | def unique_id(self):
"""Return a unique ID."""
return self._unique_id | [
"def",
"unique_id",
"(",
"self",
")",
":",
"return",
"self",
".",
"_unique_id"
] | [
454,
4
] | [
456,
30
] | python | ca | ['fr', 'ca', 'en'] | False |
shift_tokens_right | (input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int) |
Shift input ids one token to the right.
|
Shift input ids one token to the right.
| def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
"""
Shift input ids one token to the right.
"""
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
shifted_input_ids[:, 0] = decoder_start_token_id
assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids | [
"def",
"shift_tokens_right",
"(",
"input_ids",
":",
"torch",
".",
"Tensor",
",",
"pad_token_id",
":",
"int",
",",
"decoder_start_token_id",
":",
"int",
")",
":",
"shifted_input_ids",
"=",
"input_ids",
".",
"new_zeros",
"(",
"input_ids",
".",
"shape",
")",
"shifted_input_ids",
"[",
":",
",",
"1",
":",
"]",
"=",
"input_ids",
"[",
":",
",",
":",
"-",
"1",
"]",
".",
"clone",
"(",
")",
"shifted_input_ids",
"[",
":",
",",
"0",
"]",
"=",
"decoder_start_token_id",
"assert",
"pad_token_id",
"is",
"not",
"None",
",",
"\"self.model.config.pad_token_id has to be defined.\"",
"# replace possible -100 values in labels by `pad_token_id`",
"shifted_input_ids",
".",
"masked_fill_",
"(",
"shifted_input_ids",
"==",
"-",
"100",
",",
"pad_token_id",
")",
"return",
"shifted_input_ids"
] | [
62,
0
] | [
74,
28
] | python | en | ['en', 'error', 'th'] | False |
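
A worked check of the contract above: the decoder start token is prepended, the last label is dropped, and any -100 ignore-index entries that survive the shift are replaced by the pad id. The function is restated locally so the snippet runs on its own.

import torch

def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    # Same logic as the row above, restated for a self-contained demo.
    shifted = input_ids.new_zeros(input_ids.shape)
    shifted[:, 1:] = input_ids[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted

labels = torch.tensor([[5, -100, 7, 8]])
out = shift_tokens_right(labels, pad_token_id=1, decoder_start_token_id=2)
# start token 2 prepended, trailing 8 dropped, -100 replaced by pad id 1
assert out.tolist() == [[2, 5, 1, 7]]
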
_make_causal_mask | (input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0) |
Make causal mask used for bi-directional self-attention.
|
Make causal mask used for bi-directional self-attention.
| def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), float("-inf"))
mask_cond = torch.arange(mask.size(-1))
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) | [
"def",
"_make_causal_mask",
"(",
"input_ids_shape",
":",
"torch",
".",
"Size",
",",
"dtype",
":",
"torch",
".",
"dtype",
",",
"past_key_values_length",
":",
"int",
"=",
"0",
")",
":",
"bsz",
",",
"tgt_len",
"=",
"input_ids_shape",
"mask",
"=",
"torch",
".",
"full",
"(",
"(",
"tgt_len",
",",
"tgt_len",
")",
",",
"float",
"(",
"\"-inf\"",
")",
")",
"mask_cond",
"=",
"torch",
".",
"arange",
"(",
"mask",
".",
"size",
"(",
"-",
"1",
")",
")",
"mask",
".",
"masked_fill_",
"(",
"mask_cond",
"<",
"(",
"mask_cond",
"+",
"1",
")",
".",
"view",
"(",
"mask",
".",
"size",
"(",
"-",
"1",
")",
",",
"1",
")",
",",
"0",
")",
"mask",
"=",
"mask",
".",
"to",
"(",
"dtype",
")",
"if",
"past_key_values_length",
">",
"0",
":",
"mask",
"=",
"torch",
".",
"cat",
"(",
"[",
"torch",
".",
"zeros",
"(",
"tgt_len",
",",
"past_key_values_length",
",",
"dtype",
"=",
"dtype",
")",
",",
"mask",
"]",
",",
"dim",
"=",
"-",
"1",
")",
"return",
"mask",
"[",
"None",
",",
"None",
",",
":",
",",
":",
"]",
".",
"expand",
"(",
"bsz",
",",
"1",
",",
"tgt_len",
",",
"tgt_len",
"+",
"past_key_values_length",
")"
] | [
77,
0
] | [
89,
91
] | python | en | ['en', 'error', 'th'] | False |
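
For intuition, here is the mask the function above produces for tgt_len=3 with no cached keys: position i may attend to positions up to and including i, and blocked entries hold -inf so they vanish after softmax. The construction is restated inline so it runs stand-alone.

import torch

tgt_len = 3
mask = torch.full((tgt_len, tgt_len), float("-inf"))
cond = torch.arange(tgt_len)
# entry (i, j) is zeroed when j <= i, i.e. the lower triangle plus diagonal
mask.masked_fill_(cond < (cond + 1).view(tgt_len, 1), 0)
# mask is now:
# [[0., -inf, -inf],
#  [0.,   0., -inf],
#  [0.,   0.,   0.]]
assert mask[0, 1] == float("-inf") and mask[2, 0] == 0
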
_expand_mask | (mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None) |
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
|
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
| def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min) | [
"def",
"_expand_mask",
"(",
"mask",
":",
"torch",
".",
"Tensor",
",",
"dtype",
":",
"torch",
".",
"dtype",
",",
"tgt_len",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
")",
":",
"bsz",
",",
"src_len",
"=",
"mask",
".",
"size",
"(",
")",
"tgt_len",
"=",
"tgt_len",
"if",
"tgt_len",
"is",
"not",
"None",
"else",
"src_len",
"expanded_mask",
"=",
"mask",
"[",
":",
",",
"None",
",",
"None",
",",
":",
"]",
".",
"expand",
"(",
"bsz",
",",
"1",
",",
"tgt_len",
",",
"src_len",
")",
".",
"to",
"(",
"dtype",
")",
"inverted_mask",
"=",
"1.0",
"-",
"expanded_mask",
"return",
"inverted_mask",
".",
"masked_fill",
"(",
"inverted_mask",
".",
"bool",
"(",
")",
",",
"torch",
".",
"finfo",
"(",
"dtype",
")",
".",
"min",
")"
] | [
92,
0
] | [
103,
82
] | python | en | ['en', 'error', 'th'] | False |
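
The helper above converts a [bsz, src_len] padding mask (1 keeps a token, 0 marks padding) into an additive [bsz, 1, tgt_len, src_len] mask whose padded positions hold the most negative representable value. Restated step by step on a tiny input:

import torch

mask = torch.tensor([[1, 1, 0]])                       # one padded position
expanded = mask[:, None, None, :].expand(1, 1, 2, 3).to(torch.float32)
inverted = 1.0 - expanded                              # 1.0 where padded
additive = inverted.masked_fill(inverted.bool(), torch.finfo(torch.float32).min)
assert additive[0, 0, 0, 2] == torch.finfo(torch.float32).min  # padded slot
assert additive[0, 0, 0, 0] == 0                               # kept slot
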
BartLearnedPositionalEmbedding.forward | (self, input_ids_shape: torch.Size, past_key_values_length: int = 0) | `input_ids_shape` is expected to be [bsz x seqlen]. | `input_ids_shape` is expected to be [bsz x seqlen]. | def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
bsz, seq_len = input_ids_shape[:2]
positions = torch.arange(
past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
)
return super().forward(positions + self.offset) | [
"def",
"forward",
"(",
"self",
",",
"input_ids_shape",
":",
"torch",
".",
"Size",
",",
"past_key_values_length",
":",
"int",
"=",
"0",
")",
":",
"bsz",
",",
"seq_len",
"=",
"input_ids_shape",
"[",
":",
"2",
"]",
"positions",
"=",
"torch",
".",
"arange",
"(",
"past_key_values_length",
",",
"past_key_values_length",
"+",
"seq_len",
",",
"dtype",
"=",
"torch",
".",
"long",
",",
"device",
"=",
"self",
".",
"weight",
".",
"device",
")",
"return",
"super",
"(",
")",
".",
"forward",
"(",
"positions",
"+",
"self",
".",
"offset",
")"
] | [
117,
4
] | [
123,
55
] | python | en | ['en', 'en', 'en'] | True |
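
The forward above adds self.offset to every position index before the embedding lookup. In the upstream implementation that offset is 2 (two embedding rows are reserved, a legacy of fairseq's padding_idx handling); treat that constant as an assumption here, since the row does not show the constructor. The index arithmetic only:

import torch

offset = 2                                  # assumed value, see note above
past_key_values_length, seq_len = 3, 4
positions = torch.arange(past_key_values_length, past_key_values_length + seq_len)
# positions 3..6 map to embedding rows 5..8
assert (positions + offset).tolist() == [5, 6, 7, 8]
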
BartAttention.forward | (
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) | Input shape: Batch x Time x Channel | Input shape: Batch x Time x Channel | def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
assert attn_weights.size() == (
bsz * self.num_heads,
tgt_len,
src_len,
), f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
if attention_mask is not None:
assert attention_mask.size() == (
bsz,
1,
tgt_len,
src_len,
), f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = F.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
assert layer_head_mask.size() == (
self.num_heads,
), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
assert attn_output.size() == (
bsz * self.num_heads,
tgt_len,
self.head_dim,
), f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}"
attn_output = (
attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
.transpose(1, 2)
.reshape(bsz, tgt_len, embed_dim)
)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, past_key_value | [
"def",
"forward",
"(",
"self",
",",
"hidden_states",
":",
"torch",
".",
"Tensor",
",",
"key_value_states",
":",
"Optional",
"[",
"torch",
".",
"Tensor",
"]",
"=",
"None",
",",
"past_key_value",
":",
"Optional",
"[",
"Tuple",
"[",
"torch",
".",
"Tensor",
"]",
"]",
"=",
"None",
",",
"attention_mask",
":",
"Optional",
"[",
"torch",
".",
"Tensor",
"]",
"=",
"None",
",",
"layer_head_mask",
":",
"Optional",
"[",
"torch",
".",
"Tensor",
"]",
"=",
"None",
",",
"output_attentions",
":",
"bool",
"=",
"False",
",",
")",
"->",
"Tuple",
"[",
"torch",
".",
"Tensor",
",",
"Optional",
"[",
"torch",
".",
"Tensor",
"]",
",",
"Optional",
"[",
"Tuple",
"[",
"torch",
".",
"Tensor",
"]",
"]",
"]",
":",
"# if key_value_states are provided this layer is used as a cross-attention layer",
"# for the decoder",
"is_cross_attention",
"=",
"key_value_states",
"is",
"not",
"None",
"bsz",
",",
"tgt_len",
",",
"embed_dim",
"=",
"hidden_states",
".",
"size",
"(",
")",
"# get query proj",
"query_states",
"=",
"self",
".",
"q_proj",
"(",
"hidden_states",
")",
"*",
"self",
".",
"scaling",
"# get key, value proj",
"if",
"is_cross_attention",
"and",
"past_key_value",
"is",
"not",
"None",
":",
"# reuse k,v, cross_attentions",
"key_states",
"=",
"past_key_value",
"[",
"0",
"]",
"value_states",
"=",
"past_key_value",
"[",
"1",
"]",
"elif",
"is_cross_attention",
":",
"# cross_attentions",
"key_states",
"=",
"self",
".",
"_shape",
"(",
"self",
".",
"k_proj",
"(",
"key_value_states",
")",
",",
"-",
"1",
",",
"bsz",
")",
"value_states",
"=",
"self",
".",
"_shape",
"(",
"self",
".",
"v_proj",
"(",
"key_value_states",
")",
",",
"-",
"1",
",",
"bsz",
")",
"elif",
"past_key_value",
"is",
"not",
"None",
":",
"# reuse k, v, self_attention",
"key_states",
"=",
"self",
".",
"_shape",
"(",
"self",
".",
"k_proj",
"(",
"hidden_states",
")",
",",
"-",
"1",
",",
"bsz",
")",
"value_states",
"=",
"self",
".",
"_shape",
"(",
"self",
".",
"v_proj",
"(",
"hidden_states",
")",
",",
"-",
"1",
",",
"bsz",
")",
"key_states",
"=",
"torch",
".",
"cat",
"(",
"[",
"past_key_value",
"[",
"0",
"]",
",",
"key_states",
"]",
",",
"dim",
"=",
"2",
")",
"value_states",
"=",
"torch",
".",
"cat",
"(",
"[",
"past_key_value",
"[",
"1",
"]",
",",
"value_states",
"]",
",",
"dim",
"=",
"2",
")",
"else",
":",
"# self_attention",
"key_states",
"=",
"self",
".",
"_shape",
"(",
"self",
".",
"k_proj",
"(",
"hidden_states",
")",
",",
"-",
"1",
",",
"bsz",
")",
"value_states",
"=",
"self",
".",
"_shape",
"(",
"self",
".",
"v_proj",
"(",
"hidden_states",
")",
",",
"-",
"1",
",",
"bsz",
")",
"if",
"self",
".",
"is_decoder",
":",
"# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.",
"# Further calls to cross_attention layer can then reuse all cross-attention",
"# key/value_states (first \"if\" case)",
"# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of",
"# all previous decoder key/value_states. Further calls to uni-directional self-attention",
"# can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)",
"# if encoder bi-directional self-attention `past_key_value` is always `None`",
"past_key_value",
"=",
"(",
"key_states",
",",
"value_states",
")",
"proj_shape",
"=",
"(",
"bsz",
"*",
"self",
".",
"num_heads",
",",
"-",
"1",
",",
"self",
".",
"head_dim",
")",
"query_states",
"=",
"self",
".",
"_shape",
"(",
"query_states",
",",
"tgt_len",
",",
"bsz",
")",
".",
"view",
"(",
"*",
"proj_shape",
")",
"key_states",
"=",
"key_states",
".",
"view",
"(",
"*",
"proj_shape",
")",
"value_states",
"=",
"value_states",
".",
"view",
"(",
"*",
"proj_shape",
")",
"src_len",
"=",
"key_states",
".",
"size",
"(",
"1",
")",
"attn_weights",
"=",
"torch",
".",
"bmm",
"(",
"query_states",
",",
"key_states",
".",
"transpose",
"(",
"1",
",",
"2",
")",
")",
"assert",
"attn_weights",
".",
"size",
"(",
")",
"==",
"(",
"bsz",
"*",
"self",
".",
"num_heads",
",",
"tgt_len",
",",
"src_len",
",",
")",
",",
"f\"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}\"",
"if",
"attention_mask",
"is",
"not",
"None",
":",
"assert",
"attention_mask",
".",
"size",
"(",
")",
"==",
"(",
"bsz",
",",
"1",
",",
"tgt_len",
",",
"src_len",
",",
")",
",",
"f\"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}\"",
"attn_weights",
"=",
"attn_weights",
".",
"view",
"(",
"bsz",
",",
"self",
".",
"num_heads",
",",
"tgt_len",
",",
"src_len",
")",
"+",
"attention_mask",
"attn_weights",
"=",
"attn_weights",
".",
"view",
"(",
"bsz",
"*",
"self",
".",
"num_heads",
",",
"tgt_len",
",",
"src_len",
")",
"attn_weights",
"=",
"F",
".",
"softmax",
"(",
"attn_weights",
",",
"dim",
"=",
"-",
"1",
")",
"if",
"layer_head_mask",
"is",
"not",
"None",
":",
"assert",
"layer_head_mask",
".",
"size",
"(",
")",
"==",
"(",
"self",
".",
"num_heads",
",",
")",
",",
"f\"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}\"",
"attn_weights",
"=",
"layer_head_mask",
".",
"view",
"(",
"1",
",",
"-",
"1",
",",
"1",
",",
"1",
")",
"*",
"attn_weights",
".",
"view",
"(",
"bsz",
",",
"self",
".",
"num_heads",
",",
"tgt_len",
",",
"src_len",
")",
"attn_weights",
"=",
"attn_weights",
".",
"view",
"(",
"bsz",
"*",
"self",
".",
"num_heads",
",",
"tgt_len",
",",
"src_len",
")",
"if",
"output_attentions",
":",
"# this operation is a bit akward, but it's required to",
"# make sure that attn_weights keeps its gradient.",
"# In order to do so, attn_weights have to reshaped",
"# twice and have to be reused in the following",
"attn_weights_reshaped",
"=",
"attn_weights",
".",
"view",
"(",
"bsz",
",",
"self",
".",
"num_heads",
",",
"tgt_len",
",",
"src_len",
")",
"attn_weights",
"=",
"attn_weights_reshaped",
".",
"view",
"(",
"bsz",
"*",
"self",
".",
"num_heads",
",",
"tgt_len",
",",
"src_len",
")",
"else",
":",
"attn_weights_reshaped",
"=",
"None",
"attn_probs",
"=",
"F",
".",
"dropout",
"(",
"attn_weights",
",",
"p",
"=",
"self",
".",
"dropout",
",",
"training",
"=",
"self",
".",
"training",
")",
"attn_output",
"=",
"torch",
".",
"bmm",
"(",
"attn_probs",
",",
"value_states",
")",
"assert",
"attn_output",
".",
"size",
"(",
")",
"==",
"(",
"bsz",
"*",
"self",
".",
"num_heads",
",",
"tgt_len",
",",
"self",
".",
"head_dim",
",",
")",
",",
"f\"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}\"",
"attn_output",
"=",
"(",
"attn_output",
".",
"view",
"(",
"bsz",
",",
"self",
".",
"num_heads",
",",
"tgt_len",
",",
"self",
".",
"head_dim",
")",
".",
"transpose",
"(",
"1",
",",
"2",
")",
".",
"reshape",
"(",
"bsz",
",",
"tgt_len",
",",
"embed_dim",
")",
")",
"attn_output",
"=",
"self",
".",
"out_proj",
"(",
"attn_output",
")",
"return",
"attn_output",
",",
"attn_weights_reshaped",
",",
"past_key_value"
] | [
156,
4
] | [
265,
65
] | python | en | ['en', 'pl', 'en'] | True |
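
The attention forward above never runs a four-dimensional matmul directly; it folds the head dimension into the batch dimension so a single torch.bmm handles all heads at once. A shape-only walkthrough with toy sizes, independent of the module:

import torch

bsz, num_heads, tgt_len, head_dim = 2, 4, 5, 8
q = torch.randn(bsz, tgt_len, num_heads * head_dim)
# _shape: [bsz, tgt, embed] -> [bsz, heads, tgt, head_dim]
q = q.view(bsz, tgt_len, num_heads, head_dim).transpose(1, 2)
# fold heads into the batch axis for one batched matmul
q = q.reshape(bsz * num_heads, tgt_len, head_dim)
k = torch.randn(bsz * num_heads, tgt_len, head_dim)
attn = torch.bmm(q, k.transpose(1, 2))
assert attn.shape == (bsz * num_heads, tgt_len, tgt_len)
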
BartEncoderLayer.forward | (
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
layer_head_mask: torch.Tensor,
output_attentions: bool = False,
) |
Args:
hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
`(config.encoder_attention_heads,)`.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
|
Args:
hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
`(config.encoder_attention_heads,)`.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
| def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
layer_head_mask: torch.Tensor,
output_attentions: bool = False,
):
"""
Args:
hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
`(config.encoder_attention_heads,)`.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, attn_weights, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if hidden_states.dtype == torch.float16 and (
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs | [
"def",
"forward",
"(",
"self",
",",
"hidden_states",
":",
"torch",
".",
"Tensor",
",",
"attention_mask",
":",
"torch",
".",
"Tensor",
",",
"layer_head_mask",
":",
"torch",
".",
"Tensor",
",",
"output_attentions",
":",
"bool",
"=",
"False",
",",
")",
":",
"residual",
"=",
"hidden_states",
"hidden_states",
",",
"attn_weights",
",",
"_",
"=",
"self",
".",
"self_attn",
"(",
"hidden_states",
"=",
"hidden_states",
",",
"attention_mask",
"=",
"attention_mask",
",",
"layer_head_mask",
"=",
"layer_head_mask",
",",
"output_attentions",
"=",
"output_attentions",
",",
")",
"hidden_states",
"=",
"F",
".",
"dropout",
"(",
"hidden_states",
",",
"p",
"=",
"self",
".",
"dropout",
",",
"training",
"=",
"self",
".",
"training",
")",
"hidden_states",
"=",
"residual",
"+",
"hidden_states",
"hidden_states",
"=",
"self",
".",
"self_attn_layer_norm",
"(",
"hidden_states",
")",
"residual",
"=",
"hidden_states",
"hidden_states",
"=",
"self",
".",
"activation_fn",
"(",
"self",
".",
"fc1",
"(",
"hidden_states",
")",
")",
"hidden_states",
"=",
"F",
".",
"dropout",
"(",
"hidden_states",
",",
"p",
"=",
"self",
".",
"activation_dropout",
",",
"training",
"=",
"self",
".",
"training",
")",
"hidden_states",
"=",
"self",
".",
"fc2",
"(",
"hidden_states",
")",
"hidden_states",
"=",
"F",
".",
"dropout",
"(",
"hidden_states",
",",
"p",
"=",
"self",
".",
"dropout",
",",
"training",
"=",
"self",
".",
"training",
")",
"hidden_states",
"=",
"residual",
"+",
"hidden_states",
"hidden_states",
"=",
"self",
".",
"final_layer_norm",
"(",
"hidden_states",
")",
"if",
"hidden_states",
".",
"dtype",
"==",
"torch",
".",
"float16",
"and",
"(",
"torch",
".",
"isinf",
"(",
"hidden_states",
")",
".",
"any",
"(",
")",
"or",
"torch",
".",
"isnan",
"(",
"hidden_states",
")",
".",
"any",
"(",
")",
")",
":",
"clamp_value",
"=",
"torch",
".",
"finfo",
"(",
"hidden_states",
".",
"dtype",
")",
".",
"max",
"-",
"1000",
"hidden_states",
"=",
"torch",
".",
"clamp",
"(",
"hidden_states",
",",
"min",
"=",
"-",
"clamp_value",
",",
"max",
"=",
"clamp_value",
")",
"outputs",
"=",
"(",
"hidden_states",
",",
")",
"if",
"output_attentions",
":",
"outputs",
"+=",
"(",
"attn_weights",
",",
")",
"return",
"outputs"
] | [
285,
4
] | [
333,
22
] | python | en | ['en', 'error', 'th'] | False |
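
The encoder layer above is a post-layer-norm transformer block: each sublayer adds its output to the residual first and normalizes afterwards (self_attn_layer_norm after attention, final_layer_norm after the MLP). A minimal sketch of one such sublayer, with a plain linear layer standing in for attention or the feed-forward:

import torch
import torch.nn as nn

class PostLNSublayer(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.inner = nn.Linear(dim, dim)   # stand-in for attention or the MLP
        self.norm = nn.LayerNorm(dim)

    def forward(self, x):
        residual = x
        x = self.inner(x)
        return self.norm(residual + x)     # norm after the residual: post-LN

x = torch.randn(2, 5, 16)
assert PostLNSublayer(16)(x).shape == x.shape
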
BartDecoderLayer.forward | (
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
encoder_layer_head_mask: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
) |
Args:
hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (:obj:`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_attention_mask (:obj:`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
`(config.encoder_attention_heads,)`.
encoder_layer_head_mask (:obj:`torch.FloatTensor`): mask for encoder attention heads in a given layer of
size `(config.encoder_attention_heads,)`.
past_key_value (:obj:`Tuple(torch.FloatTensor)`): cached past key and value projection states
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
|
Args:
hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (:obj:`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_attention_mask (:obj:`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
`(config.encoder_attention_heads,)`.
encoder_layer_head_mask (:obj:`torch.FloatTensor`): mask for encoder attention heads in a given layer of
size `(config.encoder_attention_heads,)`.
past_key_value (:obj:`Tuple(torch.FloatTensor)`): cached past key and value projection states
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
| def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
encoder_layer_head_mask: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
):
"""
Args:
hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (:obj:`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_attention_mask (:obj:`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
`(config.encoder_attention_heads,)`.
encoder_layer_head_mask (:obj:`torch.FloatTensor`): mask for encoder attention heads in a given layer of
size `(config.encoder_attention_heads,)`.
past_key_value (:obj:`Tuple(torch.FloatTensor)`): cached past key and value projection states
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
"""
residual = hidden_states
# Self Attention
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
# add present self-attn cache to positions 1,2 of present_key_value tuple
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_value=self_attn_past_key_value,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_present_key_value = None
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
layer_head_mask=encoder_layer_head_mask,
past_key_value=cross_attn_past_key_value,
output_attentions=output_attentions,
)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# add cross-attn to positions 3,4 of present_key_value tuple
present_key_value = present_key_value + cross_attn_present_key_value
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
if use_cache:
outputs += (present_key_value,)
return outputs | [
"def",
"forward",
"(",
"self",
",",
"hidden_states",
":",
"torch",
".",
"Tensor",
",",
"attention_mask",
":",
"Optional",
"[",
"torch",
".",
"Tensor",
"]",
"=",
"None",
",",
"encoder_hidden_states",
":",
"Optional",
"[",
"torch",
".",
"Tensor",
"]",
"=",
"None",
",",
"encoder_attention_mask",
":",
"Optional",
"[",
"torch",
".",
"Tensor",
"]",
"=",
"None",
",",
"layer_head_mask",
":",
"Optional",
"[",
"torch",
".",
"Tensor",
"]",
"=",
"None",
",",
"encoder_layer_head_mask",
":",
"Optional",
"[",
"torch",
".",
"Tensor",
"]",
"=",
"None",
",",
"past_key_value",
":",
"Optional",
"[",
"Tuple",
"[",
"torch",
".",
"Tensor",
"]",
"]",
"=",
"None",
",",
"output_attentions",
":",
"Optional",
"[",
"bool",
"]",
"=",
"False",
",",
"use_cache",
":",
"Optional",
"[",
"bool",
"]",
"=",
"True",
",",
")",
":",
"residual",
"=",
"hidden_states",
"# Self Attention",
"# decoder uni-directional self-attention cached key/values tuple is at positions 1,2",
"self_attn_past_key_value",
"=",
"past_key_value",
"[",
":",
"2",
"]",
"if",
"past_key_value",
"is",
"not",
"None",
"else",
"None",
"# add present self-attn cache to positions 1,2 of present_key_value tuple",
"hidden_states",
",",
"self_attn_weights",
",",
"present_key_value",
"=",
"self",
".",
"self_attn",
"(",
"hidden_states",
"=",
"hidden_states",
",",
"past_key_value",
"=",
"self_attn_past_key_value",
",",
"attention_mask",
"=",
"attention_mask",
",",
"layer_head_mask",
"=",
"layer_head_mask",
",",
"output_attentions",
"=",
"output_attentions",
",",
")",
"hidden_states",
"=",
"F",
".",
"dropout",
"(",
"hidden_states",
",",
"p",
"=",
"self",
".",
"dropout",
",",
"training",
"=",
"self",
".",
"training",
")",
"hidden_states",
"=",
"residual",
"+",
"hidden_states",
"hidden_states",
"=",
"self",
".",
"self_attn_layer_norm",
"(",
"hidden_states",
")",
"# Cross-Attention Block",
"cross_attn_present_key_value",
"=",
"None",
"cross_attn_weights",
"=",
"None",
"if",
"encoder_hidden_states",
"is",
"not",
"None",
":",
"residual",
"=",
"hidden_states",
"# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple",
"cross_attn_past_key_value",
"=",
"past_key_value",
"[",
"-",
"2",
":",
"]",
"if",
"past_key_value",
"is",
"not",
"None",
"else",
"None",
"hidden_states",
",",
"cross_attn_weights",
",",
"cross_attn_present_key_value",
"=",
"self",
".",
"encoder_attn",
"(",
"hidden_states",
"=",
"hidden_states",
",",
"key_value_states",
"=",
"encoder_hidden_states",
",",
"attention_mask",
"=",
"encoder_attention_mask",
",",
"layer_head_mask",
"=",
"encoder_layer_head_mask",
",",
"past_key_value",
"=",
"cross_attn_past_key_value",
",",
"output_attentions",
"=",
"output_attentions",
",",
")",
"hidden_states",
"=",
"F",
".",
"dropout",
"(",
"hidden_states",
",",
"p",
"=",
"self",
".",
"dropout",
",",
"training",
"=",
"self",
".",
"training",
")",
"hidden_states",
"=",
"residual",
"+",
"hidden_states",
"hidden_states",
"=",
"self",
".",
"encoder_attn_layer_norm",
"(",
"hidden_states",
")",
"# add cross-attn to positions 3,4 of present_key_value tuple",
"present_key_value",
"=",
"present_key_value",
"+",
"cross_attn_present_key_value",
"# Fully Connected",
"residual",
"=",
"hidden_states",
"hidden_states",
"=",
"self",
".",
"activation_fn",
"(",
"self",
".",
"fc1",
"(",
"hidden_states",
")",
")",
"hidden_states",
"=",
"F",
".",
"dropout",
"(",
"hidden_states",
",",
"p",
"=",
"self",
".",
"activation_dropout",
",",
"training",
"=",
"self",
".",
"training",
")",
"hidden_states",
"=",
"self",
".",
"fc2",
"(",
"hidden_states",
")",
"hidden_states",
"=",
"F",
".",
"dropout",
"(",
"hidden_states",
",",
"p",
"=",
"self",
".",
"dropout",
",",
"training",
"=",
"self",
".",
"training",
")",
"hidden_states",
"=",
"residual",
"+",
"hidden_states",
"hidden_states",
"=",
"self",
".",
"final_layer_norm",
"(",
"hidden_states",
")",
"outputs",
"=",
"(",
"hidden_states",
",",
")",
"if",
"output_attentions",
":",
"outputs",
"+=",
"(",
"self_attn_weights",
",",
"cross_attn_weights",
")",
"if",
"use_cache",
":",
"outputs",
"+=",
"(",
"present_key_value",
",",
")",
"return",
"outputs"
] | [
363,
4
] | [
449,
22
] | python | en | ['en', 'error', 'th'] | False |
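
The comments in the decoder layer above count cache positions 1,2 and 3,4, but the tuple is indexed zero-based: per layer the cache is a 4-tuple holding self-attention key/value at indices 0-1 and cross-attention key/value at indices 2-3, which is exactly what the [:2] and [-2:] slices select. A small demonstration of that layout:

import torch

def kv():
    # [bsz, num_heads, seq_len, head_dim]
    return torch.randn(1, 4, 7, 8)

past_key_value = (kv(), kv(), kv(), kv())
self_attn_past = past_key_value[:2]    # indices 0-1: self-attention cache
cross_attn_past = past_key_value[-2:]  # indices 2-3: cross-attention cache
assert len(self_attn_past) == 2 and len(cross_attn_past) == 2
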
BartEncoder.forward | (
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
) | r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.BartTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
| r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it. | def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.BartTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
embed_pos = self.embed_positions(input_shape)
hidden_states = inputs_embeds + embed_pos
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
# expand attention_mask
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
assert head_mask.size()[0] == (
len(self.layers)
), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop): # skip the layer
layer_outputs = (None, None)
else:
if getattr(self.config, "gradient_checkpointing", False) and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(encoder_layer),
hidden_states,
attention_mask,
(head_mask[idx] if head_mask is not None else None),
)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
) | [
"def",
"forward",
"(",
"self",
",",
"input_ids",
"=",
"None",
",",
"attention_mask",
"=",
"None",
",",
"head_mask",
"=",
"None",
",",
"inputs_embeds",
"=",
"None",
",",
"output_attentions",
"=",
"None",
",",
"output_hidden_states",
"=",
"None",
",",
"return_dict",
"=",
"None",
",",
")",
":",
"output_attentions",
"=",
"output_attentions",
"if",
"output_attentions",
"is",
"not",
"None",
"else",
"self",
".",
"config",
".",
"output_attentions",
"output_hidden_states",
"=",
"(",
"output_hidden_states",
"if",
"output_hidden_states",
"is",
"not",
"None",
"else",
"self",
".",
"config",
".",
"output_hidden_states",
")",
"return_dict",
"=",
"return_dict",
"if",
"return_dict",
"is",
"not",
"None",
"else",
"self",
".",
"config",
".",
"use_return_dict",
"# retrieve input_ids and inputs_embeds",
"if",
"input_ids",
"is",
"not",
"None",
"and",
"inputs_embeds",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"You cannot specify both input_ids and inputs_embeds at the same time\"",
")",
"elif",
"input_ids",
"is",
"not",
"None",
":",
"input_shape",
"=",
"input_ids",
".",
"size",
"(",
")",
"input_ids",
"=",
"input_ids",
".",
"view",
"(",
"-",
"1",
",",
"input_shape",
"[",
"-",
"1",
"]",
")",
"elif",
"inputs_embeds",
"is",
"not",
"None",
":",
"input_shape",
"=",
"inputs_embeds",
".",
"size",
"(",
")",
"[",
":",
"-",
"1",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"You have to specify either input_ids or inputs_embeds\"",
")",
"if",
"inputs_embeds",
"is",
"None",
":",
"inputs_embeds",
"=",
"self",
".",
"embed_tokens",
"(",
"input_ids",
")",
"*",
"self",
".",
"embed_scale",
"embed_pos",
"=",
"self",
".",
"embed_positions",
"(",
"input_shape",
")",
"hidden_states",
"=",
"inputs_embeds",
"+",
"embed_pos",
"hidden_states",
"=",
"self",
".",
"layernorm_embedding",
"(",
"hidden_states",
")",
"hidden_states",
"=",
"F",
".",
"dropout",
"(",
"hidden_states",
",",
"p",
"=",
"self",
".",
"dropout",
",",
"training",
"=",
"self",
".",
"training",
")",
"# expand attention_mask",
"if",
"attention_mask",
"is",
"not",
"None",
":",
"# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]",
"attention_mask",
"=",
"_expand_mask",
"(",
"attention_mask",
",",
"inputs_embeds",
".",
"dtype",
")",
"encoder_states",
"=",
"(",
")",
"if",
"output_hidden_states",
"else",
"None",
"all_attentions",
"=",
"(",
")",
"if",
"output_attentions",
"else",
"None",
"# check if head_mask has a correct number of layers specified if desired",
"if",
"head_mask",
"is",
"not",
"None",
":",
"assert",
"head_mask",
".",
"size",
"(",
")",
"[",
"0",
"]",
"==",
"(",
"len",
"(",
"self",
".",
"layers",
")",
")",
",",
"f\"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.\"",
"for",
"idx",
",",
"encoder_layer",
"in",
"enumerate",
"(",
"self",
".",
"layers",
")",
":",
"if",
"output_hidden_states",
":",
"encoder_states",
"=",
"encoder_states",
"+",
"(",
"hidden_states",
",",
")",
"# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)",
"dropout_probability",
"=",
"random",
".",
"uniform",
"(",
"0",
",",
"1",
")",
"if",
"self",
".",
"training",
"and",
"(",
"dropout_probability",
"<",
"self",
".",
"layerdrop",
")",
":",
"# skip the layer",
"layer_outputs",
"=",
"(",
"None",
",",
"None",
")",
"else",
":",
"if",
"getattr",
"(",
"self",
".",
"config",
",",
"\"gradient_checkpointing\"",
",",
"False",
")",
"and",
"self",
".",
"training",
":",
"def",
"create_custom_forward",
"(",
"module",
")",
":",
"def",
"custom_forward",
"(",
"*",
"inputs",
")",
":",
"return",
"module",
"(",
"*",
"inputs",
",",
"output_attentions",
")",
"return",
"custom_forward",
"layer_outputs",
"=",
"torch",
".",
"utils",
".",
"checkpoint",
".",
"checkpoint",
"(",
"create_custom_forward",
"(",
"encoder_layer",
")",
",",
"hidden_states",
",",
"attention_mask",
",",
"(",
"head_mask",
"[",
"idx",
"]",
"if",
"head_mask",
"is",
"not",
"None",
"else",
"None",
")",
",",
")",
"else",
":",
"layer_outputs",
"=",
"encoder_layer",
"(",
"hidden_states",
",",
"attention_mask",
",",
"layer_head_mask",
"=",
"(",
"head_mask",
"[",
"idx",
"]",
"if",
"head_mask",
"is",
"not",
"None",
"else",
"None",
")",
",",
"output_attentions",
"=",
"output_attentions",
",",
")",
"hidden_states",
"=",
"layer_outputs",
"[",
"0",
"]",
"if",
"output_attentions",
":",
"all_attentions",
"=",
"all_attentions",
"+",
"(",
"layer_outputs",
"[",
"1",
"]",
",",
")",
"if",
"output_hidden_states",
":",
"encoder_states",
"=",
"encoder_states",
"+",
"(",
"hidden_states",
",",
")",
"if",
"not",
"return_dict",
":",
"return",
"tuple",
"(",
"v",
"for",
"v",
"in",
"[",
"hidden_states",
",",
"encoder_states",
",",
"all_attentions",
"]",
"if",
"v",
"is",
"not",
"None",
")",
"return",
"BaseModelOutput",
"(",
"last_hidden_state",
"=",
"hidden_states",
",",
"hidden_states",
"=",
"encoder_states",
",",
"attentions",
"=",
"all_attentions",
")"
] | [
684,
4
] | [
812,
9
] | python | cy | ['en', 'cy', 'hi'] | False |
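
The encoder loop above implements LayerDrop: during training each layer is skipped with probability layerdrop, while evaluation always runs the full stack (see https://arxiv.org/abs/1909.11556). A sketch of the selection logic in isolation:

import random

def layers_to_run(num_layers, layerdrop, training, rng=random.random):
    kept = []
    for idx in range(num_layers):
        if training and rng() < layerdrop:
            continue                      # skip this layer entirely
        kept.append(idx)
    return kept

random.seed(0)
assert layers_to_run(6, 0.0, training=True) == [0, 1, 2, 3, 4, 5]
assert layers_to_run(6, 0.5, training=False) == [0, 1, 2, 3, 4, 5]
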
BartDecoder.forward | (
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
encoder_head_mask=None,
past_key_values=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
) | r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.BartTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
on hidden heads. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last
:obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of
shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size,
sequence_length)`.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
| r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it. | def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
encoder_head_mask=None,
past_key_values=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.BartTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
on hidden heads. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last
:obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of
shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size,
sequence_length)`.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
attention_mask = self._prepare_decoder_attention_mask(
attention_mask, input_shape, inputs_embeds, past_key_values_length
)
# expand encoder attention mask
if encoder_hidden_states is not None and encoder_attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
# embed positions
positions = self.embed_positions(input_shape, past_key_values_length)
hidden_states = inputs_embeds + positions
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
next_decoder_cache = () if use_cache else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
assert head_mask.size()[0] == (
len(self.layers)
), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
past_key_value = past_key_values[idx] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warn(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, output_attentions, use_cache)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(decoder_layer),
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
head_mask[idx] if head_mask is not None else None,
encoder_head_mask[idx] if encoder_head_mask is not None else None,
None,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
encoder_layer_head_mask=(encoder_head_mask[idx] if encoder_head_mask is not None else None),
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(
v
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
) | [
"def",
"forward",
"(",
"self",
",",
"input_ids",
"=",
"None",
",",
"attention_mask",
"=",
"None",
",",
"encoder_hidden_states",
"=",
"None",
",",
"encoder_attention_mask",
"=",
"None",
",",
"head_mask",
"=",
"None",
",",
"encoder_head_mask",
"=",
"None",
",",
"past_key_values",
"=",
"None",
",",
"inputs_embeds",
"=",
"None",
",",
"use_cache",
"=",
"None",
",",
"output_attentions",
"=",
"None",
",",
"output_hidden_states",
"=",
"None",
",",
"return_dict",
"=",
"None",
",",
")",
":",
"output_attentions",
"=",
"output_attentions",
"if",
"output_attentions",
"is",
"not",
"None",
"else",
"self",
".",
"config",
".",
"output_attentions",
"output_hidden_states",
"=",
"(",
"output_hidden_states",
"if",
"output_hidden_states",
"is",
"not",
"None",
"else",
"self",
".",
"config",
".",
"output_hidden_states",
")",
"use_cache",
"=",
"use_cache",
"if",
"use_cache",
"is",
"not",
"None",
"else",
"self",
".",
"config",
".",
"use_cache",
"return_dict",
"=",
"return_dict",
"if",
"return_dict",
"is",
"not",
"None",
"else",
"self",
".",
"config",
".",
"use_return_dict",
"# retrieve input_ids and inputs_embeds",
"if",
"input_ids",
"is",
"not",
"None",
"and",
"inputs_embeds",
"is",
"not",
"None",
":",
"raise",
"ValueError",
"(",
"\"You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time\"",
")",
"elif",
"input_ids",
"is",
"not",
"None",
":",
"input_shape",
"=",
"input_ids",
".",
"size",
"(",
")",
"input_ids",
"=",
"input_ids",
".",
"view",
"(",
"-",
"1",
",",
"input_shape",
"[",
"-",
"1",
"]",
")",
"elif",
"inputs_embeds",
"is",
"not",
"None",
":",
"input_shape",
"=",
"inputs_embeds",
".",
"size",
"(",
")",
"[",
":",
"-",
"1",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"\"You have to specify either decoder_input_ids or decoder_inputs_embeds\"",
")",
"# past_key_values_length",
"past_key_values_length",
"=",
"past_key_values",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"shape",
"[",
"2",
"]",
"if",
"past_key_values",
"is",
"not",
"None",
"else",
"0",
"if",
"inputs_embeds",
"is",
"None",
":",
"inputs_embeds",
"=",
"self",
".",
"embed_tokens",
"(",
"input_ids",
")",
"*",
"self",
".",
"embed_scale",
"attention_mask",
"=",
"self",
".",
"_prepare_decoder_attention_mask",
"(",
"attention_mask",
",",
"input_shape",
",",
"inputs_embeds",
",",
"past_key_values_length",
")",
"# expand encoder attention mask",
"if",
"encoder_hidden_states",
"is",
"not",
"None",
"and",
"encoder_attention_mask",
"is",
"not",
"None",
":",
"# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]",
"encoder_attention_mask",
"=",
"_expand_mask",
"(",
"encoder_attention_mask",
",",
"inputs_embeds",
".",
"dtype",
",",
"tgt_len",
"=",
"input_shape",
"[",
"-",
"1",
"]",
")",
"# embed positions",
"positions",
"=",
"self",
".",
"embed_positions",
"(",
"input_shape",
",",
"past_key_values_length",
")",
"hidden_states",
"=",
"inputs_embeds",
"+",
"positions",
"hidden_states",
"=",
"self",
".",
"layernorm_embedding",
"(",
"hidden_states",
")",
"hidden_states",
"=",
"F",
".",
"dropout",
"(",
"hidden_states",
",",
"p",
"=",
"self",
".",
"dropout",
",",
"training",
"=",
"self",
".",
"training",
")",
"# decoder layers",
"all_hidden_states",
"=",
"(",
")",
"if",
"output_hidden_states",
"else",
"None",
"all_self_attns",
"=",
"(",
")",
"if",
"output_attentions",
"else",
"None",
"all_cross_attentions",
"=",
"(",
")",
"if",
"(",
"output_attentions",
"and",
"encoder_hidden_states",
"is",
"not",
"None",
")",
"else",
"None",
"next_decoder_cache",
"=",
"(",
")",
"if",
"use_cache",
"else",
"None",
"# check if head_mask has a correct number of layers specified if desired",
"if",
"head_mask",
"is",
"not",
"None",
":",
"assert",
"head_mask",
".",
"size",
"(",
")",
"[",
"0",
"]",
"==",
"(",
"len",
"(",
"self",
".",
"layers",
")",
")",
",",
"f\"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}.\"",
"for",
"idx",
",",
"decoder_layer",
"in",
"enumerate",
"(",
"self",
".",
"layers",
")",
":",
"# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)",
"if",
"output_hidden_states",
":",
"all_hidden_states",
"+=",
"(",
"hidden_states",
",",
")",
"dropout_probability",
"=",
"random",
".",
"uniform",
"(",
"0",
",",
"1",
")",
"if",
"self",
".",
"training",
"and",
"(",
"dropout_probability",
"<",
"self",
".",
"layerdrop",
")",
":",
"continue",
"past_key_value",
"=",
"past_key_values",
"[",
"idx",
"]",
"if",
"past_key_values",
"is",
"not",
"None",
"else",
"None",
"if",
"getattr",
"(",
"self",
".",
"config",
",",
"\"gradient_checkpointing\"",
",",
"False",
")",
"and",
"self",
".",
"training",
":",
"if",
"use_cache",
":",
"logger",
".",
"warn",
"(",
"\"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting \"",
"\"`use_cache=False`...\"",
")",
"use_cache",
"=",
"False",
"def",
"create_custom_forward",
"(",
"module",
")",
":",
"def",
"custom_forward",
"(",
"*",
"inputs",
")",
":",
"# None for past_key_value",
"return",
"module",
"(",
"*",
"inputs",
",",
"output_attentions",
",",
"use_cache",
")",
"return",
"custom_forward",
"layer_outputs",
"=",
"torch",
".",
"utils",
".",
"checkpoint",
".",
"checkpoint",
"(",
"create_custom_forward",
"(",
"decoder_layer",
")",
",",
"hidden_states",
",",
"attention_mask",
",",
"encoder_hidden_states",
",",
"encoder_attention_mask",
",",
"head_mask",
"[",
"idx",
"]",
"if",
"head_mask",
"is",
"not",
"None",
"else",
"None",
",",
"encoder_head_mask",
"[",
"idx",
"]",
"if",
"encoder_head_mask",
"is",
"not",
"None",
"else",
"None",
",",
"None",
",",
")",
"else",
":",
"layer_outputs",
"=",
"decoder_layer",
"(",
"hidden_states",
",",
"attention_mask",
"=",
"attention_mask",
",",
"encoder_hidden_states",
"=",
"encoder_hidden_states",
",",
"encoder_attention_mask",
"=",
"encoder_attention_mask",
",",
"layer_head_mask",
"=",
"(",
"head_mask",
"[",
"idx",
"]",
"if",
"head_mask",
"is",
"not",
"None",
"else",
"None",
")",
",",
"encoder_layer_head_mask",
"=",
"(",
"encoder_head_mask",
"[",
"idx",
"]",
"if",
"encoder_head_mask",
"is",
"not",
"None",
"else",
"None",
")",
",",
"past_key_value",
"=",
"past_key_value",
",",
"output_attentions",
"=",
"output_attentions",
",",
"use_cache",
"=",
"use_cache",
",",
")",
"hidden_states",
"=",
"layer_outputs",
"[",
"0",
"]",
"if",
"use_cache",
":",
"next_decoder_cache",
"+=",
"(",
"layer_outputs",
"[",
"3",
"if",
"output_attentions",
"else",
"1",
"]",
",",
")",
"if",
"output_attentions",
":",
"all_self_attns",
"+=",
"(",
"layer_outputs",
"[",
"1",
"]",
",",
")",
"if",
"encoder_hidden_states",
"is",
"not",
"None",
":",
"all_cross_attentions",
"+=",
"(",
"layer_outputs",
"[",
"2",
"]",
",",
")",
"# add hidden states from the last decoder layer",
"if",
"output_hidden_states",
":",
"all_hidden_states",
"+=",
"(",
"hidden_states",
",",
")",
"next_cache",
"=",
"next_decoder_cache",
"if",
"use_cache",
"else",
"None",
"if",
"not",
"return_dict",
":",
"return",
"tuple",
"(",
"v",
"for",
"v",
"in",
"[",
"hidden_states",
",",
"next_cache",
",",
"all_hidden_states",
",",
"all_self_attns",
",",
"all_cross_attentions",
"]",
"if",
"v",
"is",
"not",
"None",
")",
"return",
"BaseModelOutputWithPastAndCrossAttentions",
"(",
"last_hidden_state",
"=",
"hidden_states",
",",
"past_key_values",
"=",
"next_cache",
",",
"hidden_states",
"=",
"all_hidden_states",
",",
"attentions",
"=",
"all_self_attns",
",",
"cross_attentions",
"=",
"all_cross_attentions",
",",
")"
] | [
870,
4
] | [
1077,
9
] | python | cy | ['en', 'cy', 'hi'] | False |
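A minimal usage sketch of the past_key_values contract documented in this record: after one forward pass with use_cache=True, only the newest decoder token of shape (batch_size, 1) needs to be fed back. The checkpoint name and token choices below are illustrative assumptions, not part of the record.

import torch
from transformers import BartTokenizer, BartModel

tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")  # assumed public checkpoint
model = BartModel.from_pretrained("facebook/bart-base")

enc = tokenizer("Incremental decoding demo", return_tensors="pt")
decoder_input_ids = torch.tensor([[model.config.decoder_start_token_id]])

# First step: no cache yet, so the full decoder input is passed.
out = model(**enc, decoder_input_ids=decoder_input_ids, use_cache=True)

# Later steps: reuse the cache and pass only the most recent token.
next_token = torch.tensor([[tokenizer.bos_token_id]])  # illustrative next token
out = model(
    **enc,
    decoder_input_ids=next_token,          # shape (batch_size, 1)
    past_key_values=out.past_key_values,   # precomputed key/value states
    use_cache=True,
)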
BartForCausalLM.forward | (
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
encoder_head_mask=None,
past_key_values=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
) | r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.BartTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in the encoder to avoid performing cross-attention
on hidden heads. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last ``decoder_input_ids``
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,
config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are
ignored (masked), the loss is only computed for the tokens with labels in ``[0, ...,
config.vocab_size]``.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
Returns:
Example::
>>> from transformers import BartTokenizer, BartForCausalLM
>>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')
>>> model = BartForCausalLM.from_pretrained('facebook/bart-large', add_cross_attention=False)
>>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
| r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it. | def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
encoder_head_mask=None,
past_key_values=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.BartTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in the encoder to avoid performing cross-attention
on hidden heads. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last ``decoder_input_ids``
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,
config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are
ignored (masked), the loss is only computed for the tokens with labels in ``[0, ...,
config.vocab_size]``.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
Returns:
Example::
>>> from transformers import BartTokenizer, BartForCausalLM
>>> tokenizer = BartTokenizer.from_pretrained('facebook/bart-large')
>>> model = BartForCausalLM.from_pretrained('facebook/bart-large', add_cross_attention=False)
>>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
head_mask=head_mask,
encoder_head_mask=encoder_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = self.lm_head(outputs[0])
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
) | [
"def",
"forward",
"(",
"self",
",",
"input_ids",
"=",
"None",
",",
"attention_mask",
"=",
"None",
",",
"encoder_hidden_states",
"=",
"None",
",",
"encoder_attention_mask",
"=",
"None",
",",
"head_mask",
"=",
"None",
",",
"encoder_head_mask",
"=",
"None",
",",
"past_key_values",
"=",
"None",
",",
"inputs_embeds",
"=",
"None",
",",
"labels",
"=",
"None",
",",
"use_cache",
"=",
"None",
",",
"output_attentions",
"=",
"None",
",",
"output_hidden_states",
"=",
"None",
",",
"return_dict",
"=",
"None",
",",
")",
":",
"output_attentions",
"=",
"output_attentions",
"if",
"output_attentions",
"is",
"not",
"None",
"else",
"self",
".",
"config",
".",
"output_attentions",
"output_hidden_states",
"=",
"(",
"output_hidden_states",
"if",
"output_hidden_states",
"is",
"not",
"None",
"else",
"self",
".",
"config",
".",
"output_hidden_states",
")",
"return_dict",
"=",
"return_dict",
"if",
"return_dict",
"is",
"not",
"None",
"else",
"self",
".",
"config",
".",
"use_return_dict",
"# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)",
"outputs",
"=",
"self",
".",
"model",
".",
"decoder",
"(",
"input_ids",
"=",
"input_ids",
",",
"attention_mask",
"=",
"attention_mask",
",",
"encoder_hidden_states",
"=",
"encoder_hidden_states",
",",
"encoder_attention_mask",
"=",
"encoder_attention_mask",
",",
"head_mask",
"=",
"head_mask",
",",
"encoder_head_mask",
"=",
"encoder_head_mask",
",",
"past_key_values",
"=",
"past_key_values",
",",
"inputs_embeds",
"=",
"inputs_embeds",
",",
"use_cache",
"=",
"use_cache",
",",
"output_attentions",
"=",
"output_attentions",
",",
"output_hidden_states",
"=",
"output_hidden_states",
",",
"return_dict",
"=",
"return_dict",
",",
")",
"logits",
"=",
"self",
".",
"lm_head",
"(",
"outputs",
"[",
"0",
"]",
")",
"loss",
"=",
"None",
"if",
"labels",
"is",
"not",
"None",
":",
"loss_fct",
"=",
"CrossEntropyLoss",
"(",
")",
"loss",
"=",
"loss_fct",
"(",
"logits",
".",
"view",
"(",
"-",
"1",
",",
"self",
".",
"config",
".",
"vocab_size",
")",
",",
"labels",
".",
"view",
"(",
"-",
"1",
")",
")",
"if",
"not",
"return_dict",
":",
"output",
"=",
"(",
"logits",
",",
")",
"+",
"outputs",
"[",
"1",
":",
"]",
"return",
"(",
"loss",
",",
")",
"+",
"output",
"if",
"loss",
"is",
"not",
"None",
"else",
"output",
"return",
"CausalLMOutputWithCrossAttentions",
"(",
"loss",
"=",
"loss",
",",
"logits",
"=",
"logits",
",",
"past_key_values",
"=",
"outputs",
".",
"past_key_values",
",",
"hidden_states",
"=",
"outputs",
".",
"hidden_states",
",",
"attentions",
"=",
"outputs",
".",
"attentions",
",",
"cross_attentions",
"=",
"outputs",
".",
"cross_attentions",
",",
")"
] | [
1623,
4
] | [
1758,
9
] | python | cy | ['en', 'cy', 'hi'] | False |
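A self-contained sketch of the loss computation at the end of this record's forward(): logits are flattened to (batch * seq_len, vocab_size) and compared against flattened labels; CrossEntropyLoss ignores -100 positions by default, matching the docstring. The tensor sizes here are arbitrary.

import torch
from torch.nn import CrossEntropyLoss

batch_size, seq_len, vocab_size = 2, 5, 11
logits = torch.randn(batch_size, seq_len, vocab_size)
labels = torch.randint(0, vocab_size, (batch_size, seq_len))
labels[0, 0] = -100  # masked position, ignored by the loss

loss_fct = CrossEntropyLoss()  # ignore_index defaults to -100
loss = loss_fct(logits.view(-1, vocab_size), labels.view(-1))
print(loss.item())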
RobertaConfig.__init__ | (self, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs) | Constructs RobertaConfig. | Constructs RobertaConfig. | def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
"""Constructs RobertaConfig."""
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) | [
"def",
"__init__",
"(",
"self",
",",
"pad_token_id",
"=",
"1",
",",
"bos_token_id",
"=",
"0",
",",
"eos_token_id",
"=",
"2",
",",
"*",
"*",
"kwargs",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"pad_token_id",
"=",
"pad_token_id",
",",
"bos_token_id",
"=",
"bos_token_id",
",",
"eos_token_id",
"=",
"eos_token_id",
",",
"*",
"*",
"kwargs",
")"
] | [
61,
4
] | [
63,
115
] | python | ca | ['en', 'ca', 'it'] | False |
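Illustrative use of the constructor above: RobertaConfig only pins the special-token ids (pad=1, bos=0, eos=2) and forwards everything else to the parent config through **kwargs. Assumes the transformers package is installed.

from transformers import RobertaConfig

config = RobertaConfig()  # defaults: pad_token_id=1, bos_token_id=0, eos_token_id=2
custom = RobertaConfig(vocab_size=52000, num_hidden_layers=6)  # extra kwargs pass through

assert (config.pad_token_id, config.bos_token_id, config.eos_token_id) == (1, 0, 2)
assert custom.num_hidden_layers == 6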
test_reproducing_states | (hass, caplog) | Test reproducing Fan states. | Test reproducing Fan states. | async def test_reproducing_states(hass, caplog):
"""Test reproducing Fan states."""
hass.states.async_set("fan.entity_off", "off", {})
hass.states.async_set("fan.entity_on", "on", {})
hass.states.async_set("fan.entity_speed", "on", {"speed": "high"})
hass.states.async_set("fan.entity_oscillating", "on", {"oscillating": True})
hass.states.async_set("fan.entity_direction", "on", {"direction": "forward"})
turn_on_calls = async_mock_service(hass, "fan", "turn_on")
turn_off_calls = async_mock_service(hass, "fan", "turn_off")
set_direction_calls = async_mock_service(hass, "fan", "set_direction")
oscillate_calls = async_mock_service(hass, "fan", "oscillate")
set_speed_calls = async_mock_service(hass, "fan", "set_speed")
# These calls should do nothing as entities already in desired state
await hass.helpers.state.async_reproduce_state(
[
State("fan.entity_off", "off"),
State("fan.entity_on", "on"),
State("fan.entity_speed", "on", {"speed": "high"}),
State("fan.entity_oscillating", "on", {"oscillating": True}),
State("fan.entity_direction", "on", {"direction": "forward"}),
],
)
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
assert len(set_direction_calls) == 0
assert len(oscillate_calls) == 0
assert len(set_speed_calls) == 0
# Test invalid state is handled
await hass.helpers.state.async_reproduce_state(
[State("fan.entity_off", "not_supported")]
)
assert "not_supported" in caplog.text
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
assert len(set_direction_calls) == 0
assert len(oscillate_calls) == 0
assert len(set_speed_calls) == 0
# Make sure correct services are called
await hass.helpers.state.async_reproduce_state(
[
State("fan.entity_on", "off"),
State("fan.entity_off", "on"),
State("fan.entity_speed", "on", {"speed": "low"}),
State("fan.entity_oscillating", "on", {"oscillating": False}),
State("fan.entity_direction", "on", {"direction": "reverse"}),
# Should not raise
State("fan.non_existing", "on"),
],
)
assert len(turn_on_calls) == 1
assert turn_on_calls[0].domain == "fan"
assert turn_on_calls[0].data == {"entity_id": "fan.entity_off"}
assert len(set_direction_calls) == 1
assert set_direction_calls[0].domain == "fan"
assert set_direction_calls[0].data == {
"entity_id": "fan.entity_direction",
"direction": "reverse",
}
assert len(oscillate_calls) == 1
assert oscillate_calls[0].domain == "fan"
assert oscillate_calls[0].data == {
"entity_id": "fan.entity_oscillating",
"oscillating": False,
}
assert len(set_speed_calls) == 1
assert set_speed_calls[0].domain == "fan"
assert set_speed_calls[0].data == {"entity_id": "fan.entity_speed", "speed": "low"}
assert len(turn_off_calls) == 1
assert turn_off_calls[0].domain == "fan"
assert turn_off_calls[0].data == {"entity_id": "fan.entity_on"} | [
"async",
"def",
"test_reproducing_states",
"(",
"hass",
",",
"caplog",
")",
":",
"hass",
".",
"states",
".",
"async_set",
"(",
"\"fan.entity_off\"",
",",
"\"off\"",
",",
"{",
"}",
")",
"hass",
".",
"states",
".",
"async_set",
"(",
"\"fan.entity_on\"",
",",
"\"on\"",
",",
"{",
"}",
")",
"hass",
".",
"states",
".",
"async_set",
"(",
"\"fan.entity_speed\"",
",",
"\"on\"",
",",
"{",
"\"speed\"",
":",
"\"high\"",
"}",
")",
"hass",
".",
"states",
".",
"async_set",
"(",
"\"fan.entity_oscillating\"",
",",
"\"on\"",
",",
"{",
"\"oscillating\"",
":",
"True",
"}",
")",
"hass",
".",
"states",
".",
"async_set",
"(",
"\"fan.entity_direction\"",
",",
"\"on\"",
",",
"{",
"\"direction\"",
":",
"\"forward\"",
"}",
")",
"turn_on_calls",
"=",
"async_mock_service",
"(",
"hass",
",",
"\"fan\"",
",",
"\"turn_on\"",
")",
"turn_off_calls",
"=",
"async_mock_service",
"(",
"hass",
",",
"\"fan\"",
",",
"\"turn_off\"",
")",
"set_direction_calls",
"=",
"async_mock_service",
"(",
"hass",
",",
"\"fan\"",
",",
"\"set_direction\"",
")",
"oscillate_calls",
"=",
"async_mock_service",
"(",
"hass",
",",
"\"fan\"",
",",
"\"oscillate\"",
")",
"set_speed_calls",
"=",
"async_mock_service",
"(",
"hass",
",",
"\"fan\"",
",",
"\"set_speed\"",
")",
"# These calls should do nothing as entities already in desired state",
"await",
"hass",
".",
"helpers",
".",
"state",
".",
"async_reproduce_state",
"(",
"[",
"State",
"(",
"\"fan.entity_off\"",
",",
"\"off\"",
")",
",",
"State",
"(",
"\"fan.entity_on\"",
",",
"\"on\"",
")",
",",
"State",
"(",
"\"fan.entity_speed\"",
",",
"\"on\"",
",",
"{",
"\"speed\"",
":",
"\"high\"",
"}",
")",
",",
"State",
"(",
"\"fan.entity_oscillating\"",
",",
"\"on\"",
",",
"{",
"\"oscillating\"",
":",
"True",
"}",
")",
",",
"State",
"(",
"\"fan.entity_direction\"",
",",
"\"on\"",
",",
"{",
"\"direction\"",
":",
"\"forward\"",
"}",
")",
",",
"]",
",",
")",
"assert",
"len",
"(",
"turn_on_calls",
")",
"==",
"0",
"assert",
"len",
"(",
"turn_off_calls",
")",
"==",
"0",
"assert",
"len",
"(",
"set_direction_calls",
")",
"==",
"0",
"assert",
"len",
"(",
"oscillate_calls",
")",
"==",
"0",
"assert",
"len",
"(",
"set_speed_calls",
")",
"==",
"0",
"# Test invalid state is handled",
"await",
"hass",
".",
"helpers",
".",
"state",
".",
"async_reproduce_state",
"(",
"[",
"State",
"(",
"\"fan.entity_off\"",
",",
"\"not_supported\"",
")",
"]",
")",
"assert",
"\"not_supported\"",
"in",
"caplog",
".",
"text",
"assert",
"len",
"(",
"turn_on_calls",
")",
"==",
"0",
"assert",
"len",
"(",
"turn_off_calls",
")",
"==",
"0",
"assert",
"len",
"(",
"set_direction_calls",
")",
"==",
"0",
"assert",
"len",
"(",
"oscillate_calls",
")",
"==",
"0",
"assert",
"len",
"(",
"set_speed_calls",
")",
"==",
"0",
"# Make sure correct services are called",
"await",
"hass",
".",
"helpers",
".",
"state",
".",
"async_reproduce_state",
"(",
"[",
"State",
"(",
"\"fan.entity_on\"",
",",
"\"off\"",
")",
",",
"State",
"(",
"\"fan.entity_off\"",
",",
"\"on\"",
")",
",",
"State",
"(",
"\"fan.entity_speed\"",
",",
"\"on\"",
",",
"{",
"\"speed\"",
":",
"\"low\"",
"}",
")",
",",
"State",
"(",
"\"fan.entity_oscillating\"",
",",
"\"on\"",
",",
"{",
"\"oscillating\"",
":",
"False",
"}",
")",
",",
"State",
"(",
"\"fan.entity_direction\"",
",",
"\"on\"",
",",
"{",
"\"direction\"",
":",
"\"reverse\"",
"}",
")",
",",
"# Should not raise",
"State",
"(",
"\"fan.non_existing\"",
",",
"\"on\"",
")",
",",
"]",
",",
")",
"assert",
"len",
"(",
"turn_on_calls",
")",
"==",
"1",
"assert",
"turn_on_calls",
"[",
"0",
"]",
".",
"domain",
"==",
"\"fan\"",
"assert",
"turn_on_calls",
"[",
"0",
"]",
".",
"data",
"==",
"{",
"\"entity_id\"",
":",
"\"fan.entity_off\"",
"}",
"assert",
"len",
"(",
"set_direction_calls",
")",
"==",
"1",
"assert",
"set_direction_calls",
"[",
"0",
"]",
".",
"domain",
"==",
"\"fan\"",
"assert",
"set_direction_calls",
"[",
"0",
"]",
".",
"data",
"==",
"{",
"\"entity_id\"",
":",
"\"fan.entity_direction\"",
",",
"\"direction\"",
":",
"\"reverse\"",
",",
"}",
"assert",
"len",
"(",
"oscillate_calls",
")",
"==",
"1",
"assert",
"oscillate_calls",
"[",
"0",
"]",
".",
"domain",
"==",
"\"fan\"",
"assert",
"oscillate_calls",
"[",
"0",
"]",
".",
"data",
"==",
"{",
"\"entity_id\"",
":",
"\"fan.entity_oscillating\"",
",",
"\"oscillating\"",
":",
"False",
",",
"}",
"assert",
"len",
"(",
"set_speed_calls",
")",
"==",
"1",
"assert",
"set_speed_calls",
"[",
"0",
"]",
".",
"domain",
"==",
"\"fan\"",
"assert",
"set_speed_calls",
"[",
"0",
"]",
".",
"data",
"==",
"{",
"\"entity_id\"",
":",
"\"fan.entity_speed\"",
",",
"\"speed\"",
":",
"\"low\"",
"}",
"assert",
"len",
"(",
"turn_off_calls",
")",
"==",
"1",
"assert",
"turn_off_calls",
"[",
"0",
"]",
".",
"domain",
"==",
"\"fan\"",
"assert",
"turn_off_calls",
"[",
"0",
"]",
".",
"data",
"==",
"{",
"\"entity_id\"",
":",
"\"fan.entity_on\"",
"}"
] | [
6,
0
] | [
86,
67
] | python | en | ['en', 'en', 'en'] | True |
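A standalone sketch (no Home Assistant required) of the reconcile-by-diff logic the test above exercises: compare a desired fan state against the current one and emit only the service calls needed to close the gap. All names here are illustrative.

def reconcile(current, desired):
    calls = []
    if desired["state"] != current["state"]:
        calls.append("turn_on" if desired["state"] == "on" else "turn_off")
    for attr, service in (("speed", "set_speed"),
                          ("oscillating", "oscillate"),
                          ("direction", "set_direction")):
        if attr in desired and desired[attr] != current.get(attr):
            calls.append(service)
    return calls

assert reconcile({"state": "on", "speed": "high"},
                 {"state": "on", "speed": "high"}) == []            # already in desired state
assert reconcile({"state": "on", "speed": "high"},
                 {"state": "on", "speed": "low"}) == ["set_speed"]  # only the diff is called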
async_setup | (hass: HomeAssistant, config: dict) | Set up the flo component. | Set up the flo component. | async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the flo component."""
hass.data[DOMAIN] = {}
return True | [
"async",
"def",
"async_setup",
"(",
"hass",
":",
"HomeAssistant",
",",
"config",
":",
"dict",
")",
":",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
"=",
"{",
"}",
"return",
"True"
] | [
24,
0
] | [
27,
15
] | python | en | ['en', 'en', 'en'] | True |
async_setup_entry | (hass: HomeAssistant, entry: ConfigEntry) | Set up flo from a config entry. | Set up flo from a config entry. | async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up flo from a config entry."""
session = async_get_clientsession(hass)
hass.data[DOMAIN][entry.entry_id] = {}
try:
hass.data[DOMAIN][entry.entry_id][CLIENT] = client = await async_get_api(
entry.data[CONF_USERNAME], entry.data[CONF_PASSWORD], session=session
)
except RequestError as err:
raise ConfigEntryNotReady from err
user_info = await client.user.get_info(include_location_info=True)
_LOGGER.debug("Flo user information with locations: %s", user_info)
hass.data[DOMAIN][entry.entry_id]["devices"] = devices = [
FloDeviceDataUpdateCoordinator(hass, client, location["id"], device["id"])
for location in user_info["locations"]
for device in location["devices"]
]
tasks = [device.async_refresh() for device in devices]
await asyncio.gather(*tasks)
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True | [
"async",
"def",
"async_setup_entry",
"(",
"hass",
":",
"HomeAssistant",
",",
"entry",
":",
"ConfigEntry",
")",
":",
"session",
"=",
"async_get_clientsession",
"(",
"hass",
")",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
"[",
"entry",
".",
"entry_id",
"]",
"=",
"{",
"}",
"try",
":",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
"[",
"entry",
".",
"entry_id",
"]",
"[",
"CLIENT",
"]",
"=",
"client",
"=",
"await",
"async_get_api",
"(",
"entry",
".",
"data",
"[",
"CONF_USERNAME",
"]",
",",
"entry",
".",
"data",
"[",
"CONF_PASSWORD",
"]",
",",
"session",
"=",
"session",
")",
"except",
"RequestError",
"as",
"err",
":",
"raise",
"ConfigEntryNotReady",
"from",
"err",
"user_info",
"=",
"await",
"client",
".",
"user",
".",
"get_info",
"(",
"include_location_info",
"=",
"True",
")",
"_LOGGER",
".",
"debug",
"(",
"\"Flo user information with locations: %s\"",
",",
"user_info",
")",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
"[",
"entry",
".",
"entry_id",
"]",
"[",
"\"devices\"",
"]",
"=",
"devices",
"=",
"[",
"FloDeviceDataUpdateCoordinator",
"(",
"hass",
",",
"client",
",",
"location",
"[",
"\"id\"",
"]",
",",
"device",
"[",
"\"id\"",
"]",
")",
"for",
"location",
"in",
"user_info",
"[",
"\"locations\"",
"]",
"for",
"device",
"in",
"location",
"[",
"\"devices\"",
"]",
"]",
"tasks",
"=",
"[",
"device",
".",
"async_refresh",
"(",
")",
"for",
"device",
"in",
"devices",
"]",
"await",
"asyncio",
".",
"gather",
"(",
"*",
"tasks",
")",
"for",
"component",
"in",
"PLATFORMS",
":",
"hass",
".",
"async_create_task",
"(",
"hass",
".",
"config_entries",
".",
"async_forward_entry_setup",
"(",
"entry",
",",
"component",
")",
")",
"return",
"True"
] | [
30,
0
] | [
59,
15
] | python | en | ['en', 'en', 'en'] | True |
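The per-device refresh above fans out with asyncio.gather; a minimal stand-in with dummy coroutines (no Flo API involved) looks like this:

import asyncio

async def fake_refresh(device_id):
    await asyncio.sleep(0.01)  # pretend to poll the cloud API
    return device_id

async def main():
    tasks = [fake_refresh(i) for i in range(3)]
    results = await asyncio.gather(*tasks)  # all refreshes run concurrently
    print(results)  # [0, 1, 2]

asyncio.run(main())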
async_unload_entry | (hass: HomeAssistant, entry: ConfigEntry) | Unload a config entry. | Unload a config entry. | async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok | [
"async",
"def",
"async_unload_entry",
"(",
"hass",
":",
"HomeAssistant",
",",
"entry",
":",
"ConfigEntry",
")",
":",
"unload_ok",
"=",
"all",
"(",
"await",
"asyncio",
".",
"gather",
"(",
"*",
"[",
"hass",
".",
"config_entries",
".",
"async_forward_entry_unload",
"(",
"entry",
",",
"component",
")",
"for",
"component",
"in",
"PLATFORMS",
"]",
")",
")",
"if",
"unload_ok",
":",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
".",
"pop",
"(",
"entry",
".",
"entry_id",
")",
"return",
"unload_ok"
] | [
62,
0
] | [
75,
20
] | python | en | ['en', 'es', 'en'] | True |
get_coap_context | (hass) | Get CoAP context to be used in all Shelly devices. | Get CoAP context to be used in all Shelly devices. | async def get_coap_context(hass):
"""Get CoAP context to be used in all Shelly devices."""
context = aioshelly.COAP()
await context.initialize()
@callback
def shutdown_listener(ev):
context.close()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, shutdown_listener)
return context | [
"async",
"def",
"get_coap_context",
"(",
"hass",
")",
":",
"context",
"=",
"aioshelly",
".",
"COAP",
"(",
")",
"await",
"context",
".",
"initialize",
"(",
")",
"@",
"callback",
"def",
"shutdown_listener",
"(",
"ev",
")",
":",
"context",
".",
"close",
"(",
")",
"hass",
".",
"bus",
".",
"async_listen_once",
"(",
"EVENT_HOMEASSISTANT_STOP",
",",
"shutdown_listener",
")",
"return",
"context"
] | [
42,
0
] | [
53,
18
] | python | en | ['en', 'en', 'en'] | True |
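A toy version of the lifecycle pattern above, with local stand-ins for hass.bus and the CoAP context: create the shared resource once and register a one-shot callback that closes it on shutdown. Neither Home Assistant nor aioshelly is needed to run it.

class ToyBus:
    def __init__(self):
        self._once = []
    def async_listen_once(self, event, cb):
        self._once.append((event, cb))
    def fire(self, event):
        matched = [p for p in self._once if p[0] == event]
        self._once = [p for p in self._once if p[0] != event]  # one-shot semantics
        for _, cb in matched:
            cb(event)

class ToyContext:
    closed = False
    def close(self):
        self.closed = True

bus, context = ToyBus(), ToyContext()
bus.async_listen_once("homeassistant_stop", lambda ev: context.close())
bus.fire("homeassistant_stop")
assert context.closed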
get_device_name | (device) | Return the name of the device. | Return the name of the device. | def get_device_name(device):
"""Return the name of the device."""
return device.settings["name"] or device.settings["device"]["hostname"] | [
"def",
"get_device_name",
"(",
"device",
")",
":",
"return",
"device",
".",
"settings",
"[",
"\"name\"",
"]",
"or",
"device",
".",
"settings",
"[",
"\"device\"",
"]",
"[",
"\"hostname\"",
"]"
] | [
56,
0
] | [
58,
75
] | python | en | ['da', 'en', 'en'] | True |
async_setup | (hass: HomeAssistant, config: dict) | Set up the Shelly component. | Set up the Shelly component. | async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Shelly component."""
hass.data[DOMAIN] = {DATA_CONFIG_ENTRY: {}}
return True | [
"async",
"def",
"async_setup",
"(",
"hass",
":",
"HomeAssistant",
",",
"config",
":",
"dict",
")",
":",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
"=",
"{",
"DATA_CONFIG_ENTRY",
":",
"{",
"}",
"}",
"return",
"True"
] | [
61,
0
] | [
64,
15
] | python | en | ['en', 'en', 'en'] | True |
async_setup_entry | (hass: HomeAssistant, entry: ConfigEntry) | Set up Shelly from a config entry. | Set up Shelly from a config entry. | async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Shelly from a config entry."""
temperature_unit = "C" if hass.config.units.is_metric else "F"
ip_address = await hass.async_add_executor_job(gethostbyname, entry.data[CONF_HOST])
options = aioshelly.ConnectionOptions(
ip_address,
entry.data.get(CONF_USERNAME),
entry.data.get(CONF_PASSWORD),
temperature_unit,
)
coap_context = await get_coap_context(hass)
try:
async with async_timeout.timeout(SETUP_ENTRY_TIMEOUT_SEC):
device = await aioshelly.Device.create(
aiohttp_client.async_get_clientsession(hass),
coap_context,
options,
)
except (asyncio.TimeoutError, OSError) as err:
raise ConfigEntryNotReady from err
hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id] = {}
coap_wrapper = hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id][
COAP
] = ShellyDeviceWrapper(hass, entry, device)
await coap_wrapper.async_setup()
hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id][
REST
] = ShellyDeviceRestWrapper(hass, device)
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True | [
"async",
"def",
"async_setup_entry",
"(",
"hass",
":",
"HomeAssistant",
",",
"entry",
":",
"ConfigEntry",
")",
":",
"temperature_unit",
"=",
"\"C\"",
"if",
"hass",
".",
"config",
".",
"units",
".",
"is_metric",
"else",
"\"F\"",
"ip_address",
"=",
"await",
"hass",
".",
"async_add_executor_job",
"(",
"gethostbyname",
",",
"entry",
".",
"data",
"[",
"CONF_HOST",
"]",
")",
"options",
"=",
"aioshelly",
".",
"ConnectionOptions",
"(",
"ip_address",
",",
"entry",
".",
"data",
".",
"get",
"(",
"CONF_USERNAME",
")",
",",
"entry",
".",
"data",
".",
"get",
"(",
"CONF_PASSWORD",
")",
",",
"temperature_unit",
",",
")",
"coap_context",
"=",
"await",
"get_coap_context",
"(",
"hass",
")",
"try",
":",
"async",
"with",
"async_timeout",
".",
"timeout",
"(",
"SETUP_ENTRY_TIMEOUT_SEC",
")",
":",
"device",
"=",
"await",
"aioshelly",
".",
"Device",
".",
"create",
"(",
"aiohttp_client",
".",
"async_get_clientsession",
"(",
"hass",
")",
",",
"coap_context",
",",
"options",
",",
")",
"except",
"(",
"asyncio",
".",
"TimeoutError",
",",
"OSError",
")",
"as",
"err",
":",
"raise",
"ConfigEntryNotReady",
"from",
"err",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
"[",
"DATA_CONFIG_ENTRY",
"]",
"[",
"entry",
".",
"entry_id",
"]",
"=",
"{",
"}",
"coap_wrapper",
"=",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
"[",
"DATA_CONFIG_ENTRY",
"]",
"[",
"entry",
".",
"entry_id",
"]",
"[",
"COAP",
"]",
"=",
"ShellyDeviceWrapper",
"(",
"hass",
",",
"entry",
",",
"device",
")",
"await",
"coap_wrapper",
".",
"async_setup",
"(",
")",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
"[",
"DATA_CONFIG_ENTRY",
"]",
"[",
"entry",
".",
"entry_id",
"]",
"[",
"REST",
"]",
"=",
"ShellyDeviceRestWrapper",
"(",
"hass",
",",
"device",
")",
"for",
"component",
"in",
"PLATFORMS",
":",
"hass",
".",
"async_create_task",
"(",
"hass",
".",
"config_entries",
".",
"async_forward_entry_setup",
"(",
"entry",
",",
"component",
")",
")",
"return",
"True"
] | [
67,
0
] | [
107,
15
] | python | en | ['en', 'en', 'en'] | True |
async_unload_entry | (hass: HomeAssistant, entry: ConfigEntry) | Unload a config entry. | Unload a config entry. | async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN][DATA_CONFIG_ENTRY][entry.entry_id][COAP].shutdown()
hass.data[DOMAIN][DATA_CONFIG_ENTRY].pop(entry.entry_id)
return unload_ok | [
"async",
"def",
"async_unload_entry",
"(",
"hass",
":",
"HomeAssistant",
",",
"entry",
":",
"ConfigEntry",
")",
":",
"unload_ok",
"=",
"all",
"(",
"await",
"asyncio",
".",
"gather",
"(",
"*",
"[",
"hass",
".",
"config_entries",
".",
"async_forward_entry_unload",
"(",
"entry",
",",
"component",
")",
"for",
"component",
"in",
"PLATFORMS",
"]",
")",
")",
"if",
"unload_ok",
":",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
"[",
"DATA_CONFIG_ENTRY",
"]",
"[",
"entry",
".",
"entry_id",
"]",
"[",
"COAP",
"]",
".",
"shutdown",
"(",
")",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
"[",
"DATA_CONFIG_ENTRY",
"]",
".",
"pop",
"(",
"entry",
".",
"entry_id",
")",
"return",
"unload_ok"
] | [
213,
0
] | [
227,
20
] | python | en | ['en', 'es', 'en'] | True |
ShellyDeviceWrapper.__init__ | (self, hass, entry, device: aioshelly.Device) | Initialize the Shelly device wrapper. | Initialize the Shelly device wrapper. | def __init__(self, hass, entry, device: aioshelly.Device):
"""Initialize the Shelly device wrapper."""
sleep_mode = device.settings.get("sleep_mode")
if sleep_mode:
sleep_period = sleep_mode["period"]
if sleep_mode["unit"] == "h":
sleep_period *= 60 # hours to minutes
update_interval = (
SLEEP_PERIOD_MULTIPLIER * sleep_period * 60
) # minutes to seconds
else:
update_interval = (
UPDATE_PERIOD_MULTIPLIER * device.settings["coiot"]["update_period"]
)
super().__init__(
hass,
_LOGGER,
name=get_device_name(device),
update_interval=timedelta(seconds=update_interval),
)
self.hass = hass
self.entry = entry
self.device = device
self.device.subscribe_updates(self.async_set_updated_data) | [
"def",
"__init__",
"(",
"self",
",",
"hass",
",",
"entry",
",",
"device",
":",
"aioshelly",
".",
"Device",
")",
":",
"sleep_mode",
"=",
"device",
".",
"settings",
".",
"get",
"(",
"\"sleep_mode\"",
")",
"if",
"sleep_mode",
":",
"sleep_period",
"=",
"sleep_mode",
"[",
"\"period\"",
"]",
"if",
"sleep_mode",
"[",
"\"unit\"",
"]",
"==",
"\"h\"",
":",
"sleep_period",
"*=",
"60",
"# hours to minutes",
"update_interval",
"=",
"(",
"SLEEP_PERIOD_MULTIPLIER",
"*",
"sleep_period",
"*",
"60",
")",
"# minutes to seconds",
"else",
":",
"update_interval",
"=",
"(",
"UPDATE_PERIOD_MULTIPLIER",
"*",
"device",
".",
"settings",
"[",
"\"coiot\"",
"]",
"[",
"\"update_period\"",
"]",
")",
"super",
"(",
")",
".",
"__init__",
"(",
"hass",
",",
"_LOGGER",
",",
"name",
"=",
"get_device_name",
"(",
"device",
")",
",",
"update_interval",
"=",
"timedelta",
"(",
"seconds",
"=",
"update_interval",
")",
",",
")",
"self",
".",
"hass",
"=",
"hass",
"self",
".",
"entry",
"=",
"entry",
"self",
".",
"device",
"=",
"device",
"self",
".",
"device",
".",
"subscribe_updates",
"(",
"self",
".",
"async_set_updated_data",
")"
] | [
113,
4
] | [
140,
66
] | python | en | ['en', 'en', 'en'] | True |
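A worked example of the interval arithmetic in __init__ above. The multiplier values are assumptions for illustration; the real constants come from the integration's const module.

SLEEP_PERIOD_MULTIPLIER = 1.2   # assumed value
UPDATE_PERIOD_MULTIPLIER = 2.2  # assumed value

def update_interval_seconds(settings):
    sleep_mode = settings.get("sleep_mode")
    if sleep_mode:
        sleep_period = sleep_mode["period"]
        if sleep_mode["unit"] == "h":
            sleep_period *= 60                                 # hours to minutes
        return SLEEP_PERIOD_MULTIPLIER * sleep_period * 60     # minutes to seconds
    return UPDATE_PERIOD_MULTIPLIER * settings["coiot"]["update_period"]

# A battery device sleeping 1 h polls every 1.2 * 60 * 60 = 4320 seconds.
print(update_interval_seconds({"sleep_mode": {"period": 1, "unit": "h"}}))
# A mains device with a 15 s CoIoT period polls every 2.2 * 15 = 33 seconds.
print(update_interval_seconds({"coiot": {"update_period": 15}}))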
ShellyDeviceWrapper._async_update_data | (self) | Fetch data. | Fetch data. | async def _async_update_data(self):
"""Fetch data."""
_LOGGER.debug("Polling Shelly Device - %s", self.name)
try:
async with async_timeout.timeout(
POLLING_TIMEOUT_MULTIPLIER
* self.device.settings["coiot"]["update_period"]
):
return await self.device.update()
except OSError as err:
raise update_coordinator.UpdateFailed("Error fetching data") from err | [
"async",
"def",
"_async_update_data",
"(",
"self",
")",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Polling Shelly Device - %s\"",
",",
"self",
".",
"name",
")",
"try",
":",
"async",
"with",
"async_timeout",
".",
"timeout",
"(",
"POLLING_TIMEOUT_MULTIPLIER",
"*",
"self",
".",
"device",
".",
"settings",
"[",
"\"coiot\"",
"]",
"[",
"\"update_period\"",
"]",
")",
":",
"return",
"await",
"self",
".",
"device",
".",
"update",
"(",
")",
"except",
"OSError",
"as",
"err",
":",
"raise",
"update_coordinator",
".",
"UpdateFailed",
"(",
"\"Error fetching data\"",
")",
"from",
"err"
] | [
142,
4
] | [
152,
81
] | python | cy | ['de', 'cy', 'en'] | False |
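A generic, runnable sketch of the poll-with-timeout pattern above: bound the request with a timeout and convert low-level I/O errors into a single coordinator-facing failure type. asyncio.wait_for substitutes for async_timeout here, and a local class stands in for update_coordinator.UpdateFailed.

import asyncio

class UpdateFailed(Exception):
    """Stand-in for homeassistant.helpers.update_coordinator.UpdateFailed."""

async def fetch():
    await asyncio.sleep(0.01)  # pretend to poll the device
    return {"temperature": 21.5}

async def update():
    try:
        return await asyncio.wait_for(fetch(), timeout=5)
    except (asyncio.TimeoutError, OSError) as err:
        raise UpdateFailed("Error fetching data") from err

print(asyncio.run(update()))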
ShellyDeviceWrapper.model | (self) | Model of the device. | Model of the device. | def model(self):
"""Model of the device."""
return self.device.settings["device"]["type"] | [
"def",
"model",
"(",
"self",
")",
":",
"return",
"self",
".",
"device",
".",
"settings",
"[",
"\"device\"",
"]",
"[",
"\"type\"",
"]"
] | [
155,
4
] | [
157,
53
] | python | en | ['en', 'en', 'en'] | True |
ShellyDeviceWrapper.mac | (self) | MAC address of the device. | MAC address of the device. | def mac(self):
"""MAC address of the device."""
return self.device.settings["device"]["mac"] | [
"def",
"mac",
"(",
"self",
")",
":",
"return",
"self",
".",
"device",
".",
"settings",
"[",
"\"device\"",
"]",
"[",
"\"mac\"",
"]"
] | [
160,
4
] | [
162,
52
] | python | en | ['en', 'en', 'en'] | True |
ShellyDeviceWrapper.async_setup | (self) | Set up the wrapper. | Set up the wrapper. | async def async_setup(self):
"""Set up the wrapper."""
dev_reg = await device_registry.async_get_registry(self.hass)
model_type = self.device.settings["device"]["type"]
dev_reg.async_get_or_create(
config_entry_id=self.entry.entry_id,
name=self.name,
connections={(device_registry.CONNECTION_NETWORK_MAC, self.mac)},
# This is duplicate but otherwise via_device can't work
identifiers={(DOMAIN, self.mac)},
manufacturer="Shelly",
model=aioshelly.MODEL_NAMES.get(model_type, model_type),
sw_version=self.device.settings["fw"],
) | [
"async",
"def",
"async_setup",
"(",
"self",
")",
":",
"dev_reg",
"=",
"await",
"device_registry",
".",
"async_get_registry",
"(",
"self",
".",
"hass",
")",
"model_type",
"=",
"self",
".",
"device",
".",
"settings",
"[",
"\"device\"",
"]",
"[",
"\"type\"",
"]",
"dev_reg",
".",
"async_get_or_create",
"(",
"config_entry_id",
"=",
"self",
".",
"entry",
".",
"entry_id",
",",
"name",
"=",
"self",
".",
"name",
",",
"connections",
"=",
"{",
"(",
"device_registry",
".",
"CONNECTION_NETWORK_MAC",
",",
"self",
".",
"mac",
")",
"}",
",",
"# This is duplicate but otherwise via_device can't work",
"identifiers",
"=",
"{",
"(",
"DOMAIN",
",",
"self",
".",
"mac",
")",
"}",
",",
"manufacturer",
"=",
"\"Shelly\"",
",",
"model",
"=",
"aioshelly",
".",
"MODEL_NAMES",
".",
"get",
"(",
"model_type",
",",
"model_type",
")",
",",
"sw_version",
"=",
"self",
".",
"device",
".",
"settings",
"[",
"\"fw\"",
"]",
",",
")"
] | [
164,
4
] | [
177,
9
] | python | en | ['en', 'en', 'en'] | True |
ShellyDeviceWrapper.shutdown | (self) | Shut down the wrapper. | Shut down the wrapper. | def shutdown(self):
"""Shut down the wrapper."""
self.device.shutdown() | [
"def",
"shutdown",
"(",
"self",
")",
":",
"self",
".",
"device",
".",
"shutdown",
"(",
")"
] | [
179,
4
] | [
181,
30
] | python | en | ['en', 'it', 'en'] | True |
ShellyDeviceRestWrapper.__init__ | (self, hass, device: aioshelly.Device) | Initialize the Shelly device wrapper. | Initialize the Shelly device wrapper. | def __init__(self, hass, device: aioshelly.Device):
"""Initialize the Shelly device wrapper."""
super().__init__(
hass,
_LOGGER,
name=get_device_name(device),
update_interval=timedelta(seconds=REST_SENSORS_UPDATE_INTERVAL),
)
self.device = device | [
"def",
"__init__",
"(",
"self",
",",
"hass",
",",
"device",
":",
"aioshelly",
".",
"Device",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"hass",
",",
"_LOGGER",
",",
"name",
"=",
"get_device_name",
"(",
"device",
")",
",",
"update_interval",
"=",
"timedelta",
"(",
"seconds",
"=",
"REST_SENSORS_UPDATE_INTERVAL",
")",
",",
")",
"self",
".",
"device",
"=",
"device"
] | [
187,
4
] | [
196,
28
] | python | en | ['en', 'en', 'en'] | True |
ShellyDeviceRestWrapper._async_update_data | (self) | Fetch data. | Fetch data. | async def _async_update_data(self):
"""Fetch data."""
try:
async with async_timeout.timeout(5):
_LOGGER.debug("REST update for %s", get_device_name(self.device))
return await self.device.update_status()
except OSError as err:
raise update_coordinator.UpdateFailed("Error fetching data") from err | [
"async",
"def",
"_async_update_data",
"(",
"self",
")",
":",
"try",
":",
"async",
"with",
"async_timeout",
".",
"timeout",
"(",
"5",
")",
":",
"_LOGGER",
".",
"debug",
"(",
"\"REST update for %s\"",
",",
"get_device_name",
"(",
"self",
".",
"device",
")",
")",
"return",
"await",
"self",
".",
"device",
".",
"update_status",
"(",
")",
"except",
"OSError",
"as",
"err",
":",
"raise",
"update_coordinator",
".",
"UpdateFailed",
"(",
"\"Error fetching data\"",
")",
"from",
"err"
] | [
198,
4
] | [
205,
81
] | python | cy | ['de', 'cy', 'en'] | False |
ShellyDeviceRestWrapper.mac | (self) | MAC address of the device. | MAC address of the device. | def mac(self):
"""MAC address of the device."""
return self.device.settings["device"]["mac"] | [
"def",
"mac",
"(",
"self",
")",
":",
"return",
"self",
".",
"device",
".",
"settings",
"[",
"\"device\"",
"]",
"[",
"\"mac\"",
"]"
] | [
208,
4
] | [
210,
52
] | python | en | ['en', 'en', 'en'] | True |
ROIPool.__init__ | (self, output_size, spatial_scale) |
:param output_size: e.g. (3,3)
:param spatial_scale: e.g. 1.0/16
|
:param output_size: e.g. (3,3)
:param spatial_scale: e.g. 1.0/16
| def __init__(self, output_size, spatial_scale):
"""
:param output_size: e.g. (3,3)
:param spatial_scale: e.g. 1.0/16
"""
super(ROIPool, self).__init__()
self.output_size = output_size
self.spatial_scale = spatial_scale | [
"def",
"__init__",
"(",
"self",
",",
"output_size",
",",
"spatial_scale",
")",
":",
"super",
"(",
"ROIPool",
",",
"self",
")",
".",
"__init__",
"(",
")",
"self",
".",
"output_size",
"=",
"output_size",
"self",
".",
"spatial_scale",
"=",
"spatial_scale"
] | [
49,
4
] | [
56,
42
] | python | en | ['en', 'error', 'th'] | False |
ROIPool.forward | (self, input, rois) |
:param input: the input features [B C H W]
:param rois: [k, 5] : (im_index, x1, y1, x2, y2)
:return: pooled features (K C H W), K = k
|
:param input: the input features [B C H W]
:param rois: [k, 5] : (im_index, x1, y1, x2, y2)
:return: pooled features (K C H W), K = k
| def forward(self, input, rois):
"""
:param input: the input features [B C H W]
:param rois: [k, 5] : (im_index, x1, y1, x2, y2)
:return: pooled features (K C H W), K = k
"""
return roi_pool(input.float(), rois.float(), self.output_size, self.spatial_scale) | [
"def",
"forward",
"(",
"self",
",",
"input",
",",
"rois",
")",
":",
"return",
"roi_pool",
"(",
"input",
".",
"float",
"(",
")",
",",
"rois",
".",
"float",
"(",
")",
",",
"self",
".",
"output_size",
",",
"self",
".",
"spatial_scale",
")"
] | [
58,
4
] | [
64,
90
] | python | en | ['en', 'error', 'th'] | False |
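A hedged usage sketch for the wrapper above, assuming torchvision is installed. rois follow the documented (im_index, x1, y1, x2, y2) layout in input-image coordinates, and spatial_scale maps them onto the downsampled feature map.

import torch
from torchvision.ops import roi_pool

features = torch.randn(1, 256, 32, 32)  # [B C H W], e.g. a 512 px image at stride 16
rois = torch.tensor([[0.0, 16.0, 16.0, 256.0, 256.0]])  # one box on image index 0
pooled = roi_pool(features, rois, output_size=(3, 3), spatial_scale=1.0 / 16)
print(pooled.shape)  # torch.Size([1, 256, 3, 3])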
HomematicipAuth.__init__ | (self, hass, config) | Initialize HomematicIP Cloud client registration. | Initialize HomematicIP Cloud client registration. | def __init__(self, hass, config) -> None:
"""Initialize HomematicIP Cloud client registration."""
self.hass = hass
self.config = config
self.auth = None | [
"def",
"__init__",
"(",
"self",
",",
"hass",
",",
"config",
")",
"->",
"None",
":",
"self",
".",
"hass",
"=",
"hass",
"self",
".",
"config",
"=",
"config",
"self",
".",
"auth",
"=",
"None"
] | [
24,
4
] | [
28,
24
] | python | en | ['nl', 'fr', 'en'] | False |
HomematicipAuth.async_setup | (self) | Connect to HomematicIP for registration. | Connect to HomematicIP for registration. | async def async_setup(self) -> bool:
"""Connect to HomematicIP for registration."""
try:
self.auth = await self.get_auth(
self.hass, self.config.get(HMIPC_HAPID), self.config.get(HMIPC_PIN)
)
return self.auth is not None
except HmipcConnectionError:
return False | [
"async",
"def",
"async_setup",
"(",
"self",
")",
"->",
"bool",
":",
"try",
":",
"self",
".",
"auth",
"=",
"await",
"self",
".",
"get_auth",
"(",
"self",
".",
"hass",
",",
"self",
".",
"config",
".",
"get",
"(",
"HMIPC_HAPID",
")",
",",
"self",
".",
"config",
".",
"get",
"(",
"HMIPC_PIN",
")",
")",
"return",
"self",
".",
"auth",
"is",
"not",
"None",
"except",
"HmipcConnectionError",
":",
"return",
"False"
] | [
30,
4
] | [
38,
24
] | python | en | ['en', 'en', 'en'] | True |
HomematicipAuth.async_checkbutton | (self) | Check whether the blue button has been pressed. | Check whether the blue button has been pressed. | async def async_checkbutton(self) -> bool:
"""Check whether the blue button has been pressed."""
try:
return await self.auth.isRequestAcknowledged()
except HmipConnectionError:
return False | [
"async",
"def",
"async_checkbutton",
"(",
"self",
")",
"->",
"bool",
":",
"try",
":",
"return",
"await",
"self",
".",
"auth",
".",
"isRequestAcknowledged",
"(",
")",
"except",
"HmipConnectionError",
":",
"return",
"False"
] | [
40,
4
] | [
45,
24
] | python | en | ['en', 'en', 'en'] | True |
HomematicipAuth.async_register | (self) | Register client at HomematicIP. | Register client at HomematicIP. | async def async_register(self):
"""Register client at HomematicIP."""
try:
authtoken = await self.auth.requestAuthToken()
await self.auth.confirmAuthToken(authtoken)
return authtoken
except HmipConnectionError:
return False | [
"async",
"def",
"async_register",
"(",
"self",
")",
":",
"try",
":",
"authtoken",
"=",
"await",
"self",
".",
"auth",
".",
"requestAuthToken",
"(",
")",
"await",
"self",
".",
"auth",
".",
"confirmAuthToken",
"(",
"authtoken",
")",
"return",
"authtoken",
"except",
"HmipConnectionError",
":",
"return",
"False"
] | [
47,
4
] | [
54,
24
] | python | en | ['da', 'en', 'en'] | True |
HomematicipAuth.get_auth | (self, hass: HomeAssistantType, hapid, pin) | Create a HomematicIP access point object. | Create a HomematicIP access point object. | async def get_auth(self, hass: HomeAssistantType, hapid, pin):
"""Create a HomematicIP access point object."""
auth = AsyncAuth(hass.loop, async_get_clientsession(hass))
try:
await auth.init(hapid)
if pin:
auth.pin = pin
await auth.connectionRequest("HomeAssistant")
except HmipConnectionError:
return None
return auth | [
"async",
"def",
"get_auth",
"(",
"self",
",",
"hass",
":",
"HomeAssistantType",
",",
"hapid",
",",
"pin",
")",
":",
"auth",
"=",
"AsyncAuth",
"(",
"hass",
".",
"loop",
",",
"async_get_clientsession",
"(",
"hass",
")",
")",
"try",
":",
"await",
"auth",
".",
"init",
"(",
"hapid",
")",
"if",
"pin",
":",
"auth",
".",
"pin",
"=",
"pin",
"await",
"auth",
".",
"connectionRequest",
"(",
"\"HomeAssistant\"",
")",
"except",
"HmipConnectionError",
":",
"return",
"None",
"return",
"auth"
] | [
56,
4
] | [
66,
19
] | python | en | ['en', 'en', 'en'] | True |
HomematicipHAP.__init__ | (self, hass: HomeAssistantType, config_entry: ConfigEntry) | Initialize HomematicIP Cloud connection. | Initialize HomematicIP Cloud connection. | def __init__(self, hass: HomeAssistantType, config_entry: ConfigEntry) -> None:
"""Initialize HomematicIP Cloud connection."""
self.hass = hass
self.config_entry = config_entry
self.home = None
self._ws_close_requested = False
self._retry_task = None
self._tries = 0
self._accesspoint_connected = True
self.hmip_device_by_entity_id = {}
self.reset_connection_listener = None | [
"def",
"__init__",
"(",
"self",
",",
"hass",
":",
"HomeAssistantType",
",",
"config_entry",
":",
"ConfigEntry",
")",
"->",
"None",
":",
"self",
".",
"hass",
"=",
"hass",
"self",
".",
"config_entry",
"=",
"config_entry",
"self",
".",
"home",
"=",
"None",
"self",
".",
"_ws_close_requested",
"=",
"False",
"self",
".",
"_retry_task",
"=",
"None",
"self",
".",
"_tries",
"=",
"0",
"self",
".",
"_accesspoint_connected",
"=",
"True",
"self",
".",
"hmip_device_by_entity_id",
"=",
"{",
"}",
"self",
".",
"reset_connection_listener",
"=",
"None"
] | [
72,
4
] | [
83,
45
] | python | en | ['nl', 'en', 'en'] | True |
HomematicipHAP.async_setup | (self, tries: int = 0) | Initialize connection. | Initialize connection. | async def async_setup(self, tries: int = 0) -> bool:
"""Initialize connection."""
try:
self.home = await self.get_hap(
self.hass,
self.config_entry.data.get(HMIPC_HAPID),
self.config_entry.data.get(HMIPC_AUTHTOKEN),
self.config_entry.data.get(HMIPC_NAME),
)
except HmipcConnectionError as err:
raise ConfigEntryNotReady from err
except Exception as err: # pylint: disable=broad-except
_LOGGER.error("Error connecting with HomematicIP Cloud: %s", err)
return False
_LOGGER.info(
"Connected to HomematicIP with HAP %s", self.config_entry.unique_id
)
for component in COMPONENTS:
self.hass.async_create_task(
self.hass.config_entries.async_forward_entry_setup(
self.config_entry, component
)
)
return True | [
"async",
"def",
"async_setup",
"(",
"self",
",",
"tries",
":",
"int",
"=",
"0",
")",
"->",
"bool",
":",
"try",
":",
"self",
".",
"home",
"=",
"await",
"self",
".",
"get_hap",
"(",
"self",
".",
"hass",
",",
"self",
".",
"config_entry",
".",
"data",
".",
"get",
"(",
"HMIPC_HAPID",
")",
",",
"self",
".",
"config_entry",
".",
"data",
".",
"get",
"(",
"HMIPC_AUTHTOKEN",
")",
",",
"self",
".",
"config_entry",
".",
"data",
".",
"get",
"(",
"HMIPC_NAME",
")",
",",
")",
"except",
"HmipcConnectionError",
"as",
"err",
":",
"raise",
"ConfigEntryNotReady",
"from",
"err",
"except",
"Exception",
"as",
"err",
":",
"# pylint: disable=broad-except",
"_LOGGER",
".",
"error",
"(",
"\"Error connecting with HomematicIP Cloud: %s\"",
",",
"err",
")",
"return",
"False",
"_LOGGER",
".",
"info",
"(",
"\"Connected to HomematicIP with HAP %s\"",
",",
"self",
".",
"config_entry",
".",
"unique_id",
")",
"for",
"component",
"in",
"COMPONENTS",
":",
"self",
".",
"hass",
".",
"async_create_task",
"(",
"self",
".",
"hass",
".",
"config_entries",
".",
"async_forward_entry_setup",
"(",
"self",
".",
"config_entry",
",",
"component",
")",
")",
"return",
"True"
] | [
85,
4
] | [
110,
19
] | python | en | ['en', 'en', 'en'] | False |
HomematicipHAP.async_update | (self, *args, **kwargs) | Async update the home device.
Triggered when the HMIP HOME_CHANGED event has fired.
There are several occasions for this event to happen.
1. We want to check whether the access point
is still connected. If not, entity state changes cannot
be forwarded to hass, so if the access point is disconnected,
all devices are set to unavailable.
2. We need to update the home, including devices and groups, after a reconnect.
3. We need to update the home without devices and groups in all other cases.
| Async update the home device. | def async_update(self, *args, **kwargs) -> None:
"""Async update the home device.
Triggered when the HMIP HOME_CHANGED event has fired.
There are several occasions for this event to happen.
1. We want to check whether the access point
is still connected. If not, entity state changes cannot
be forwarded to hass, so if the access point is disconnected,
all devices are set to unavailable.
2. We need to update the home, including devices and groups, after a reconnect.
3. We need to update the home without devices and groups in all other cases.
"""
if not self.home.connected:
_LOGGER.error("HMIP access point has lost connection with the cloud")
self._accesspoint_connected = False
self.set_all_to_unavailable()
elif not self._accesspoint_connected:
# Now the HOME_CHANGED event has fired indicating the access
# point has reconnected to the cloud again.
# Explicitly getting an update as entity states might have
# changed during access point disconnect.
job = self.hass.async_create_task(self.get_state())
job.add_done_callback(self.get_state_finished)
self._accesspoint_connected = True | [
"def",
"async_update",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"->",
"None",
":",
"if",
"not",
"self",
".",
"home",
".",
"connected",
":",
"_LOGGER",
".",
"error",
"(",
"\"HMIP access point has lost connection with the cloud\"",
")",
"self",
".",
"_accesspoint_connected",
"=",
"False",
"self",
".",
"set_all_to_unavailable",
"(",
")",
"elif",
"not",
"self",
".",
"_accesspoint_connected",
":",
"# Now the HOME_CHANGED event has fired indicating the access",
"# point has reconnected to the cloud again.",
"# Explicitly getting an update as entity states might have",
"# changed during access point disconnect.\"\"\"",
"job",
"=",
"self",
".",
"hass",
".",
"async_create_task",
"(",
"self",
".",
"get_state",
"(",
")",
")",
"job",
".",
"add_done_callback",
"(",
"self",
".",
"get_state_finished",
")",
"self",
".",
"_accesspoint_connected",
"=",
"True"
] | [
113,
4
] | [
138,
46
] | python | en | ['en', 'en', 'en'] | True |
HomematicipHAP.async_create_entity | (self, *args, **kwargs) | Create an entity or a group. | Create an entity or a group. | def async_create_entity(self, *args, **kwargs) -> None:
"""Create an entity or a group."""
is_device = EventType(kwargs["event_type"]) == EventType.DEVICE_ADDED
self.hass.async_create_task(self.async_create_entity_lazy(is_device)) | [
"def",
"async_create_entity",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"->",
"None",
":",
"is_device",
"=",
"EventType",
"(",
"kwargs",
"[",
"\"event_type\"",
"]",
")",
"==",
"EventType",
".",
"DEVICE_ADDED",
"self",
".",
"hass",
".",
"async_create_task",
"(",
"self",
".",
"async_create_entity_lazy",
"(",
"is_device",
")",
")"
] | [
141,
4
] | [
144,
77
] | python | en | ['en', 'ga', 'en'] | True |
HomematicipHAP.async_create_entity_lazy | (self, is_device=True) | Delay entity creation to allow the user to enter a device name. | Delay entity creation to allow the user to enter a device name. | async def async_create_entity_lazy(self, is_device=True) -> None:
"""Delay entity creation to allow the user to enter a device name."""
if is_device:
await asyncio.sleep(30)
await self.hass.config_entries.async_reload(self.config_entry.entry_id) | [
"async",
"def",
"async_create_entity_lazy",
"(",
"self",
",",
"is_device",
"=",
"True",
")",
"->",
"None",
":",
"if",
"is_device",
":",
"await",
"asyncio",
".",
"sleep",
"(",
"30",
")",
"await",
"self",
".",
"hass",
".",
"config_entries",
".",
"async_reload",
"(",
"self",
".",
"config_entry",
".",
"entry_id",
")"
] | [
146,
4
] | [
150,
79
] | python | en | ['en', 'en', 'en'] | True |
HomematicipHAP.get_state | (self) | Update HMIP state and tell Home Assistant. | Update HMIP state and tell Home Assistant. | async def get_state(self) -> None:
"""Update HMIP state and tell Home Assistant."""
await self.home.get_current_state()
self.update_all() | [
"async",
"def",
"get_state",
"(",
"self",
")",
"->",
"None",
":",
"await",
"self",
".",
"home",
".",
"get_current_state",
"(",
")",
"self",
".",
"update_all",
"(",
")"
] | [
152,
4
] | [
155,
25
] | python | en | ['en', 'en', 'en'] | True |
HomematicipHAP.get_state_finished | (self, future) | Execute when get_state coroutine has finished. | Execute when get_state coroutine has finished. | def get_state_finished(self, future) -> None:
"""Execute when get_state coroutine has finished."""
try:
future.result()
except HmipConnectionError:
# Somehow the connection could not recover. Will disconnect so
# the reconnect loop takes over.
_LOGGER.error("Updating state after HMIP access point reconnect failed")
self.hass.async_create_task(self.home.disable_events()) | [
"def",
"get_state_finished",
"(",
"self",
",",
"future",
")",
"->",
"None",
":",
"try",
":",
"future",
".",
"result",
"(",
")",
"except",
"HmipConnectionError",
":",
"# Somehow connection could not recover. Will disconnect and",
"# so reconnect loop is taking over.",
"_LOGGER",
".",
"error",
"(",
"\"Updating state after HMIP access point reconnect failed\"",
")",
"self",
".",
"hass",
".",
"async_create_task",
"(",
"self",
".",
"home",
".",
"disable_events",
"(",
")",
")"
] | [
157,
4
] | [
165,
67
] | python | en | ['en', 'en', 'en'] | True |
HomematicipHAP.set_all_to_unavailable | (self) | Set all devices to unavailable and tell Home Assistant. | Set all devices to unavailable and tell Home Assistant. | def set_all_to_unavailable(self) -> None:
"""Set all devices to unavailable and tell Home Assistant."""
for device in self.home.devices:
device.unreach = True
self.update_all() | [
"def",
"set_all_to_unavailable",
"(",
"self",
")",
"->",
"None",
":",
"for",
"device",
"in",
"self",
".",
"home",
".",
"devices",
":",
"device",
".",
"unreach",
"=",
"True",
"self",
".",
"update_all",
"(",
")"
] | [
167,
4
] | [
171,
25
] | python | en | ['en', 'en', 'en'] | True |
HomematicipHAP.update_all | (self) | Signal all devices to update their state. | Signal all devices to update their state. | def update_all(self) -> None:
"""Signal all devices to update their state."""
for device in self.home.devices:
device.fire_update_event() | [
"def",
"update_all",
"(",
"self",
")",
"->",
"None",
":",
"for",
"device",
"in",
"self",
".",
"home",
".",
"devices",
":",
"device",
".",
"fire_update_event",
"(",
")"
] | [
173,
4
] | [
176,
38
] | python | en | ['en', 'en', 'en'] | True |
HomematicipHAP.async_connect | (self) | Start WebSocket connection. | Start WebSocket connection. | async def async_connect(self) -> None:
"""Start WebSocket connection."""
tries = 0
while True:
retry_delay = 2 ** min(tries, 8)
try:
await self.home.get_current_state()
hmip_events = await self.home.enable_events()
tries = 0
await hmip_events
except HmipConnectionError:
_LOGGER.error(
"Error connecting to HomematicIP with HAP %s. "
"Retrying in %d seconds",
self.config_entry.unique_id,
retry_delay,
)
if self._ws_close_requested:
break
self._ws_close_requested = False
tries += 1
try:
self._retry_task = self.hass.async_create_task(
asyncio.sleep(retry_delay)
)
await self._retry_task
except asyncio.CancelledError:
break | [
"async",
"def",
"async_connect",
"(",
"self",
")",
"->",
"None",
":",
"tries",
"=",
"0",
"while",
"True",
":",
"retry_delay",
"=",
"2",
"**",
"min",
"(",
"tries",
",",
"8",
")",
"try",
":",
"await",
"self",
".",
"home",
".",
"get_current_state",
"(",
")",
"hmip_events",
"=",
"await",
"self",
".",
"home",
".",
"enable_events",
"(",
")",
"tries",
"=",
"0",
"await",
"hmip_events",
"except",
"HmipConnectionError",
":",
"_LOGGER",
".",
"error",
"(",
"\"Error connecting to HomematicIP with HAP %s. \"",
"\"Retrying in %d seconds\"",
",",
"self",
".",
"config_entry",
".",
"unique_id",
",",
"retry_delay",
",",
")",
"if",
"self",
".",
"_ws_close_requested",
":",
"break",
"self",
".",
"_ws_close_requested",
"=",
"False",
"tries",
"+=",
"1",
"try",
":",
"self",
".",
"_retry_task",
"=",
"self",
".",
"hass",
".",
"async_create_task",
"(",
"asyncio",
".",
"sleep",
"(",
"retry_delay",
")",
")",
"await",
"self",
".",
"_retry_task",
"except",
"asyncio",
".",
"CancelledError",
":",
"break"
] | [
178,
4
] | [
208,
21
] | python | en | ['en', 'da', 'en'] | True |
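The reconnect loop in async_connect caps its exponential backoff at 2**8 = 256 seconds and resets the counter after every successful connection. A generic, self-contained sketch of the same policy follows; do_work is a hypothetical stand-in for the HMIP event listener.

import asyncio


async def run_with_backoff(do_work, max_exponent: int = 8) -> None:
    """Retry do_work forever, doubling the delay up to 2**max_exponent seconds."""
    tries = 0
    while True:
        retry_delay = 2 ** min(tries, max_exponent)
        try:
            await do_work()
            tries = 0  # a successful run resets the backoff
        except ConnectionError:
            tries += 1
            await asyncio.sleep(retry_delay)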
HomematicipHAP.async_reset | (self) | Close the websocket connection. | Close the websocket connection. | async def async_reset(self) -> bool:
"""Close the websocket connection."""
self._ws_close_requested = True
if self._retry_task is not None:
self._retry_task.cancel()
await self.home.disable_events()
_LOGGER.info("Closed connection to HomematicIP cloud server")
for component in COMPONENTS:
await self.hass.config_entries.async_forward_entry_unload(
self.config_entry, component
)
self.hmip_device_by_entity_id = {}
return True | [
"async",
"def",
"async_reset",
"(",
"self",
")",
"->",
"bool",
":",
"self",
".",
"_ws_close_requested",
"=",
"True",
"if",
"self",
".",
"_retry_task",
"is",
"not",
"None",
":",
"self",
".",
"_retry_task",
".",
"cancel",
"(",
")",
"await",
"self",
".",
"home",
".",
"disable_events",
"(",
")",
"_LOGGER",
".",
"info",
"(",
"\"Closed connection to HomematicIP cloud server\"",
")",
"for",
"component",
"in",
"COMPONENTS",
":",
"await",
"self",
".",
"hass",
".",
"config_entries",
".",
"async_forward_entry_unload",
"(",
"self",
".",
"config_entry",
",",
"component",
")",
"self",
".",
"hmip_device_by_entity_id",
"=",
"{",
"}",
"return",
"True"
] | [
210,
4
] | [
222,
19
] | python | en | ['en', 'en', 'en'] | True |
HomematicipHAP.shutdown | (self, event) | Wrap the call to async_reset.
Used as an argument to EventBus.async_listen_once.
| Wrap the call to async_reset. | def shutdown(self, event) -> None:
"""Wrap the call to async_reset.
Used as an argument to EventBus.async_listen_once.
"""
self.hass.async_create_task(self.async_reset())
_LOGGER.debug(
"Reset connection to access point id %s", self.config_entry.unique_id
) | [
"def",
"shutdown",
"(",
"self",
",",
"event",
")",
"->",
"None",
":",
"self",
".",
"hass",
".",
"async_create_task",
"(",
"self",
".",
"async_reset",
"(",
")",
")",
"_LOGGER",
".",
"debug",
"(",
"\"Reset connection to access point id %s\"",
",",
"self",
".",
"config_entry",
".",
"unique_id",
")"
] | [
225,
4
] | [
233,
9
] | python | en | ['en', 'en', 'en'] | True |
HomematicipHAP.get_hap | (
self, hass: HomeAssistantType, hapid: str, authtoken: str, name: str
) | Create a HomematicIP access point object. | Create a HomematicIP access point object. | async def get_hap(
self, hass: HomeAssistantType, hapid: str, authtoken: str, name: str
) -> AsyncHome:
"""Create a HomematicIP access point object."""
home = AsyncHome(hass.loop, async_get_clientsession(hass))
home.name = name
# Use the title of the config entry as title for the home.
home.label = self.config_entry.title
home.modelType = "HomematicIP Cloud Home"
home.set_auth_token(authtoken)
try:
await home.init(hapid)
await home.get_current_state()
except HmipConnectionError as err:
raise HmipcConnectionError from err
home.on_update(self.async_update)
home.on_create(self.async_create_entity)
hass.loop.create_task(self.async_connect())
return home | [
"async",
"def",
"get_hap",
"(",
"self",
",",
"hass",
":",
"HomeAssistantType",
",",
"hapid",
":",
"str",
",",
"authtoken",
":",
"str",
",",
"name",
":",
"str",
")",
"->",
"AsyncHome",
":",
"home",
"=",
"AsyncHome",
"(",
"hass",
".",
"loop",
",",
"async_get_clientsession",
"(",
"hass",
")",
")",
"home",
".",
"name",
"=",
"name",
"# Use the title of the config entry as title for the home.",
"home",
".",
"label",
"=",
"self",
".",
"config_entry",
".",
"title",
"home",
".",
"modelType",
"=",
"\"HomematicIP Cloud Home\"",
"home",
".",
"set_auth_token",
"(",
"authtoken",
")",
"try",
":",
"await",
"home",
".",
"init",
"(",
"hapid",
")",
"await",
"home",
".",
"get_current_state",
"(",
")",
"except",
"HmipConnectionError",
"as",
"err",
":",
"raise",
"HmipcConnectionError",
"from",
"err",
"home",
".",
"on_update",
"(",
"self",
".",
"async_update",
")",
"home",
".",
"on_create",
"(",
"self",
".",
"async_create_entity",
")",
"hass",
".",
"loop",
".",
"create_task",
"(",
"self",
".",
"async_connect",
"(",
")",
")",
"return",
"home"
] | [
235,
4
] | [
256,
19
] | python | en | ['en', 'en', 'en'] | True |
_make_causal_mask | (input_ids_shape: tf.TensorShape, past_key_values_length: int = 0) |
Make causal mask used for uni-directional self-attention.
|
Make causal mask used for uni-directional self-attention.
| def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
"""
Make causal mask used for uni-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
mask_cond = tf.range(shape_list(mask)[-1])
mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)
if past_key_values_length > 0:
mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)
return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1)) | [
"def",
"_make_causal_mask",
"(",
"input_ids_shape",
":",
"tf",
".",
"TensorShape",
",",
"past_key_values_length",
":",
"int",
"=",
"0",
")",
":",
"bsz",
",",
"tgt_len",
"=",
"input_ids_shape",
"mask",
"=",
"tf",
".",
"ones",
"(",
"(",
"tgt_len",
",",
"tgt_len",
")",
")",
"*",
"LARGE_NEGATIVE",
"mask_cond",
"=",
"tf",
".",
"range",
"(",
"shape_list",
"(",
"mask",
")",
"[",
"-",
"1",
"]",
")",
"mask",
"=",
"tf",
".",
"where",
"(",
"mask_cond",
"<",
"tf",
".",
"reshape",
"(",
"mask_cond",
"+",
"1",
",",
"(",
"shape_list",
"(",
"mask",
")",
"[",
"-",
"1",
"]",
",",
"1",
")",
")",
",",
"0.0",
",",
"mask",
")",
"if",
"past_key_values_length",
">",
"0",
":",
"mask",
"=",
"tf",
".",
"concat",
"(",
"[",
"tf",
".",
"zeros",
"(",
"(",
"tgt_len",
",",
"past_key_values_length",
")",
")",
",",
"mask",
"]",
",",
"axis",
"=",
"-",
"1",
")",
"return",
"tf",
".",
"tile",
"(",
"mask",
"[",
"None",
",",
"None",
",",
":",
",",
":",
"]",
",",
"(",
"bsz",
",",
"1",
",",
"1",
",",
"1",
")",
")"
] | [
85,
0
] | [
98,
58
] | python | en | ['en', 'error', 'th'] | False |
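A self-contained check of what _make_causal_mask produces, with the module's helpers inlined. The value of LARGE_NEGATIVE is not shown in this excerpt and is assumed here to be -1e8.

import tensorflow as tf

LARGE_NEGATIVE = -1e8  # assumed value of the module constant

tgt_len = 3
mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
mask_cond = tf.range(tgt_len)
mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (tgt_len, 1)), 0.0, mask)
print(mask.numpy())
# row i keeps 0.0 for keys <= i and a large negative value for future keys:
# [[ 0, -1e8, -1e8],
#  [ 0,    0, -1e8],
#  [ 0,    0,    0]]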
_expand_mask | (mask: tf.Tensor, tgt_len: Optional[int] = None, past_key_values_length: int = 0) |
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
|
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
| def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None, past_key_values_length: int = 0):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
src_len = shape_list(mask)[1]
tgt_len = tgt_len if tgt_len is not None else src_len
one_cst = tf.constant(1.0)
mask = tf.cast(mask, dtype=one_cst.dtype)
expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
return (one_cst - expanded_mask) * LARGE_NEGATIVE | [
"def",
"_expand_mask",
"(",
"mask",
":",
"tf",
".",
"Tensor",
",",
"tgt_len",
":",
"Optional",
"[",
"int",
"]",
"=",
"None",
",",
"past_key_values_length",
":",
"int",
"=",
"0",
")",
":",
"src_len",
"=",
"shape_list",
"(",
"mask",
")",
"[",
"1",
"]",
"tgt_len",
"=",
"tgt_len",
"if",
"tgt_len",
"is",
"not",
"None",
"else",
"src_len",
"one_cst",
"=",
"tf",
".",
"constant",
"(",
"1.0",
")",
"mask",
"=",
"tf",
".",
"cast",
"(",
"mask",
",",
"dtype",
"=",
"one_cst",
".",
"dtype",
")",
"expanded_mask",
"=",
"tf",
".",
"tile",
"(",
"mask",
"[",
":",
",",
"None",
",",
"None",
",",
":",
"]",
",",
"(",
"1",
",",
"1",
",",
"tgt_len",
",",
"1",
")",
")",
"return",
"(",
"one_cst",
"-",
"expanded_mask",
")",
"*",
"LARGE_NEGATIVE"
] | [
102,
0
] | [
112,
53
] | python | en | ['en', 'error', 'th'] | False |
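The same inlined treatment for _expand_mask: a [bsz, src_len] padding mask (1 = keep, 0 = pad) becomes an additive [bsz, 1, tgt_len, src_len] mask, again with LARGE_NEGATIVE assumed to be -1e8.

import tensorflow as tf

LARGE_NEGATIVE = -1e8  # assumed value of the module constant

mask = tf.constant([[1.0, 1.0, 0.0]])  # one sequence whose last token is padding
tgt_len = 2
expanded = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
additive = (1.0 - expanded) * LARGE_NEGATIVE
print(additive.shape)  # (1, 1, 2, 3)
# every query row is [0, 0, -1e8]: attention to the padded key is suppressed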
TFMarianSinusoidalPositionalEmbedding.build | (self, input_shape: tf.TensorShape) |
Build shared token embedding layer. Shared weights logic adapted from
https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
|
Build shared token embedding layer. Shared weights logic adapted from
https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
| def build(self, input_shape: tf.TensorShape):
"""
Build shared token embedding layer. Shared weights logic adapted from
https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
"""
weight = self._init_weight(self.num_positions, self.embedding_dim)
self.weight = self.add_weight(
name="embeddings",
shape=[self.num_positions, self.embedding_dim],
)
weight = tf.cast(weight, dtype=self.weight.dtype)
self.weight.assign(weight)
super().build(input_shape) | [
"def",
"build",
"(",
"self",
",",
"input_shape",
":",
"tf",
".",
"TensorShape",
")",
":",
"weight",
"=",
"self",
".",
"_init_weight",
"(",
"self",
".",
"num_positions",
",",
"self",
".",
"embedding_dim",
")",
"self",
".",
"weight",
"=",
"self",
".",
"add_weight",
"(",
"name",
"=",
"\"embeddings\"",
",",
"shape",
"=",
"[",
"self",
".",
"num_positions",
",",
"self",
".",
"embedding_dim",
"]",
",",
")",
"weight",
"=",
"tf",
".",
"cast",
"(",
"weight",
",",
"dtype",
"=",
"self",
".",
"weight",
".",
"dtype",
")",
"self",
".",
"weight",
".",
"assign",
"(",
"weight",
")",
"super",
"(",
")",
".",
"build",
"(",
"input_shape",
")"
] | [
127,
4
] | [
143,
34
] | python | en | ['en', 'error', 'th'] | False |
TFMarianSinusoidalPositionalEmbedding._init_weight | (n_pos: int, dim: int) |
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
|
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
| def _init_weight(n_pos: int, dim: int):
"""
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
"""
position_enc = np.array(
[[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
)
# index 0 is all zero
position_enc[:, 0 : dim // 2] = np.sin(position_enc[:, 0::2])
position_enc[:, dim // 2 :] = np.cos(position_enc[:, 1::2])
# convert to tensor
table = tf.convert_to_tensor(position_enc)
tf.stop_gradient(table)
return table | [
"def",
"_init_weight",
"(",
"n_pos",
":",
"int",
",",
"dim",
":",
"int",
")",
":",
"position_enc",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"pos",
"/",
"np",
".",
"power",
"(",
"10000",
",",
"2",
"*",
"(",
"j",
"//",
"2",
")",
"/",
"dim",
")",
"for",
"j",
"in",
"range",
"(",
"dim",
")",
"]",
"for",
"pos",
"in",
"range",
"(",
"n_pos",
")",
"]",
")",
"# index 0 is all zero",
"position_enc",
"[",
":",
",",
"0",
":",
"dim",
"//",
"2",
"]",
"=",
"np",
".",
"sin",
"(",
"position_enc",
"[",
":",
",",
"0",
":",
":",
"2",
"]",
")",
"position_enc",
"[",
":",
",",
"dim",
"//",
"2",
":",
"]",
"=",
"np",
".",
"cos",
"(",
"position_enc",
"[",
":",
",",
"1",
":",
":",
"2",
"]",
")",
"# convert to tensor",
"table",
"=",
"tf",
".",
"convert_to_tensor",
"(",
"position_enc",
")",
"tf",
".",
"stop_gradient",
"(",
"table",
")",
"return",
"table"
] | [
146,
4
] | [
160,
20
] | python | en | ['en', 'error', 'th'] | False |
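In formula form, the table built by _init_weight is as follows (a reconstruction from the code above, for even d = dim and 0 <= k < d/2): all sine features fill the first half of each row and all cosine features the second half, rather than being interleaved.

\mathrm{PE}[pos,\;k] = \sin\!\Big(\frac{pos}{10000^{2k/d}}\Big),
\qquad
\mathrm{PE}[pos,\;d/2+k] = \cos\!\Big(\frac{pos}{10000^{2k/d}}\Big)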
TFMarianSinusoidalPositionalEmbedding.call | (self, input_shape: tf.TensorShape, past_key_values_length: int = 0) | Input is expected to be of size [bsz x seqlen]. | Input is expected to be of size [bsz x seqlen]. | def call(self, input_shape: tf.TensorShape, past_key_values_length: int = 0):
"""Input is expected to be of size [bsz x seqlen]."""
bsz, seq_len = input_shape[:2]
positions = tf.range(past_key_values_length, seq_len + past_key_values_length, delta=1, name="range")
return tf.gather(self.weight, positions) | [
"def",
"call",
"(",
"self",
",",
"input_shape",
":",
"tf",
".",
"TensorShape",
",",
"past_key_values_length",
":",
"int",
"=",
"0",
")",
":",
"bsz",
",",
"seq_len",
"=",
"input_shape",
"[",
":",
"2",
"]",
"positions",
"=",
"tf",
".",
"range",
"(",
"past_key_values_length",
",",
"seq_len",
"+",
"past_key_values_length",
",",
"delta",
"=",
"1",
",",
"name",
"=",
"\"range\"",
")",
"return",
"tf",
".",
"gather",
"(",
"self",
".",
"weight",
",",
"positions",
")"
] | [
162,
4
] | [
167,
48
] | python | en | ['en', 'en', 'en'] | True |
TFMarianAttention.call | (
self,
hidden_states: tf.Tensor,
key_value_states: Optional[tf.Tensor] = None,
past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None,
attention_mask: Optional[tf.Tensor] = None,
layer_head_mask: Optional[tf.Tensor] = None,
training=False,
) | Input shape: Batch x Time x Channel | Input shape: Batch x Time x Channel | def call(
self,
hidden_states: tf.Tensor,
key_value_states: Optional[tf.Tensor] = None,
past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None,
attention_mask: Optional[tf.Tensor] = None,
layer_head_mask: Optional[tf.Tensor] = None,
training=False,
) -> Tuple[tf.Tensor, Optional[tf.Tensor]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = shape_list(hidden_states)
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = tf.concat([past_key_value[0], key_states], axis=2)
value_states = tf.concat([past_key_value[1], value_states], axis=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
key_states = tf.reshape(key_states, proj_shape)
value_states = tf.reshape(value_states, proj_shape)
src_len = shape_list(key_states)[1]
attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
# The tf.debugging asserts are not compliant with XLA, so they
# have to be disabled in modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attn_weights),
[bsz * self.num_heads, tgt_len, src_len],
message=f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {shape_list(attn_weights)}",
)
if attention_mask is not None:
# The tf.debugging asserts are not compliant with XLA, so they
# have to be disabled in modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attention_mask),
[bsz, 1, tgt_len, src_len],
message=f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(attention_mask)}",
)
attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
attn_weights = tf.nn.softmax(attn_weights, axis=-1)
if layer_head_mask is not None:
# The tf.debugging asserts are not compliant with XLA, so they
# have to be disabled in modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(layer_head_mask),
[self.num_heads],
message=f"Head mask for a single layer should be of size {(self.num_heads)}, but is {shape_list(layer_head_mask)}",
)
attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
attn_weights, (bsz, self.num_heads, tgt_len, src_len)
)
attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
attn_probs = self.dropout(attn_weights, training=training)
attn_output = tf.matmul(attn_probs, value_states)
# The tf.debugging asserts are not compliant with XLA, so they
# have to be disabled in modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(attn_output),
[bsz * self.num_heads, tgt_len, self.head_dim],
message=f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {shape_list(attn_output)}",
)
attn_output = tf.transpose(
tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
)
attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
attn_output = self.out_proj(attn_output)
attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
return attn_output, attn_weights, past_key_value | [
"def",
"call",
"(",
"self",
",",
"hidden_states",
":",
"tf",
".",
"Tensor",
",",
"key_value_states",
":",
"Optional",
"[",
"tf",
".",
"Tensor",
"]",
"=",
"None",
",",
"past_key_value",
":",
"Optional",
"[",
"Tuple",
"[",
"Tuple",
"[",
"tf",
".",
"Tensor",
"]",
"]",
"]",
"=",
"None",
",",
"attention_mask",
":",
"Optional",
"[",
"tf",
".",
"Tensor",
"]",
"=",
"None",
",",
"layer_head_mask",
":",
"Optional",
"[",
"tf",
".",
"Tensor",
"]",
"=",
"None",
",",
"training",
"=",
"False",
",",
")",
"->",
"Tuple",
"[",
"tf",
".",
"Tensor",
",",
"Optional",
"[",
"tf",
".",
"Tensor",
"]",
"]",
":",
"# if key_value_states are provided this layer is used as a cross-attention layer",
"# for the decoder",
"is_cross_attention",
"=",
"key_value_states",
"is",
"not",
"None",
"bsz",
",",
"tgt_len",
",",
"embed_dim",
"=",
"shape_list",
"(",
"hidden_states",
")",
"# get query proj",
"query_states",
"=",
"self",
".",
"q_proj",
"(",
"hidden_states",
")",
"*",
"self",
".",
"scaling",
"# get key, value proj",
"if",
"is_cross_attention",
"and",
"past_key_value",
"is",
"not",
"None",
":",
"# reuse k,v, cross_attentions",
"key_states",
"=",
"past_key_value",
"[",
"0",
"]",
"value_states",
"=",
"past_key_value",
"[",
"1",
"]",
"elif",
"is_cross_attention",
":",
"# cross_attentions",
"key_states",
"=",
"self",
".",
"_shape",
"(",
"self",
".",
"k_proj",
"(",
"key_value_states",
")",
",",
"-",
"1",
",",
"bsz",
")",
"value_states",
"=",
"self",
".",
"_shape",
"(",
"self",
".",
"v_proj",
"(",
"key_value_states",
")",
",",
"-",
"1",
",",
"bsz",
")",
"elif",
"past_key_value",
"is",
"not",
"None",
":",
"# reuse k, v, self_attention",
"key_states",
"=",
"self",
".",
"_shape",
"(",
"self",
".",
"k_proj",
"(",
"hidden_states",
")",
",",
"-",
"1",
",",
"bsz",
")",
"value_states",
"=",
"self",
".",
"_shape",
"(",
"self",
".",
"v_proj",
"(",
"hidden_states",
")",
",",
"-",
"1",
",",
"bsz",
")",
"key_states",
"=",
"tf",
".",
"concat",
"(",
"[",
"past_key_value",
"[",
"0",
"]",
",",
"key_states",
"]",
",",
"axis",
"=",
"2",
")",
"value_states",
"=",
"tf",
".",
"concat",
"(",
"[",
"past_key_value",
"[",
"1",
"]",
",",
"value_states",
"]",
",",
"axis",
"=",
"2",
")",
"else",
":",
"# self_attention",
"key_states",
"=",
"self",
".",
"_shape",
"(",
"self",
".",
"k_proj",
"(",
"hidden_states",
")",
",",
"-",
"1",
",",
"bsz",
")",
"value_states",
"=",
"self",
".",
"_shape",
"(",
"self",
".",
"v_proj",
"(",
"hidden_states",
")",
",",
"-",
"1",
",",
"bsz",
")",
"if",
"self",
".",
"is_decoder",
":",
"# if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.",
"# Further calls to cross_attention layer can then reuse all cross-attention",
"# key/value_states (first \"if\" case)",
"# if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of",
"# all previous decoder key/value_states. Further calls to uni-directional self-attention",
"# can concat previous decoder key/value_states to current projected key/value_states (third \"elif\" case)",
"# if encoder bi-directional self-attention `past_key_value` is always `None`",
"past_key_value",
"=",
"(",
"key_states",
",",
"value_states",
")",
"proj_shape",
"=",
"(",
"bsz",
"*",
"self",
".",
"num_heads",
",",
"-",
"1",
",",
"self",
".",
"head_dim",
")",
"query_states",
"=",
"tf",
".",
"reshape",
"(",
"self",
".",
"_shape",
"(",
"query_states",
",",
"tgt_len",
",",
"bsz",
")",
",",
"proj_shape",
")",
"key_states",
"=",
"tf",
".",
"reshape",
"(",
"key_states",
",",
"proj_shape",
")",
"value_states",
"=",
"tf",
".",
"reshape",
"(",
"value_states",
",",
"proj_shape",
")",
"src_len",
"=",
"shape_list",
"(",
"key_states",
")",
"[",
"1",
"]",
"attn_weights",
"=",
"tf",
".",
"matmul",
"(",
"query_states",
",",
"key_states",
",",
"transpose_b",
"=",
"True",
")",
"# The tf.debugging asserts are not compliant with XLA then they",
"# have to be disabled in other modes than eager.",
"if",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"tf",
".",
"debugging",
".",
"assert_equal",
"(",
"shape_list",
"(",
"attn_weights",
")",
",",
"[",
"bsz",
"*",
"self",
".",
"num_heads",
",",
"tgt_len",
",",
"src_len",
"]",
",",
"message",
"=",
"f\"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {shape_list(attn_weights)}\"",
",",
")",
"if",
"attention_mask",
"is",
"not",
"None",
":",
"# The tf.debugging asserts are not compliant with XLA then they",
"# have to be disabled in other modes than eager.",
"if",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"tf",
".",
"debugging",
".",
"assert_equal",
"(",
"shape_list",
"(",
"attention_mask",
")",
",",
"[",
"bsz",
",",
"1",
",",
"tgt_len",
",",
"src_len",
"]",
",",
"message",
"=",
"f\"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(attention_mask)}\"",
",",
")",
"attention_mask",
"=",
"tf",
".",
"cast",
"(",
"attention_mask",
",",
"dtype",
"=",
"attn_weights",
".",
"dtype",
")",
"attn_weights",
"=",
"tf",
".",
"reshape",
"(",
"attn_weights",
",",
"(",
"bsz",
",",
"self",
".",
"num_heads",
",",
"tgt_len",
",",
"src_len",
")",
")",
"+",
"attention_mask",
"attn_weights",
"=",
"tf",
".",
"reshape",
"(",
"attn_weights",
",",
"(",
"bsz",
"*",
"self",
".",
"num_heads",
",",
"tgt_len",
",",
"src_len",
")",
")",
"attn_weights",
"=",
"tf",
".",
"nn",
".",
"softmax",
"(",
"attn_weights",
",",
"axis",
"=",
"-",
"1",
")",
"if",
"layer_head_mask",
"is",
"not",
"None",
":",
"# The tf.debugging asserts are not compliant with XLA then they",
"# have to be disabled in other modes than eager.",
"if",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"tf",
".",
"debugging",
".",
"assert_equal",
"(",
"shape_list",
"(",
"layer_head_mask",
")",
",",
"[",
"self",
".",
"num_heads",
"]",
",",
"message",
"=",
"f\"Head mask for a single layer should be of size {(self.num_heads)}, but is {shape_list(layer_head_mask)}\"",
",",
")",
"attn_weights",
"=",
"tf",
".",
"reshape",
"(",
"layer_head_mask",
",",
"(",
"1",
",",
"-",
"1",
",",
"1",
",",
"1",
")",
")",
"*",
"tf",
".",
"reshape",
"(",
"attn_weights",
",",
"(",
"bsz",
",",
"self",
".",
"num_heads",
",",
"tgt_len",
",",
"src_len",
")",
")",
"attn_weights",
"=",
"tf",
".",
"reshape",
"(",
"attn_weights",
",",
"(",
"bsz",
"*",
"self",
".",
"num_heads",
",",
"tgt_len",
",",
"src_len",
")",
")",
"attn_probs",
"=",
"self",
".",
"dropout",
"(",
"attn_weights",
",",
"training",
"=",
"training",
")",
"attn_output",
"=",
"tf",
".",
"matmul",
"(",
"attn_probs",
",",
"value_states",
")",
"# The tf.debugging asserts are not compliant with XLA then they",
"# have to be disabled in other modes than eager.",
"if",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"tf",
".",
"debugging",
".",
"assert_equal",
"(",
"shape_list",
"(",
"attn_output",
")",
",",
"[",
"bsz",
"*",
"self",
".",
"num_heads",
",",
"tgt_len",
",",
"self",
".",
"head_dim",
"]",
",",
"message",
"=",
"f\"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {shape_list(attn_output)}\"",
",",
")",
"attn_output",
"=",
"tf",
".",
"transpose",
"(",
"tf",
".",
"reshape",
"(",
"attn_output",
",",
"(",
"bsz",
",",
"self",
".",
"num_heads",
",",
"tgt_len",
",",
"self",
".",
"head_dim",
")",
")",
",",
"(",
"0",
",",
"2",
",",
"1",
",",
"3",
")",
")",
"attn_output",
"=",
"tf",
".",
"reshape",
"(",
"attn_output",
",",
"(",
"bsz",
",",
"tgt_len",
",",
"embed_dim",
")",
")",
"attn_output",
"=",
"self",
".",
"out_proj",
"(",
"attn_output",
")",
"attn_weights",
":",
"tf",
".",
"Tensor",
"=",
"tf",
".",
"reshape",
"(",
"attn_weights",
",",
"(",
"bsz",
",",
"self",
".",
"num_heads",
",",
"tgt_len",
",",
"src_len",
")",
")",
"return",
"attn_output",
",",
"attn_weights",
",",
"past_key_value"
] | [
201,
4
] | [
317,
56
] | python | en | ['en', 'pl', 'en'] | True |
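Stripped of caching, mask plumbing, and shape checks, the call above computes standard scaled dot-product attention per head. The 1/sqrt(d_h) factor is folded into the query via self.scaling, whose value is not part of this excerpt and is assumed to be head_dim ** -0.5; M is the optional additive mask (0 for visible positions, a large negative value otherwise).

\mathrm{Attn}(Q, K, V) = \mathrm{softmax}\!\Big(\frac{Q K^{\top}}{\sqrt{d_h}} + M\Big)\, V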
TFMarianEncoderLayer.call | (self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training=False) |
Args:
hidden_states (:obj:`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`tf.Tensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`tf.Tensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`
|
Args:
hidden_states (:obj:`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`tf.Tensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`tf.Tensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`
| def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training=False):
"""
Args:
hidden_states (:obj:`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`tf.Tensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`tf.Tensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`
"""
residual = hidden_states
hidden_states, self_attn_weights, _ = self.self_attn(
hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask
)
# The tf.debugging asserts are not compliant with XLA, so they
# have to be disabled in modes other than eager.
if tf.executing_eagerly():
tf.debugging.assert_equal(
shape_list(hidden_states),
shape_list(residual),
message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.activation_dropout(hidden_states, training=training)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
return hidden_states, self_attn_weights | [
"def",
"call",
"(",
"self",
",",
"hidden_states",
":",
"tf",
".",
"Tensor",
",",
"attention_mask",
":",
"tf",
".",
"Tensor",
",",
"layer_head_mask",
":",
"tf",
".",
"Tensor",
",",
"training",
"=",
"False",
")",
":",
"residual",
"=",
"hidden_states",
"hidden_states",
",",
"self_attn_weights",
",",
"_",
"=",
"self",
".",
"self_attn",
"(",
"hidden_states",
"=",
"hidden_states",
",",
"attention_mask",
"=",
"attention_mask",
",",
"layer_head_mask",
"=",
"layer_head_mask",
")",
"# The tf.debugging asserts are not compliant with XLA then they",
"# have to be disabled in other modes than eager.",
"if",
"tf",
".",
"executing_eagerly",
"(",
")",
":",
"tf",
".",
"debugging",
".",
"assert_equal",
"(",
"shape_list",
"(",
"hidden_states",
")",
",",
"shape_list",
"(",
"residual",
")",
",",
"message",
"=",
"f\"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}\"",
",",
")",
"hidden_states",
"=",
"self",
".",
"dropout",
"(",
"hidden_states",
",",
"training",
"=",
"training",
")",
"hidden_states",
"=",
"residual",
"+",
"hidden_states",
"hidden_states",
"=",
"self",
".",
"self_attn_layer_norm",
"(",
"hidden_states",
")",
"residual",
"=",
"hidden_states",
"hidden_states",
"=",
"self",
".",
"activation_fn",
"(",
"self",
".",
"fc1",
"(",
"hidden_states",
")",
")",
"hidden_states",
"=",
"self",
".",
"activation_dropout",
"(",
"hidden_states",
",",
"training",
"=",
"training",
")",
"hidden_states",
"=",
"self",
".",
"fc2",
"(",
"hidden_states",
")",
"hidden_states",
"=",
"self",
".",
"dropout",
"(",
"hidden_states",
",",
"training",
"=",
"training",
")",
"hidden_states",
"=",
"residual",
"+",
"hidden_states",
"hidden_states",
"=",
"self",
".",
"final_layer_norm",
"(",
"hidden_states",
")",
"return",
"hidden_states",
",",
"self_attn_weights"
] | [
336,
4
] | [
371,
47
] | python | en | ['en', 'error', 'th'] | False |
TFMarianDecoderLayer.call | (
self,
hidden_states,
attention_mask: Optional[tf.Tensor] = None,
encoder_hidden_states: Optional[tf.Tensor] = None,
encoder_attention_mask: Optional[tf.Tensor] = None,
layer_head_mask: Optional[tf.Tensor] = None,
encoder_layer_head_mask: Optional[tf.Tensor] = None,
past_key_value: Optional[Tuple[tf.Tensor]] = None,
training=False,
) |
Args:
hidden_states (:obj:`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`tf.Tensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (:obj:`tf.Tensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_attention_mask (:obj:`tf.Tensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`tf.Tensor`): mask for attention heads in a given layer of size
`(decoder_attention_heads,)`
encoder_layer_head_mask (:obj:`tf.Tensor`): mask for encoder attention heads in a given layer of size
`(encoder_attention_heads,)`
past_key_value (:obj:`Tuple(tf.Tensor)`): cached past key and value projection states
|
Args:
hidden_states (:obj:`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`tf.Tensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (:obj:`tf.Tensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_attention_mask (:obj:`tf.Tensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`tf.Tensor`): mask for attention heads in a given layer of size
`(decoder_attention_heads,)`
encoder_layer_head_mask (:obj:`tf.Tensor`): mask for encoder attention heads in a given layer of size
`(encoder_attention_heads,)`
past_key_value (:obj:`Tuple(tf.Tensor)`): cached past key and value projection states
| def call(
self,
hidden_states,
attention_mask: Optional[tf.Tensor] = None,
encoder_hidden_states: Optional[tf.Tensor] = None,
encoder_attention_mask: Optional[tf.Tensor] = None,
layer_head_mask: Optional[tf.Tensor] = None,
encoder_layer_head_mask: Optional[tf.Tensor] = None,
past_key_value: Optional[Tuple[tf.Tensor]] = None,
training=False,
) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
"""
Args:
hidden_states (:obj:`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`tf.Tensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (:obj:`tf.Tensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_attention_mask (:obj:`tf.Tensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`tf.Tensor`): mask for attention heads in a given layer of size
`(decoder_attention_heads,)`
encoder_layer_head_mask (:obj:`tf.Tensor`): mask for encoder attention heads in a given layer of size
`(encoder_attention_heads,)`
past_key_value (:obj:`Tuple(tf.Tensor)`): cached past key and value projection states
"""
residual = hidden_states
# Self Attention
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
# add present self-attn cache to positions 1,2 of present_key_value tuple
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_value=self_attn_past_key_value,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_present_key_value = None
if encoder_hidden_states is not None:
residual = hidden_states
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
hidden_states, _, cross_attn_present_key_value = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
layer_head_mask=encoder_layer_head_mask,
past_key_value=cross_attn_past_key_value,
)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# add cross-attn to positions 3,4 of present_key_value tuple
present_key_value = present_key_value + cross_attn_present_key_value
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = self.activation_dropout(hidden_states, training=training)
hidden_states = self.fc2(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
return (
hidden_states,
self_attn_weights,
present_key_value,
) | [
"def",
"call",
"(",
"self",
",",
"hidden_states",
",",
"attention_mask",
":",
"Optional",
"[",
"tf",
".",
"Tensor",
"]",
"=",
"None",
",",
"encoder_hidden_states",
":",
"Optional",
"[",
"tf",
".",
"Tensor",
"]",
"=",
"None",
",",
"encoder_attention_mask",
":",
"Optional",
"[",
"tf",
".",
"Tensor",
"]",
"=",
"None",
",",
"layer_head_mask",
":",
"Optional",
"[",
"tf",
".",
"Tensor",
"]",
"=",
"None",
",",
"encoder_layer_head_mask",
":",
"Optional",
"[",
"tf",
".",
"Tensor",
"]",
"=",
"None",
",",
"past_key_value",
":",
"Optional",
"[",
"Tuple",
"[",
"tf",
".",
"Tensor",
"]",
"]",
"=",
"None",
",",
"training",
"=",
"False",
",",
")",
"->",
"Tuple",
"[",
"tf",
".",
"Tensor",
",",
"tf",
".",
"Tensor",
",",
"Tuple",
"[",
"Tuple",
"[",
"tf",
".",
"Tensor",
"]",
"]",
"]",
":",
"residual",
"=",
"hidden_states",
"# Self Attention",
"# decoder uni-directional self-attention cached key/values tuple is at positions 1,2",
"self_attn_past_key_value",
"=",
"past_key_value",
"[",
":",
"2",
"]",
"if",
"past_key_value",
"is",
"not",
"None",
"else",
"None",
"# add present self-attn cache to positions 1,2 of present_key_value tuple",
"hidden_states",
",",
"self_attn_weights",
",",
"present_key_value",
"=",
"self",
".",
"self_attn",
"(",
"hidden_states",
"=",
"hidden_states",
",",
"past_key_value",
"=",
"self_attn_past_key_value",
",",
"attention_mask",
"=",
"attention_mask",
",",
"layer_head_mask",
"=",
"layer_head_mask",
",",
")",
"hidden_states",
"=",
"self",
".",
"dropout",
"(",
"hidden_states",
",",
"training",
"=",
"training",
")",
"hidden_states",
"=",
"residual",
"+",
"hidden_states",
"hidden_states",
"=",
"self",
".",
"self_attn_layer_norm",
"(",
"hidden_states",
")",
"# Cross-Attention Block",
"cross_attn_present_key_value",
"=",
"None",
"if",
"encoder_hidden_states",
"is",
"not",
"None",
":",
"residual",
"=",
"hidden_states",
"# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple",
"cross_attn_past_key_value",
"=",
"past_key_value",
"[",
"-",
"2",
":",
"]",
"if",
"past_key_value",
"is",
"not",
"None",
"else",
"None",
"hidden_states",
",",
"_",
",",
"cross_attn_present_key_value",
"=",
"self",
".",
"encoder_attn",
"(",
"hidden_states",
"=",
"hidden_states",
",",
"key_value_states",
"=",
"encoder_hidden_states",
",",
"attention_mask",
"=",
"encoder_attention_mask",
",",
"layer_head_mask",
"=",
"encoder_layer_head_mask",
",",
"past_key_value",
"=",
"cross_attn_past_key_value",
",",
")",
"hidden_states",
"=",
"self",
".",
"dropout",
"(",
"hidden_states",
",",
"training",
"=",
"training",
")",
"hidden_states",
"=",
"residual",
"+",
"hidden_states",
"hidden_states",
"=",
"self",
".",
"encoder_attn_layer_norm",
"(",
"hidden_states",
")",
"# add cross-attn to positions 3,4 of present_key_value tuple",
"present_key_value",
"=",
"present_key_value",
"+",
"cross_attn_present_key_value",
"# Fully Connected",
"residual",
"=",
"hidden_states",
"hidden_states",
"=",
"self",
".",
"activation_fn",
"(",
"self",
".",
"fc1",
"(",
"hidden_states",
")",
")",
"hidden_states",
"=",
"self",
".",
"activation_dropout",
"(",
"hidden_states",
",",
"training",
"=",
"training",
")",
"hidden_states",
"=",
"self",
".",
"fc2",
"(",
"hidden_states",
")",
"hidden_states",
"=",
"self",
".",
"dropout",
"(",
"hidden_states",
",",
"training",
"=",
"training",
")",
"hidden_states",
"=",
"residual",
"+",
"hidden_states",
"hidden_states",
"=",
"self",
".",
"final_layer_norm",
"(",
"hidden_states",
")",
"return",
"(",
"hidden_states",
",",
"self_attn_weights",
",",
"present_key_value",
",",
")"
] | [
403,
4
] | [
478,
9
] | python | en | ['en', 'error', 'th'] | False |
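The per-layer cache the decoder layer above slices is a flat 4-tuple. Its in-code comments count positions 1-based; in 0-based slicing the layout reads as in this runnable sketch with dummy tensors.

import tensorflow as tf

bsz, heads, past_len, head_dim = 1, 8, 5, 64
past_key_value = tuple(
    tf.zeros((bsz, heads, past_len, head_dim)) for _ in range(4)
)  # (self_k, self_v, cross_k, cross_v)

self_attn_past = past_key_value[:2]    # the comments' "positions 1,2": self-attention k/v
cross_attn_past = past_key_value[-2:]  # the comments' "positions 3,4": cross-attention k/v
assert len(self_attn_past) == 2 and len(cross_attn_past) == 2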
async_setup | (hass: HomeAssistantType, config: ConfigType) | Set up Meteo-France from legacy config file. | Set up Meteo-France from legacy config file. | async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Set up Meteo-France from legacy config file."""
conf = config.get(DOMAIN)
if not conf:
return True
for city_conf in conf:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=city_conf
)
)
return True | [
"async",
"def",
"async_setup",
"(",
"hass",
":",
"HomeAssistantType",
",",
"config",
":",
"ConfigType",
")",
"->",
"bool",
":",
"conf",
"=",
"config",
".",
"get",
"(",
"DOMAIN",
")",
"if",
"not",
"conf",
":",
"return",
"True",
"for",
"city_conf",
"in",
"conf",
":",
"hass",
".",
"async_create_task",
"(",
"hass",
".",
"config_entries",
".",
"flow",
".",
"async_init",
"(",
"DOMAIN",
",",
"context",
"=",
"{",
"\"source\"",
":",
"SOURCE_IMPORT",
"}",
",",
"data",
"=",
"city_conf",
")",
")",
"return",
"True"
] | [
40,
0
] | [
53,
15
] | python | en | ['en', 'en', 'en'] | True |
async_setup_entry | (hass: HomeAssistantType, entry: ConfigEntry) | Set up a Meteo-France account from a config entry. | Set up a Meteo-France account from a config entry. | async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Set up a Meteo-France account from a config entry."""
hass.data.setdefault(DOMAIN, {})
latitude = entry.data.get(CONF_LATITUDE)
client = MeteoFranceClient()
# Migrate from previous config
if not latitude:
places = await hass.async_add_executor_job(
client.search_places, entry.data[CONF_CITY]
)
hass.config_entries.async_update_entry(
entry,
title=f"{places[0]}",
data={
CONF_LATITUDE: places[0].latitude,
CONF_LONGITUDE: places[0].longitude,
},
)
latitude = entry.data[CONF_LATITUDE]
longitude = entry.data[CONF_LONGITUDE]
async def _async_update_data_forecast_forecast():
"""Fetch data from API endpoint."""
return await hass.async_add_executor_job(
client.get_forecast, latitude, longitude
)
async def _async_update_data_rain():
"""Fetch data from API endpoint."""
return await hass.async_add_executor_job(client.get_rain, latitude, longitude)
async def _async_update_data_alert():
"""Fetch data from API endpoint."""
return await hass.async_add_executor_job(
client.get_warning_current_phenomenoms, department, 0, True
)
coordinator_forecast = DataUpdateCoordinator(
hass,
_LOGGER,
name=f"Météo-France forecast for city {entry.title}",
update_method=_async_update_data_forecast_forecast,
update_interval=SCAN_INTERVAL,
)
coordinator_rain = None
coordinator_alert = None
# Fetch initial data so we have data when entities subscribe
await coordinator_forecast.async_refresh()
if not coordinator_forecast.last_update_success:
raise ConfigEntryNotReady
# Check if rain forecast is available.
if coordinator_forecast.data.position.get("rain_product_available") == 1:
coordinator_rain = DataUpdateCoordinator(
hass,
_LOGGER,
name=f"Météo-France rain for city {entry.title}",
update_method=_async_update_data_rain,
update_interval=SCAN_INTERVAL_RAIN,
)
await coordinator_rain.async_refresh()
if not coordinator_rain.last_update_success:
raise ConfigEntryNotReady
else:
_LOGGER.warning(
"1 hour rain forecast not available. %s is not in covered zone",
entry.title,
)
department = coordinator_forecast.data.position.get("dept")
_LOGGER.debug(
"Department corresponding to %s is %s",
entry.title,
department,
)
if is_valid_warning_department(department):
if not hass.data[DOMAIN].get(department):
coordinator_alert = DataUpdateCoordinator(
hass,
_LOGGER,
name=f"Météo-France alert for department {department}",
update_method=_async_update_data_alert,
update_interval=SCAN_INTERVAL,
)
await coordinator_alert.async_refresh()
if not coordinator_alert.last_update_success:
raise ConfigEntryNotReady
hass.data[DOMAIN][department] = True
else:
_LOGGER.warning(
"Weather alert for department %s won't be added with city %s, as it has already been added within another city",
department,
entry.title,
)
else:
_LOGGER.warning(
"Weather alert not available: The city %s is not in metropolitan France or Andorre.",
entry.title,
)
undo_listener = entry.add_update_listener(_async_update_listener)
hass.data[DOMAIN][entry.entry_id] = {
COORDINATOR_FORECAST: coordinator_forecast,
COORDINATOR_RAIN: coordinator_rain,
COORDINATOR_ALERT: coordinator_alert,
UNDO_UPDATE_LISTENER: undo_listener,
}
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
return True | [
"async",
"def",
"async_setup_entry",
"(",
"hass",
":",
"HomeAssistantType",
",",
"entry",
":",
"ConfigEntry",
")",
"->",
"bool",
":",
"hass",
".",
"data",
".",
"setdefault",
"(",
"DOMAIN",
",",
"{",
"}",
")",
"latitude",
"=",
"entry",
".",
"data",
".",
"get",
"(",
"CONF_LATITUDE",
")",
"client",
"=",
"MeteoFranceClient",
"(",
")",
"# Migrate from previous config",
"if",
"not",
"latitude",
":",
"places",
"=",
"await",
"hass",
".",
"async_add_executor_job",
"(",
"client",
".",
"search_places",
",",
"entry",
".",
"data",
"[",
"CONF_CITY",
"]",
")",
"hass",
".",
"config_entries",
".",
"async_update_entry",
"(",
"entry",
",",
"title",
"=",
"f\"{places[0]}\"",
",",
"data",
"=",
"{",
"CONF_LATITUDE",
":",
"places",
"[",
"0",
"]",
".",
"latitude",
",",
"CONF_LONGITUDE",
":",
"places",
"[",
"0",
"]",
".",
"longitude",
",",
"}",
",",
")",
"latitude",
"=",
"entry",
".",
"data",
"[",
"CONF_LATITUDE",
"]",
"longitude",
"=",
"entry",
".",
"data",
"[",
"CONF_LONGITUDE",
"]",
"async",
"def",
"_async_update_data_forecast_forecast",
"(",
")",
":",
"\"\"\"Fetch data from API endpoint.\"\"\"",
"return",
"await",
"hass",
".",
"async_add_executor_job",
"(",
"client",
".",
"get_forecast",
",",
"latitude",
",",
"longitude",
")",
"async",
"def",
"_async_update_data_rain",
"(",
")",
":",
"\"\"\"Fetch data from API endpoint.\"\"\"",
"return",
"await",
"hass",
".",
"async_add_executor_job",
"(",
"client",
".",
"get_rain",
",",
"latitude",
",",
"longitude",
")",
"async",
"def",
"_async_update_data_alert",
"(",
")",
":",
"\"\"\"Fetch data from API endpoint.\"\"\"",
"return",
"await",
"hass",
".",
"async_add_executor_job",
"(",
"client",
".",
"get_warning_current_phenomenoms",
",",
"department",
",",
"0",
",",
"True",
")",
"coordinator_forecast",
"=",
"DataUpdateCoordinator",
"(",
"hass",
",",
"_LOGGER",
",",
"name",
"=",
"f\"Météo-France forecast for city {entry.title}\",",
"",
"update_method",
"=",
"_async_update_data_forecast_forecast",
",",
"update_interval",
"=",
"SCAN_INTERVAL",
",",
")",
"coordinator_rain",
"=",
"None",
"coordinator_alert",
"=",
"None",
"# Fetch initial data so we have data when entities subscribe",
"await",
"coordinator_forecast",
".",
"async_refresh",
"(",
")",
"if",
"not",
"coordinator_forecast",
".",
"last_update_success",
":",
"raise",
"ConfigEntryNotReady",
"# Check if rain forecast is available.",
"if",
"coordinator_forecast",
".",
"data",
".",
"position",
".",
"get",
"(",
"\"rain_product_available\"",
")",
"==",
"1",
":",
"coordinator_rain",
"=",
"DataUpdateCoordinator",
"(",
"hass",
",",
"_LOGGER",
",",
"name",
"=",
"f\"Météo-France rain for city {entry.title}\",",
"",
"update_method",
"=",
"_async_update_data_rain",
",",
"update_interval",
"=",
"SCAN_INTERVAL_RAIN",
",",
")",
"await",
"coordinator_rain",
".",
"async_refresh",
"(",
")",
"if",
"not",
"coordinator_rain",
".",
"last_update_success",
":",
"raise",
"ConfigEntryNotReady",
"else",
":",
"_LOGGER",
".",
"warning",
"(",
"\"1 hour rain forecast not available. %s is not in covered zone\"",
",",
"entry",
".",
"title",
",",
")",
"department",
"=",
"coordinator_forecast",
".",
"data",
".",
"position",
".",
"get",
"(",
"\"dept\"",
")",
"_LOGGER",
".",
"debug",
"(",
"\"Department corresponding to %s is %s\"",
",",
"entry",
".",
"title",
",",
"department",
",",
")",
"if",
"is_valid_warning_department",
"(",
"department",
")",
":",
"if",
"not",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
".",
"get",
"(",
"department",
")",
":",
"coordinator_alert",
"=",
"DataUpdateCoordinator",
"(",
"hass",
",",
"_LOGGER",
",",
"name",
"=",
"f\"Météo-France alert for department {department}\",",
"",
"update_method",
"=",
"_async_update_data_alert",
",",
"update_interval",
"=",
"SCAN_INTERVAL",
",",
")",
"await",
"coordinator_alert",
".",
"async_refresh",
"(",
")",
"if",
"not",
"coordinator_alert",
".",
"last_update_success",
":",
"raise",
"ConfigEntryNotReady",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
"[",
"department",
"]",
"=",
"True",
"else",
":",
"_LOGGER",
".",
"warning",
"(",
"\"Weather alert for department %s won't be added with city %s, as it has already been added within another city\"",
",",
"department",
",",
"entry",
".",
"title",
",",
")",
"else",
":",
"_LOGGER",
".",
"warning",
"(",
"\"Weather alert not available: The city %s is not in metropolitan France or Andorre.\"",
",",
"entry",
".",
"title",
",",
")",
"undo_listener",
"=",
"entry",
".",
"add_update_listener",
"(",
"_async_update_listener",
")",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
"[",
"entry",
".",
"entry_id",
"]",
"=",
"{",
"COORDINATOR_FORECAST",
":",
"coordinator_forecast",
",",
"COORDINATOR_RAIN",
":",
"coordinator_rain",
",",
"COORDINATOR_ALERT",
":",
"coordinator_alert",
",",
"UNDO_UPDATE_LISTENER",
":",
"undo_listener",
",",
"}",
"for",
"platform",
"in",
"PLATFORMS",
":",
"hass",
".",
"async_create_task",
"(",
"hass",
".",
"config_entries",
".",
"async_forward_entry_setup",
"(",
"entry",
",",
"platform",
")",
")",
"return",
"True"
] | [
56,
0
] | [
179,
15
] | python | en | ['en', 'en', 'en'] | True |
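The entry setup above instantiates the same DataUpdateCoordinator pattern three times (forecast, rain, alert). Below is a minimal sketch of one instance, assuming it runs inside a Home Assistant event loop; fetch_rain is a hypothetical stand-in for the executor-job wrappers defined in the setup.

from datetime import timedelta
import logging

from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator

_LOGGER = logging.getLogger(__name__)


async def make_rain_coordinator(hass, fetch_rain):
    """Build a coordinator, fetch once, and fail fast if the API is down."""
    coordinator = DataUpdateCoordinator(
        hass,
        _LOGGER,
        name="Météo-France rain (example)",
        update_method=fetch_rain,
        update_interval=timedelta(minutes=5),
    )
    await coordinator.async_refresh()        # initial fetch before entities subscribe
    if not coordinator.last_update_success:  # HA will retry the config entry later
        raise ConfigEntryNotReady
    return coordinator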