Dataset columns:
- identifier: string (1 to 155 chars)
- parameters: string (2 to 6.09k chars)
- docstring: string (11 to 63.4k chars)
- docstring_summary: string (0 to 63.4k chars)
- function: string (29 to 99.8k chars)
- function_tokens: sequence
- start_point: sequence
- end_point: sequence
- language: string (1 class)
- docstring_language: string (2 to 7 chars)
- docstring_language_predictions: string (18 to 23 chars)
- is_langid_reliable: string (2 classes)

identifier: AbstractAutoCompressionModule.post_compress_finetuning_epochs | parameters: (cls, compress_algorithm_name: str)
docstring_summary: The epochs in post-compress finetuning process.
function:
def post_compress_finetuning_epochs(cls, compress_algorithm_name: str) -> int:
    """
    The epochs in post-compress finetuning process.

    Parameters
    ----------
    compress_algorithm_name: str
        The name of pruner and quantizer, e.g. 'level', 'l1', 'qat'.

    Returns
    -------
    int
        The finetuning epoch number.
    """
    pass
start_point: [107, 4] | end_point: [121, 12] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'error', 'th'] | is_langid_reliable: False

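A minimal sketch of how a concrete subclass might satisfy this abstract hook; the subclass name and epoch numbers are illustrative assumptions, not part of the dataset:

class MyAutoCompressionModule(AbstractAutoCompressionModule):  # hypothetical subclass

    @classmethod
    def post_compress_finetuning_epochs(cls, compress_algorithm_name: str) -> int:
        # Assumed policy: quantization-aware training ('qat') gets a longer finetune.
        if compress_algorithm_name == 'qat':
            return 10
        return 2
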
identifier: StreamOutput.__init__ | parameters: (self, stream, timeout: int = 300)
docstring: Initialize a stream output.
function:
def __init__(self, stream, timeout: int = 300) -> None:
    """Initialize a stream output."""
    self.idle = False
    self.timeout = timeout
    self._stream = stream
    self._cursor = None
    self._event = asyncio.Event()
    self._segments = deque(maxlen=MAX_SEGMENTS)
    self._unsub = None
start_point: [41, 4] | end_point: [49, 26] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'pt', 'en'] | is_langid_reliable: True

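The deque(maxlen=MAX_SEGMENTS) above gives each output a fixed-size ring buffer: once full, appending a new segment silently evicts the oldest one. A standalone illustration (the MAX_SEGMENTS value here is assumed):

from collections import deque

MAX_SEGMENTS = 3  # assumed value; the real constant is defined in the stream component
buffer = deque(maxlen=MAX_SEGMENTS)
for sequence in range(5):
    buffer.append(sequence)
print(list(buffer))  # [2, 3, 4] -- the two oldest entries were dropped
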
identifier: StreamOutput.name | parameters: (self)
docstring: Return provider name.
function:
def name(self) -> str:
    """Return provider name."""
    return None
start_point: [52, 4] | end_point: [54, 19] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'no', 'en'] | is_langid_reliable: True

identifier: StreamOutput.format | parameters: (self)
docstring: Return container format.
function:
def format(self) -> str:
    """Return container format."""
    return None
start_point: [57, 4] | end_point: [59, 19] | language: python | docstring_language: da | docstring_language_predictions: ['da', 'la', 'en'] | is_langid_reliable: False

identifier: StreamOutput.audio_codecs | parameters: (self)
docstring: Return desired audio codecs.
function:
def audio_codecs(self) -> str:
    """Return desired audio codecs."""
    return None
start_point: [62, 4] | end_point: [64, 19] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'la', 'en'] | is_langid_reliable: True

identifier: StreamOutput.video_codecs | parameters: (self)
docstring: Return desired video codecs.
function:
def video_codecs(self) -> tuple:
    """Return desired video codecs."""
    return None
start_point: [67, 4] | end_point: [69, 19] | language: python | docstring_language: af | docstring_language_predictions: ['es', 'af', 'en'] | is_langid_reliable: False

identifier: StreamOutput.container_options | parameters: (self)
docstring: Return Callable which takes a sequence number and returns container options.
function:
def container_options(self) -> Callable[[int], dict]:
    """Return Callable which takes a sequence number and returns container options."""
    return None
start_point: [72, 4] | end_point: [74, 19] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: StreamOutput.segments | parameters: (self)
docstring: Return current sequence from segments.
function:
def segments(self) -> List[int]:
    """Return current sequence from segments."""
    return [s.sequence for s in self._segments]
start_point: [77, 4] | end_point: [79, 51] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: StreamOutput.target_duration | parameters: (self)
docstring: Return the max duration of any given segment in seconds.
function:
def target_duration(self) -> int:
    """Return the max duration of any given segment in seconds."""
    segment_length = len(self._segments)
    if not segment_length:
        return 1
    durations = [s.duration for s in self._segments]
    return round(max(durations)) or 1
start_point: [82, 4] | end_point: [88, 41] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

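target_duration rounds the longest buffered segment duration and clamps the result to at least one second, which covers both an empty buffer and sub-half-second segments (where round() yields 0). A quick check with assumed durations:

durations = [1.98, 2.02, 2.51]      # assumed segment durations in seconds
print(round(max(durations)) or 1)   # 3
print(round(max([0.4])) or 1)       # 1 -- round(0.4) == 0, so the clamp kicks in
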
identifier: StreamOutput.get_segment | parameters: (self, sequence: int = None)
docstring: Retrieve a specific segment, or the whole list.
function:
def get_segment(self, sequence: int = None) -> Any:
    """Retrieve a specific segment, or the whole list."""
    self.idle = False
    # Reset idle timeout
    if self._unsub is not None:
        self._unsub()
    self._unsub = async_call_later(self._stream.hass, self.timeout, self._timeout)

    if not sequence:
        return self._segments

    for segment in self._segments:
        if segment.sequence == sequence:
            return segment
    return None
start_point: [90, 4] | end_point: [104, 19] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: StreamOutput.recv | parameters: (self)
docstring: Wait for and retrieve the latest segment.
function:
async def recv(self) -> Segment:
    """Wait for and retrieve the latest segment."""
    last_segment = max(self.segments, default=0)
    if self._cursor is None or self._cursor <= last_segment:
        await self._event.wait()

    if not self._segments:
        return None

    segment = self.get_segment()[-1]
    self._cursor = segment.sequence
    return segment
start_point: [106, 4] | end_point: [117, 22] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

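recv and put form a producer/consumer pair: the stream worker calls put(segment) as data arrives and put(None) at end of stream, while each output format awaits recv in a loop. A hedged sketch of a consumer loop (the handler function is hypothetical):

async def consume(output):
    # `output` is assumed to be a concrete StreamOutput with a running producer.
    while True:
        segment = await output.recv()
        if segment is None:  # put(None) woke us with an empty buffer: stream ended
            break
        handle_segment(segment)  # hypothetical downstream muxer/handler
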
identifier: StreamOutput.put | parameters: (self, segment: Segment)
docstring: Store output.
function:
def put(self, segment: Segment) -> None:
    """Store output."""
    # Start idle timeout when we start receiving data
    if self._unsub is None:
        self._unsub = async_call_later(
            self._stream.hass, self.timeout, self._timeout
        )

    if segment is None:
        self._event.set()
        # Cleanup provider
        if self._unsub is not None:
            self._unsub()
        self.cleanup()
        return

    self._segments.append(segment)
    self._event.set()
    self._event.clear()
start_point: [120, 4] | end_point: [138, 27] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: False

identifier: StreamOutput._timeout | parameters: (self, _now=None)
docstring: Handle stream timeout.
function:
def _timeout(self, _now=None):
    """Handle stream timeout."""
    self._unsub = None
    if self._stream.keepalive:
        self.idle = True
        self._stream.check_idle()
    else:
        self.cleanup()
start_point: [141, 4] | end_point: [148, 26] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: StreamOutput.cleanup | parameters: (self)
docstring: Handle cleanup.
function:
def cleanup(self):
    """Handle cleanup."""
    self._segments = deque(maxlen=MAX_SEGMENTS)
    self._stream.remove_provider(self)
start_point: [150, 4] | end_point: [153, 42] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: False

identifier: StreamView.get | parameters: (self, request, token, sequence=None)
docstring: Start a GET request.
function:
async def get(self, request, token, sequence=None):
    """Start a GET request."""
    hass = request.app["hass"]

    stream = next(
        (
            s
            for s in hass.data[DOMAIN][ATTR_STREAMS].values()
            if s.access_token == token
        ),
        None,
    )

    if not stream:
        raise web.HTTPNotFound()

    # Start worker if not already started
    stream.start()

    return await self.handle(request, stream, sequence)
start_point: [167, 4] | end_point: [186, 59] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'lb', 'en'] | is_langid_reliable: True

identifier: StreamView.handle | parameters: (self, request, stream, sequence)
docstring: Handle the stream request.
function:
async def handle(self, request, stream, sequence):
    """Handle the stream request."""
    raise NotImplementedError()
start_point: [188, 4] | end_point: [190, 35] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

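StreamView.get is a template method: it resolves the stream by access token, starts the worker, and delegates response formatting to handle. A minimal hedged subclass showing the shape of an override (the provider accessor and response body are illustrative assumptions, not the real HLS/MJPEG implementation):

class SegmentListView(StreamView):  # hypothetical subclass
    """Serve the current segment sequence numbers as plain text."""

    async def handle(self, request, stream, sequence):
        provider = stream.outputs.get("hls")  # assumed accessor for a registered output
        if provider is None:
            raise web.HTTPNotFound()
        return web.Response(text=",".join(str(s) for s in provider.segments))
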
identifier: test_duplicate_error | parameters: (hass)
docstring: Test that errors are shown when duplicates are added.
function:
async def test_duplicate_error(hass):
    """Test that errors are shown when duplicates are added."""
    conf = {CONF_ZIP_CODE: "12345"}

    MockConfigEntry(domain=DOMAIN, unique_id="12345", data=conf).add_to_hass(hass)

    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=conf
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
start_point: [9, 0] | end_point: [20, 51] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: test_invalid_zip_code | parameters: (hass)
docstring: Test that an invalid ZIP code key throws an error.
function:
async def test_invalid_zip_code(hass):
    """Test that an invalid ZIP code key throws an error."""
    conf = {CONF_ZIP_CODE: "abcde"}

    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=conf
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"] == {CONF_ZIP_CODE: "invalid_zip_code"}
start_point: [23, 0] | end_point: [32, 66] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'gd', 'en'] | is_langid_reliable: True

identifier: test_show_form | parameters: (hass)
docstring: Test that the form is served with no input.
function:
async def test_show_form(hass):
    """Test that the form is served with no input."""
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
start_point: [35, 0] | end_point: [42, 38] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: test_step_user | parameters: (hass)
docstring: Test that the user step works (without MFA).
function:
async def test_step_user(hass):
    """Test that the user step works (without MFA)."""
    conf = {CONF_ZIP_CODE: "12345"}

    with patch("homeassistant.components.iqvia.async_setup_entry", return_value=True):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_USER}, data=conf
        )
        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["title"] == "12345"
        assert result["data"] == {CONF_ZIP_CODE: "12345"}
start_point: [45, 0] | end_point: [56, 57] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: async_setup_entry | parameters: (hass: HomeAssistant, entry: ConfigEntry, async_add_entities: Callable)
docstring: Set up Guardian switches based on a config entry.
function:
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: Callable
) -> None:
    """Set up Guardian switches based on a config entry."""
    platform = entity_platform.current_platform.get()

    for service_name, schema, method in [
        (SERVICE_DISABLE_AP, {}, "async_disable_ap"),
        (SERVICE_ENABLE_AP, {}, "async_enable_ap"),
        (SERVICE_PAIR_SENSOR, {vol.Required(CONF_UID): cv.string}, "async_pair_sensor"),
        (SERVICE_REBOOT, {}, "async_reboot"),
        (SERVICE_RESET_VALVE_DIAGNOSTICS, {}, "async_reset_valve_diagnostics"),
        (
            SERVICE_UPGRADE_FIRMWARE,
            {
                vol.Optional(CONF_URL): cv.url,
                vol.Optional(CONF_PORT): cv.port,
                vol.Optional(CONF_FILENAME): cv.string,
            },
            "async_upgrade_firmware",
        ),
        (
            SERVICE_UNPAIR_SENSOR,
            {vol.Required(CONF_UID): cv.string},
            "async_unpair_sensor",
        ),
    ]:
        platform.async_register_entity_service(service_name, schema, method)

    async_add_entities(
        [
            ValveControllerSwitch(
                entry,
                hass.data[DOMAIN][DATA_CLIENT][entry.entry_id],
                hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id],
            )
        ]
    )
start_point: [39, 0] | end_point: [76, 5] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

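The (service_name, schema, method) tuples above bind each service to a voluptuous schema and the name of an entity method; async_register_entity_service then dispatches validated calls to that method. A framework-free sketch of the same dispatch idea (the registry class is hypothetical, not the Home Assistant API):

import voluptuous as vol

class EntityServiceRegistry:  # hypothetical stand-in for the HA entity platform
    def __init__(self):
        self._services = {}

    def register(self, name, schema, method_name):
        self._services[name] = (vol.Schema(schema), method_name)

    async def call(self, entity, name, data):
        # Validate the payload, then invoke the bound entity method by name.
        schema, method_name = self._services[name]
        return await getattr(entity, method_name)(**schema(data))
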
identifier: ValveControllerSwitch.__init__ | parameters: (self, entry: ConfigEntry, client: Client, coordinators: Dict[str, DataUpdateCoordinator])
docstring: Initialize.
function:
def __init__(
    self,
    entry: ConfigEntry,
    client: Client,
    coordinators: Dict[str, DataUpdateCoordinator],
):
    """Initialize."""
    super().__init__(
        entry, coordinators, "valve", "Valve Controller", None, "mdi:water"
    )

    self._client = client
    self._is_on = True
start_point: [82, 4] | end_point: [94, 26] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'it'] | is_langid_reliable: False

identifier: ValveControllerSwitch.available | parameters: (self)
docstring: Return whether the entity is available.
function:
def available(self) -> bool:
    """Return whether the entity is available."""
    return self.coordinators[API_VALVE_STATUS].last_update_success
start_point: [97, 4] | end_point: [99, 70] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: ValveControllerSwitch.is_on | parameters: (self)
docstring: Return True if the valve is open.
function:
def is_on(self) -> bool:
    """Return True if the valve is open."""
    return self._is_on
start_point: [102, 4] | end_point: [104, 26] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'nl', 'en'] | is_langid_reliable: True

identifier: ValveControllerSwitch._async_continue_entity_setup | parameters: (self)
docstring: Register API interest (and related tasks) when the entity is added.
function:
async def _async_continue_entity_setup(self):
    """Register API interest (and related tasks) when the entity is added."""
    self.async_add_coordinator_update_listener(API_VALVE_STATUS)
start_point: [106, 4] | end_point: [108, 68] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: ValveControllerSwitch._async_update_from_latest_data | parameters: (self)
docstring: Update the entity.
function:
def _async_update_from_latest_data(self) -> None:
    """Update the entity."""
    self._is_on = self.coordinators[API_VALVE_STATUS].data["state"] in (
        "start_opening",
        "opening",
        "finish_opening",
        "opened",
    )
    self._attrs.update(
        {
            ATTR_AVG_CURRENT: self.coordinators[API_VALVE_STATUS].data[
                "average_current"
            ],
            ATTR_INST_CURRENT: self.coordinators[API_VALVE_STATUS].data[
                "instantaneous_current"
            ],
            ATTR_INST_CURRENT_DDT: self.coordinators[API_VALVE_STATUS].data[
                "instantaneous_current_ddt"
            ],
            ATTR_TRAVEL_COUNT: self.coordinators[API_VALVE_STATUS].data[
                "travel_count"
            ],
        }
    )
start_point: [111, 4] | end_point: [135, 9] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: ValveControllerSwitch.async_disable_ap | parameters: (self)
docstring: Disable the device's onboard access point.
function:
async def async_disable_ap(self):
    """Disable the device's onboard access point."""
    try:
        async with self._client:
            await self._client.wifi.disable_ap()
    except GuardianError as err:
        LOGGER.error("Error while disabling valve controller AP: %s", err)
start_point: [137, 4] | end_point: [143, 78] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: ValveControllerSwitch.async_enable_ap | parameters: (self)
docstring: Enable the device's onboard access point.
function:
async def async_enable_ap(self):
    """Enable the device's onboard access point."""
    try:
        async with self._client:
            await self._client.wifi.enable_ap()
    except GuardianError as err:
        LOGGER.error("Error while enabling valve controller AP: %s", err)
start_point: [145, 4] | end_point: [151, 77] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: ValveControllerSwitch.async_pair_sensor | parameters: (self, *, uid)
docstring: Add a new paired sensor.
function:
async def async_pair_sensor(self, *, uid):
    """Add a new paired sensor."""
    try:
        async with self._client:
            await self._client.sensor.pair_sensor(uid)
    except GuardianError as err:
        LOGGER.error("Error while adding paired sensor: %s", err)
        return

    await self.hass.data[DOMAIN][DATA_PAIRED_SENSOR_MANAGER][
        self._entry.entry_id
    ].async_pair_sensor(uid)
start_point: [153, 4] | end_point: [164, 32] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: ValveControllerSwitch.async_reboot | parameters: (self)
docstring: Reboot the device.
function:
async def async_reboot(self):
    """Reboot the device."""
    try:
        async with self._client:
            await self._client.system.reboot()
    except GuardianError as err:
        LOGGER.error("Error while rebooting valve controller: %s", err)
start_point: [166, 4] | end_point: [172, 75] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: ValveControllerSwitch.async_reset_valve_diagnostics | parameters: (self)
docstring: Fully reset system motor diagnostics.
function:
async def async_reset_valve_diagnostics(self):
    """Fully reset system motor diagnostics."""
    try:
        async with self._client:
            await self._client.valve.reset()
    except GuardianError as err:
        LOGGER.error("Error while resetting valve diagnostics: %s", err)
start_point: [174, 4] | end_point: [180, 76] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'sv', 'en'] | is_langid_reliable: True

identifier: ValveControllerSwitch.async_unpair_sensor | parameters: (self, *, uid)
docstring: Remove a paired sensor.
function:
async def async_unpair_sensor(self, *, uid):
    """Remove a paired sensor."""
    try:
        async with self._client:
            await self._client.sensor.unpair_sensor(uid)
    except GuardianError as err:
        LOGGER.error("Error while removing paired sensor: %s", err)
        return

    await self.hass.data[DOMAIN][DATA_PAIRED_SENSOR_MANAGER][
        self._entry.entry_id
    ].async_unpair_sensor(uid)
start_point: [182, 4] | end_point: [193, 34] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: ValveControllerSwitch.async_upgrade_firmware | parameters: (self, *, url, port, filename)
docstring: Upgrade the device firmware.
function:
async def async_upgrade_firmware(self, *, url, port, filename):
    """Upgrade the device firmware."""
    try:
        async with self._client:
            await self._client.system.upgrade_firmware(
                url=url,
                port=port,
                filename=filename,
            )
    except GuardianError as err:
        LOGGER.error("Error while upgrading firmware: %s", err)
start_point: [195, 4] | end_point: [205, 67] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: ValveControllerSwitch.async_turn_off | parameters: (self, **kwargs)
docstring: Turn the valve off (closed).
function:
async def async_turn_off(self, **kwargs) -> None:
    """Turn the valve off (closed)."""
    try:
        async with self._client:
            await self._client.valve.close()
    except GuardianError as err:
        LOGGER.error("Error while closing the valve: %s", err)
        return

    self._is_on = False
    self.async_write_ha_state()
start_point: [207, 4] | end_point: [217, 35] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: ValveControllerSwitch.async_turn_on | parameters: (self, **kwargs)
docstring: Turn the valve on (open).
function:
async def async_turn_on(self, **kwargs) -> None:
    """Turn the valve on (open)."""
    try:
        async with self._client:
            await self._client.valve.open()
    except GuardianError as err:
        LOGGER.error("Error while opening the valve: %s", err)
        return

    self._is_on = True
    self.async_write_ha_state()
start_point: [219, 4] | end_point: [229, 35] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'fi', 'en'] | is_langid_reliable: True

identifier: discover_mysensors_platform | parameters: (hass, hass_config, platform, new_devices)
docstring: Discover a MySensors platform.
function:
def discover_mysensors_platform(hass, hass_config, platform, new_devices):
    """Discover a MySensors platform."""
    task = hass.async_create_task(
        discovery.async_load_platform(
            hass,
            platform,
            DOMAIN,
            {ATTR_DEVICES: new_devices, CONF_NAME: DOMAIN},
            hass_config,
        )
    )
    return task
start_point: [19, 0] | end_point: [30, 15] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'da', 'en'] | is_langid_reliable: True

identifier: default_schema | parameters: (gateway, child, value_type_name)
docstring: Return a default validation schema for value types.
function:
def default_schema(gateway, child, value_type_name):
    """Return a default validation schema for value types."""
    schema = {value_type_name: cv.string}
    return get_child_schema(gateway, child, value_type_name, schema)
start_point: [33, 0] | end_point: [36, 68] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'de', 'en'] | is_langid_reliable: True

identifier: light_dimmer_schema | parameters: (gateway, child, value_type_name)
docstring: Return a validation schema for V_DIMMER.
function:
def light_dimmer_schema(gateway, child, value_type_name):
    """Return a validation schema for V_DIMMER."""
    schema = {"V_DIMMER": cv.string, "V_LIGHT": cv.string}
    return get_child_schema(gateway, child, value_type_name, schema)
start_point: [40, 0] | end_point: [43, 68] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'da', 'en'] | is_langid_reliable: True

identifier: light_percentage_schema | parameters: (gateway, child, value_type_name)
docstring: Return a validation schema for V_PERCENTAGE.
function:
def light_percentage_schema(gateway, child, value_type_name):
    """Return a validation schema for V_PERCENTAGE."""
    schema = {"V_PERCENTAGE": cv.string, "V_STATUS": cv.string}
    return get_child_schema(gateway, child, value_type_name, schema)
start_point: [47, 0] | end_point: [50, 68] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'da', 'en'] | is_langid_reliable: True

identifier: light_rgb_schema | parameters: (gateway, child, value_type_name)
docstring: Return a validation schema for V_RGB.
function:
def light_rgb_schema(gateway, child, value_type_name):
    """Return a validation schema for V_RGB."""
    schema = {"V_RGB": cv.string, "V_STATUS": cv.string}
    return get_child_schema(gateway, child, value_type_name, schema)
start_point: [54, 0] | end_point: [57, 68] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'cy', 'en'] | is_langid_reliable: True

identifier: light_rgbw_schema | parameters: (gateway, child, value_type_name)
docstring: Return a validation schema for V_RGBW.
function:
def light_rgbw_schema(gateway, child, value_type_name):
    """Return a validation schema for V_RGBW."""
    schema = {"V_RGBW": cv.string, "V_STATUS": cv.string}
    return get_child_schema(gateway, child, value_type_name, schema)
start_point: [61, 0] | end_point: [64, 68] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'cy', 'en'] | is_langid_reliable: True

identifier: switch_ir_send_schema | parameters: (gateway, child, value_type_name)
docstring: Return a validation schema for V_IR_SEND.
function:
def switch_ir_send_schema(gateway, child, value_type_name):
    """Return a validation schema for V_IR_SEND."""
    schema = {"V_IR_SEND": cv.string, "V_LIGHT": cv.string}
    return get_child_schema(gateway, child, value_type_name, schema)
start_point: [68, 0] | end_point: [71, 68] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'sk', 'en'] | is_langid_reliable: True

identifier: get_child_schema | parameters: (gateway, child, value_type_name, schema)
docstring: Return a child schema.
function:
def get_child_schema(gateway, child, value_type_name, schema):
    """Return a child schema."""
    set_req = gateway.const.SetReq
    child_schema = child.get_schema(gateway.protocol_version)
    schema = child_schema.extend(
        {
            vol.Required(
                set_req[name].value, msg=invalid_msg(gateway, child, name)
            ): child_schema.schema.get(set_req[name].value, valid)
            for name, valid in schema.items()
        },
        extra=vol.ALLOW_EXTRA,
    )
    return schema
start_point: [74, 0] | end_point: [87, 17] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'de', 'en'] | is_langid_reliable: True

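get_child_schema leans on voluptuous's Schema.extend and the msg= argument of vol.Required, which attaches a human-readable error when a required value type is missing. A self-contained sketch of that mechanism (the keys and message text are made up):

import voluptuous as vol

base = vol.Schema({"V_STATUS": str}, extra=vol.ALLOW_EXTRA)
schema = base.extend(
    {vol.Required("V_DIMMER", msg="S_DIMMER requires value_type V_DIMMER"): str}
)
try:
    schema({"V_STATUS": "1"})  # V_DIMMER is missing
except vol.Invalid as exc:
    print(exc)  # the error text carries the msg= string
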
identifier: invalid_msg | parameters: (gateway, child, value_type_name)
docstring: Return a message for an invalid child during schema validation.
function:
def invalid_msg(gateway, child, value_type_name):
    """Return a message for an invalid child during schema validation."""
    pres = gateway.const.Presentation
    set_req = gateway.const.SetReq
    return (
        f"{pres(child.type).name} requires value_type {set_req[value_type_name].name}"
    )
start_point: [90, 0] | end_point: [96, 5] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: validate_set_msg | parameters: (msg)
docstring: Validate a set message.
function:
def validate_set_msg(msg):
    """Validate a set message."""
    if not validate_node(msg.gateway, msg.node_id):
        return {}
    child = msg.gateway.sensors[msg.node_id].children[msg.child_id]
    return validate_child(msg.gateway, msg.node_id, child, msg.sub_type)
start_point: [99, 0] | end_point: [104, 72] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: validate_node | parameters: (gateway, node_id)
docstring: Validate a node.
function:
def validate_node(gateway, node_id):
    """Validate a node."""
    if gateway.sensors[node_id].sketch_name is None:
        _LOGGER.debug("Node %s is missing sketch name", node_id)
        return False
    return True
start_point: [107, 0] | end_point: [112, 15] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'lb', 'pt'] | is_langid_reliable: False

identifier: validate_child | parameters: (gateway, node_id, child, value_type=None)
docstring: Validate a child.
function:
def validate_child(gateway, node_id, child, value_type=None):
    """Validate a child."""
    validated = defaultdict(list)
    pres = gateway.const.Presentation
    set_req = gateway.const.SetReq
    child_type_name = next(
        (member.name for member in pres if member.value == child.type), None
    )
    value_types = {value_type} if value_type else {*child.values}
    value_type_names = {
        member.name for member in set_req if member.value in value_types
    }
    platforms = TYPE_TO_PLATFORMS.get(child_type_name, [])
    if not platforms:
        _LOGGER.warning("Child type %s is not supported", child.type)
        return validated

    for platform in platforms:
        platform_v_names = FLAT_PLATFORM_TYPES[platform, child_type_name]
        v_names = platform_v_names & value_type_names
        if not v_names:
            child_value_names = {
                member.name for member in set_req if member.value in child.values
            }
            v_names = platform_v_names & child_value_names

        for v_name in v_names:
            child_schema_gen = SCHEMAS.get((platform, v_name), default_schema)
            child_schema = child_schema_gen(gateway, child, v_name)
            try:
                child_schema(child.values)
            except vol.Invalid as exc:
                _LOGGER.warning(
                    "Invalid %s on node %s, %s platform: %s",
                    child,
                    node_id,
                    platform,
                    exc,
                )
                continue

            dev_id = id(gateway), node_id, child.id, set_req[v_name].value
            validated[platform].append(dev_id)

    return validated
start_point: [115, 0] | end_point: [158, 20] | language: python | docstring_language: en | docstring_language_predictions: ['it', 'en', 'en'] | is_langid_reliable: True

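validate_child reads three module-level lookup tables that this excerpt does not define. Their assumed shapes, with illustrative (not verbatim) entries:

# Assumed shapes only -- the real entries live elsewhere in the mysensors integration.
TYPE_TO_PLATFORMS = {"S_DIMMER": ["light"], "S_DOOR": ["binary_sensor"]}
FLAT_PLATFORM_TYPES = {("light", "S_DIMMER"): {"V_DIMMER", "V_PERCENTAGE"}}
SCHEMAS = {("light", "V_DIMMER"): light_dimmer_schema}
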
identifier: read_init | parameters: ()
docstring: Read the init and extract PyTorch, TensorFlow, SentencePiece and Tokenizers objects.
function:
def read_init():
    """Read the init and extract PyTorch, TensorFlow, SentencePiece and Tokenizers objects."""
    with open(os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        line_index += 1

    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        if _re_test_backend.search(lines[line_index]) is not None:
            backend = _re_test_backend.search(lines[line_index]).groups()[0]
            line_index += 1

            # Ignore if backend isn't tracked for dummies.
            if backend not in BACKENDS:
                continue

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
start_point: [57, 0] | end_point: [94, 35] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

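read_init leans on two module-level regexes that the excerpt omits. Hedged reconstructions consistent with how they are used (groups()[0] must yield the backend name and the imported-names list, respectively):

import re

# Assumed pattern: matches e.g. "    if is_torch_available():" and captures "torch".
_re_test_backend = re.compile(r"^\s+if\s+is_([a-z_]+)_available\(\)")
# Assumed pattern: matches e.g. "        from .models.bert import BertModel, BertConfig"
# and captures "BertModel, BertConfig" for splitting on ", ".
_re_single_line_import = re.compile(r"^\s+from\s+\S+\s+import\s+(.+)$")
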
identifier: create_dummy_object | parameters: (name, backend_name)
docstring: Create the code for the dummy object corresponding to `name`.
function:
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    _pretrained = [
        "Config",
        "ForCausalLM",
        "ForConditionalGeneration",
        "ForMaskedLM",
        "ForMultipleChoice",
        "ForQuestionAnswering",
        "ForSequenceClassification",
        "ForTokenClassification",
        "Model",
        "Tokenizer",
    ]
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        is_pretrained = False
        for part in _pretrained:
            if part in name:
                is_pretrained = True
                break
        if is_pretrained:
            return DUMMY_PRETRAINED_CLASS.format(name, backend_name)
        else:
            return DUMMY_CLASS.format(name, backend_name)
start_point: [97, 0] | end_point: [123, 57] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

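The DUMMY_* templates used above are defined elsewhere in the script; shapes along these lines would satisfy the .format(...) calls (reconstructed for illustration, not copied from the file):

DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_{1}({0})
"""

DUMMY_CLASS = """
class {0}:
    def __init__(self, *args, **kwargs):
        requires_{1}(self)
"""

DUMMY_PRETRAINED_CLASS = """
class {0}:
    def __init__(self, *args, **kwargs):
        requires_{1}(self)

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_{1}(cls)
"""
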
identifier: create_dummy_files | parameters: ()
docstring: Create the content of the dummy files.
function:
def create_dummy_files():
    """Create the content of the dummy files."""
    backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    module_names = {"torch": "pytorch"}

    dummy_files = {}
    for backend, objects in backend_specific_objects.items():
        backend_name = module_names.get(backend, backend)
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += f"from ..file_utils import requires_{backend_name}\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file
    return dummy_files
start_point: [126, 0] | end_point: [140, 22] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

identifier: check_dummies | parameters: (overwrite=False)
docstring: Check if the dummy files are up to date and maybe `overwrite` with the right content.
function:
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_TRANSFORMERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        with open(file_path, "r", encoding="utf-8", newline="\n") as f:
            actual_dummies[backend] = f.read()

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating transformers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"transformers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )
start_point: [143, 0] | end_point: [175, 17] | language: python | docstring_language: en | docstring_language_predictions: ['en', 'en', 'en'] | is_langid_reliable: True

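Such check scripts are typically run from the repository root with a flag to rewrite instead of fail; a hedged main guard matching check_dummies's signature (the flag name is an assumption):

if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--fix_and_overwrite",
        action="store_true",
        help="Whether to fix inconsistencies by overwriting the dummy files.",
    )
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
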
async_setup | (hass: HomeAssistant, config: dict) | Set up the NEW_NAME component. | Set up the NEW_NAME component. | async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the NEW_NAME component."""
hass.data[DOMAIN] = {}
if DOMAIN not in config:
return True
config_flow.OAuth2FlowHandler.async_register_implementation(
hass,
config_entry_oauth2_flow.LocalOAuth2Implementation(
hass,
DOMAIN,
config[DOMAIN][CONF_CLIENT_ID],
config[DOMAIN][CONF_CLIENT_SECRET],
OAUTH2_AUTHORIZE,
OAUTH2_TOKEN,
),
)
return True | [
"async",
"def",
"async_setup",
"(",
"hass",
":",
"HomeAssistant",
",",
"config",
":",
"dict",
")",
":",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
"=",
"{",
"}",
"if",
"DOMAIN",
"not",
"in",
"config",
":",
"return",
"True",
"config_flow",
".",
"OAuth2FlowHandler",
".",
"async_register_implementation",
"(",
"hass",
",",
"config_entry_oauth2_flow",
".",
"LocalOAuth2Implementation",
"(",
"hass",
",",
"DOMAIN",
",",
"config",
"[",
"DOMAIN",
"]",
"[",
"CONF_CLIENT_ID",
"]",
",",
"config",
"[",
"DOMAIN",
"]",
"[",
"CONF_CLIENT_SECRET",
"]",
",",
"OAUTH2_AUTHORIZE",
",",
"OAUTH2_TOKEN",
",",
")",
",",
")",
"return",
"True"
] | [
34,
0
] | [
53,
15
] | python | en | ['en', 'en', 'en'] | True |
async_setup_entry | (hass: HomeAssistant, entry: ConfigEntry) | Set up NEW_NAME from a config entry. | Set up NEW_NAME from a config entry. | async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up NEW_NAME from a config entry."""
implementation = (
await config_entry_oauth2_flow.async_get_config_entry_implementation(
hass, entry
)
)
session = config_entry_oauth2_flow.OAuth2Session(hass, entry, implementation)
# If using a requests-based API lib
hass.data[DOMAIN][entry.entry_id] = api.ConfigEntryAuth(hass, entry, session)
# If using an aiohttp-based API lib
hass.data[DOMAIN][entry.entry_id] = api.AsyncConfigEntryAuth(
aiohttp_client.async_get_clientsession(hass), session
)
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True | [
"async",
"def",
"async_setup_entry",
"(",
"hass",
":",
"HomeAssistant",
",",
"entry",
":",
"ConfigEntry",
")",
":",
"implementation",
"=",
"(",
"await",
"config_entry_oauth2_flow",
".",
"async_get_config_entry_implementation",
"(",
"hass",
",",
"entry",
")",
")",
"session",
"=",
"config_entry_oauth2_flow",
".",
"OAuth2Session",
"(",
"hass",
",",
"entry",
",",
"implementation",
")",
"# If using a requests-based API lib",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
"[",
"entry",
".",
"entry_id",
"]",
"=",
"api",
".",
"ConfigEntryAuth",
"(",
"hass",
",",
"entry",
",",
"session",
")",
"# If using an aiohttp-based API lib",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
"[",
"entry",
".",
"entry_id",
"]",
"=",
"api",
".",
"AsyncConfigEntryAuth",
"(",
"aiohttp_client",
".",
"async_get_clientsession",
"(",
"hass",
")",
",",
"session",
")",
"for",
"component",
"in",
"PLATFORMS",
":",
"hass",
".",
"async_create_task",
"(",
"hass",
".",
"config_entries",
".",
"async_forward_entry_setup",
"(",
"entry",
",",
"component",
")",
")",
"return",
"True"
] | [
56,
0
] | [
79,
15
] | python | en | ['en', 'en', 'en'] | True |
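A rough sketch of what the `api.AsyncConfigEntryAuth` wrapper referenced above might look like; the scaffold's real `api.py` may differ, and only the `OAuth2Session` members are taken from Home Assistant's actual helpers.

```python
class AsyncConfigEntryAuth:
    """Illustrative aiohttp-based auth wrapper around an OAuth2Session."""

    def __init__(self, websession, oauth_session):
        self._websession = websession        # aiohttp ClientSession
        self._oauth_session = oauth_session  # config_entry_oauth2_flow.OAuth2Session

    async def async_get_access_token(self):
        # Refresh the token if it has expired before handing it to the API client.
        if not self._oauth_session.valid_token:
            await self._oauth_session.async_ensure_token_valid()
        return self._oauth_session.token["access_token"]
```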
async_unload_entry | (hass: HomeAssistant, entry: ConfigEntry) | Unload a config entry. | Unload a config entry. | async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok | [
"async",
"def",
"async_unload_entry",
"(",
"hass",
":",
"HomeAssistant",
",",
"entry",
":",
"ConfigEntry",
")",
":",
"unload_ok",
"=",
"all",
"(",
"await",
"asyncio",
".",
"gather",
"(",
"*",
"[",
"hass",
".",
"config_entries",
".",
"async_forward_entry_unload",
"(",
"entry",
",",
"component",
")",
"for",
"component",
"in",
"PLATFORMS",
"]",
")",
")",
"if",
"unload_ok",
":",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
".",
"pop",
"(",
"entry",
".",
"entry_id",
")",
"return",
"unload_ok"
] | [
82,
0
] | [
95,
20
] | python | en | ['en', 'es', 'en'] | True |
MsgDispatcher.handle_initialize | (self, data) | Data is search space
| Data is search space
| def handle_initialize(self, data):
"""Data is search space
"""
self.tuner.update_search_space(data)
send(CommandType.Initialized, '') | [
"def",
"handle_initialize",
"(",
"self",
",",
"data",
")",
":",
"self",
".",
"tuner",
".",
"update_search_space",
"(",
"data",
")",
"send",
"(",
"CommandType",
".",
"Initialized",
",",
"''",
")"
] | [
85,
4
] | [
89,
41
] | python | en | ['en', 'en', 'en'] | True |
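An illustrative call, assuming `dispatcher` is an initialized `MsgDispatcher`; the payload follows NNI's `_type`/`_value` search-space convention, with made-up parameter names.

```python
search_space = {
    "learning_rate": {"_type": "loguniform", "_value": [1e-5, 1e-1]},
    "batch_size": {"_type": "choice", "_value": [16, 32, 64]},
}
dispatcher.handle_initialize(search_space)  # updates the tuner, then acks Initialized
```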
MsgDispatcher.send_trial_callback | (self, id_, params) | For tuner to issue trial config when the config is generated
| For tuner to issue trial config when the config is generated
| def send_trial_callback(self, id_, params):
"""For tuner to issue trial config when the config is generated
"""
send(CommandType.NewTrialJob, _pack_parameter(id_, params)) | [
"def",
"send_trial_callback",
"(",
"self",
",",
"id_",
",",
"params",
")",
":",
"send",
"(",
"CommandType",
".",
"NewTrialJob",
",",
"_pack_parameter",
"(",
"id_",
",",
"params",
")",
")"
] | [
91,
4
] | [
94,
67
] | python | en | ['en', 'en', 'en'] | True |
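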
MsgDispatcher.handle_import_data | (self, data) | Import additional data for tuning
data: a list of dictionaries, each of which has at least two keys, 'parameter' and 'value'
| Import additional data for tuning
data: a list of dictionaries, each of which has at least two keys, 'parameter' and 'value'
| def handle_import_data(self, data):
"""Import additional data for tuning
data: a list of dictionaries, each of which has at least two keys, 'parameter' and 'value'
"""
for entry in data:
entry['value'] = entry['value'] if type(entry['value']) is str else json_tricks.dumps(entry['value'])
entry['value'] = json_tricks.loads(entry['value'])
self.tuner.import_data(data) | [
"def",
"handle_import_data",
"(",
"self",
",",
"data",
")",
":",
"for",
"entry",
"in",
"data",
":",
"entry",
"[",
"'value'",
"]",
"=",
"entry",
"[",
"'value'",
"]",
"if",
"type",
"(",
"entry",
"[",
"'value'",
"]",
")",
"is",
"str",
"else",
"json_tricks",
".",
"dumps",
"(",
"entry",
"[",
"'value'",
"]",
")",
"entry",
"[",
"'value'",
"]",
"=",
"json_tricks",
".",
"loads",
"(",
"entry",
"[",
"'value'",
"]",
")",
"self",
".",
"tuner",
".",
"import_data",
"(",
"data",
")"
] | [
111,
4
] | [
118,
36
] | python | en | ['en', 'en', 'en'] | True |
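A sketch of the payload shape `handle_import_data` accepts, inferred from the docstring; because values are round-tripped through `json_tricks`, both raw objects and JSON strings are tolerated.

```python
imported = [
    {"parameter": {"learning_rate": 0.01, "batch_size": 32}, "value": 0.93},
    {"parameter": {"learning_rate": 0.001, "batch_size": 64}, "value": '{"default": 0.95}'},
]
dispatcher.handle_import_data(imported)  # `dispatcher` is an assumed MsgDispatcher
```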
MsgDispatcher.handle_report_metric_data | (self, data) |
data: a dict received from nni_manager, which contains:
- 'parameter_id': id of the trial
- 'value': metric value reported by nni.report_final_result()
- 'type': report type, support {'FINAL', 'PERIODICAL'}
|
data: a dict received from nni_manager, which contains:
- 'parameter_id': id of the trial
- 'value': metric value reported by nni.report_final_result()
- 'type': report type, support {'FINAL', 'PERIODICAL'}
| def handle_report_metric_data(self, data):
"""
data: a dict received from nni_manager, which contains:
- 'parameter_id': id of the trial
- 'value': metric value reported by nni.report_final_result()
- 'type': report type, support {'FINAL', 'PERIODICAL'}
"""
# metrics value is dumped as json string in trial, so we need to decode it here
if 'value' in data:
data['value'] = json_tricks.loads(data['value'])
if data['type'] == MetricType.FINAL:
self._handle_final_metric_data(data)
elif data['type'] == MetricType.PERIODICAL:
if self.assessor is not None:
self._handle_intermediate_metric_data(data)
elif data['type'] == MetricType.REQUEST_PARAMETER:
assert multi_phase_enabled()
assert data['trial_job_id'] is not None
assert data['parameter_index'] is not None
param_id = _create_parameter_id()
try:
param = self.tuner.generate_parameters(param_id, trial_job_id=data['trial_job_id'])
except NoMoreTrialError:
param = None
send(CommandType.SendTrialJobParameter, _pack_parameter(param_id, param, trial_job_id=data['trial_job_id'],
parameter_index=data['parameter_index']))
else:
raise ValueError('Data type not supported: {}'.format(data['type'])) | [
"def",
"handle_report_metric_data",
"(",
"self",
",",
"data",
")",
":",
"# metrics value is dumped as json string in trial, so we need to decode it here",
"if",
"'value'",
"in",
"data",
":",
"data",
"[",
"'value'",
"]",
"=",
"json_tricks",
".",
"loads",
"(",
"data",
"[",
"'value'",
"]",
")",
"if",
"data",
"[",
"'type'",
"]",
"==",
"MetricType",
".",
"FINAL",
":",
"self",
".",
"_handle_final_metric_data",
"(",
"data",
")",
"elif",
"data",
"[",
"'type'",
"]",
"==",
"MetricType",
".",
"PERIODICAL",
":",
"if",
"self",
".",
"assessor",
"is",
"not",
"None",
":",
"self",
".",
"_handle_intermediate_metric_data",
"(",
"data",
")",
"elif",
"data",
"[",
"'type'",
"]",
"==",
"MetricType",
".",
"REQUEST_PARAMETER",
":",
"assert",
"multi_phase_enabled",
"(",
")",
"assert",
"data",
"[",
"'trial_job_id'",
"]",
"is",
"not",
"None",
"assert",
"data",
"[",
"'parameter_index'",
"]",
"is",
"not",
"None",
"param_id",
"=",
"_create_parameter_id",
"(",
")",
"try",
":",
"param",
"=",
"self",
".",
"tuner",
".",
"generate_parameters",
"(",
"param_id",
",",
"trial_job_id",
"=",
"data",
"[",
"'trial_job_id'",
"]",
")",
"except",
"NoMoreTrialError",
":",
"param",
"=",
"None",
"send",
"(",
"CommandType",
".",
"SendTrialJobParameter",
",",
"_pack_parameter",
"(",
"param_id",
",",
"param",
",",
"trial_job_id",
"=",
"data",
"[",
"'trial_job_id'",
"]",
",",
"parameter_index",
"=",
"data",
"[",
"'parameter_index'",
"]",
")",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'Data type not supported: {}'",
".",
"format",
"(",
"data",
"[",
"'type'",
"]",
")",
")"
] | [
125,
4
] | [
152,
80
] | python | en | ['en', 'error', 'th'] | False |
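The kind of command payload this handler dispatches on, sketched with made-up IDs; `value` arrives JSON-encoded from the trial, which is why the handler decodes it first.

```python
dispatcher.handle_report_metric_data({
    "parameter_id": 7,
    "trial_job_id": "abc123",
    "type": MetricType.FINAL,  # or MetricType.PERIODICAL / MetricType.REQUEST_PARAMETER
    "value": "0.87",           # JSON string produced by nni.report_final_result()
    "sequence": 0,             # ordering index, used for PERIODICAL metrics
})
```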
MsgDispatcher.handle_trial_end | (self, data) |
data: it has three keys: trial_job_id, event, hyper_params
- trial_job_id: the id generated by training service
- event: the job's state
- hyper_params: the hyperparameters generated and returned by tuner
|
data: it has three keys: trial_job_id, event, hyper_params
- trial_job_id: the id generated by training service
- event: the job's state
- hyper_params: the hyperparameters generated and returned by tuner
| def handle_trial_end(self, data):
"""
data: it has three keys: trial_job_id, event, hyper_params
- trial_job_id: the id generated by training service
- event: the job's state
- hyper_params: the hyperparameters generated and returned by tuner
"""
trial_job_id = data['trial_job_id']
_ended_trials.add(trial_job_id)
if trial_job_id in _trial_history:
_trial_history.pop(trial_job_id)
if self.assessor is not None:
self.assessor.trial_end(trial_job_id, data['event'] == 'SUCCEEDED')
if self.tuner is not None:
self.tuner.trial_end(json_tricks.loads(data['hyper_params'])['parameter_id'], data['event'] == 'SUCCEEDED') | [
"def",
"handle_trial_end",
"(",
"self",
",",
"data",
")",
":",
"trial_job_id",
"=",
"data",
"[",
"'trial_job_id'",
"]",
"_ended_trials",
".",
"add",
"(",
"trial_job_id",
")",
"if",
"trial_job_id",
"in",
"_trial_history",
":",
"_trial_history",
".",
"pop",
"(",
"trial_job_id",
")",
"if",
"self",
".",
"assessor",
"is",
"not",
"None",
":",
"self",
".",
"assessor",
".",
"trial_end",
"(",
"trial_job_id",
",",
"data",
"[",
"'event'",
"]",
"==",
"'SUCCEEDED'",
")",
"if",
"self",
".",
"tuner",
"is",
"not",
"None",
":",
"self",
".",
"tuner",
".",
"trial_end",
"(",
"json_tricks",
".",
"loads",
"(",
"data",
"[",
"'hyper_params'",
"]",
")",
"[",
"'parameter_id'",
"]",
",",
"data",
"[",
"'event'",
"]",
"==",
"'SUCCEEDED'",
")"
] | [
154,
4
] | [
168,
119
] | python | en | ['en', 'error', 'th'] | False |
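An illustrative trial-end event; the three keys match the docstring, and `hyper_params` is the JSON the tuner originally emitted, which is why the handler can look up `parameter_id` inside it.

```python
dispatcher.handle_trial_end({
    "trial_job_id": "abc123",  # id assigned by the training service
    "event": "SUCCEEDED",      # job state; anything else counts as failure
    "hyper_params": '{"parameter_id": 7, "parameters": {"learning_rate": 0.01}}',
})
```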
MsgDispatcher._handle_final_metric_data | (self, data) | Call tuner to process final results
| Call tuner to process final results
| def _handle_final_metric_data(self, data):
"""Call tuner to process final results
"""
id_ = data['parameter_id']
value = data['value']
if id_ is None or id_ in _customized_parameter_ids:
if not hasattr(self.tuner, '_accept_customized'):
self.tuner._accept_customized = False
if not self.tuner._accept_customized:
_logger.info('Customized trial job %s ignored by tuner', id_)
return
customized = True
else:
customized = False
if id_ in _trial_params:
self.tuner.receive_trial_result(id_, _trial_params[id_], value, customized=customized,
trial_job_id=data.get('trial_job_id'))
else:
_logger.warning('Find unknown job parameter id %s, maybe something goes wrong.', id_)
"def",
"_handle_final_metric_data",
"(",
"self",
",",
"data",
")",
":",
"id_",
"=",
"data",
"[",
"'parameter_id'",
"]",
"value",
"=",
"data",
"[",
"'value'",
"]",
"if",
"id_",
"is",
"None",
"or",
"id_",
"in",
"_customized_parameter_ids",
":",
"if",
"not",
"hasattr",
"(",
"self",
".",
"tuner",
",",
"'_accept_customized'",
")",
":",
"self",
".",
"tuner",
".",
"_accept_customized",
"=",
"False",
"if",
"not",
"self",
".",
"tuner",
".",
"_accept_customized",
":",
"_logger",
".",
"info",
"(",
"'Customized trial job %s ignored by tuner'",
",",
"id_",
")",
"return",
"customized",
"=",
"True",
"else",
":",
"customized",
"=",
"False",
"if",
"id_",
"in",
"_trial_params",
":",
"self",
".",
"tuner",
".",
"receive_trial_result",
"(",
"id_",
",",
"_trial_params",
"[",
"id_",
"]",
",",
"value",
",",
"customized",
"=",
"customized",
",",
"trial_job_id",
"=",
"data",
".",
"get",
"(",
"'trial_job_id'",
")",
")",
"else",
":",
"_logger",
".",
"warning",
"(",
"'Find unknown job parameter id %s, maybe something goes wrong.'",
",",
"_trial_params",
"[",
"id_",
"]",
")"
] | [
170,
4
] | [
188,
112
] | python | en | ['en', 'en', 'en'] | True |
MsgDispatcher._handle_intermediate_metric_data | (self, data) | Call assessor to process intermediate results
| Call assessor to process intermediate results
| def _handle_intermediate_metric_data(self, data):
"""Call assessor to process intermediate results
"""
if data['type'] != MetricType.PERIODICAL:
return
if self.assessor is None:
return
trial_job_id = data['trial_job_id']
if trial_job_id in _ended_trials:
return
history = _trial_history[trial_job_id]
history[data['sequence']] = data['value']
ordered_history = _sort_history(history)
if len(ordered_history) < data['sequence']: # no user-visible update since last time
return
try:
result = self.assessor.assess_trial(trial_job_id, ordered_history)
except Exception as e:
_logger.error('Assessor error')
_logger.exception(e)
return
if isinstance(result, bool):
result = AssessResult.Good if result else AssessResult.Bad
elif not isinstance(result, AssessResult):
msg = 'Result of Assessor.assess_trial must be an object of AssessResult, not %s'
raise RuntimeError(msg % type(result))
if result is AssessResult.Bad:
_logger.debug('BAD, kill %s', trial_job_id)
send(CommandType.KillTrialJob, json_tricks.dumps(trial_job_id))
# notify tuner
_logger.debug('env var: NNI_INCLUDE_INTERMEDIATE_RESULTS: [%s]',
dispatcher_env_vars.NNI_INCLUDE_INTERMEDIATE_RESULTS)
if dispatcher_env_vars.NNI_INCLUDE_INTERMEDIATE_RESULTS == 'true':
self._earlystop_notify_tuner(data)
else:
_logger.debug('GOOD') | [
"def",
"_handle_intermediate_metric_data",
"(",
"self",
",",
"data",
")",
":",
"if",
"data",
"[",
"'type'",
"]",
"!=",
"MetricType",
".",
"PERIODICAL",
":",
"return",
"if",
"self",
".",
"assessor",
"is",
"None",
":",
"return",
"trial_job_id",
"=",
"data",
"[",
"'trial_job_id'",
"]",
"if",
"trial_job_id",
"in",
"_ended_trials",
":",
"return",
"history",
"=",
"_trial_history",
"[",
"trial_job_id",
"]",
"history",
"[",
"data",
"[",
"'sequence'",
"]",
"]",
"=",
"data",
"[",
"'value'",
"]",
"ordered_history",
"=",
"_sort_history",
"(",
"history",
")",
"if",
"len",
"(",
"ordered_history",
")",
"<",
"data",
"[",
"'sequence'",
"]",
":",
"# no user-visible update since last time",
"return",
"try",
":",
"result",
"=",
"self",
".",
"assessor",
".",
"assess_trial",
"(",
"trial_job_id",
",",
"ordered_history",
")",
"except",
"Exception",
"as",
"e",
":",
"_logger",
".",
"error",
"(",
"'Assessor error'",
")",
"_logger",
".",
"exception",
"(",
"e",
")",
"if",
"isinstance",
"(",
"result",
",",
"bool",
")",
":",
"result",
"=",
"AssessResult",
".",
"Good",
"if",
"result",
"else",
"AssessResult",
".",
"Bad",
"elif",
"not",
"isinstance",
"(",
"result",
",",
"AssessResult",
")",
":",
"msg",
"=",
"'Result of Assessor.assess_trial must be an object of AssessResult, not %s'",
"raise",
"RuntimeError",
"(",
"msg",
"%",
"type",
"(",
"result",
")",
")",
"if",
"result",
"is",
"AssessResult",
".",
"Bad",
":",
"_logger",
".",
"debug",
"(",
"'BAD, kill %s'",
",",
"trial_job_id",
")",
"send",
"(",
"CommandType",
".",
"KillTrialJob",
",",
"json_tricks",
".",
"dumps",
"(",
"trial_job_id",
")",
")",
"# notify tuner",
"_logger",
".",
"debug",
"(",
"'env var: NNI_INCLUDE_INTERMEDIATE_RESULTS: [%s]'",
",",
"dispatcher_env_vars",
".",
"NNI_INCLUDE_INTERMEDIATE_RESULTS",
")",
"if",
"dispatcher_env_vars",
".",
"NNI_INCLUDE_INTERMEDIATE_RESULTS",
"==",
"'true'",
":",
"self",
".",
"_earlystop_notify_tuner",
"(",
"data",
")",
"else",
":",
"_logger",
".",
"debug",
"(",
"'GOOD'",
")"
] | [
190,
4
] | [
229,
33
] | python | en | ['en', 'en', 'en'] | True |
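A minimal assessor the code above could drive; the base class and `AssessResult` follow NNI's public API (import path assumed), while the threshold rule is invented for illustration.

```python
from nni.assessor import Assessor, AssessResult  # import path assumed

class FloorAssessor(Assessor):
    """Kill a trial as soon as its latest intermediate metric drops below 0.5."""

    def assess_trial(self, trial_job_id, trial_history):
        # trial_history is the sequence-ordered list built by _sort_history().
        return AssessResult.Good if trial_history[-1] >= 0.5 else AssessResult.Bad
```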
MsgDispatcher._earlystop_notify_tuner | (self, data) | Send last intermediate result as final result to tuner in case the
trial is early stopped.
| Send last intermediate result as final result to tuner in case the
trial is early stopped.
| def _earlystop_notify_tuner(self, data):
"""Send last intermediate result as final result to tuner in case the
trial is early stopped.
"""
_logger.debug('Early stop notify tuner data: [%s]', data)
data['type'] = MetricType.FINAL
if multi_thread_enabled():
self._handle_final_metric_data(data)
else:
data['value'] = to_json(data['value'])
self.enqueue_command(CommandType.ReportMetricData, data) | [
"def",
"_earlystop_notify_tuner",
"(",
"self",
",",
"data",
")",
":",
"_logger",
".",
"debug",
"(",
"'Early stop notify tuner data: [%s]'",
",",
"data",
")",
"data",
"[",
"'type'",
"]",
"=",
"MetricType",
".",
"FINAL",
"if",
"multi_thread_enabled",
"(",
")",
":",
"self",
".",
"_handle_final_metric_data",
"(",
"data",
")",
"else",
":",
"data",
"[",
"'value'",
"]",
"=",
"to_json",
"(",
"data",
"[",
"'value'",
"]",
")",
"self",
".",
"enqueue_command",
"(",
"CommandType",
".",
"ReportMetricData",
",",
"data",
")"
] | [
231,
4
] | [
241,
68
] | python | en | ['en', 'en', 'en'] | True |
async_setup_platform | (hass, config, async_add_entities, discovery_info=None) | Set up a Logi Circle Camera. Obsolete. | Set up a Logi Circle Camera. Obsolete. | async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up a Logi Circle Camera. Obsolete."""
_LOGGER.warning("Logi Circle no longer works with camera platform configuration") | [
"async",
"def",
"async_setup_platform",
"(",
"hass",
",",
"config",
",",
"async_add_entities",
",",
"discovery_info",
"=",
"None",
")",
":",
"_LOGGER",
".",
"warning",
"(",
"\"Logi Circle no longer works with camera platform configuration\"",
")"
] | [
31,
0
] | [
33,
85
] | python | en | ['en', 'jv', 'en'] | True |
async_setup_entry | (hass, entry, async_add_entities) | Set up a Logi Circle Camera based on a config entry. | Set up a Logi Circle Camera based on a config entry. | async def async_setup_entry(hass, entry, async_add_entities):
"""Set up a Logi Circle Camera based on a config entry."""
devices = await hass.data[LOGI_CIRCLE_DOMAIN].cameras
ffmpeg = hass.data[DATA_FFMPEG]
cameras = [LogiCam(device, entry, ffmpeg) for device in devices]
async_add_entities(cameras, True) | [
"async",
"def",
"async_setup_entry",
"(",
"hass",
",",
"entry",
",",
"async_add_entities",
")",
":",
"devices",
"=",
"await",
"hass",
".",
"data",
"[",
"LOGI_CIRCLE_DOMAIN",
"]",
".",
"cameras",
"ffmpeg",
"=",
"hass",
".",
"data",
"[",
"DATA_FFMPEG",
"]",
"cameras",
"=",
"[",
"LogiCam",
"(",
"device",
",",
"entry",
",",
"ffmpeg",
")",
"for",
"device",
"in",
"devices",
"]",
"async_add_entities",
"(",
"cameras",
",",
"True",
")"
] | [
36,
0
] | [
43,
37
] | python | en | ['en', 'zu', 'en'] | True |
LogiCam.__init__ | (self, camera, device_info, ffmpeg) | Initialize Logi Circle camera. | Initialize Logi Circle camera. | def __init__(self, camera, device_info, ffmpeg):
"""Initialize Logi Circle camera."""
super().__init__()
self._camera = camera
self._name = self._camera.name
self._id = self._camera.mac_address
self._has_battery = self._camera.supports_feature("battery_level")
self._ffmpeg = ffmpeg
self._listeners = [] | [
"def",
"__init__",
"(",
"self",
",",
"camera",
",",
"device_info",
",",
"ffmpeg",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
")",
"self",
".",
"_camera",
"=",
"camera",
"self",
".",
"_name",
"=",
"self",
".",
"_camera",
".",
"name",
"self",
".",
"_id",
"=",
"self",
".",
"_camera",
".",
"mac_address",
"self",
".",
"_has_battery",
"=",
"self",
".",
"_camera",
".",
"supports_feature",
"(",
"\"battery_level\"",
")",
"self",
".",
"_ffmpeg",
"=",
"ffmpeg",
"self",
".",
"_listeners",
"=",
"[",
"]"
] | [
49,
4
] | [
57,
28
] | python | co | ['en', 'co', 'it'] | False |
LogiCam.async_added_to_hass | (self) | Connect camera methods to signals. | Connect camera methods to signals. | async def async_added_to_hass(self):
"""Connect camera methods to signals."""
def _dispatch_proxy(method):
"""Expand parameters & filter entity IDs."""
async def _call(params):
entity_ids = params.get(ATTR_ENTITY_ID)
filtered_params = {
k: v for k, v in params.items() if k != ATTR_ENTITY_ID
}
if entity_ids is None or self.entity_id in entity_ids:
await method(**filtered_params)
return _call
self._listeners.extend(
[
async_dispatcher_connect(
self.hass,
SIGNAL_LOGI_CIRCLE_RECONFIGURE,
_dispatch_proxy(self.set_config),
),
async_dispatcher_connect(
self.hass,
SIGNAL_LOGI_CIRCLE_SNAPSHOT,
_dispatch_proxy(self.livestream_snapshot),
),
async_dispatcher_connect(
self.hass,
SIGNAL_LOGI_CIRCLE_RECORD,
_dispatch_proxy(self.download_livestream),
),
]
) | [
"async",
"def",
"async_added_to_hass",
"(",
"self",
")",
":",
"def",
"_dispatch_proxy",
"(",
"method",
")",
":",
"\"\"\"Expand parameters & filter entity IDs.\"\"\"",
"async",
"def",
"_call",
"(",
"params",
")",
":",
"entity_ids",
"=",
"params",
".",
"get",
"(",
"ATTR_ENTITY_ID",
")",
"filtered_params",
"=",
"{",
"k",
":",
"v",
"for",
"k",
",",
"v",
"in",
"params",
".",
"items",
"(",
")",
"if",
"k",
"!=",
"ATTR_ENTITY_ID",
"}",
"if",
"entity_ids",
"is",
"None",
"or",
"self",
".",
"entity_id",
"in",
"entity_ids",
":",
"await",
"method",
"(",
"*",
"*",
"filtered_params",
")",
"return",
"_call",
"self",
".",
"_listeners",
".",
"extend",
"(",
"[",
"async_dispatcher_connect",
"(",
"self",
".",
"hass",
",",
"SIGNAL_LOGI_CIRCLE_RECONFIGURE",
",",
"_dispatch_proxy",
"(",
"self",
".",
"set_config",
")",
",",
")",
",",
"async_dispatcher_connect",
"(",
"self",
".",
"hass",
",",
"SIGNAL_LOGI_CIRCLE_SNAPSHOT",
",",
"_dispatch_proxy",
"(",
"self",
".",
"livestream_snapshot",
")",
",",
")",
",",
"async_dispatcher_connect",
"(",
"self",
".",
"hass",
",",
"SIGNAL_LOGI_CIRCLE_RECORD",
",",
"_dispatch_proxy",
"(",
"self",
".",
"download_livestream",
")",
",",
")",
",",
"]",
")"
] | [
59,
4
] | [
93,
9
] | python | en | ['en', 'en', 'en'] | True |
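How those dispatcher signals would be fired from the service layer, sketched with a hypothetical entity id; the signal name and the `ATTR_ENTITY_ID` filtering mirror the `_dispatch_proxy` logic above.

```python
from homeassistant.helpers.dispatcher import async_dispatcher_send

# Hypothetical service handler: ask one camera (or all cameras, if
# ATTR_ENTITY_ID is omitted) to take a livestream snapshot.
async_dispatcher_send(
    hass,
    SIGNAL_LOGI_CIRCLE_SNAPSHOT,
    {ATTR_ENTITY_ID: ["camera.front_door"], "filename": snapshot_filename_template},
)
```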
LogiCam.async_will_remove_from_hass | (self) | Disconnect dispatcher listeners when removed. | Disconnect dispatcher listeners when removed. | async def async_will_remove_from_hass(self):
"""Disconnect dispatcher listeners when removed."""
for detach in self._listeners:
detach() | [
"async",
"def",
"async_will_remove_from_hass",
"(",
"self",
")",
":",
"for",
"detach",
"in",
"self",
".",
"_listeners",
":",
"detach",
"(",
")"
] | [
95,
4
] | [
98,
20
] | python | en | ['en', 'en', 'en'] | True |
LogiCam.unique_id | (self) | Return a unique ID. | Return a unique ID. | def unique_id(self):
"""Return a unique ID."""
return self._id | [
"def",
"unique_id",
"(",
"self",
")",
":",
"return",
"self",
".",
"_id"
] | [
101,
4
] | [
103,
23
] | python | ca | ['fr', 'ca', 'en'] | False |
LogiCam.name | (self) | Return the name of this camera. | Return the name of this camera. | def name(self):
"""Return the name of this camera."""
return self._name | [
"def",
"name",
"(",
"self",
")",
":",
"return",
"self",
".",
"_name"
] | [
106,
4
] | [
108,
25
] | python | en | ['en', 'en', 'en'] | True |
LogiCam.supported_features | (self) | Logi Circle cameras support turning on and off ("soft" switch). | Logi Circle cameras support turning on and off ("soft" switch). | def supported_features(self):
"""Logi Circle cameras support turning on and off ("soft" switch)."""
return SUPPORT_ON_OFF | [
"def",
"supported_features",
"(",
"self",
")",
":",
"return",
"SUPPORT_ON_OFF"
] | [
111,
4
] | [
113,
29
] | python | en | ['en', 'en', 'en'] | True |
LogiCam.device_info | (self) | Return information about the device. | Return information about the device. | def device_info(self):
"""Return information about the device."""
return {
"name": self._camera.name,
"identifiers": {(LOGI_CIRCLE_DOMAIN, self._camera.id)},
"model": self._camera.model_name,
"sw_version": self._camera.firmware,
"manufacturer": DEVICE_BRAND,
} | [
"def",
"device_info",
"(",
"self",
")",
":",
"return",
"{",
"\"name\"",
":",
"self",
".",
"_camera",
".",
"name",
",",
"\"identifiers\"",
":",
"{",
"(",
"LOGI_CIRCLE_DOMAIN",
",",
"self",
".",
"_camera",
".",
"id",
")",
"}",
",",
"\"model\"",
":",
"self",
".",
"_camera",
".",
"model_name",
",",
"\"sw_version\"",
":",
"self",
".",
"_camera",
".",
"firmware",
",",
"\"manufacturer\"",
":",
"DEVICE_BRAND",
",",
"}"
] | [
116,
4
] | [
124,
9
] | python | en | ['en', 'en', 'en'] | True |
LogiCam.device_state_attributes | (self) | Return the state attributes. | Return the state attributes. | def device_state_attributes(self):
"""Return the state attributes."""
state = {
ATTR_ATTRIBUTION: ATTRIBUTION,
"battery_saving_mode": (
STATE_ON if self._camera.battery_saving else STATE_OFF
),
"microphone_gain": self._camera.microphone_gain,
}
# Add battery attributes if camera is battery-powered
if self._has_battery:
state[ATTR_BATTERY_CHARGING] = self._camera.charging
state[ATTR_BATTERY_LEVEL] = self._camera.battery_level
return state | [
"def",
"device_state_attributes",
"(",
"self",
")",
":",
"state",
"=",
"{",
"ATTR_ATTRIBUTION",
":",
"ATTRIBUTION",
",",
"\"battery_saving_mode\"",
":",
"(",
"STATE_ON",
"if",
"self",
".",
"_camera",
".",
"battery_saving",
"else",
"STATE_OFF",
")",
",",
"\"microphone_gain\"",
":",
"self",
".",
"_camera",
".",
"microphone_gain",
",",
"}",
"# Add battery attributes if camera is battery-powered",
"if",
"self",
".",
"_has_battery",
":",
"state",
"[",
"ATTR_BATTERY_CHARGING",
"]",
"=",
"self",
".",
"_camera",
".",
"charging",
"state",
"[",
"ATTR_BATTERY_LEVEL",
"]",
"=",
"self",
".",
"_camera",
".",
"battery_level",
"return",
"state"
] | [
127,
4
] | [
142,
20
] | python | en | ['en', 'en', 'en'] | True |
LogiCam.async_camera_image | (self) | Return a still image from the camera. | Return a still image from the camera. | async def async_camera_image(self):
"""Return a still image from the camera."""
return await self._camera.live_stream.download_jpeg() | [
"async",
"def",
"async_camera_image",
"(",
"self",
")",
":",
"return",
"await",
"self",
".",
"_camera",
".",
"live_stream",
".",
"download_jpeg",
"(",
")"
] | [
144,
4
] | [
146,
61
] | python | en | ['en', 'en', 'en'] | True |
LogiCam.async_turn_off | (self) | Disable streaming mode for this camera. | Disable streaming mode for this camera. | async def async_turn_off(self):
"""Disable streaming mode for this camera."""
await self._camera.set_config("streaming", False) | [
"async",
"def",
"async_turn_off",
"(",
"self",
")",
":",
"await",
"self",
".",
"_camera",
".",
"set_config",
"(",
"\"streaming\"",
",",
"False",
")"
] | [
148,
4
] | [
150,
57
] | python | en | ['en', 'en', 'en'] | True |
LogiCam.async_turn_on | (self) | Enable streaming mode for this camera. | Enable streaming mode for this camera. | async def async_turn_on(self):
"""Enable streaming mode for this camera."""
await self._camera.set_config("streaming", True) | [
"async",
"def",
"async_turn_on",
"(",
"self",
")",
":",
"await",
"self",
".",
"_camera",
".",
"set_config",
"(",
"\"streaming\"",
",",
"True",
")"
] | [
152,
4
] | [
154,
56
] | python | en | ['en', 'en', 'en'] | True |
LogiCam.should_poll | (self) | Update the image periodically. | Update the image periodically. | def should_poll(self):
"""Update the image periodically."""
return True | [
"def",
"should_poll",
"(",
"self",
")",
":",
"return",
"True"
] | [
157,
4
] | [
159,
19
] | python | en | ['en', 'en', 'en'] | True |
LogiCam.set_config | (self, mode, value) | Set a configuration property for the target camera. | Set a configuration property for the target camera. | async def set_config(self, mode, value):
"""Set a configuration property for the target camera."""
if mode == LED_MODE_KEY:
await self._camera.set_config("led", value)
if mode == RECORDING_MODE_KEY:
await self._camera.set_config("recording_disabled", not value) | [
"async",
"def",
"set_config",
"(",
"self",
",",
"mode",
",",
"value",
")",
":",
"if",
"mode",
"==",
"LED_MODE_KEY",
":",
"await",
"self",
".",
"_camera",
".",
"set_config",
"(",
"\"led\"",
",",
"value",
")",
"if",
"mode",
"==",
"RECORDING_MODE_KEY",
":",
"await",
"self",
".",
"_camera",
".",
"set_config",
"(",
"\"recording_disabled\"",
",",
"not",
"value",
")"
] | [
161,
4
] | [
166,
74
] | python | en | ['en', 'en', 'en'] | True |
LogiCam.download_livestream | (self, filename, duration) | Download a recording from the camera's livestream. | Download a recording from the camera's livestream. | async def download_livestream(self, filename, duration):
"""Download a recording from the camera's livestream."""
# Render filename from template.
filename.hass = self.hass
stream_file = filename.async_render(variables={ATTR_ENTITY_ID: self.entity_id})
# Respect configured allowed paths.
if not self.hass.config.is_allowed_path(stream_file):
_LOGGER.error("Can't write %s, no access to path!", stream_file)
return
await self._camera.live_stream.download_rtsp(
filename=stream_file,
duration=timedelta(seconds=duration),
ffmpeg_bin=self._ffmpeg.binary,
) | [
"async",
"def",
"download_livestream",
"(",
"self",
",",
"filename",
",",
"duration",
")",
":",
"# Render filename from template.",
"filename",
".",
"hass",
"=",
"self",
".",
"hass",
"stream_file",
"=",
"filename",
".",
"async_render",
"(",
"variables",
"=",
"{",
"ATTR_ENTITY_ID",
":",
"self",
".",
"entity_id",
"}",
")",
"# Respect configured allowed paths.",
"if",
"not",
"self",
".",
"hass",
".",
"config",
".",
"is_allowed_path",
"(",
"stream_file",
")",
":",
"_LOGGER",
".",
"error",
"(",
"\"Can't write %s, no access to path!\"",
",",
"stream_file",
")",
"return",
"await",
"self",
".",
"_camera",
".",
"live_stream",
".",
"download_rtsp",
"(",
"filename",
"=",
"stream_file",
",",
"duration",
"=",
"timedelta",
"(",
"seconds",
"=",
"duration",
")",
",",
"ffmpeg_bin",
"=",
"self",
".",
"_ffmpeg",
".",
"binary",
",",
")"
] | [
168,
4
] | [
183,
9
] | python | en | ['en', 'en', 'en'] | True |
LogiCam.livestream_snapshot | (self, filename) | Download a still frame from the camera's livestream. | Download a still frame from the camera's livestream. | async def livestream_snapshot(self, filename):
"""Download a still frame from the camera's livestream."""
# Render filename from template.
filename.hass = self.hass
snapshot_file = filename.async_render(
variables={ATTR_ENTITY_ID: self.entity_id}
)
# Respect configured allowed paths.
if not self.hass.config.is_allowed_path(snapshot_file):
_LOGGER.error("Can't write %s, no access to path!", snapshot_file)
return
await self._camera.live_stream.download_jpeg(
filename=snapshot_file, refresh=True
) | [
"async",
"def",
"livestream_snapshot",
"(",
"self",
",",
"filename",
")",
":",
"# Render filename from template.",
"filename",
".",
"hass",
"=",
"self",
".",
"hass",
"snapshot_file",
"=",
"filename",
".",
"async_render",
"(",
"variables",
"=",
"{",
"ATTR_ENTITY_ID",
":",
"self",
".",
"entity_id",
"}",
")",
"# Respect configured allowed paths.",
"if",
"not",
"self",
".",
"hass",
".",
"config",
".",
"is_allowed_path",
"(",
"snapshot_file",
")",
":",
"_LOGGER",
".",
"error",
"(",
"\"Can't write %s, no access to path!\"",
",",
"snapshot_file",
")",
"return",
"await",
"self",
".",
"_camera",
".",
"live_stream",
".",
"download_jpeg",
"(",
"filename",
"=",
"snapshot_file",
",",
"refresh",
"=",
"True",
")"
] | [
185,
4
] | [
200,
9
] | python | en | ['en', 'en', 'en'] | True |
LogiCam.async_update | (self) | Update camera entity and refresh attributes. | Update camera entity and refresh attributes. | async def async_update(self):
"""Update camera entity and refresh attributes."""
await self._camera.update() | [
"async",
"def",
"async_update",
"(",
"self",
")",
":",
"await",
"self",
".",
"_camera",
".",
"update",
"(",
")"
] | [
202,
4
] | [
204,
35
] | python | en | ['en', 'en', 'en'] | True |
setup_platform | (hass, config, add_devices, discovery_info=None) | Set up the Starling Bank sensor platform. | Set up the Starling Bank sensor platform. | def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Starling Bank sensor platform."""
sensors = []
for account in config[CONF_ACCOUNTS]:
try:
starling_account = StarlingAccount(
account[CONF_ACCESS_TOKEN], sandbox=account[CONF_SANDBOX]
)
for balance_type in account[CONF_BALANCE_TYPES]:
sensors.append(
StarlingBalanceSensor(
starling_account, account[CONF_NAME], balance_type
)
)
except requests.exceptions.HTTPError as error:
_LOGGER.error(
"Unable to set up Starling account '%s': %s", account[CONF_NAME], error
)
add_devices(sensors, True) | [
"def",
"setup_platform",
"(",
"hass",
",",
"config",
",",
"add_devices",
",",
"discovery_info",
"=",
"None",
")",
":",
"sensors",
"=",
"[",
"]",
"for",
"account",
"in",
"config",
"[",
"CONF_ACCOUNTS",
"]",
":",
"try",
":",
"starling_account",
"=",
"StarlingAccount",
"(",
"account",
"[",
"CONF_ACCESS_TOKEN",
"]",
",",
"sandbox",
"=",
"account",
"[",
"CONF_SANDBOX",
"]",
")",
"for",
"balance_type",
"in",
"account",
"[",
"CONF_BALANCE_TYPES",
"]",
":",
"sensors",
".",
"append",
"(",
"StarlingBalanceSensor",
"(",
"starling_account",
",",
"account",
"[",
"CONF_NAME",
"]",
",",
"balance_type",
")",
")",
"except",
"requests",
".",
"exceptions",
".",
"HTTPError",
"as",
"error",
":",
"_LOGGER",
".",
"error",
"(",
"\"Unable to set up Starling account '%s': %s\"",
",",
"account",
"[",
"CONF_NAME",
"]",
",",
"error",
")",
"add_devices",
"(",
"sensors",
",",
"True",
")"
] | [
41,
0
] | [
61,
30
] | python | en | ['en', 'da', 'en'] | True |
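The platform configuration shape consumed above, written out as the parsed dict; key names follow the `CONF_*` constants in the code, and the token is a placeholder.

```python
config = {
    CONF_ACCOUNTS: [
        {
            CONF_NAME: "Personal",
            CONF_ACCESS_TOKEN: "your-starling-personal-access-token",  # placeholder
            CONF_SANDBOX: False,
            CONF_BALANCE_TYPES: ["cleared_balance", "effective_balance"],
        }
    ]
}
```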
StarlingBalanceSensor.__init__ | (self, starling_account, account_name, balance_data_type) | Initialize the sensor. | Initialize the sensor. | def __init__(self, starling_account, account_name, balance_data_type):
"""Initialize the sensor."""
self._starling_account = starling_account
self._balance_data_type = balance_data_type
self._state = None
self._account_name = account_name | [
"def",
"__init__",
"(",
"self",
",",
"starling_account",
",",
"account_name",
",",
"balance_data_type",
")",
":",
"self",
".",
"_starling_account",
"=",
"starling_account",
"self",
".",
"_balance_data_type",
"=",
"balance_data_type",
"self",
".",
"_state",
"=",
"None",
"self",
".",
"_account_name",
"=",
"account_name"
] | [
67,
4
] | [
72,
41
] | python | en | ['en', 'en', 'en'] | True |
StarlingBalanceSensor.name | (self) | Return the name of the sensor. | Return the name of the sensor. | def name(self):
"""Return the name of the sensor."""
return "{} {}".format(
self._account_name, self._balance_data_type.replace("_", " ").capitalize()
) | [
"def",
"name",
"(",
"self",
")",
":",
"return",
"\"{} {}\"",
".",
"format",
"(",
"self",
".",
"_account_name",
",",
"self",
".",
"_balance_data_type",
".",
"replace",
"(",
"\"_\"",
",",
"\" \"",
")",
".",
"capitalize",
"(",
")",
")"
] | [
75,
4
] | [
79,
9
] | python | en | ['en', 'mi', 'en'] | True |
StarlingBalanceSensor.state | (self) | Return the state of the sensor. | Return the state of the sensor. | def state(self):
"""Return the state of the sensor."""
return self._state | [
"def",
"state",
"(",
"self",
")",
":",
"return",
"self",
".",
"_state"
] | [
82,
4
] | [
84,
26
] | python | en | ['en', 'en', 'en'] | True |
StarlingBalanceSensor.unit_of_measurement | (self) | Return the unit of measurement. | Return the unit of measurement. | def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._starling_account.currency | [
"def",
"unit_of_measurement",
"(",
"self",
")",
":",
"return",
"self",
".",
"_starling_account",
".",
"currency"
] | [
87,
4
] | [
89,
46
] | python | en | ['en', 'la', 'en'] | True |
StarlingBalanceSensor.icon | (self) | Return the entity icon. | Return the entity icon. | def icon(self):
"""Return the entity icon."""
return ICON | [
"def",
"icon",
"(",
"self",
")",
":",
"return",
"ICON"
] | [
92,
4
] | [
94,
19
] | python | en | ['en', 'cy', 'en'] | True |
StarlingBalanceSensor.update | (self) | Fetch new state data for the sensor. | Fetch new state data for the sensor. | def update(self):
"""Fetch new state data for the sensor."""
self._starling_account.update_balance_data()
if self._balance_data_type == "cleared_balance":
self._state = self._starling_account.cleared_balance / 100
elif self._balance_data_type == "effective_balance":
self._state = self._starling_account.effective_balance / 100 | [
"def",
"update",
"(",
"self",
")",
":",
"self",
".",
"_starling_account",
".",
"update_balance_data",
"(",
")",
"if",
"self",
".",
"_balance_data_type",
"==",
"\"cleared_balance\"",
":",
"self",
".",
"_state",
"=",
"self",
".",
"_starling_account",
".",
"cleared_balance",
"/",
"100",
"elif",
"self",
".",
"_balance_data_type",
"==",
"\"effective_balance\"",
":",
"self",
".",
"_state",
"=",
"self",
".",
"_starling_account",
".",
"effective_balance",
"/",
"100"
] | [
96,
4
] | [
102,
72
] | python | en | ['en', 'en', 'en'] | True |
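The `/ 100` above converts the API's minor currency units into major units; a quick worked example:

```python
cleared_balance = 12345                   # as reported by the Starling API, in pence (GBP)
assert cleared_balance / 100 == 123.45    # the value the sensor exposes
```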
set_seed | (seed: int) |
Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf`` (if
installed).
Args:
seed (:obj:`int`): The seed to set.
|
Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf`` (if
installed). | def set_seed(seed: int):
"""
Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf`` (if
installed).
Args:
seed (:obj:`int`): The seed to set.
"""
random.seed(seed)
np.random.seed(seed)
if is_torch_available():
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# ^^ safe to call this function even if cuda is not available
if is_tf_available():
tf.random.set_seed(seed) | [
"def",
"set_seed",
"(",
"seed",
":",
"int",
")",
":",
"random",
".",
"seed",
"(",
"seed",
")",
"np",
".",
"random",
".",
"seed",
"(",
"seed",
")",
"if",
"is_torch_available",
"(",
")",
":",
"torch",
".",
"manual_seed",
"(",
"seed",
")",
"torch",
".",
"cuda",
".",
"manual_seed_all",
"(",
"seed",
")",
"# ^^ safe to call this function even if cuda is not available",
"if",
"is_tf_available",
"(",
")",
":",
"tf",
".",
"random",
".",
"set_seed",
"(",
"seed",
")"
] | [
48,
0
] | [
63,
32
] | python | en | ['en', 'error', 'th'] | False |
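Typical usage: re-seeding before each run makes the framework RNGs repeat their draws. A small self-check, assuming NumPy is installed:

```python
import numpy as np

set_seed(42)
a = np.random.rand(3)
set_seed(42)
b = np.random.rand(3)
assert (a == b).all()  # identical draws after re-seeding
```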
default_compute_objective | (metrics: Dict[str, float]) |
The default objective to maximize/minimize when doing a hyperparameter search. It is the evaluation loss if no
metrics are provided to the :class:`~transformers.Trainer`, the sum of all metrics otherwise.
Args:
metrics (:obj:`Dict[str, float]`): The metrics returned by the evaluate method.
Return:
:obj:`float`: The objective to minimize or maximize
|
The default objective to maximize/minimize when doing a hyperparameter search. It is the evaluation loss if no
metrics are provided to the :class:`~transformers.Trainer`, the sum of all metrics otherwise. | def default_compute_objective(metrics: Dict[str, float]) -> float:
"""
The default objective to maximize/minimize when doing a hyperparameter search. It is the evaluation loss if no
metrics are provided to the :class:`~transformers.Trainer`, the sum of all metrics otherwise.
Args:
metrics (:obj:`Dict[str, float]`): The metrics returned by the evaluate method.
Return:
:obj:`float`: The objective to minimize or maximize
"""
metrics = copy.deepcopy(metrics)
loss = metrics.pop("eval_loss", None)
_ = metrics.pop("epoch", None)
# Remove speed metrics
speed_metrics = [m for m in metrics.keys() if m.endswith("_runtime") or m.endswith("_samples_per_second")]
for sm in speed_metrics:
_ = metrics.pop(sm, None)
return loss if len(metrics) == 0 else sum(metrics.values()) | [
"def",
"default_compute_objective",
"(",
"metrics",
":",
"Dict",
"[",
"str",
",",
"float",
"]",
")",
"->",
"float",
":",
"metrics",
"=",
"copy",
".",
"deepcopy",
"(",
"metrics",
")",
"loss",
"=",
"metrics",
".",
"pop",
"(",
"\"eval_loss\"",
",",
"None",
")",
"_",
"=",
"metrics",
".",
"pop",
"(",
"\"epoch\"",
",",
"None",
")",
"# Remove speed metrics",
"speed_metrics",
"=",
"[",
"m",
"for",
"m",
"in",
"metrics",
".",
"keys",
"(",
")",
"if",
"m",
".",
"endswith",
"(",
"\"_runtime\"",
")",
"or",
"m",
".",
"endswith",
"(",
"\"_samples_per_second\"",
")",
"]",
"for",
"sm",
"in",
"speed_metrics",
":",
"_",
"=",
"metrics",
".",
"pop",
"(",
"sm",
",",
"None",
")",
"return",
"loss",
"if",
"len",
"(",
"metrics",
")",
"==",
"0",
"else",
"sum",
"(",
"metrics",
".",
"values",
"(",
")",
")"
] | [
138,
0
] | [
156,
63
] | python | en | ['en', 'error', 'th'] | False |
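Worked examples of the branch on the last line: the loss is the objective only when nothing else survives the filtering; otherwise the surviving metrics are summed.

```python
# Only eval_loss remains after dropping epoch and speed metrics -> loss is returned.
default_compute_objective({"eval_loss": 0.42, "epoch": 3.0, "eval_runtime": 12.5})  # 0.42

# Another metric survives -> its sum is returned instead of the loss.
default_compute_objective({"eval_loss": 0.42, "eval_accuracy": 0.91})               # 0.91
default_compute_objective({"eval_f1": 0.80, "eval_accuracy": 0.90})                 # 1.70
```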
is_main_process | (local_rank) |
Whether or not the current process is the local process, based on `xm.get_ordinal()` (for TPUs) first, then on
`local_rank`.
|
Whether or not the current process is the local process, based on `xm.get_ordinal()` (for TPUs) first, then on
`local_rank`.
| def is_main_process(local_rank):
"""
Whether or not the current process is the local process, based on `xm.get_ordinal()` (for TPUs) first, then on
`local_rank`.
"""
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
return xm.get_ordinal() == 0
return local_rank in [-1, 0] | [
"def",
"is_main_process",
"(",
"local_rank",
")",
":",
"if",
"is_torch_tpu_available",
"(",
")",
":",
"import",
"torch_xla",
".",
"core",
".",
"xla_model",
"as",
"xm",
"return",
"xm",
".",
"get_ordinal",
"(",
")",
"==",
"0",
"return",
"local_rank",
"in",
"[",
"-",
"1",
",",
"0",
"]"
] | [
196,
0
] | [
205,
32
] | python | en | ['en', 'error', 'th'] | False |
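The usual guard in training scripts, sketched with an assumed `training_args` in scope:

```python
import transformers

if is_main_process(training_args.local_rank):        # training_args assumed in scope
    transformers.utils.logging.set_verbosity_info()  # verbose logs on rank 0 only
```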
total_processes_number | (local_rank) |
Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs.
|
Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs.
| def total_processes_number(local_rank):
"""
Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs.
"""
if is_torch_tpu_available():
import torch_xla.core.xla_model as xm
return xm.xrt_world_size()
elif is_sagemaker_distributed_available():
import smdistributed.dataparallel.torch.distributed as dist
return dist.get_world_size()
elif local_rank != -1 and is_torch_available():
import torch
return torch.distributed.get_world_size()
return 1 | [
"def",
"total_processes_number",
"(",
"local_rank",
")",
":",
"if",
"is_torch_tpu_available",
"(",
")",
":",
"import",
"torch_xla",
".",
"core",
".",
"xla_model",
"as",
"xm",
"return",
"xm",
".",
"xrt_world_size",
"(",
")",
"elif",
"is_sagemaker_distributed_available",
"(",
")",
":",
"import",
"smdistributed",
".",
"dataparallel",
".",
"torch",
".",
"distributed",
"as",
"dist",
"return",
"dist",
".",
"get_world_size",
"(",
")",
"elif",
"local_rank",
"!=",
"-",
"1",
"and",
"is_torch_available",
"(",
")",
":",
"import",
"torch",
"return",
"torch",
".",
"distributed",
".",
"get_world_size",
"(",
")",
"return",
"1"
] | [
208,
0
] | [
224,
12
] | python | en | ['en', 'error', 'th'] | False |
speed_metrics | (split, start_time, num_samples=None) |
Measure and return speed performance metrics.
This function requires a time snapshot `start_time` before the operation to be measured starts and this function
should be run immediately after the operation to be measured has completed.
Args:
- split: name to prefix metric (like train, eval, test...)
- start_time: operation start time
- num_samples: number of samples processed
|
Measure and return speed performance metrics. | def speed_metrics(split, start_time, num_samples=None):
"""
Measure and return speed performance metrics.
This function requires a time snapshot `start_time` before the operation to be measured starts and this function
should be run immediately after the operation to be measured has completed.
Args:
- split: name to prefix metric (like train, eval, test...)
- start_time: operation start time
- num_samples: number of samples processed
"""
runtime = time.time() - start_time
result = {f"{split}_runtime": round(runtime, 4)}
if num_samples is not None:
samples_per_second = 1 / (runtime / num_samples)
result[f"{split}_samples_per_second"] = round(samples_per_second, 3)
return result | [
"def",
"speed_metrics",
"(",
"split",
",",
"start_time",
",",
"num_samples",
"=",
"None",
")",
":",
"runtime",
"=",
"time",
".",
"time",
"(",
")",
"-",
"start_time",
"result",
"=",
"{",
"f\"{split}_runtime\"",
":",
"round",
"(",
"runtime",
",",
"4",
")",
"}",
"if",
"num_samples",
"is",
"not",
"None",
":",
"samples_per_second",
"=",
"1",
"/",
"(",
"runtime",
"/",
"num_samples",
")",
"result",
"[",
"f\"{split}_samples_per_second\"",
"]",
"=",
"round",
"(",
"samples_per_second",
",",
"3",
")",
"return",
"result"
] | [
227,
0
] | [
245,
17
] | python | en | ['en', 'error', 'th'] | False |
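Usage sketch: snapshot the clock before the measured operation and call this right after it completes; the numbers below are illustrative.

```python
import time

start = time.time()
# ... run an evaluation pass over, say, 1000 samples ...
metrics = speed_metrics("eval", start, num_samples=1000)
# e.g. {"eval_runtime": 12.5034, "eval_samples_per_second": 79.978}
```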
denumpify_detensorize | (metrics) |
Recursively calls `.item()` on the element of the dictionary passed
|
Recursively calls `.item()` on the element of the dictionary passed
| def denumpify_detensorize(metrics):
"""
Recursively calls `.item()` on the element of the dictionary passed
"""
if isinstance(metrics, (list, tuple)):
return type(metrics)(denumpify_detensorize(m) for m in metrics)
elif isinstance(metrics, dict):
return type(metrics)({k: denumpify_detensorize(v) for k, v in metrics.items()})
elif isinstance(metrics, np.generic):
return metrics.item()
elif is_torch_available() and isinstance(metrics, torch.Tensor) and metrics.numel() == 1:
return metrics.item()
return metrics | [
"def",
"denumpify_detensorize",
"(",
"metrics",
")",
":",
"if",
"isinstance",
"(",
"metrics",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"return",
"type",
"(",
"metrics",
")",
"(",
"denumpify_detensorize",
"(",
"m",
")",
"for",
"m",
"in",
"metrics",
")",
"elif",
"isinstance",
"(",
"metrics",
",",
"dict",
")",
":",
"return",
"type",
"(",
"metrics",
")",
"(",
"{",
"k",
":",
"denumpify_detensorize",
"(",
"v",
")",
"for",
"k",
",",
"v",
"in",
"metrics",
".",
"items",
"(",
")",
"}",
")",
"elif",
"isinstance",
"(",
"metrics",
",",
"np",
".",
"generic",
")",
":",
"return",
"metrics",
".",
"item",
"(",
")",
"elif",
"is_torch_available",
"(",
")",
"and",
"isinstance",
"(",
"metrics",
",",
"torch",
".",
"Tensor",
")",
"and",
"metrics",
".",
"numel",
"(",
")",
"==",
"1",
":",
"return",
"metrics",
".",
"item",
"(",
")",
"return",
"metrics"
] | [
445,
0
] | [
457,
18
] | python | en | ['en', 'error', 'th'] | False |
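A small example of the recursive conversion: with NumPy scalars nested inside containers, everything comes back as plain Python numbers while the container types are preserved.

```python
import numpy as np

raw = {"acc": np.float64(0.91), "counts": (np.int64(3), np.int64(7))}
denumpify_detensorize(raw)  # -> {"acc": 0.91, "counts": (3, 7)}
```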
TrainerMemoryTracker.derive_stage | (self) | derives the stage/caller name automatically | derives the stage/caller name automatically | def derive_stage(self):
""" derives the stage/caller name automatically """
caller = inspect.currentframe().f_back.f_back.f_code.co_name
if caller in self.stages:
return self.stages[caller]
else:
raise ValueError(
f"was called from {caller}, but only expect to be called from one of {self.stages.keys()}"
) | [
"def",
"derive_stage",
"(",
"self",
")",
":",
"caller",
"=",
"inspect",
".",
"currentframe",
"(",
")",
".",
"f_back",
".",
"f_back",
".",
"f_code",
".",
"co_name",
"if",
"caller",
"in",
"self",
".",
"stages",
":",
"return",
"self",
".",
"stages",
"[",
"caller",
"]",
"else",
":",
"raise",
"ValueError",
"(",
"f\"was called from {caller}, but only expect to be called from one of {self.stages.keys()}\"",
")"
] | [
314,
4
] | [
322,
13
] | python | en | ['en', 'en', 'en'] | True |
TrainerMemoryTracker.cpu_mem_used | (self) | get resident set size memory for the current process | get resident set size memory for the current process | def cpu_mem_used(self):
""" get resident set size memory for the current process """
return self.process.memory_info().rss | [
"def",
"cpu_mem_used",
"(",
"self",
")",
":",
"return",
"self",
".",
"process",
".",
"memory_info",
"(",
")",
".",
"rss"
] | [
324,
4
] | [
326,
45
] | python | en | ['en', 'en', 'en'] | True |
TrainerMemoryTracker.start | (self) | start tracking for the caller's stage | start tracking for the caller's stage | def start(self):
""" start tracking for the caller's stage """
if self.skip_memory_metrics:
return
stage = self.derive_stage()
# deal with nested calls of eval during train - simply ignore those
if self.cur_stage is not None and self.cur_stage != stage:
return
self.cur_stage = stage
gc.collect()
if self.torch is not None:
self.torch.cuda.reset_peak_memory_stats()
self.torch.cuda.empty_cache()
# gpu
if self.torch is not None:
self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated()
# cpu
self.cpu_mem_used_at_start = self.cpu_mem_used()
self.peak_monitoring = True
peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
peak_monitor_thread.daemon = True
peak_monitor_thread.start() | [
"def",
"start",
"(",
"self",
")",
":",
"if",
"self",
".",
"skip_memory_metrics",
":",
"return",
"stage",
"=",
"self",
".",
"derive_stage",
"(",
")",
"# deal with nested calls of eval during train - simply ignore those",
"if",
"self",
".",
"cur_stage",
"is",
"not",
"None",
"and",
"self",
".",
"cur_stage",
"!=",
"stage",
":",
"return",
"self",
".",
"cur_stage",
"=",
"stage",
"gc",
".",
"collect",
"(",
")",
"if",
"self",
".",
"torch",
"is",
"not",
"None",
":",
"self",
".",
"torch",
".",
"cuda",
".",
"reset_peak_memory_stats",
"(",
")",
"self",
".",
"torch",
".",
"cuda",
".",
"empty_cache",
"(",
")",
"# gpu",
"if",
"self",
".",
"torch",
"is",
"not",
"None",
":",
"self",
".",
"gpu_mem_used_at_start",
"=",
"self",
".",
"torch",
".",
"cuda",
".",
"memory_allocated",
"(",
")",
"# cpu",
"self",
".",
"cpu_mem_used_at_start",
"=",
"self",
".",
"cpu_mem_used",
"(",
")",
"self",
".",
"peak_monitoring",
"=",
"True",
"peak_monitor_thread",
"=",
"threading",
".",
"Thread",
"(",
"target",
"=",
"self",
".",
"peak_monitor_func",
")",
"peak_monitor_thread",
".",
"daemon",
"=",
"True",
"peak_monitor_thread",
".",
"start",
"(",
")"
] | [
340,
4
] | [
368,
35
] | python | en | ['en', 'en', 'en'] | True |
TrainerMemoryTracker.stop | (self, stage) | stop tracking for the passed stage | stop tracking for the passed stage | def stop(self, stage):
""" stop tracking for the passed stage """
# deal with nested calls of eval during train - simply ignore those
if self.cur_stage is not None and self.cur_stage != stage:
return
# this sends a signal to peak_monitor_func to complete its loop
self.peak_monitoring = False
# first ensure all objects get collected and their memory is freed
gc.collect()
if self.torch is not None:
self.torch.cuda.empty_cache()
# concepts:
# - alloc_delta: the difference of allocated memory between the end and the start
# - peaked_delta: the difference between the peak memory and the current memory
# in order to know how much memory the measured code consumed one needs to sum these two
# gpu
if self.torch is not None:
self.gpu_mem_used_now = self.torch.cuda.memory_allocated()
self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated()
self.gpu[self.cur_stage] = dict(
alloc=(self.gpu_mem_used_now - self.gpu_mem_used_at_start),
peaked=max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now),
)
# cpu
self.cpu_mem_used_now = self.cpu_mem_used()
self.cpu[self.cur_stage] = dict(
alloc=(self.cpu_mem_used_now - self.cpu_mem_used_at_start),
peaked=max(0, self.cpu_mem_used_peak - self.cpu_mem_used_now),
)
# reset - cycle finished
self.cur_stage = None | [
"def",
"stop",
"(",
"self",
",",
"stage",
")",
":",
"# deal with nested calls of eval during train - simply ignore those",
"if",
"self",
".",
"cur_stage",
"is",
"not",
"None",
"and",
"self",
".",
"cur_stage",
"!=",
"stage",
":",
"return",
"# this sends a signal to peak_monitor_func to complete its loop",
"self",
".",
"peak_monitoring",
"=",
"False",
"# first ensure all objects get collected and their memory is freed",
"gc",
".",
"collect",
"(",
")",
"if",
"self",
".",
"torch",
"is",
"not",
"None",
":",
"self",
".",
"torch",
".",
"cuda",
".",
"empty_cache",
"(",
")",
"# concepts:",
"# - alloc_delta: the difference of allocated memory between the end and the start",
"# - peaked_delta: the difference between the peak memory and the current memory",
"# in order to know how much memory the measured code consumed one needs to sum these two",
"# gpu",
"if",
"self",
".",
"torch",
"is",
"not",
"None",
":",
"self",
".",
"gpu_mem_used_now",
"=",
"self",
".",
"torch",
".",
"cuda",
".",
"memory_allocated",
"(",
")",
"self",
".",
"gpu_mem_used_peak",
"=",
"self",
".",
"torch",
".",
"cuda",
".",
"max_memory_allocated",
"(",
")",
"self",
".",
"gpu",
"[",
"self",
".",
"cur_stage",
"]",
"=",
"dict",
"(",
"alloc",
"=",
"(",
"self",
".",
"gpu_mem_used_now",
"-",
"self",
".",
"gpu_mem_used_at_start",
")",
",",
"peaked",
"=",
"max",
"(",
"0",
",",
"self",
".",
"gpu_mem_used_peak",
"-",
"self",
".",
"gpu_mem_used_now",
")",
",",
")",
"# cpu",
"self",
".",
"cpu_mem_used_now",
"=",
"self",
".",
"cpu_mem_used",
"(",
")",
"self",
".",
"cpu",
"[",
"self",
".",
"cur_stage",
"]",
"=",
"dict",
"(",
"alloc",
"=",
"(",
"self",
".",
"cpu_mem_used_now",
"-",
"self",
".",
"cpu_mem_used_at_start",
")",
",",
"peaked",
"=",
"max",
"(",
"0",
",",
"self",
".",
"cpu_mem_used_peak",
"-",
"self",
".",
"cpu_mem_used_now",
")",
",",
")",
"# reset - cycle finished",
"self",
".",
"cur_stage",
"=",
"None"
] | [
370,
4
] | [
408,
29
] | python | en | ['en', 'en', 'en'] | True |
TrainerMemoryTracker.update_metrics | (self, stage, metrics) | update the tracked metrics for the passed stage | update the tracked metrics for the passed stage | def update_metrics(self, stage, metrics):
"""update the tracked metrics for the passed stage"""
if self.skip_memory_metrics:
return
# deal with nested calls of eval during train - simply ignore those
if self.cur_stage is not None and self.cur_stage != stage:
return
# since we don't have a way to return init metrics, we push them into the first of train/val/predict
stages = [stage]
if not self.init_reported:
stages.insert(0, "init")
self.init_reported = True
for stage in stages:
for t in ["alloc", "peaked"]:
if stage in self.cpu and t in self.cpu[stage]:
metrics[f"{stage}_mem_cpu_{t}_delta"] = self.cpu[stage][t]
if self.torch is not None and stage in self.gpu and t in self.gpu[stage]:
metrics[f"{stage}_mem_gpu_{t}_delta"] = self.gpu[stage][t] | [
"def",
"update_metrics",
"(",
"self",
",",
"stage",
",",
"metrics",
")",
":",
"if",
"self",
".",
"skip_memory_metrics",
":",
"return",
"# deal with nested calls of eval during train - simply ignore those",
"if",
"self",
".",
"cur_stage",
"is",
"not",
"None",
"and",
"self",
".",
"cur_stage",
"!=",
"stage",
":",
"return",
"# since we don't have a way to return init metrics, we push them into the first of train/val/predict",
"stages",
"=",
"[",
"stage",
"]",
"if",
"not",
"self",
".",
"init_reported",
":",
"stages",
".",
"insert",
"(",
"0",
",",
"\"init\"",
")",
"self",
".",
"init_reported",
"=",
"True",
"for",
"stage",
"in",
"stages",
":",
"for",
"t",
"in",
"[",
"\"alloc\"",
",",
"\"peaked\"",
"]",
":",
"if",
"stage",
"in",
"self",
".",
"cpu",
"and",
"t",
"in",
"self",
".",
"cpu",
"[",
"stage",
"]",
":",
"metrics",
"[",
"f\"{stage}_mem_cpu_{t}_delta\"",
"]",
"=",
"self",
".",
"cpu",
"[",
"stage",
"]",
"[",
"t",
"]",
"if",
"self",
".",
"torch",
"is",
"not",
"None",
"and",
"stage",
"in",
"self",
".",
"gpu",
"and",
"t",
"in",
"self",
".",
"gpu",
"[",
"stage",
"]",
":",
"metrics",
"[",
"f\"{stage}_mem_gpu_{t}_delta\"",
"]",
"=",
"self",
".",
"gpu",
"[",
"stage",
"]",
"[",
"t",
"]"
] | [
410,
4
] | [
430,
78
] | python | en | ['en', 'en', 'en'] | True |
TrainerMemoryTracker.stop_and_update_metrics | (self, metrics=None) | combine stop + update in one call for simpler code | combine stop + update in one call for simpler code | def stop_and_update_metrics(self, metrics=None):
""" combine stop + update in one call for simpler code """
if self.skip_memory_metrics:
return
stage = self.derive_stage()
self.stop(stage)
# init doesn't have metrics to update so we just save that data for later stages to retrieve
if metrics is not None:
self.update_metrics(stage, metrics) | [
"def",
"stop_and_update_metrics",
"(",
"self",
",",
"metrics",
"=",
"None",
")",
":",
"if",
"self",
".",
"skip_memory_metrics",
":",
"return",
"stage",
"=",
"self",
".",
"derive_stage",
"(",
")",
"self",
".",
"stop",
"(",
"stage",
")",
"# init doesn't have metrics to update so we just save that data for later stages to retrieve",
"if",
"metrics",
"is",
"not",
"None",
":",
"self",
".",
"update_metrics",
"(",
"stage",
",",
"metrics",
")"
] | [
432,
4
] | [
442,
47
] | python | en | ['en', 'en', 'en'] | True |
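The tracker's intended lifecycle, reconstructed from the methods above; the host class and method body are assumptions, and `derive_stage()` requires these calls to come from a method whose name appears in the tracker's `stages` mapping (here, "evaluate").

```python
class Trainer:  # schematic host class, not the real transformers.Trainer
    def evaluate(self):
        self._memory_tracker.start()  # snapshots CPU/GPU memory, starts the peak thread
        metrics = {"eval_loss": 0.4}  # ... real evaluation work happens here ...
        self._memory_tracker.stop_and_update_metrics(metrics)
        # metrics now also carries e.g. "eval_mem_cpu_alloc_delta" and
        # "eval_mem_gpu_peaked_delta" (plus one-off "init_*" entries on first use).
        return metrics
```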
test_doorsense | (hass) | Test creation of a lock with doorsense and bridge. | Test creation of a lock with doorsense and bridge. | async def test_doorsense(hass):
"""Test creation of a lock with doorsense and bridge."""
lock_one = await _mock_lock_from_fixture(
hass, "get_lock.online_with_doorsense.json"
)
await _create_august_with_devices(hass, [lock_one])
binary_sensor_online_with_doorsense_name = hass.states.get(
"binary_sensor.online_with_doorsense_name_open"
)
assert binary_sensor_online_with_doorsense_name.state == STATE_ON
data = {ATTR_ENTITY_ID: "lock.online_with_doorsense_name"}
assert await hass.services.async_call(
LOCK_DOMAIN, SERVICE_UNLOCK, data, blocking=True
)
await hass.async_block_till_done()
binary_sensor_online_with_doorsense_name = hass.states.get(
"binary_sensor.online_with_doorsense_name_open"
)
assert binary_sensor_online_with_doorsense_name.state == STATE_ON
assert await hass.services.async_call(
LOCK_DOMAIN, SERVICE_LOCK, data, blocking=True
)
await hass.async_block_till_done()
binary_sensor_online_with_doorsense_name = hass.states.get(
"binary_sensor.online_with_doorsense_name_open"
)
assert binary_sensor_online_with_doorsense_name.state == STATE_OFF | [
"async",
"def",
"test_doorsense",
"(",
"hass",
")",
":",
"lock_one",
"=",
"await",
"_mock_lock_from_fixture",
"(",
"hass",
",",
"\"get_lock.online_with_doorsense.json\"",
")",
"await",
"_create_august_with_devices",
"(",
"hass",
",",
"[",
"lock_one",
"]",
")",
"binary_sensor_online_with_doorsense_name",
"=",
"hass",
".",
"states",
".",
"get",
"(",
"\"binary_sensor.online_with_doorsense_name_open\"",
")",
"assert",
"binary_sensor_online_with_doorsense_name",
".",
"state",
"==",
"STATE_ON",
"data",
"=",
"{",
"ATTR_ENTITY_ID",
":",
"\"lock.online_with_doorsense_name\"",
"}",
"assert",
"await",
"hass",
".",
"services",
".",
"async_call",
"(",
"LOCK_DOMAIN",
",",
"SERVICE_UNLOCK",
",",
"data",
",",
"blocking",
"=",
"True",
")",
"await",
"hass",
".",
"async_block_till_done",
"(",
")",
"binary_sensor_online_with_doorsense_name",
"=",
"hass",
".",
"states",
".",
"get",
"(",
"\"binary_sensor.online_with_doorsense_name_open\"",
")",
"assert",
"binary_sensor_online_with_doorsense_name",
".",
"state",
"==",
"STATE_ON",
"assert",
"await",
"hass",
".",
"services",
".",
"async_call",
"(",
"LOCK_DOMAIN",
",",
"SERVICE_LOCK",
",",
"data",
",",
"blocking",
"=",
"True",
")",
"await",
"hass",
".",
"async_block_till_done",
"(",
")",
"binary_sensor_online_with_doorsense_name",
"=",
"hass",
".",
"states",
".",
"get",
"(",
"\"binary_sensor.online_with_doorsense_name_open\"",
")",
"assert",
"binary_sensor_online_with_doorsense_name",
".",
"state",
"==",
"STATE_OFF"
] | [
20,
0
] | [
51,
70
] | python | en | ['en', 'en', 'en'] | True |