identifier (string, length 1–155) | parameters (string, length 2–6.09k) | docstring (string, length 11–63.4k) | docstring_summary (string, length 0–63.4k) | function (string, length 29–99.8k) | function_tokens (sequence) | start_point (sequence) | end_point (sequence) | language (string, 1 class) | docstring_language (string, length 2–7) | docstring_language_predictions (string, length 18–23) | is_langid_reliable (string, 2 classes) |
---|---|---|---|---|---|---|---|---|---|---|---|
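Each row below is one record of the schema above. For orientation, here is a minimal sketch of the first record expressed as a plain Python dictionary; the `function` and `function_tokens` values are abridged from the first row below, the dictionary form is illustrative rather than the dataset's actual storage format, and the (line, column) reading of `start_point`/`end_point` is an assumption.

```python
# Illustrative sketch of a single record following the column schema above.
# Values are abridged from the first row below; this is not the dataset's on-disk format.
record = {
    "identifier": "FirmataBinaryDigitalInput.latch_callback",
    "parameters": "(self, data: list)",
    "docstring": "Update pin state on callback.",
    "docstring_summary": "Update pin state on callback.",
    "function": "async def latch_callback(self, data: list) -> None: ...",    # abridged
    "function_tokens": ["async", "def", "latch_callback", "(", "self", ")"],  # abridged
    "start_point": [184, 4],    # likely the (line, column) where the function starts in its source file
    "end_point": [200, 32],     # likely the (line, column) where the function ends
    "language": "python",
    "docstring_language": "en",
    "docstring_language_predictions": "['en', 'en', 'en']",
    "is_langid_reliable": "True",
}
```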
FirmataBinaryDigitalInput.latch_callback | (self, data: list) | Update pin state on callback. | Update pin state on callback. | async def latch_callback(self, data: list) -> None:
"""Update pin state on callback."""
if data[1] != self._firmata_pin:
return
_LOGGER.debug(
"Received latch %d for digital input pin %d on board %s",
data[2],
self._firmata_pin,
self.board.name,
)
new_state = bool(data[2])
if self._negate:
new_state = not new_state
if self._state == new_state:
return
self._state = new_state
self._forward_callback() | [
"async",
"def",
"latch_callback",
"(",
"self",
",",
"data",
":",
"list",
")",
"->",
"None",
":",
"if",
"data",
"[",
"1",
"]",
"!=",
"self",
".",
"_firmata_pin",
":",
"return",
"_LOGGER",
".",
"debug",
"(",
"\"Received latch %d for digital input pin %d on board %s\"",
",",
"data",
"[",
"2",
"]",
",",
"self",
".",
"_firmata_pin",
",",
"self",
".",
"board",
".",
"name",
",",
")",
"new_state",
"=",
"bool",
"(",
"data",
"[",
"2",
"]",
")",
"if",
"self",
".",
"_negate",
":",
"new_state",
"=",
"not",
"new_state",
"if",
"self",
".",
"_state",
"==",
"new_state",
":",
"return",
"self",
".",
"_state",
"=",
"new_state",
"self",
".",
"_forward_callback",
"(",
")"
] | [
184,
4
] | [
200,
32
] | python | en | ['en', 'en', 'en'] | True |
FirmataAnalogInput.__init__ | (
self, board: FirmataBoard, pin: FirmataPinType, pin_mode: str, differential: int
) | Initialize the analog input pin. | Initialize the analog input pin. | def __init__(
self, board: FirmataBoard, pin: FirmataPinType, pin_mode: str, differential: int
):
"""Initialize the analog input pin."""
self._differential = differential
self._forward_callback = None
super().__init__(board, pin, pin_mode) | [
"def",
"__init__",
"(",
"self",
",",
"board",
":",
"FirmataBoard",
",",
"pin",
":",
"FirmataPinType",
",",
"pin_mode",
":",
"str",
",",
"differential",
":",
"int",
")",
":",
"self",
".",
"_differential",
"=",
"differential",
"self",
".",
"_forward_callback",
"=",
"None",
"super",
"(",
")",
".",
"__init__",
"(",
"board",
",",
"pin",
",",
"pin_mode",
")"
] | [
206,
4
] | [
212,
46
] | python | en | ['en', 'en', 'en'] | True |
FirmataAnalogInput.start_pin | (self, forward_callback: Callable[[], None]) | Get initial state and start reporting a pin. | Get initial state and start reporting a pin. | async def start_pin(self, forward_callback: Callable[[], None]) -> None:
"""Get initial state and start reporting a pin."""
_LOGGER.debug(
"Starting reporting updates for analog input pin %s on board %s",
self._pin,
self.board.name,
)
self._forward_callback = forward_callback
api = self.board.api
# Only PIN_MODE_ANALOG_INPUT mode is supported as sensor input
await api.set_pin_mode_analog_input(
self._analog_pin, self.latch_callback, self._differential
)
self._state = (await self.board.api.analog_read(self._analog_pin))[0]
self._forward_callback() | [
"async",
"def",
"start_pin",
"(",
"self",
",",
"forward_callback",
":",
"Callable",
"[",
"[",
"]",
",",
"None",
"]",
")",
"->",
"None",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Starting reporting updates for analog input pin %s on board %s\"",
",",
"self",
".",
"_pin",
",",
"self",
".",
"board",
".",
"name",
",",
")",
"self",
".",
"_forward_callback",
"=",
"forward_callback",
"api",
"=",
"self",
".",
"board",
".",
"api",
"# Only PIN_MODE_ANALOG_INPUT mode is supported as sensor input",
"await",
"api",
".",
"set_pin_mode_analog_input",
"(",
"self",
".",
"_analog_pin",
",",
"self",
".",
"latch_callback",
",",
"self",
".",
"_differential",
")",
"self",
".",
"_state",
"=",
"(",
"await",
"self",
".",
"board",
".",
"api",
".",
"analog_read",
"(",
"self",
".",
"_analog_pin",
")",
")",
"[",
"0",
"]",
"self",
".",
"_forward_callback",
"(",
")"
] | [
214,
4
] | [
230,
32
] | python | en | ['en', 'en', 'en'] | True |
FirmataAnalogInput.stop_pin | (self) | Stop reporting analog input pin. | Stop reporting analog input pin. | async def stop_pin(self) -> None:
"""Stop reporting analog input pin."""
_LOGGER.debug(
"Stopping reporting updates for analog input pin %s on board %s",
self._pin,
self.board.name,
)
api = self.board.api
await api.disable_analog_reporting(self._analog_pin) | [
"async",
"def",
"stop_pin",
"(",
"self",
")",
"->",
"None",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Stopping reporting updates for analog input pin %s on board %s\"",
",",
"self",
".",
"_pin",
",",
"self",
".",
"board",
".",
"name",
",",
")",
"api",
"=",
"self",
".",
"board",
".",
"api",
"await",
"api",
".",
"disable_analog_reporting",
"(",
"self",
".",
"_analog_pin",
")"
] | [
232,
4
] | [
240,
60
] | python | da | ['da', 'jv', 'en'] | False |
FirmataAnalogInput.state | (self) | Return sensor state. | Return sensor state. | def state(self) -> int:
"""Return sensor state."""
return self._state | [
"def",
"state",
"(",
"self",
")",
"->",
"int",
":",
"return",
"self",
".",
"_state"
] | [
243,
4
] | [
245,
26
] | python | en | ['en', 'bs', 'en'] | True |
FirmataAnalogInput.latch_callback | (self, data: list) | Update pin state on callback. | Update pin state on callback. | async def latch_callback(self, data: list) -> None:
"""Update pin state on callback."""
if data[1] != self._analog_pin:
return
_LOGGER.debug(
"Received latch %d for analog input pin %s on board %s",
data[2],
self._pin,
self.board.name,
)
new_state = data[2]
if self._state == new_state:
_LOGGER.debug("stopping")
return
self._state = new_state
self._forward_callback() | [
"async",
"def",
"latch_callback",
"(",
"self",
",",
"data",
":",
"list",
")",
"->",
"None",
":",
"if",
"data",
"[",
"1",
"]",
"!=",
"self",
".",
"_analog_pin",
":",
"return",
"_LOGGER",
".",
"debug",
"(",
"\"Received latch %d for analog input pin %s on board %s\"",
",",
"data",
"[",
"2",
"]",
",",
"self",
".",
"_pin",
",",
"self",
".",
"board",
".",
"name",
",",
")",
"new_state",
"=",
"data",
"[",
"2",
"]",
"if",
"self",
".",
"_state",
"==",
"new_state",
":",
"_LOGGER",
".",
"debug",
"(",
"\"stopping\"",
")",
"return",
"self",
".",
"_state",
"=",
"new_state",
"self",
".",
"_forward_callback",
"(",
")"
] | [
247,
4
] | [
262,
32
] | python | en | ['en', 'en', 'en'] | True |
_async_has_devices | (hass) | Return if there are devices that can be discovered. | Return if there are devices that can be discovered. | async def _async_has_devices(hass):
"""Return if there are devices that can be discovered."""
aehw4a1_ip_addresses = await AehW4a1().discovery()
return len(aehw4a1_ip_addresses) > 0 | [
"async",
"def",
"_async_has_devices",
"(",
"hass",
")",
":",
"aehw4a1_ip_addresses",
"=",
"await",
"AehW4a1",
"(",
")",
".",
"discovery",
"(",
")",
"return",
"len",
"(",
"aehw4a1_ip_addresses",
")",
">",
"0"
] | [
9,
0
] | [
12,
40
] | python | en | ['en', 'en', 'en'] | True |
setup | (hass, config) | Set up the Graphite feeder. | Set up the Graphite feeder. | def setup(hass, config):
"""Set up the Graphite feeder."""
conf = config[DOMAIN]
host = conf.get(CONF_HOST)
prefix = conf.get(CONF_PREFIX)
port = conf.get(CONF_PORT)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((host, port))
sock.shutdown(2)
_LOGGER.debug("Connection to Graphite possible")
except OSError:
_LOGGER.error("Not able to connect to Graphite")
return False
GraphiteFeeder(hass, host, port, prefix)
return True | [
"def",
"setup",
"(",
"hass",
",",
"config",
")",
":",
"conf",
"=",
"config",
"[",
"DOMAIN",
"]",
"host",
"=",
"conf",
".",
"get",
"(",
"CONF_HOST",
")",
"prefix",
"=",
"conf",
".",
"get",
"(",
"CONF_PREFIX",
")",
"port",
"=",
"conf",
".",
"get",
"(",
"CONF_PORT",
")",
"sock",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"try",
":",
"sock",
".",
"connect",
"(",
"(",
"host",
",",
"port",
")",
")",
"sock",
".",
"shutdown",
"(",
"2",
")",
"_LOGGER",
".",
"debug",
"(",
"\"Connection to Graphite possible\"",
")",
"except",
"OSError",
":",
"_LOGGER",
".",
"error",
"(",
"\"Not able to connect to Graphite\"",
")",
"return",
"False",
"GraphiteFeeder",
"(",
"hass",
",",
"host",
",",
"port",
",",
"prefix",
")",
"return",
"True"
] | [
41,
0
] | [
58,
15
] | python | en | ['en', 'pt', 'en'] | True |
GraphiteFeeder.__init__ | (self, hass, host, port, prefix) | Initialize the feeder. | Initialize the feeder. | def __init__(self, hass, host, port, prefix):
"""Initialize the feeder."""
super().__init__(daemon=True)
self._hass = hass
self._host = host
self._port = port
# rstrip any trailing dots in case they think they need it
self._prefix = prefix.rstrip(".")
self._queue = queue.Queue()
self._quit_object = object()
self._we_started = False
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, self.start_listen)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, self.shutdown)
hass.bus.listen(EVENT_STATE_CHANGED, self.event_listener)
_LOGGER.debug("Graphite feeding to %s:%i initialized", self._host, self._port) | [
"def",
"__init__",
"(",
"self",
",",
"hass",
",",
"host",
",",
"port",
",",
"prefix",
")",
":",
"super",
"(",
")",
".",
"__init__",
"(",
"daemon",
"=",
"True",
")",
"self",
".",
"_hass",
"=",
"hass",
"self",
".",
"_host",
"=",
"host",
"self",
".",
"_port",
"=",
"port",
"# rstrip any trailing dots in case they think they need it",
"self",
".",
"_prefix",
"=",
"prefix",
".",
"rstrip",
"(",
"\".\"",
")",
"self",
".",
"_queue",
"=",
"queue",
".",
"Queue",
"(",
")",
"self",
".",
"_quit_object",
"=",
"object",
"(",
")",
"self",
".",
"_we_started",
"=",
"False",
"hass",
".",
"bus",
".",
"listen_once",
"(",
"EVENT_HOMEASSISTANT_START",
",",
"self",
".",
"start_listen",
")",
"hass",
".",
"bus",
".",
"listen_once",
"(",
"EVENT_HOMEASSISTANT_STOP",
",",
"self",
".",
"shutdown",
")",
"hass",
".",
"bus",
".",
"listen",
"(",
"EVENT_STATE_CHANGED",
",",
"self",
".",
"event_listener",
")",
"_LOGGER",
".",
"debug",
"(",
"\"Graphite feeding to %s:%i initialized\"",
",",
"self",
".",
"_host",
",",
"self",
".",
"_port",
")"
] | [
64,
4
] | [
79,
86
] | python | en | ['en', 'en', 'en'] | True |
GraphiteFeeder.start_listen | (self, event) | Start event-processing thread. | Start event-processing thread. | def start_listen(self, event):
"""Start event-processing thread."""
_LOGGER.debug("Event processing thread started")
self._we_started = True
self.start() | [
"def",
"start_listen",
"(",
"self",
",",
"event",
")",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Event processing thread started\"",
")",
"self",
".",
"_we_started",
"=",
"True",
"self",
".",
"start",
"(",
")"
] | [
81,
4
] | [
85,
20
] | python | en | ['en', 'en', 'en'] | True |
GraphiteFeeder.shutdown | (self, event) | Signal shutdown of processing event. | Signal shutdown of processing event. | def shutdown(self, event):
"""Signal shutdown of processing event."""
_LOGGER.debug("Event processing signaled exit")
self._queue.put(self._quit_object) | [
"def",
"shutdown",
"(",
"self",
",",
"event",
")",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Event processing signaled exit\"",
")",
"self",
".",
"_queue",
".",
"put",
"(",
"self",
".",
"_quit_object",
")"
] | [
87,
4
] | [
90,
42
] | python | en | ['en', 'en', 'en'] | True |
GraphiteFeeder.event_listener | (self, event) | Queue an event for processing. | Queue an event for processing. | def event_listener(self, event):
"""Queue an event for processing."""
if self.is_alive() or not self._we_started:
_LOGGER.debug("Received event")
self._queue.put(event)
else:
_LOGGER.error("Graphite feeder thread has died, not queuing event") | [
"def",
"event_listener",
"(",
"self",
",",
"event",
")",
":",
"if",
"self",
".",
"is_alive",
"(",
")",
"or",
"not",
"self",
".",
"_we_started",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Received event\"",
")",
"self",
".",
"_queue",
".",
"put",
"(",
"event",
")",
"else",
":",
"_LOGGER",
".",
"error",
"(",
"\"Graphite feeder thread has died, not queuing event\"",
")"
] | [
92,
4
] | [
98,
79
] | python | en | ['en', 'en', 'en'] | True |
GraphiteFeeder._send_to_graphite | (self, data) | Send data to Graphite. | Send data to Graphite. | def _send_to_graphite(self, data):
"""Send data to Graphite."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(10)
sock.connect((self._host, self._port))
sock.sendall(data.encode("ascii"))
sock.send(b"\n")
sock.close() | [
"def",
"_send_to_graphite",
"(",
"self",
",",
"data",
")",
":",
"sock",
"=",
"socket",
".",
"socket",
"(",
"socket",
".",
"AF_INET",
",",
"socket",
".",
"SOCK_STREAM",
")",
"sock",
".",
"settimeout",
"(",
"10",
")",
"sock",
".",
"connect",
"(",
"(",
"self",
".",
"_host",
",",
"self",
".",
"_port",
")",
")",
"sock",
".",
"sendall",
"(",
"data",
".",
"encode",
"(",
"\"ascii\"",
")",
")",
"sock",
".",
"send",
"(",
"b\"\\n\"",
")",
"sock",
".",
"close",
"(",
")"
] | [
100,
4
] | [
107,
20
] | python | en | ['en', 'it', 'en'] | True |
GraphiteFeeder._report_attributes | (self, entity_id, new_state) | Report the attributes. | Report the attributes. | def _report_attributes(self, entity_id, new_state):
"""Report the attributes."""
now = time.time()
things = dict(new_state.attributes)
try:
things["state"] = state.state_as_number(new_state)
except ValueError:
pass
lines = [
"%s.%s.%s %f %i"
% (self._prefix, entity_id, key.replace(" ", "_"), value, now)
for key, value in things.items()
if isinstance(value, (float, int))
]
if not lines:
return
_LOGGER.debug("Sending to graphite: %s", lines)
try:
self._send_to_graphite("\n".join(lines))
except socket.gaierror:
_LOGGER.error("Unable to connect to host %s", self._host)
except OSError:
_LOGGER.exception("Failed to send data to graphite") | [
"def",
"_report_attributes",
"(",
"self",
",",
"entity_id",
",",
"new_state",
")",
":",
"now",
"=",
"time",
".",
"time",
"(",
")",
"things",
"=",
"dict",
"(",
"new_state",
".",
"attributes",
")",
"try",
":",
"things",
"[",
"\"state\"",
"]",
"=",
"state",
".",
"state_as_number",
"(",
"new_state",
")",
"except",
"ValueError",
":",
"pass",
"lines",
"=",
"[",
"\"%s.%s.%s %f %i\"",
"%",
"(",
"self",
".",
"_prefix",
",",
"entity_id",
",",
"key",
".",
"replace",
"(",
"\" \"",
",",
"\"_\"",
")",
",",
"value",
",",
"now",
")",
"for",
"key",
",",
"value",
"in",
"things",
".",
"items",
"(",
")",
"if",
"isinstance",
"(",
"value",
",",
"(",
"float",
",",
"int",
")",
")",
"]",
"if",
"not",
"lines",
":",
"return",
"_LOGGER",
".",
"debug",
"(",
"\"Sending to graphite: %s\"",
",",
"lines",
")",
"try",
":",
"self",
".",
"_send_to_graphite",
"(",
"\"\\n\"",
".",
"join",
"(",
"lines",
")",
")",
"except",
"socket",
".",
"gaierror",
":",
"_LOGGER",
".",
"error",
"(",
"\"Unable to connect to host %s\"",
",",
"self",
".",
"_host",
")",
"except",
"OSError",
":",
"_LOGGER",
".",
"exception",
"(",
"\"Failed to send data to graphite\"",
")"
] | [
109,
4
] | [
131,
64
] | python | en | ['en', 'en', 'en'] | True |
GraphiteFeeder.run | (self) | Run the process to export the data. | Run the process to export the data. | def run(self):
"""Run the process to export the data."""
while True:
event = self._queue.get()
if event == self._quit_object:
_LOGGER.debug("Event processing thread stopped")
self._queue.task_done()
return
if event.event_type == EVENT_STATE_CHANGED:
if not event.data.get("new_state"):
_LOGGER.debug(
"Skipping %s without new_state for %s",
event.event_type,
event.data["entity_id"],
)
self._queue.task_done()
continue
_LOGGER.debug(
"Processing STATE_CHANGED event for %s", event.data["entity_id"]
)
try:
self._report_attributes(
event.data["entity_id"], event.data["new_state"]
)
except Exception: # pylint: disable=broad-except
# Catch this so we can avoid the thread dying and
# make it visible.
_LOGGER.exception("Failed to process STATE_CHANGED event")
else:
_LOGGER.warning("Processing unexpected event type %s", event.event_type)
self._queue.task_done() | [
"def",
"run",
"(",
"self",
")",
":",
"while",
"True",
":",
"event",
"=",
"self",
".",
"_queue",
".",
"get",
"(",
")",
"if",
"event",
"==",
"self",
".",
"_quit_object",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Event processing thread stopped\"",
")",
"self",
".",
"_queue",
".",
"task_done",
"(",
")",
"return",
"if",
"event",
".",
"event_type",
"==",
"EVENT_STATE_CHANGED",
":",
"if",
"not",
"event",
".",
"data",
".",
"get",
"(",
"\"new_state\"",
")",
":",
"_LOGGER",
".",
"debug",
"(",
"\"Skipping %s without new_state for %s\"",
",",
"event",
".",
"event_type",
",",
"event",
".",
"data",
"[",
"\"entity_id\"",
"]",
",",
")",
"self",
".",
"_queue",
".",
"task_done",
"(",
")",
"continue",
"_LOGGER",
".",
"debug",
"(",
"\"Processing STATE_CHANGED event for %s\"",
",",
"event",
".",
"data",
"[",
"\"entity_id\"",
"]",
")",
"try",
":",
"self",
".",
"_report_attributes",
"(",
"event",
".",
"data",
"[",
"\"entity_id\"",
"]",
",",
"event",
".",
"data",
"[",
"\"new_state\"",
"]",
")",
"except",
"Exception",
":",
"# pylint: disable=broad-except",
"# Catch this so we can avoid the thread dying and",
"# make it visible.",
"_LOGGER",
".",
"exception",
"(",
"\"Failed to process STATE_CHANGED event\"",
")",
"else",
":",
"_LOGGER",
".",
"warning",
"(",
"\"Processing unexpected event type %s\"",
",",
"event",
".",
"event_type",
")",
"self",
".",
"_queue",
".",
"task_done",
"(",
")"
] | [
133,
4
] | [
165,
35
] | python | en | ['en', 'en', 'en'] | True |
async_setup_platform | (hass, config, async_add_entities, discovery_info=None) | Set up the Bravia TV platform. | Set up the Bravia TV platform. | async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Bravia TV platform."""
host = config[CONF_HOST]
bravia_config_file_path = hass.config.path(BRAVIA_CONFIG_FILE)
bravia_config = await hass.async_add_executor_job(
load_json, bravia_config_file_path
)
if not bravia_config:
_LOGGER.error(
"Configuration import failed, there is no bravia.conf file in the configuration folder"
)
return
while bravia_config:
# Import a configured TV
host_ip, host_config = bravia_config.popitem()
if host_ip == host:
pin = host_config[CONF_PIN]
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_HOST: host, CONF_PIN: pin},
)
)
return | [
"async",
"def",
"async_setup_platform",
"(",
"hass",
",",
"config",
",",
"async_add_entities",
",",
"discovery_info",
"=",
"None",
")",
":",
"host",
"=",
"config",
"[",
"CONF_HOST",
"]",
"bravia_config_file_path",
"=",
"hass",
".",
"config",
".",
"path",
"(",
"BRAVIA_CONFIG_FILE",
")",
"bravia_config",
"=",
"await",
"hass",
".",
"async_add_executor_job",
"(",
"load_json",
",",
"bravia_config_file_path",
")",
"if",
"not",
"bravia_config",
":",
"_LOGGER",
".",
"error",
"(",
"\"Configuration import failed, there is no bravia.conf file in the configuration folder\"",
")",
"return",
"while",
"bravia_config",
":",
"# Import a configured TV",
"host_ip",
",",
"host_config",
"=",
"bravia_config",
".",
"popitem",
"(",
")",
"if",
"host_ip",
"==",
"host",
":",
"pin",
"=",
"host_config",
"[",
"CONF_PIN",
"]",
"hass",
".",
"async_create_task",
"(",
"hass",
".",
"config_entries",
".",
"flow",
".",
"async_init",
"(",
"DOMAIN",
",",
"context",
"=",
"{",
"\"source\"",
":",
"SOURCE_IMPORT",
"}",
",",
"data",
"=",
"{",
"CONF_HOST",
":",
"host",
",",
"CONF_PIN",
":",
"pin",
"}",
",",
")",
")",
"return"
] | [
65,
0
] | [
92,
18
] | python | en | ['en', 'lv', 'en'] | True |
async_setup_entry | (hass, config_entry, async_add_entities) | Add BraviaTV entities from a config_entry. | Add BraviaTV entities from a config_entry. | async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add BraviaTV entities from a config_entry."""
ignored_sources = []
pin = config_entry.data[CONF_PIN]
unique_id = config_entry.unique_id
device_info = {
"identifiers": {(DOMAIN, unique_id)},
"name": DEFAULT_NAME,
"manufacturer": ATTR_MANUFACTURER,
"model": config_entry.title,
}
braviarc = hass.data[DOMAIN][config_entry.entry_id][BRAVIARC]
ignored_sources = config_entry.options.get(CONF_IGNORED_SOURCES, [])
async_add_entities(
[
BraviaTVDevice(
braviarc, DEFAULT_NAME, pin, unique_id, device_info, ignored_sources
)
]
) | [
"async",
"def",
"async_setup_entry",
"(",
"hass",
",",
"config_entry",
",",
"async_add_entities",
")",
":",
"ignored_sources",
"=",
"[",
"]",
"pin",
"=",
"config_entry",
".",
"data",
"[",
"CONF_PIN",
"]",
"unique_id",
"=",
"config_entry",
".",
"unique_id",
"device_info",
"=",
"{",
"\"identifiers\"",
":",
"{",
"(",
"DOMAIN",
",",
"unique_id",
")",
"}",
",",
"\"name\"",
":",
"DEFAULT_NAME",
",",
"\"manufacturer\"",
":",
"ATTR_MANUFACTURER",
",",
"\"model\"",
":",
"config_entry",
".",
"title",
",",
"}",
"braviarc",
"=",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
"[",
"config_entry",
".",
"entry_id",
"]",
"[",
"BRAVIARC",
"]",
"ignored_sources",
"=",
"config_entry",
".",
"options",
".",
"get",
"(",
"CONF_IGNORED_SOURCES",
",",
"[",
"]",
")",
"async_add_entities",
"(",
"[",
"BraviaTVDevice",
"(",
"braviarc",
",",
"DEFAULT_NAME",
",",
"pin",
",",
"unique_id",
",",
"device_info",
",",
"ignored_sources",
")",
"]",
")"
] | [
95,
0
] | [
117,
5
] | python | en | ['en', 'en', 'en'] | True |
BraviaTVDevice.__init__ | (self, client, name, pin, unique_id, device_info, ignored_sources) | Initialize the Bravia TV device. | Initialize the Bravia TV device. | def __init__(self, client, name, pin, unique_id, device_info, ignored_sources):
"""Initialize the Bravia TV device."""
self._pin = pin
self._braviarc = client
self._name = name
self._state = STATE_OFF
self._muted = False
self._program_name = None
self._channel_name = None
self._channel_number = None
self._source = None
self._source_list = []
self._original_content_list = []
self._content_mapping = {}
self._duration = None
self._content_uri = None
self._playing = False
self._start_date_time = None
self._program_media_type = None
self._min_volume = None
self._max_volume = None
self._volume = None
self._unique_id = unique_id
self._device_info = device_info
self._ignored_sources = ignored_sources
self._state_lock = asyncio.Lock() | [
"def",
"__init__",
"(",
"self",
",",
"client",
",",
"name",
",",
"pin",
",",
"unique_id",
",",
"device_info",
",",
"ignored_sources",
")",
":",
"self",
".",
"_pin",
"=",
"pin",
"self",
".",
"_braviarc",
"=",
"client",
"self",
".",
"_name",
"=",
"name",
"self",
".",
"_state",
"=",
"STATE_OFF",
"self",
".",
"_muted",
"=",
"False",
"self",
".",
"_program_name",
"=",
"None",
"self",
".",
"_channel_name",
"=",
"None",
"self",
".",
"_channel_number",
"=",
"None",
"self",
".",
"_source",
"=",
"None",
"self",
".",
"_source_list",
"=",
"[",
"]",
"self",
".",
"_original_content_list",
"=",
"[",
"]",
"self",
".",
"_content_mapping",
"=",
"{",
"}",
"self",
".",
"_duration",
"=",
"None",
"self",
".",
"_content_uri",
"=",
"None",
"self",
".",
"_playing",
"=",
"False",
"self",
".",
"_start_date_time",
"=",
"None",
"self",
".",
"_program_media_type",
"=",
"None",
"self",
".",
"_min_volume",
"=",
"None",
"self",
".",
"_max_volume",
"=",
"None",
"self",
".",
"_volume",
"=",
"None",
"self",
".",
"_unique_id",
"=",
"unique_id",
"self",
".",
"_device_info",
"=",
"device_info",
"self",
".",
"_ignored_sources",
"=",
"ignored_sources",
"self",
".",
"_state_lock",
"=",
"asyncio",
".",
"Lock",
"(",
")"
] | [
123,
4
] | [
149,
41
] | python | en | ['en', 'en', 'en'] | True |
BraviaTVDevice.async_update | (self) | Update TV info. | Update TV info. | async def async_update(self):
"""Update TV info."""
if self._state_lock.locked():
return
power_status = await self.hass.async_add_executor_job(
self._braviarc.get_power_status
)
if power_status != "off":
connected = await self.hass.async_add_executor_job(
self._braviarc.is_connected
)
if not connected:
try:
connected = await self.hass.async_add_executor_job(
self._braviarc.connect, self._pin, CLIENTID_PREFIX, NICKNAME
)
except NoIPControl:
_LOGGER.error("IP Control is disabled in the TV settings")
if not connected:
power_status = "off"
if power_status == "active":
self._state = STATE_ON
if (
await self._async_refresh_volume()
and await self._async_refresh_channels()
):
await self._async_refresh_playing_info()
return
self._state = STATE_OFF | [
"async",
"def",
"async_update",
"(",
"self",
")",
":",
"if",
"self",
".",
"_state_lock",
".",
"locked",
"(",
")",
":",
"return",
"power_status",
"=",
"await",
"self",
".",
"hass",
".",
"async_add_executor_job",
"(",
"self",
".",
"_braviarc",
".",
"get_power_status",
")",
"if",
"power_status",
"!=",
"\"off\"",
":",
"connected",
"=",
"await",
"self",
".",
"hass",
".",
"async_add_executor_job",
"(",
"self",
".",
"_braviarc",
".",
"is_connected",
")",
"if",
"not",
"connected",
":",
"try",
":",
"connected",
"=",
"await",
"self",
".",
"hass",
".",
"async_add_executor_job",
"(",
"self",
".",
"_braviarc",
".",
"connect",
",",
"self",
".",
"_pin",
",",
"CLIENTID_PREFIX",
",",
"NICKNAME",
")",
"except",
"NoIPControl",
":",
"_LOGGER",
".",
"error",
"(",
"\"IP Control is disabled in the TV settings\"",
")",
"if",
"not",
"connected",
":",
"power_status",
"=",
"\"off\"",
"if",
"power_status",
"==",
"\"active\"",
":",
"self",
".",
"_state",
"=",
"STATE_ON",
"if",
"(",
"await",
"self",
".",
"_async_refresh_volume",
"(",
")",
"and",
"await",
"self",
".",
"_async_refresh_channels",
"(",
")",
")",
":",
"await",
"self",
".",
"_async_refresh_playing_info",
"(",
")",
"return",
"self",
".",
"_state",
"=",
"STATE_OFF"
] | [
151,
4
] | [
182,
31
] | python | co | ['en', 'co', 'it'] | False |
BraviaTVDevice._get_source | (self) | Return the name of the source. | Return the name of the source. | def _get_source(self):
"""Return the name of the source."""
for key, value in self._content_mapping.items():
if value == self._content_uri:
return key | [
"def",
"_get_source",
"(",
"self",
")",
":",
"for",
"key",
",",
"value",
"in",
"self",
".",
"_content_mapping",
".",
"items",
"(",
")",
":",
"if",
"value",
"==",
"self",
".",
"_content_uri",
":",
"return",
"key"
] | [
184,
4
] | [
188,
26
] | python | en | ['en', 'en', 'en'] | True |
BraviaTVDevice._async_refresh_volume | (self) | Refresh volume information. | Refresh volume information. | async def _async_refresh_volume(self):
"""Refresh volume information."""
volume_info = await self.hass.async_add_executor_job(
self._braviarc.get_volume_info
)
if volume_info is not None:
self._volume = volume_info.get("volume")
self._min_volume = volume_info.get("minVolume")
self._max_volume = volume_info.get("maxVolume")
self._muted = volume_info.get("mute")
return True
return False | [
"async",
"def",
"_async_refresh_volume",
"(",
"self",
")",
":",
"volume_info",
"=",
"await",
"self",
".",
"hass",
".",
"async_add_executor_job",
"(",
"self",
".",
"_braviarc",
".",
"get_volume_info",
")",
"if",
"volume_info",
"is",
"not",
"None",
":",
"self",
".",
"_volume",
"=",
"volume_info",
".",
"get",
"(",
"\"volume\"",
")",
"self",
".",
"_min_volume",
"=",
"volume_info",
".",
"get",
"(",
"\"minVolume\"",
")",
"self",
".",
"_max_volume",
"=",
"volume_info",
".",
"get",
"(",
"\"maxVolume\"",
")",
"self",
".",
"_muted",
"=",
"volume_info",
".",
"get",
"(",
"\"mute\"",
")",
"return",
"True",
"return",
"False"
] | [
190,
4
] | [
201,
20
] | python | de | ['nl', 'de', 'en'] | False |
BraviaTVDevice._async_refresh_channels | (self) | Refresh source and channels list. | Refresh source and channels list. | async def _async_refresh_channels(self):
"""Refresh source and channels list."""
if not self._source_list:
self._content_mapping = await self.hass.async_add_executor_job(
self._braviarc.load_source_list
)
self._source_list = []
if not self._content_mapping:
return False
for key in self._content_mapping:
if key not in self._ignored_sources:
self._source_list.append(key)
return True | [
"async",
"def",
"_async_refresh_channels",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"_source_list",
":",
"self",
".",
"_content_mapping",
"=",
"await",
"self",
".",
"hass",
".",
"async_add_executor_job",
"(",
"self",
".",
"_braviarc",
".",
"load_source_list",
")",
"self",
".",
"_source_list",
"=",
"[",
"]",
"if",
"not",
"self",
".",
"_content_mapping",
":",
"return",
"False",
"for",
"key",
"in",
"self",
".",
"_content_mapping",
":",
"if",
"key",
"not",
"in",
"self",
".",
"_ignored_sources",
":",
"self",
".",
"_source_list",
".",
"append",
"(",
"key",
")",
"return",
"True"
] | [
203,
4
] | [
215,
19
] | python | en | ['en', 'en', 'en'] | True |
BraviaTVDevice._async_refresh_playing_info | (self) | Refresh Playing information. | Refresh Playing information. | async def _async_refresh_playing_info(self):
"""Refresh Playing information."""
playing_info = await self.hass.async_add_executor_job(
self._braviarc.get_playing_info
)
self._program_name = playing_info.get("programTitle")
self._channel_name = playing_info.get("title")
self._program_media_type = playing_info.get("programMediaType")
self._channel_number = playing_info.get("dispNum")
self._content_uri = playing_info.get("uri")
self._source = self._get_source()
self._duration = playing_info.get("durationSec")
self._start_date_time = playing_info.get("startDateTime")
if not playing_info:
self._channel_name = "App" | [
"async",
"def",
"_async_refresh_playing_info",
"(",
"self",
")",
":",
"playing_info",
"=",
"await",
"self",
".",
"hass",
".",
"async_add_executor_job",
"(",
"self",
".",
"_braviarc",
".",
"get_playing_info",
")",
"self",
".",
"_program_name",
"=",
"playing_info",
".",
"get",
"(",
"\"programTitle\"",
")",
"self",
".",
"_channel_name",
"=",
"playing_info",
".",
"get",
"(",
"\"title\"",
")",
"self",
".",
"_program_media_type",
"=",
"playing_info",
".",
"get",
"(",
"\"programMediaType\"",
")",
"self",
".",
"_channel_number",
"=",
"playing_info",
".",
"get",
"(",
"\"dispNum\"",
")",
"self",
".",
"_content_uri",
"=",
"playing_info",
".",
"get",
"(",
"\"uri\"",
")",
"self",
".",
"_source",
"=",
"self",
".",
"_get_source",
"(",
")",
"self",
".",
"_duration",
"=",
"playing_info",
".",
"get",
"(",
"\"durationSec\"",
")",
"self",
".",
"_start_date_time",
"=",
"playing_info",
".",
"get",
"(",
"\"startDateTime\"",
")",
"if",
"not",
"playing_info",
":",
"self",
".",
"_channel_name",
"=",
"\"App\""
] | [
217,
4
] | [
231,
38
] | python | en | ['en', 'en', 'en'] | True |
BraviaTVDevice.name | (self) | Return the name of the device. | Return the name of the device. | def name(self):
"""Return the name of the device."""
return self._name | [
"def",
"name",
"(",
"self",
")",
":",
"return",
"self",
".",
"_name"
] | [
234,
4
] | [
236,
25
] | python | en | ['en', 'en', 'en'] | True |
BraviaTVDevice.device_class | (self) | Set the device class to TV. | Set the device class to TV. | def device_class(self):
"""Set the device class to TV."""
return DEVICE_CLASS_TV | [
"def",
"device_class",
"(",
"self",
")",
":",
"return",
"DEVICE_CLASS_TV"
] | [
239,
4
] | [
241,
30
] | python | en | ['en', 'en', 'en'] | True |
BraviaTVDevice.unique_id | (self) | Return a unique_id for this entity. | Return a unique_id for this entity. | def unique_id(self):
"""Return a unique_id for this entity."""
return self._unique_id | [
"def",
"unique_id",
"(",
"self",
")",
":",
"return",
"self",
".",
"_unique_id"
] | [
244,
4
] | [
246,
30
] | python | en | ['en', 'en', 'en'] | True |
BraviaTVDevice.device_info | (self) | Return the device info. | Return the device info. | def device_info(self):
"""Return the device info."""
return self._device_info | [
"def",
"device_info",
"(",
"self",
")",
":",
"return",
"self",
".",
"_device_info"
] | [
249,
4
] | [
251,
32
] | python | en | ['en', 'en', 'en'] | True |
BraviaTVDevice.state | (self) | Return the state of the device. | Return the state of the device. | def state(self):
"""Return the state of the device."""
return self._state | [
"def",
"state",
"(",
"self",
")",
":",
"return",
"self",
".",
"_state"
] | [
254,
4
] | [
256,
26
] | python | en | ['en', 'en', 'en'] | True |
BraviaTVDevice.source | (self) | Return the current input source. | Return the current input source. | def source(self):
"""Return the current input source."""
return self._source | [
"def",
"source",
"(",
"self",
")",
":",
"return",
"self",
".",
"_source"
] | [
259,
4
] | [
261,
27
] | python | en | ['en', 'en', 'en'] | True |
BraviaTVDevice.source_list | (self) | List of available input sources. | List of available input sources. | def source_list(self):
"""List of available input sources."""
return self._source_list | [
"def",
"source_list",
"(",
"self",
")",
":",
"return",
"self",
".",
"_source_list"
] | [
264,
4
] | [
266,
32
] | python | en | ['en', 'en', 'en'] | True |
BraviaTVDevice.volume_level | (self) | Volume level of the media player (0..1). | Volume level of the media player (0..1). | def volume_level(self):
"""Volume level of the media player (0..1)."""
if self._volume is not None:
return self._volume / 100
return None | [
"def",
"volume_level",
"(",
"self",
")",
":",
"if",
"self",
".",
"_volume",
"is",
"not",
"None",
":",
"return",
"self",
".",
"_volume",
"/",
"100",
"return",
"None"
] | [
269,
4
] | [
273,
19
] | python | en | ['en', 'en', 'en'] | True |
BraviaTVDevice.is_volume_muted | (self) | Boolean if volume is currently muted. | Boolean if volume is currently muted. | def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted | [
"def",
"is_volume_muted",
"(",
"self",
")",
":",
"return",
"self",
".",
"_muted"
] | [
276,
4
] | [
278,
26
] | python | en | ['en', 'en', 'en'] | True |
BraviaTVDevice.supported_features | (self) | Flag media player features that are supported. | Flag media player features that are supported. | def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_BRAVIA | [
"def",
"supported_features",
"(",
"self",
")",
":",
"return",
"SUPPORT_BRAVIA"
] | [
281,
4
] | [
283,
29
] | python | en | ['en', 'en', 'en'] | True |
BraviaTVDevice.media_title | (self) | Title of current playing media. | Title of current playing media. | def media_title(self):
"""Title of current playing media."""
return_value = None
if self._channel_name is not None:
return_value = self._channel_name
if self._program_name is not None:
return_value = f"{return_value}: {self._program_name}"
return return_value | [
"def",
"media_title",
"(",
"self",
")",
":",
"return_value",
"=",
"None",
"if",
"self",
".",
"_channel_name",
"is",
"not",
"None",
":",
"return_value",
"=",
"self",
".",
"_channel_name",
"if",
"self",
".",
"_program_name",
"is",
"not",
"None",
":",
"return_value",
"=",
"f\"{return_value}: {self._program_name}\"",
"return",
"return_value"
] | [
286,
4
] | [
293,
27
] | python | en | ['en', 'en', 'en'] | True |
BraviaTVDevice.media_content_id | (self) | Content ID of current playing media. | Content ID of current playing media. | def media_content_id(self):
"""Content ID of current playing media."""
return self._channel_name | [
"def",
"media_content_id",
"(",
"self",
")",
":",
"return",
"self",
".",
"_channel_name"
] | [
296,
4
] | [
298,
33
] | python | en | ['en', 'en', 'en'] | True |
BraviaTVDevice.media_duration | (self) | Duration of current playing media in seconds. | Duration of current playing media in seconds. | def media_duration(self):
"""Duration of current playing media in seconds."""
return self._duration | [
"def",
"media_duration",
"(",
"self",
")",
":",
"return",
"self",
".",
"_duration"
] | [
301,
4
] | [
303,
29
] | python | en | ['en', 'en', 'en'] | True |
BraviaTVDevice.set_volume_level | (self, volume) | Set volume level, range 0..1. | Set volume level, range 0..1. | def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._braviarc.set_volume_level(volume) | [
"def",
"set_volume_level",
"(",
"self",
",",
"volume",
")",
":",
"self",
".",
"_braviarc",
".",
"set_volume_level",
"(",
"volume",
")"
] | [
305,
4
] | [
307,
47
] | python | en | ['fr', 'zu', 'en'] | False |
BraviaTVDevice.async_turn_on | (self) | Turn the media player on. | Turn the media player on. | async def async_turn_on(self):
"""Turn the media player on."""
async with self._state_lock:
await self.hass.async_add_executor_job(self._braviarc.turn_on) | [
"async",
"def",
"async_turn_on",
"(",
"self",
")",
":",
"async",
"with",
"self",
".",
"_state_lock",
":",
"await",
"self",
".",
"hass",
".",
"async_add_executor_job",
"(",
"self",
".",
"_braviarc",
".",
"turn_on",
")"
] | [
309,
4
] | [
312,
74
] | python | en | ['en', 'en', 'en'] | True |
BraviaTVDevice.async_turn_off | (self) | Turn off media player. | Turn off media player. | async def async_turn_off(self):
"""Turn off media player."""
async with self._state_lock:
await self.hass.async_add_executor_job(self._braviarc.turn_off) | [
"async",
"def",
"async_turn_off",
"(",
"self",
")",
":",
"async",
"with",
"self",
".",
"_state_lock",
":",
"await",
"self",
".",
"hass",
".",
"async_add_executor_job",
"(",
"self",
".",
"_braviarc",
".",
"turn_off",
")"
] | [
314,
4
] | [
317,
75
] | python | en | ['en', 'en', 'en'] | True |
BraviaTVDevice.volume_up | (self) | Volume up the media player. | Volume up the media player. | def volume_up(self):
"""Volume up the media player."""
self._braviarc.volume_up() | [
"def",
"volume_up",
"(",
"self",
")",
":",
"self",
".",
"_braviarc",
".",
"volume_up",
"(",
")"
] | [
319,
4
] | [
321,
34
] | python | en | ['en', 'en', 'en'] | True |
BraviaTVDevice.volume_down | (self) | Volume down media player. | Volume down media player. | def volume_down(self):
"""Volume down media player."""
self._braviarc.volume_down() | [
"def",
"volume_down",
"(",
"self",
")",
":",
"self",
".",
"_braviarc",
".",
"volume_down",
"(",
")"
] | [
323,
4
] | [
325,
36
] | python | en | ['en', 'sl', 'en'] | True |
BraviaTVDevice.mute_volume | (self, mute) | Send mute command. | Send mute command. | def mute_volume(self, mute):
"""Send mute command."""
self._braviarc.mute_volume(mute) | [
"def",
"mute_volume",
"(",
"self",
",",
"mute",
")",
":",
"self",
".",
"_braviarc",
".",
"mute_volume",
"(",
"mute",
")"
] | [
327,
4
] | [
329,
40
] | python | en | ['en', 'co', 'en'] | True |
BraviaTVDevice.select_source | (self, source) | Set the input source. | Set the input source. | def select_source(self, source):
"""Set the input source."""
if source in self._content_mapping:
uri = self._content_mapping[source]
self._braviarc.play_content(uri) | [
"def",
"select_source",
"(",
"self",
",",
"source",
")",
":",
"if",
"source",
"in",
"self",
".",
"_content_mapping",
":",
"uri",
"=",
"self",
".",
"_content_mapping",
"[",
"source",
"]",
"self",
".",
"_braviarc",
".",
"play_content",
"(",
"uri",
")"
] | [
331,
4
] | [
335,
44
] | python | en | ['en', 'su', 'en'] | True |
BraviaTVDevice.media_play_pause | (self) | Simulate play pause media player. | Simulate play pause media player. | def media_play_pause(self):
"""Simulate play pause media player."""
if self._playing:
self.media_pause()
else:
self.media_play() | [
"def",
"media_play_pause",
"(",
"self",
")",
":",
"if",
"self",
".",
"_playing",
":",
"self",
".",
"media_pause",
"(",
")",
"else",
":",
"self",
".",
"media_play",
"(",
")"
] | [
337,
4
] | [
342,
29
] | python | en | ['en', 'en', 'it'] | True |
BraviaTVDevice.media_play | (self) | Send play command. | Send play command. | def media_play(self):
"""Send play command."""
self._playing = True
self._braviarc.media_play() | [
"def",
"media_play",
"(",
"self",
")",
":",
"self",
".",
"_playing",
"=",
"True",
"self",
".",
"_braviarc",
".",
"media_play",
"(",
")"
] | [
344,
4
] | [
347,
35
] | python | en | ['en', 'en', 'en'] | True |
BraviaTVDevice.media_pause | (self) | Send media pause command to media player. | Send media pause command to media player. | def media_pause(self):
"""Send media pause command to media player."""
self._playing = False
self._braviarc.media_pause() | [
"def",
"media_pause",
"(",
"self",
")",
":",
"self",
".",
"_playing",
"=",
"False",
"self",
".",
"_braviarc",
".",
"media_pause",
"(",
")"
] | [
349,
4
] | [
352,
36
] | python | en | ['en', 'en', 'en'] | True |
BraviaTVDevice.media_stop | (self) | Send media stop command to media player. | Send media stop command to media player. | def media_stop(self):
"""Send media stop command to media player."""
self._playing = False
self._braviarc.media_stop() | [
"def",
"media_stop",
"(",
"self",
")",
":",
"self",
".",
"_playing",
"=",
"False",
"self",
".",
"_braviarc",
".",
"media_stop",
"(",
")"
] | [
354,
4
] | [
357,
35
] | python | en | ['en', 'en', 'en'] | True |
BraviaTVDevice.media_next_track | (self) | Send next track command. | Send next track command. | def media_next_track(self):
"""Send next track command."""
self._braviarc.media_next_track() | [
"def",
"media_next_track",
"(",
"self",
")",
":",
"self",
".",
"_braviarc",
".",
"media_next_track",
"(",
")"
] | [
359,
4
] | [
361,
41
] | python | en | ['en', 'pt', 'en'] | True |
BraviaTVDevice.media_previous_track | (self) | Send the previous track command. | Send the previous track command. | def media_previous_track(self):
"""Send the previous track command."""
self._braviarc.media_previous_track() | [
"def",
"media_previous_track",
"(",
"self",
")",
":",
"self",
".",
"_braviarc",
".",
"media_previous_track",
"(",
")"
] | [
363,
4
] | [
365,
45
] | python | en | ['en', 'en', 'en'] | True |
test_schema | () | Test schema. | Test schema. | def test_schema():
"""Test schema."""
assert "nl-NL" in tts.SUPPORT_LANGUAGES
processed = tts.PLATFORM_SCHEMA({"platform": "cloud", "language": "nl-NL"})
assert processed["gender"] == "female"
# Should not raise
processed = tts.PLATFORM_SCHEMA(
{"platform": "cloud", "language": "nl-NL", "gender": "female"}
) | [
"def",
"test_schema",
"(",
")",
":",
"assert",
"\"nl-NL\"",
"in",
"tts",
".",
"SUPPORT_LANGUAGES",
"processed",
"=",
"tts",
".",
"PLATFORM_SCHEMA",
"(",
"{",
"\"platform\"",
":",
"\"cloud\"",
",",
"\"language\"",
":",
"\"nl-NL\"",
"}",
")",
"assert",
"processed",
"[",
"\"gender\"",
"]",
"==",
"\"female\"",
"# Should not raise",
"processed",
"=",
"tts",
".",
"PLATFORM_SCHEMA",
"(",
"{",
"\"platform\"",
":",
"\"cloud\"",
",",
"\"language\"",
":",
"\"nl-NL\"",
",",
"\"gender\"",
":",
"\"female\"",
"}",
")"
] | [
4,
0
] | [
14,
5
] | python | de | ['de', 'de', 'it'] | False |
get_label_balance | (dataset) |
Given a dataset, return the proportion of each target class and the counts of each class type
Parameters
----------
dataset
Returns
-------
sample_weights, counts
|
Given a dataset, return the proportion of each target class and the counts of each class type | def get_label_balance(dataset):
"""
Given a dataset, return the proportion of each target class and the counts of each class type
Parameters
----------
dataset
Returns
-------
sample_weights, counts
"""
assert hasattr(dataset, 'get_targets')
labels = dataset.get_targets()
counts = np.bincount(labels)
train_weights = 1. / torch.tensor(counts, dtype=torch.float)
sample_weights = train_weights[labels]
class_freq = counts/counts.sum()
if len(counts) < 10:
tqdm.tqdm.write('Class frequency: {}'.format(' | '.join('{:.2f}'.format(c) for c in class_freq)))
else:
tqdm.tqdm.write("Class frequencies range from {:.2e} to {:.2e}".format(class_freq.min(), class_freq.max()))
return sample_weights, counts | [
"def",
"get_label_balance",
"(",
"dataset",
")",
":",
"assert",
"hasattr",
"(",
"dataset",
",",
"'get_targets'",
")",
"labels",
"=",
"dataset",
".",
"get_targets",
"(",
")",
"counts",
"=",
"np",
".",
"bincount",
"(",
"labels",
")",
"train_weights",
"=",
"1.",
"/",
"torch",
".",
"tensor",
"(",
"counts",
",",
"dtype",
"=",
"torch",
".",
"float",
")",
"sample_weights",
"=",
"train_weights",
"[",
"labels",
"]",
"class_freq",
"=",
"counts",
"/",
"counts",
".",
"sum",
"(",
")",
"if",
"len",
"(",
"counts",
")",
"<",
"10",
":",
"tqdm",
".",
"tqdm",
".",
"write",
"(",
"'Class frequency: {}'",
".",
"format",
"(",
"' | '",
".",
"join",
"(",
"'{:.2f}'",
".",
"format",
"(",
"c",
")",
"for",
"c",
"in",
"class_freq",
")",
")",
")",
"else",
":",
"tqdm",
".",
"tqdm",
".",
"write",
"(",
"\"Class frequencies range from {:.2e} to {:.2e}\"",
".",
"format",
"(",
"class_freq",
".",
"min",
"(",
")",
",",
"class_freq",
".",
"max",
"(",
")",
")",
")",
"return",
"sample_weights",
",",
"counts"
] | [
744,
0
] | [
766,
33
] | python | en | ['en', 'error', 'th'] | False |
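The `sample_weights` returned above are per-sample inverse class frequencies, which is the form PyTorch's `WeightedRandomSampler` expects. A hedged usage sketch follows; the `dataset` object and batch size are assumed placeholders, and the only requirement carried over from the function above is that the dataset exposes `get_targets()`.

```python
# Sketch only: using the weights from get_label_balance to draw roughly class-balanced batches.
# Assumes `dataset` implements get_targets() and is indexable, as required by the function above.
from torch.utils.data import DataLoader, WeightedRandomSampler

sample_weights, counts = get_label_balance(dataset)
sampler = WeightedRandomSampler(sample_weights, num_samples=len(sample_weights), replacement=True)
loader = DataLoader(dataset, batch_size=32, sampler=sampler)  # batches are approximately class-balanced
```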
BaseProcess.__init__ | (self, lr=0.001, metrics=None, evaluation_only_metrics=None, l2_weight_decay=0.01, cuda=None, **kwargs) |
Initialization of the Base Trainable object. Any learning procedure that leverages DN3atasets should subclass
this base class.
By default uses the SGD with momentum optimization.
Parameters
----------
cuda : bool, string, None
            If a boolean, sets whether to enable training on the GPU; if a string, it specifies which device to
            use. If None (default), the device is figured out automatically.
        lr : float
            The learning rate to use; this will probably be something that should be tuned for each application.
Start with multiplying or dividing by values of 2, 5 or 10 to seek out a good number.
metrics : dict, list
A dictionary of named (keys) metrics (values) or some iterable set of metrics that will be identified
by their class names.
evaluation_only_metrics : list
A list of names of metrics that will be used for evaluation only (not calculated or
reported during training steps).
l2_weight_decay : float
One of the simplest and most common regularizing techniques. If you find a model rapidly
reaching high training accuracy (and not validation) increase this. If having trouble fitting
the training data, decrease this.
kwargs : dict
Arguments that will be used by the processes' :py:meth:`BaseProcess.build_network()` method.
|
Initialization of the Base Trainable object. Any learning procedure that leverages DN3atasets should subclass
this base class. | def __init__(self, lr=0.001, metrics=None, evaluation_only_metrics=None, l2_weight_decay=0.01, cuda=None, **kwargs):
"""
Initialization of the Base Trainable object. Any learning procedure that leverages DN3atasets should subclass
this base class.
By default uses the SGD with momentum optimization.
Parameters
----------
cuda : bool, string, None
            If a boolean, sets whether to enable training on the GPU; if a string, it specifies which device to
            use. If None (default), the device is figured out automatically.
        lr : float
            The learning rate to use; this will probably be something that should be tuned for each application.
Start with multiplying or dividing by values of 2, 5 or 10 to seek out a good number.
metrics : dict, list
A dictionary of named (keys) metrics (values) or some iterable set of metrics that will be identified
by their class names.
evaluation_only_metrics : list
A list of names of metrics that will be used for evaluation only (not calculated or
reported during training steps).
l2_weight_decay : float
One of the simplest and most common regularizing techniques. If you find a model rapidly
reaching high training accuracy (and not validation) increase this. If having trouble fitting
the training data, decrease this.
kwargs : dict
Arguments that will be used by the processes' :py:meth:`BaseProcess.build_network()` method.
"""
if cuda is None:
cuda = torch.cuda.is_available()
if cuda:
tqdm.tqdm.write("GPU(s) detected: training and model execution will be performed on GPU.")
if isinstance(cuda, bool):
cuda = "cuda" if cuda else "cpu"
assert isinstance(cuda, str)
self.cuda = cuda
self.device = torch.device(cuda)
self._eval_metrics = list() if evaluation_only_metrics is None else list(evaluation_only_metrics).copy()
self.metrics = OrderedDict()
if metrics is not None:
if isinstance(metrics, (list, tuple)):
metrics = {m.__class__.__name__: m for m in metrics}
if isinstance(metrics, dict):
self.add_metrics(metrics)
_before_members = set(self.__dict__.keys())
self.build_network(**kwargs)
new_members = set(self.__dict__.keys()).difference(_before_members)
self._training = False
self._trainables = list()
for member in new_members:
if isinstance(self.__dict__[member], (torch.nn.Module, torch.Tensor)):
if not (isinstance(self.__dict__[member], torch.Tensor) and not self.__dict__[member].requires_grad):
self._trainables.append(member)
self.__dict__[member] = self.__dict__[member].to(self.device)
self.optimizer = torch.optim.SGD(self.parameters(), weight_decay=l2_weight_decay, lr=lr, nesterov=True,
momentum=0.9)
self.scheduler = None
self.scheduler_after_batch = False
self.epoch = None
self.lr = lr
self.weight_decay = l2_weight_decay
self._batch_transforms = list()
self._eval_transforms = list() | [
"def",
"__init__",
"(",
"self",
",",
"lr",
"=",
"0.001",
",",
"metrics",
"=",
"None",
",",
"evaluation_only_metrics",
"=",
"None",
",",
"l2_weight_decay",
"=",
"0.01",
",",
"cuda",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"cuda",
"is",
"None",
":",
"cuda",
"=",
"torch",
".",
"cuda",
".",
"is_available",
"(",
")",
"if",
"cuda",
":",
"tqdm",
".",
"tqdm",
".",
"write",
"(",
"\"GPU(s) detected: training and model execution will be performed on GPU.\"",
")",
"if",
"isinstance",
"(",
"cuda",
",",
"bool",
")",
":",
"cuda",
"=",
"\"cuda\"",
"if",
"cuda",
"else",
"\"cpu\"",
"assert",
"isinstance",
"(",
"cuda",
",",
"str",
")",
"self",
".",
"cuda",
"=",
"cuda",
"self",
".",
"device",
"=",
"torch",
".",
"device",
"(",
"cuda",
")",
"self",
".",
"_eval_metrics",
"=",
"list",
"(",
")",
"if",
"evaluation_only_metrics",
"is",
"None",
"else",
"list",
"(",
"evaluation_only_metrics",
")",
".",
"copy",
"(",
")",
"self",
".",
"metrics",
"=",
"OrderedDict",
"(",
")",
"if",
"metrics",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"metrics",
",",
"(",
"list",
",",
"tuple",
")",
")",
":",
"metrics",
"=",
"{",
"m",
".",
"__class__",
".",
"__name__",
":",
"m",
"for",
"m",
"in",
"metrics",
"}",
"if",
"isinstance",
"(",
"metrics",
",",
"dict",
")",
":",
"self",
".",
"add_metrics",
"(",
"metrics",
")",
"_before_members",
"=",
"set",
"(",
"self",
".",
"__dict__",
".",
"keys",
"(",
")",
")",
"self",
".",
"build_network",
"(",
"*",
"*",
"kwargs",
")",
"new_members",
"=",
"set",
"(",
"self",
".",
"__dict__",
".",
"keys",
"(",
")",
")",
".",
"difference",
"(",
"_before_members",
")",
"self",
".",
"_training",
"=",
"False",
"self",
".",
"_trainables",
"=",
"list",
"(",
")",
"for",
"member",
"in",
"new_members",
":",
"if",
"isinstance",
"(",
"self",
".",
"__dict__",
"[",
"member",
"]",
",",
"(",
"torch",
".",
"nn",
".",
"Module",
",",
"torch",
".",
"Tensor",
")",
")",
":",
"if",
"not",
"(",
"isinstance",
"(",
"self",
".",
"__dict__",
"[",
"member",
"]",
",",
"torch",
".",
"Tensor",
")",
"and",
"not",
"self",
".",
"__dict__",
"[",
"member",
"]",
".",
"requires_grad",
")",
":",
"self",
".",
"_trainables",
".",
"append",
"(",
"member",
")",
"self",
".",
"__dict__",
"[",
"member",
"]",
"=",
"self",
".",
"__dict__",
"[",
"member",
"]",
".",
"to",
"(",
"self",
".",
"device",
")",
"self",
".",
"optimizer",
"=",
"torch",
".",
"optim",
".",
"SGD",
"(",
"self",
".",
"parameters",
"(",
")",
",",
"weight_decay",
"=",
"l2_weight_decay",
",",
"lr",
"=",
"lr",
",",
"nesterov",
"=",
"True",
",",
"momentum",
"=",
"0.9",
")",
"self",
".",
"scheduler",
"=",
"None",
"self",
".",
"scheduler_after_batch",
"=",
"False",
"self",
".",
"epoch",
"=",
"None",
"self",
".",
"lr",
"=",
"lr",
"self",
".",
"weight_decay",
"=",
"l2_weight_decay",
"self",
".",
"_batch_transforms",
"=",
"list",
"(",
")",
"self",
".",
"_eval_transforms",
"=",
"list",
"(",
")"
] | [
31,
4
] | [
96,
38
] | python | en | ['en', 'error', 'th'] | False |
BaseProcess.set_scheduler | (self, scheduler, step_every_batch=False) |
        This allows the addition of a learning rate schedule to the process. By default, a linear warmup with cosine
        decay will be used. Any scheduler that is an instance of :any:`Scheduler` (pytorch's schedulers, or extensions
        thereof) can be set here. Additionally, a string keyword can be used, including:
- "constant"
Parameters
----------
scheduler: str, Scheduler
step_every_batch: bool
Whether to call step after every batch (if `True`), or after every epoch (`False`)
|
        This allows the addition of a learning rate schedule to the process. By default, a linear warmup with cosine
        decay will be used. Any scheduler that is an instance of :any:`Scheduler` (pytorch's schedulers, or extensions
        thereof) can be set here. Additionally, a string keyword can be used, including:
- "constant" | def set_scheduler(self, scheduler, step_every_batch=False):
"""
        This allows the addition of a learning rate schedule to the process. By default, a linear warmup with cosine
        decay will be used. Any scheduler that is an instance of :any:`Scheduler` (pytorch's schedulers, or extensions
        thereof) can be set here. Additionally, a string keyword can be used, including:
- "constant"
Parameters
----------
scheduler: str, Scheduler
step_every_batch: bool
Whether to call step after every batch (if `True`), or after every epoch (`False`)
"""
if isinstance(scheduler, str):
if scheduler.lower() == 'constant':
scheduler = torch.optim.lr_scheduler.LambdaLR(self.optimizer, lambda e: 1.0)
else:
raise ValueError("Scheduler {} is not supported.".format(scheduler))
# This is the most common one that needs this, force this to be true
elif isinstance(scheduler, torch.optim.lr_scheduler.OneCycleLR):
self.scheduler_after_batch = True
else:
self.scheduler_after_batch = step_every_batch
self.scheduler = scheduler | [
"def",
"set_scheduler",
"(",
"self",
",",
"scheduler",
",",
"step_every_batch",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"scheduler",
",",
"str",
")",
":",
"if",
"scheduler",
".",
"lower",
"(",
")",
"==",
"'constant'",
":",
"scheduler",
"=",
"torch",
".",
"optim",
".",
"lr_scheduler",
".",
"LambdaLR",
"(",
"self",
".",
"optimizer",
",",
"lambda",
"e",
":",
"1.0",
")",
"else",
":",
"raise",
"ValueError",
"(",
"\"Scheduler {} is not supported.\"",
".",
"format",
"(",
"scheduler",
")",
")",
"# This is the most common one that needs this, force this to be true",
"elif",
"isinstance",
"(",
"scheduler",
",",
"torch",
".",
"optim",
".",
"lr_scheduler",
".",
"OneCycleLR",
")",
":",
"self",
".",
"scheduler_after_batch",
"=",
"True",
"else",
":",
"self",
".",
"scheduler_after_batch",
"=",
"step_every_batch",
"self",
".",
"scheduler",
"=",
"scheduler"
] | [
104,
4
] | [
128,
34
] | python | en | ['en', 'error', 'th'] | False |
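Since the method above special-cases `OneCycleLR` (forcing per-batch stepping), a hedged sketch of attaching such a schedule to an already-constructed process might look as follows; `process` and `total_steps=1000` are assumed placeholders, not values from the source.

```python
# Sketch only: attaching a one-cycle learning-rate schedule to a BaseProcess-derived object `process`.
# `total_steps` should cover the planned number of optimizer steps; the value here is a placeholder.
from torch.optim.lr_scheduler import OneCycleLR

schedule = OneCycleLR(process.optimizer, max_lr=process.lr, total_steps=1000)
process.set_scheduler(schedule)  # per-batch stepping is enabled automatically for OneCycleLR
```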
BaseProcess.build_network | (self, **kwargs) |
This method is used to add trainable modules to the process. Rather than placing objects for training
in the __init__ method, they should be placed here.
By default any arguments that propagate unused from __init__ are included here.
|
This method is used to add trainable modules to the process. Rather than placing objects for training
in the __init__ method, they should be placed here. | def build_network(self, **kwargs):
"""
This method is used to add trainable modules to the process. Rather than placing objects for training
in the __init__ method, they should be placed here.
By default any arguments that propagate unused from __init__ are included here.
"""
self.__dict__.update(**kwargs) | [
"def",
"build_network",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"__dict__",
".",
"update",
"(",
"*",
"*",
"kwargs",
")"
] | [
176,
4
] | [
183,
38
] | python | en | ['en', 'error', 'th'] | False |
BaseProcess.parameters | (self) |
All the trainable parameters in the Trainable. This includes any architecture parameters and meta-parameters.
Returns
-------
params :
An iterator of parameters
|
All the trainable parameters in the Trainable. This includes any architecture parameters and meta-parameters. | def parameters(self):
"""
All the trainable parameters in the Trainable. This includes any architecture parameters and meta-parameters.
Returns
-------
params :
An iterator of parameters
"""
for member in self._trainables:
yield from self.__dict__[member].parameters() | [
"def",
"parameters",
"(",
"self",
")",
":",
"for",
"member",
"in",
"self",
".",
"_trainables",
":",
"yield",
"from",
"self",
".",
"__dict__",
"[",
"member",
"]",
".",
"parameters",
"(",
")"
] | [
185,
4
] | [
195,
57
] | python | en | ['en', 'error', 'th'] | False |
BaseProcess.forward | (self, *inputs) |
Given a batch of inputs, return the outputs produced by the trainable module.
Parameters
----------
inputs :
Tensors needed for underlying module.
Returns
-------
outputs :
Outputs of module
|
Given a batch of inputs, return the outputs produced by the trainable module. | def forward(self, *inputs):
"""
Given a batch of inputs, return the outputs produced by the trainable module.
Parameters
----------
inputs :
Tensors needed for underlying module.
Returns
-------
outputs :
Outputs of module
"""
raise NotImplementedError | [
"def",
"forward",
"(",
"self",
",",
"*",
"inputs",
")",
":",
"raise",
"NotImplementedError"
] | [
197,
4
] | [
212,
33
] | python | en | ['en', 'error', 'th'] | False |
BaseProcess.calculate_loss | (self, inputs, outputs) |
Given the inputs to and outputs from underlying modules, calculate the loss.
Returns
-------
Loss :
Single loss quantity to be minimized.
|
Given the inputs to and outputs from underlying modules, calculate the loss. | def calculate_loss(self, inputs, outputs):
"""
Given the inputs to and outputs from underlying modules, calculate the loss.
Returns
-------
Loss :
Single loss quantity to be minimized.
"""
if isinstance(outputs, (tuple, list)):
device = outputs[0].device
else:
device = outputs.device
loss_fn = self.loss
if hasattr(self.loss, 'to'):
loss_fn = loss_fn.to(device)
return loss_fn(outputs, inputs[-1]) | [
"def",
"calculate_loss",
"(",
"self",
",",
"inputs",
",",
"outputs",
")",
":",
"if",
"isinstance",
"(",
"outputs",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"device",
"=",
"outputs",
"[",
"0",
"]",
".",
"device",
"else",
":",
"device",
"=",
"outputs",
".",
"device",
"loss_fn",
"=",
"self",
".",
"loss",
"if",
"hasattr",
"(",
"self",
".",
"loss",
",",
"'to'",
")",
":",
"loss_fn",
"=",
"loss_fn",
".",
"to",
"(",
"device",
")",
"return",
"loss_fn",
"(",
"outputs",
",",
"inputs",
"[",
"-",
"1",
"]",
")"
] | [
214,
4
] | [
230,
43
] | python | en | ['en', 'error', 'th'] | False |
BaseProcess.calculate_metrics | (self, inputs, outputs) |
Given the inputs to and outputs from the underlying module, return tracked metrics.
Parameters
----------
inputs :
Input tensors.
outputs :
Output tensors.
Returns
-------
metrics : OrderedDict, None
Dictionary of metric quantities.
|
Given the inputs to and outputs from the underlying module, return tracked metrics. | def calculate_metrics(self, inputs, outputs):
"""
Given the inputs to and outputs from the underlying module, return tracked metrics.
Parameters
----------
inputs :
Input tensors.
outputs :
Output tensors.
Returns
-------
metrics : OrderedDict, None
Dictionary of metric quantities.
"""
metrics = OrderedDict()
for met_name, met_fn in self.metrics.items():
if self._training and met_name in self._eval_metrics:
continue
try:
metrics[met_name] = met_fn(inputs, outputs)
# I know it's super broad, but basically if metrics fail during training, I want to just ignore them...
except:
continue
return metrics | [
"def",
"calculate_metrics",
"(",
"self",
",",
"inputs",
",",
"outputs",
")",
":",
"metrics",
"=",
"OrderedDict",
"(",
")",
"for",
"met_name",
",",
"met_fn",
"in",
"self",
".",
"metrics",
".",
"items",
"(",
")",
":",
"if",
"self",
".",
"_training",
"and",
"met_name",
"in",
"self",
".",
"_eval_metrics",
":",
"continue",
"try",
":",
"metrics",
"[",
"met_name",
"]",
"=",
"met_fn",
"(",
"inputs",
",",
"outputs",
")",
"# I know its super broad, but basically if metrics fail during training, I want to just ignore them...",
"except",
":",
"continue",
"return",
"metrics"
] | [
232,
4
] | [
257,
22
] | python | en | ['en', 'error', 'th'] | False |
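calculate_metrics calls every entry of self.metrics as met_fn(inputs, outputs), skipping entries listed in _eval_metrics while training and silently ignoring failures. A custom metric is therefore just a two-argument callable; the function below is an invented example of that signature, relying on the convention (visible in calculate_loss) that the label tensor is inputs[-1]:
import torch
def batch_accuracy(inputs, outputs):
    labels = inputs[-1]
    logits = outputs if isinstance(outputs, torch.Tensor) else outputs[0]
    return (logits.argmax(dim=-1) == labels).float().mean().item()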
BaseProcess.evaluate | (self, dataset, **loader_kwargs) |
Calculate and return metrics for a dataset
Parameters
----------
dataset: DN3ataset, DataLoader
The dataset that will be used for evaluation, if not a DataLoader, one will be constructed
loader_kwargs: dict
Args that will be passed to the dataloader, but `shuffle` and `drop_last` will both be
forced to `False`
Returns
-------
metrics : OrderedDict
Metric scores for the entire dataset
|
Calculate and return metrics for a dataset | def evaluate(self, dataset, **loader_kwargs):
"""
Calculate and return metrics for a dataset
Parameters
----------
dataset: DN3ataset, DataLoader
The dataset that will be used for evaluation, if not a DataLoader, one will be constructed
loader_kwargs: dict
Args that will be passed to the dataloader, but `shuffle` and `drop_last` will both be
forced to `False`
Returns
-------
metrics : OrderedDict
Metric scores for the entire dataset
"""
self.train(False)
inputs, outputs = self.predict(dataset, **loader_kwargs)
metrics = self.calculate_metrics(inputs, outputs)
metrics['loss'] = self.calculate_loss(inputs, outputs).item()
return metrics | [
"def",
"evaluate",
"(",
"self",
",",
"dataset",
",",
"*",
"*",
"loader_kwargs",
")",
":",
"self",
".",
"train",
"(",
"False",
")",
"inputs",
",",
"outputs",
"=",
"self",
".",
"predict",
"(",
"dataset",
",",
"*",
"*",
"loader_kwargs",
")",
"metrics",
"=",
"self",
".",
"calculate_metrics",
"(",
"inputs",
",",
"outputs",
")",
"metrics",
"[",
"'loss'",
"]",
"=",
"self",
".",
"calculate_loss",
"(",
"inputs",
",",
"outputs",
")",
".",
"item",
"(",
")",
"return",
"metrics"
] | [
283,
4
] | [
304,
22
] | python | en | ['en', 'error', 'th'] | False |
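A typical evaluate call, assuming `process` is an already-trained instance and `val_dataset` is a DN3ataset or DataLoader (both hypothetical names); shuffle and drop_last are handled internally, so usually only batching-related kwargs are passed through:
metrics = process.evaluate(val_dataset, batch_size=32)
print(metrics['loss'])      # loss is always added to the returned OrderedDict
for name, value in metrics.items():
    print(name, value)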
BaseProcess.predict | (self, dataset, **loader_kwargs) |
Determine the outputs for all loaded data from the dataset
Parameters
----------
dataset: DN3ataset, DataLoader
The dataset that will be used for evaluation, if not a DataLoader, one will be constructed
loader_kwargs: dict
Args that will be passed to the dataloader, but `shuffle` and `drop_last` will both be
forced to `False`
Returns
-------
inputs : Tensor
The exact inputs used to calculate the outputs (in case they were stochastic and need saving)
outputs : Tensor
The outputs from each run of :function:`forward`
|
Determine the outputs for all loaded data from the dataset | def predict(self, dataset, **loader_kwargs):
"""
Determine the outputs for all loaded data from the dataset
Parameters
----------
dataset: DN3ataset, DataLoader
The dataset that will be used for evaluation, if not a DataLoader, one will be constructed
loader_kwargs: dict
Args that will be passed to the dataloader, but `shuffle` and `drop_last` will both be
forced to `False`
Returns
-------
inputs : Tensor
The exact inputs used to calculate the outputs (in case they were stochastic and need saving)
outputs : Tensor
The outputs from each run of :function:`forward`
"""
self.train(False)
loader_kwargs.setdefault('batch_size', 1)
dataset = self._make_dataloader(dataset, **loader_kwargs)
pbar = tqdm.trange(len(dataset), desc="Predicting")
data_iterator = iter(dataset)
inputs = list()
outputs = list()
with torch.no_grad():
for iteration in pbar:
input_batch = self._get_batch(data_iterator)
output_batch = self.forward(*input_batch)
inputs.append([tensor.cpu() for tensor in input_batch])
if isinstance(output_batch, torch.Tensor):
outputs.append(output_batch.cpu())
else:
outputs.append([tensor.cpu() for tensor in output_batch])
def package_multiple_tensors(batches: list):
if isinstance(batches[0], torch.Tensor):
return torch.cat(batches)
elif isinstance(batches[0], (tuple, list)):
return [torch.cat(b) for b in zip(*batches)]
return package_multiple_tensors(inputs), package_multiple_tensors(outputs) | [
"def",
"predict",
"(",
"self",
",",
"dataset",
",",
"*",
"*",
"loader_kwargs",
")",
":",
"self",
".",
"train",
"(",
"False",
")",
"loader_kwargs",
".",
"setdefault",
"(",
"'batch_size'",
",",
"1",
")",
"dataset",
"=",
"self",
".",
"_make_dataloader",
"(",
"dataset",
",",
"*",
"*",
"loader_kwargs",
")",
"pbar",
"=",
"tqdm",
".",
"trange",
"(",
"len",
"(",
"dataset",
")",
",",
"desc",
"=",
"\"Predicting\"",
")",
"data_iterator",
"=",
"iter",
"(",
"dataset",
")",
"inputs",
"=",
"list",
"(",
")",
"outputs",
"=",
"list",
"(",
")",
"with",
"torch",
".",
"no_grad",
"(",
")",
":",
"for",
"iteration",
"in",
"pbar",
":",
"input_batch",
"=",
"self",
".",
"_get_batch",
"(",
"data_iterator",
")",
"output_batch",
"=",
"self",
".",
"forward",
"(",
"*",
"input_batch",
")",
"inputs",
".",
"append",
"(",
"[",
"tensor",
".",
"cpu",
"(",
")",
"for",
"tensor",
"in",
"input_batch",
"]",
")",
"if",
"isinstance",
"(",
"output_batch",
",",
"torch",
".",
"Tensor",
")",
":",
"outputs",
".",
"append",
"(",
"output_batch",
".",
"cpu",
"(",
")",
")",
"else",
":",
"outputs",
".",
"append",
"(",
"[",
"tensor",
".",
"cpu",
"(",
")",
"for",
"tensor",
"in",
"output_batch",
"]",
")",
"def",
"package_multiple_tensors",
"(",
"batches",
":",
"list",
")",
":",
"if",
"isinstance",
"(",
"batches",
"[",
"0",
"]",
",",
"torch",
".",
"Tensor",
")",
":",
"return",
"torch",
".",
"cat",
"(",
"batches",
")",
"elif",
"isinstance",
"(",
"batches",
"[",
"0",
"]",
",",
"(",
"tuple",
",",
"list",
")",
")",
":",
"return",
"[",
"torch",
".",
"cat",
"(",
"b",
")",
"for",
"b",
"in",
"zip",
"(",
"*",
"batches",
")",
"]",
"return",
"package_multiple_tensors",
"(",
"inputs",
")",
",",
"package_multiple_tensors",
"(",
"outputs",
")"
] | [
306,
4
] | [
352,
82
] | python | en | ['en', 'error', 'th'] | False |
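Usage sketch for predict, again with hypothetical `process` and `test_dataset` objects: the method runs the whole dataset through forward under no_grad and concatenates results across batches, so both returned values cover every sample in order:
inputs, outputs = process.predict(test_dataset, batch_size=16)
# outputs is a single Tensor (or a list of Tensors) concatenated over all batches;
# inputs holds the exact tensors that produced them, useful when loading is stochastic.
predictions = outputs.argmax(dim=-1)  # assumes a single logits tensor was returned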
BaseProcess.save_best | (self) |
Create a snapshot of what is being currently trained for re-loading with the :py:meth:`load_best()` method.
Returns
-------
best : Any
Whatever format is needed for :py:meth:`load_best()`, will be the argument provided to it.
|
Create a snapshot of what is being currently trained for re-loading with the :py:meth:`load_best()` method. | def save_best(self):
"""
Create a snapshot of what is being currently trained for re-loading with the :py:meth:`load_best()` method.
Returns
-------
best : Any
Whatever format is needed for :py:meth:`load_best()`, will be the argument provided to it.
"""
return [{k: v.cpu() for k, v in self.__dict__[m].state_dict().items()} for m in self._trainables] | [
"def",
"save_best",
"(",
"self",
")",
":",
"return",
"[",
"{",
"k",
":",
"v",
".",
"cpu",
"(",
")",
"for",
"k",
",",
"v",
"in",
"self",
".",
"__dict__",
"[",
"m",
"]",
".",
"state_dict",
"(",
")",
".",
"items",
"(",
")",
"}",
"for",
"m",
"in",
"self",
".",
"_trainables",
"]"
] | [
367,
4
] | [
376,
105
] | python | en | ['en', 'error', 'th'] | False |
BaseProcess.load_best | (self, best) |
Load the parameters as saved by :py:meth:`save_best()`.
Parameters
----------
best: Any
|
Load the parameters as saved by :py:meth:`save_best()`. | def load_best(self, best):
"""
Load the parameters as saved by :py:meth:`save_best()`.
Parameters
----------
best: Any
"""
for m, state_dict in zip(self._trainables, best):
self.__dict__[m].load_state_dict({k: v.to(self.device) for k, v in state_dict.items()}) | [
"def",
"load_best",
"(",
"self",
",",
"best",
")",
":",
"for",
"m",
",",
"state_dict",
"in",
"zip",
"(",
"self",
".",
"_trainables",
",",
"best",
")",
":",
"self",
".",
"__dict__",
"[",
"m",
"]",
".",
"load_state_dict",
"(",
"{",
"k",
":",
"v",
".",
"to",
"(",
"self",
".",
"device",
")",
"for",
"k",
",",
"v",
"in",
"state_dict",
".",
"items",
"(",
")",
"}",
")"
] | [
378,
4
] | [
387,
99
] | python | en | ['en', 'error', 'th'] | False |
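save_best and load_best form the checkpointing pair used by fit's retain_best logic; a small sketch of the round trip (the `process` object is assumed to exist):
snapshot = process.save_best()   # CPU copies of each trainable's state_dict
# ... more training happens, possibly making validation performance worse ...
process.load_best(snapshot)      # parameters are moved back onto process.device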
BaseProcess._make_dataloader | (self, dataset, training=False, **loader_kwargs) | Any args that make more sense as a convenience function to be set | Any args that make more sense as a convenience function to be set | def _make_dataloader(self, dataset, training=False, **loader_kwargs):
"""Any args that make more sense as a convenience function to be set"""
if isinstance(dataset, DataLoader):
return dataset
return DataLoader(dataset, **self._dataloader_args(dataset, training, **loader_kwargs)) | [
"def",
"_make_dataloader",
"(",
"self",
",",
"dataset",
",",
"training",
"=",
"False",
",",
"*",
"*",
"loader_kwargs",
")",
":",
"if",
"isinstance",
"(",
"dataset",
",",
"DataLoader",
")",
":",
"return",
"dataset",
"return",
"DataLoader",
"(",
"dataset",
",",
"*",
"*",
"self",
".",
"_dataloader_args",
"(",
"dataset",
",",
"training",
",",
"*",
"*",
"loader_kwargs",
")",
")"
] | [
418,
4
] | [
423,
95
] | python | en | ['en', 'en', 'en'] | True |
BaseProcess.fit | (self, training_dataset, epochs=1, validation_dataset=None, step_callback=None,
resume_epoch=None, resume_iteration=None, log_callback=None,
epoch_callback=None, batch_size=8, warmup_frac=0.2, retain_best='loss',
validation_interval=None, train_log_interval=None, **loader_kwargs) |
sklearn/keras-like convenience method to simply proceed with training across multiple epochs of the provided
dataset
Parameters
----------
training_dataset : DN3ataset, DataLoader
validation_dataset : DN3ataset, DataLoader
epochs : int
Total number of epochs to fit
resume_epoch : int
The starting epoch to train from. This will likely only be used to resume training at a certain
point.
resume_iteration : int
Similar to `resume_epoch` but specified in batches. This can either be used alone, or in
conjunction with `resume_epoch`. If used alone, the resume epoch is the floor of
`resume_iteration` divided by batches per epoch. In other words, this specifies cumulative
batches if `resume_epoch` is not specified, and batches relative to the current epoch otherwise.
step_callback : callable
Function to run after every training step that has signature: fn(train_metrics) -> None
log_callback : callable
Function to run after every log interval that has signature: fn(train_metrics) -> None
epoch_callback : callable
Function to run after every epoch that has signature: fn(validation_metrics) -> None
batch_size : int
The batch_size to be used for the training and validation datasets. This is ignored if they are
provided as `DataLoader`.
warmup_frac : float
The fraction of iterations that will be spent *increasing* the learning rate under the default
1cycle policy (with cosine annealing). Value will be automatically clamped to values between [0, 0.5]
retain_best : (str, None)
**If `validation_dataset` is provided**, which model weights to retain. If 'loss' (default), will
retain the model at the epoch with the lowest validation loss. If another string, will assume that
is the metric to monitor for the *highest score*. If None, the final model is used.
validation_interval: int, None
The number of batches between checking the validation dataset
train_log_interval: int, None
The number of batches between persistent logging of training metrics, if None (default) happens
at the end of every epoch.
loader_kwargs :
Any remaining keyword arguments will be passed as such to any DataLoaders that are automatically
constructed. If both training and validation datasets are provided as `DataLoaders`, this will be
ignored.
Notes
-----
If the datasets above are provided as DN3atasets, automatic optimizations are performed to speed up loading.
These include setting the number of workers equal to the number of CPUs/system threads - 1, and pinning memory for
rapid CUDA transfer if leveraging the GPU. Unless you are very comfortable with PyTorch, it's probably better
to not provide your own DataLoader, and let this be done automatically.
Returns
-------
train_log : Dataframe
Metrics after each iteration of training as a pandas dataframe
validation_log : Dataframe
Validation metrics after each epoch of training as a pandas dataframe
|
sklearn/keras-like convenience method to simply proceed with training across multiple epochs of the provided
dataset | def fit(self, training_dataset, epochs=1, validation_dataset=None, step_callback=None,
resume_epoch=None, resume_iteration=None, log_callback=None,
epoch_callback=None, batch_size=8, warmup_frac=0.2, retain_best='loss',
validation_interval=None, train_log_interval=None, **loader_kwargs):
"""
sklearn/keras-like convenience method to simply proceed with training across multiple epochs of the provided
dataset
Parameters
----------
training_dataset : DN3ataset, DataLoader
validation_dataset : DN3ataset, DataLoader
epochs : int
Total number of epochs to fit
resume_epoch : int
The starting epoch to train from. This will likely only be used to resume training at a certain
point.
resume_iteration : int
Similar to `resume_epoch` but specified in batches. This can either be used alone, or in
conjunction with `resume_epoch`. If used alone, the resume epoch is the floor of
`resume_iteration` divided by batches per epoch. In other words, this specifies cumulative
batches if `resume_epoch` is not specified, and batches relative to the current epoch otherwise.
step_callback : callable
Function to run after every training step that has signature: fn(train_metrics) -> None
log_callback : callable
Function to run after every log interval that has signature: fn(train_metrics) -> None
epoch_callback : callable
Function to run after every epoch that has signature: fn(validation_metrics) -> None
batch_size : int
The batch_size to be used for the training and validation datasets. This is ignored if they are
provided as `DataLoader`.
warmup_frac : float
The fraction of iterations that will be spent *increasing* the learning rate under the default
1cycle policy (with cosine annealing). Value will be automatically clamped to values between [0, 0.5]
retain_best : (str, None)
**If `validation_dataset` is provided**, which model weights to retain. If 'loss' (default), will
retain the model at the epoch with the lowest validation loss. If another string, will assume that
is the metric to monitor for the *highest score*. If None, the final model is used.
validation_interval: int, None
The number of batches between checking the validation dataset
train_log_interval: int, None
The number of batches between persistent logging of training metrics, if None (default) happens
at the end of every epoch.
loader_kwargs :
Any remaining keyword arguments will be passed as such to any DataLoaders that are automatically
constructed. If both training and validation datasets are provided as `DataLoaders`, this will be
ignored.
Notes
-----
If the datasets above are provided as DN3atasets, automatic optimizations are performed to speed up loading.
These include setting the number of workers equal to the number of CPUs/system threads - 1, and pinning memory for
rapid CUDA transfer if leveraging the GPU. Unless you are very comfortable with PyTorch, it's probably better
to not provide your own DataLoader, and let this be done automatically.
Returns
-------
train_log : Dataframe
Metrics after each iteration of training as a pandas dataframe
validation_log : Dataframe
Validation metrics after each epoch of training as a pandas dataframe
"""
loader_kwargs.setdefault('batch_size', batch_size)
loader_kwargs = self._optimize_dataloader_kwargs(**loader_kwargs)
training_dataset = self._make_dataloader(training_dataset, training=True, **loader_kwargs)
if resume_epoch is None:
if resume_iteration is None or resume_iteration < len(training_dataset):
resume_epoch = 1
else:
resume_epoch = resume_iteration // len(training_dataset)
resume_iteration = 1 if resume_iteration is None else resume_iteration % len(training_dataset)
_clear_scheduler_after = self.scheduler is None
if _clear_scheduler_after:
last_epoch_workaround = len(training_dataset) * (resume_epoch - 1) + resume_iteration
last_epoch_workaround = -1 if last_epoch_workaround <= 1 else last_epoch_workaround
self.set_scheduler(
torch.optim.lr_scheduler.OneCycleLR(self.optimizer, self.lr, epochs=epochs,
steps_per_epoch=len(training_dataset),
pct_start=warmup_frac,
last_epoch=last_epoch_workaround)
)
validation_log = list()
train_log = list()
self.best_metric = None
best_model = self.save_best()
train_log_interval = len(training_dataset) if train_log_interval is None else train_log_interval
metrics = OrderedDict()
def update_metrics(new_metrics: dict, iterations):
if len(metrics) == 0:
return metrics.update(new_metrics)
else:
for m in new_metrics:
try:
metrics[m] = (metrics[m] * (iterations - 1) + new_metrics[m]) / iterations
except KeyError:
metrics[m] = new_metrics[m]
def print_training_metrics(epoch, iteration=None):
if iteration is not None:
self.standard_logging(metrics, "Training: Epoch {} - Iteration {}".format(epoch, iteration))
else:
self.standard_logging(metrics, "Training: End of Epoch {}".format(epoch))
def _validation(epoch, iteration=None):
_metrics = self.evaluate(validation_dataset, **loader_kwargs)
if iteration is not None:
self.standard_logging(_metrics, "Validation: Epoch {} - Iteration {}".format(epoch, iteration))
else:
self.standard_logging(_metrics, "Validation: End of Epoch {}".format(epoch))
_metrics['epoch'] = epoch
validation_log.append(_metrics)
return _metrics
epoch_bar = tqdm.trange(resume_epoch, epochs + 1, desc="Epoch", unit='epoch', initial=resume_epoch, total=epochs)
for epoch in epoch_bar:
self.epoch = epoch
pbar = tqdm.trange(resume_iteration, len(training_dataset) + 1, desc="Iteration", unit='batches',
initial=resume_iteration, total=len(training_dataset))
data_iterator = iter(training_dataset)
for iteration in pbar:
inputs = self._get_batch(data_iterator)
train_metrics = self.train_step(*inputs)
train_metrics['lr'] = self.optimizer.param_groups[0]['lr']
if 'momentum' in self.optimizer.defaults:
train_metrics['momentum'] = self.optimizer.param_groups[0]['momentum']
update_metrics(train_metrics, iteration+1)
pbar.set_postfix(metrics)
train_metrics['epoch'] = epoch
train_metrics['iteration'] = iteration
train_log.append(train_metrics)
if callable(step_callback):
step_callback(train_metrics)
if iteration % train_log_interval == 0 and pbar.total != iteration:
print_training_metrics(epoch, iteration)
train_metrics['epoch'] = epoch
train_metrics['iteration'] = iteration
if callable(log_callback):
log_callback(metrics)
metrics = OrderedDict()
if isinstance(validation_interval, int) and (iteration % validation_interval == 0)\
and validation_dataset is not None:
_m = _validation(epoch, iteration)
best_model = self._retain_best(best_model, _m, retain_best)
# Make epoch summary
metrics = DataFrame(train_log)
metrics = metrics[metrics['epoch'] == epoch]
metrics = metrics.mean().to_dict()
metrics.pop('iteration', None)
print_training_metrics(epoch)
if validation_dataset is not None:
metrics = _validation(epoch)
best_model = self._retain_best(best_model, metrics, retain_best)
if callable(epoch_callback):
epoch_callback(metrics)
metrics = OrderedDict()
# All future epochs should not start offset in iterations
resume_iteration = 1
if not self.scheduler_after_batch and self.scheduler is not None:
tqdm.tqdm.write(f"Step {self.scheduler.get_last_lr()} {self.scheduler.last_epoch}")
self.scheduler.step()
if _clear_scheduler_after:
self.set_scheduler(None)
self.epoch = None
if retain_best is not None and validation_dataset is not None:
tqdm.tqdm.write("Loading best model...")
self.load_best(best_model)
return DataFrame(train_log), DataFrame(validation_log) | [
"def",
"fit",
"(",
"self",
",",
"training_dataset",
",",
"epochs",
"=",
"1",
",",
"validation_dataset",
"=",
"None",
",",
"step_callback",
"=",
"None",
",",
"resume_epoch",
"=",
"None",
",",
"resume_iteration",
"=",
"None",
",",
"log_callback",
"=",
"None",
",",
"epoch_callback",
"=",
"None",
",",
"batch_size",
"=",
"8",
",",
"warmup_frac",
"=",
"0.2",
",",
"retain_best",
"=",
"'loss'",
",",
"validation_interval",
"=",
"None",
",",
"train_log_interval",
"=",
"None",
",",
"*",
"*",
"loader_kwargs",
")",
":",
"loader_kwargs",
".",
"setdefault",
"(",
"'batch_size'",
",",
"batch_size",
")",
"loader_kwargs",
"=",
"self",
".",
"_optimize_dataloader_kwargs",
"(",
"*",
"*",
"loader_kwargs",
")",
"training_dataset",
"=",
"self",
".",
"_make_dataloader",
"(",
"training_dataset",
",",
"training",
"=",
"True",
",",
"*",
"*",
"loader_kwargs",
")",
"if",
"resume_epoch",
"is",
"None",
":",
"if",
"resume_iteration",
"is",
"None",
"or",
"resume_iteration",
"<",
"len",
"(",
"training_dataset",
")",
":",
"resume_epoch",
"=",
"1",
"else",
":",
"resume_epoch",
"=",
"resume_iteration",
"//",
"len",
"(",
"training_dataset",
")",
"resume_iteration",
"=",
"1",
"if",
"resume_iteration",
"is",
"None",
"else",
"resume_iteration",
"%",
"len",
"(",
"training_dataset",
")",
"_clear_scheduler_after",
"=",
"self",
".",
"scheduler",
"is",
"None",
"if",
"_clear_scheduler_after",
":",
"last_epoch_workaround",
"=",
"len",
"(",
"training_dataset",
")",
"*",
"(",
"resume_epoch",
"-",
"1",
")",
"+",
"resume_iteration",
"last_epoch_workaround",
"=",
"-",
"1",
"if",
"last_epoch_workaround",
"<=",
"1",
"else",
"last_epoch_workaround",
"self",
".",
"set_scheduler",
"(",
"torch",
".",
"optim",
".",
"lr_scheduler",
".",
"OneCycleLR",
"(",
"self",
".",
"optimizer",
",",
"self",
".",
"lr",
",",
"epochs",
"=",
"epochs",
",",
"steps_per_epoch",
"=",
"len",
"(",
"training_dataset",
")",
",",
"pct_start",
"=",
"warmup_frac",
",",
"last_epoch",
"=",
"last_epoch_workaround",
")",
")",
"validation_log",
"=",
"list",
"(",
")",
"train_log",
"=",
"list",
"(",
")",
"self",
".",
"best_metric",
"=",
"None",
"best_model",
"=",
"self",
".",
"save_best",
"(",
")",
"train_log_interval",
"=",
"len",
"(",
"training_dataset",
")",
"if",
"train_log_interval",
"is",
"None",
"else",
"train_log_interval",
"metrics",
"=",
"OrderedDict",
"(",
")",
"def",
"update_metrics",
"(",
"new_metrics",
":",
"dict",
",",
"iterations",
")",
":",
"if",
"len",
"(",
"metrics",
")",
"==",
"0",
":",
"return",
"metrics",
".",
"update",
"(",
"new_metrics",
")",
"else",
":",
"for",
"m",
"in",
"new_metrics",
":",
"try",
":",
"metrics",
"[",
"m",
"]",
"=",
"(",
"metrics",
"[",
"m",
"]",
"*",
"(",
"iterations",
"-",
"1",
")",
"+",
"new_metrics",
"[",
"m",
"]",
")",
"/",
"iterations",
"except",
"KeyError",
":",
"metrics",
"[",
"m",
"]",
"=",
"new_metrics",
"[",
"m",
"]",
"def",
"print_training_metrics",
"(",
"epoch",
",",
"iteration",
"=",
"None",
")",
":",
"if",
"iteration",
"is",
"not",
"None",
":",
"self",
".",
"standard_logging",
"(",
"metrics",
",",
"\"Training: Epoch {} - Iteration {}\"",
".",
"format",
"(",
"epoch",
",",
"iteration",
")",
")",
"else",
":",
"self",
".",
"standard_logging",
"(",
"metrics",
",",
"\"Training: End of Epoch {}\"",
".",
"format",
"(",
"epoch",
")",
")",
"def",
"_validation",
"(",
"epoch",
",",
"iteration",
"=",
"None",
")",
":",
"_metrics",
"=",
"self",
".",
"evaluate",
"(",
"validation_dataset",
",",
"*",
"*",
"loader_kwargs",
")",
"if",
"iteration",
"is",
"not",
"None",
":",
"self",
".",
"standard_logging",
"(",
"_metrics",
",",
"\"Validation: Epoch {} - Iteration {}\"",
".",
"format",
"(",
"epoch",
",",
"iteration",
")",
")",
"else",
":",
"self",
".",
"standard_logging",
"(",
"_metrics",
",",
"\"Validation: End of Epoch {}\"",
".",
"format",
"(",
"epoch",
")",
")",
"_metrics",
"[",
"'epoch'",
"]",
"=",
"epoch",
"validation_log",
".",
"append",
"(",
"_metrics",
")",
"return",
"_metrics",
"epoch_bar",
"=",
"tqdm",
".",
"trange",
"(",
"resume_epoch",
",",
"epochs",
"+",
"1",
",",
"desc",
"=",
"\"Epoch\"",
",",
"unit",
"=",
"'epoch'",
",",
"initial",
"=",
"resume_epoch",
",",
"total",
"=",
"epochs",
")",
"for",
"epoch",
"in",
"epoch_bar",
":",
"self",
".",
"epoch",
"=",
"epoch",
"pbar",
"=",
"tqdm",
".",
"trange",
"(",
"resume_iteration",
",",
"len",
"(",
"training_dataset",
")",
"+",
"1",
",",
"desc",
"=",
"\"Iteration\"",
",",
"unit",
"=",
"'batches'",
",",
"initial",
"=",
"resume_iteration",
",",
"total",
"=",
"len",
"(",
"training_dataset",
")",
")",
"data_iterator",
"=",
"iter",
"(",
"training_dataset",
")",
"for",
"iteration",
"in",
"pbar",
":",
"inputs",
"=",
"self",
".",
"_get_batch",
"(",
"data_iterator",
")",
"train_metrics",
"=",
"self",
".",
"train_step",
"(",
"*",
"inputs",
")",
"train_metrics",
"[",
"'lr'",
"]",
"=",
"self",
".",
"optimizer",
".",
"param_groups",
"[",
"0",
"]",
"[",
"'lr'",
"]",
"if",
"'momentum'",
"in",
"self",
".",
"optimizer",
".",
"defaults",
":",
"train_metrics",
"[",
"'momentum'",
"]",
"=",
"self",
".",
"optimizer",
".",
"param_groups",
"[",
"0",
"]",
"[",
"'momentum'",
"]",
"update_metrics",
"(",
"train_metrics",
",",
"iteration",
"+",
"1",
")",
"pbar",
".",
"set_postfix",
"(",
"metrics",
")",
"train_metrics",
"[",
"'epoch'",
"]",
"=",
"epoch",
"train_metrics",
"[",
"'iteration'",
"]",
"=",
"iteration",
"train_log",
".",
"append",
"(",
"train_metrics",
")",
"if",
"callable",
"(",
"step_callback",
")",
":",
"step_callback",
"(",
"train_metrics",
")",
"if",
"iteration",
"%",
"train_log_interval",
"==",
"0",
"and",
"pbar",
".",
"total",
"!=",
"iteration",
":",
"print_training_metrics",
"(",
"epoch",
",",
"iteration",
")",
"train_metrics",
"[",
"'epoch'",
"]",
"=",
"epoch",
"train_metrics",
"[",
"'iteration'",
"]",
"=",
"iteration",
"if",
"callable",
"(",
"log_callback",
")",
":",
"log_callback",
"(",
"metrics",
")",
"metrics",
"=",
"OrderedDict",
"(",
")",
"if",
"isinstance",
"(",
"validation_interval",
",",
"int",
")",
"and",
"(",
"iteration",
"%",
"validation_interval",
"==",
"0",
")",
"and",
"validation_dataset",
"is",
"not",
"None",
":",
"_m",
"=",
"_validation",
"(",
"epoch",
",",
"iteration",
")",
"best_model",
"=",
"self",
".",
"_retain_best",
"(",
"best_model",
",",
"_m",
",",
"retain_best",
")",
"# Make epoch summary",
"metrics",
"=",
"DataFrame",
"(",
"train_log",
")",
"metrics",
"=",
"metrics",
"[",
"metrics",
"[",
"'epoch'",
"]",
"==",
"epoch",
"]",
"metrics",
"=",
"metrics",
".",
"mean",
"(",
")",
".",
"to_dict",
"(",
")",
"metrics",
".",
"pop",
"(",
"'iteration'",
",",
"None",
")",
"print_training_metrics",
"(",
"epoch",
")",
"if",
"validation_dataset",
"is",
"not",
"None",
":",
"metrics",
"=",
"_validation",
"(",
"epoch",
")",
"best_model",
"=",
"self",
".",
"_retain_best",
"(",
"best_model",
",",
"metrics",
",",
"retain_best",
")",
"if",
"callable",
"(",
"epoch_callback",
")",
":",
"epoch_callback",
"(",
"metrics",
")",
"metrics",
"=",
"OrderedDict",
"(",
")",
"# All future epochs should not start offset in iterations",
"resume_iteration",
"=",
"1",
"if",
"not",
"self",
".",
"scheduler_after_batch",
"and",
"self",
".",
"scheduler",
"is",
"not",
"None",
":",
"tqdm",
".",
"tqdm",
".",
"write",
"(",
"f\"Step {self.scheduler.get_last_lr()} {self.scheduler.last_epoch}\"",
")",
"self",
".",
"scheduler",
".",
"step",
"(",
")",
"if",
"_clear_scheduler_after",
":",
"self",
".",
"set_scheduler",
"(",
"None",
")",
"self",
".",
"epoch",
"=",
"None",
"if",
"retain_best",
"is",
"not",
"None",
"and",
"validation_dataset",
"is",
"not",
"None",
":",
"tqdm",
".",
"tqdm",
".",
"write",
"(",
"\"Loading best model...\"",
")",
"self",
".",
"load_best",
"(",
"best_model",
")",
"return",
"DataFrame",
"(",
"train_log",
")",
",",
"DataFrame",
"(",
"validation_log",
")"
] | [
425,
4
] | [
605,
62
] | python | en | ['en', 'error', 'th'] | False |
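A representative fit call using only arguments documented above; `process`, `train_set` and `val_set` are assumed to exist, and any metric name given to retain_best must match one of the process's tracked metrics:
def log_step(train_metrics):
    pass  # e.g. push the per-step metrics to an experiment tracker
train_log, val_log = process.fit(
    train_set,
    epochs=20,
    validation_dataset=val_set,
    batch_size=64,
    warmup_frac=0.1,
    retain_best='loss',
    train_log_interval=100,
    step_callback=log_step,
)
print(val_log.tail())  # both returned logs are pandas DataFrames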
StandardClassification.fit | (self, training_dataset, epochs=1, validation_dataset=None, step_callback=None, epoch_callback=None,
batch_size=8, warmup_frac=0.2, retain_best='loss', balance_method=None, **loader_kwargs) |
sklearn/keras-like convenience method to simply proceed with training across multiple epochs of the provided
dataset
Parameters
----------
training_dataset : DN3ataset, DataLoader
validation_dataset : DN3ataset, DataLoader
epochs : int
step_callback : callable
Function to run after every training step that has signature: fn(train_metrics) -> None
epoch_callback : callable
Function to run after every epoch that has signature: fn(validation_metrics) -> None
batch_size : int
The batch_size to be used for the training and validation datasets. This is ignored if they are
provided as `DataLoader`.
warmup_frac : float
The fraction of iterations that will be spent *increasing* the learning rate under the default
1cycle policy (with cosine annealing). Value will be automatically clamped to values between [0, 0.5]
retain_best : (str, None)
**If `validation_dataset` is provided**, which model weights to retain. If 'loss' (default), will
retain the model at the epoch with the lowest validation loss. If another string, will assume that
is the metric to monitor for the *highest score*. If None, the final model is used.
balance_method : (None, str)
If and how to balance training samples when training. `None` (default) will simply randomly
sample all training samples equally. 'undersample' will sample each class N_min times
where N_min is equal to the number of examples in the minority class. 'oversample' will sample
each class N_max times, where N_max is the number of examples in the majority class.
loader_kwargs :
Any remaining keyword arguments will be passed as such to any DataLoaders that are automatically
constructed. If both training and validation datasets are provided as `DataLoaders`, this will be
ignored.
Notes
-----
If the datasets above are provided as DN3atasets, automatic optimizations are performed to speed up loading.
These include setting the number of workers equal to the number of CPUs/system threads - 1, and pinning memory for
rapid CUDA transfer if leveraging the GPU. Unless you are very comfortable with PyTorch, it's probably better
to not provide your own DataLoader, and let this be done automatically.
Returns
-------
train_log : Dataframe
Metrics after each iteration of training as a pandas dataframe
validation_log : Dataframe
Validation metrics after each epoch of training as a pandas dataframe
|
sklearn/keras-like convenience method to simply proceed with training across multiple epochs of the provided
dataset | def fit(self, training_dataset, epochs=1, validation_dataset=None, step_callback=None, epoch_callback=None,
batch_size=8, warmup_frac=0.2, retain_best='loss', balance_method=None, **loader_kwargs):
"""
sklearn/keras-like convenience method to simply proceed with training across multiple epochs of the provided
dataset
Parameters
----------
training_dataset : DN3ataset, DataLoader
validation_dataset : DN3ataset, DataLoader
epochs : int
step_callback : callable
Function to run after every training step that has signature: fn(train_metrics) -> None
epoch_callback : callable
Function to run after every epoch that has signature: fn(validation_metrics) -> None
batch_size : int
The batch_size to be used for the training and validation datasets. This is ignored if they are
provided as `DataLoader`.
warmup_frac : float
The fraction of iterations that will be spent *increasing* the learning rate under the default
1cycle policy (with cosine annealing). Value will be automatically clamped to values between [0, 0.5]
retain_best : (str, None)
**If `validation_dataset` is provided**, which model weights to retain. If 'loss' (default), will
retain the model at the epoch with the lowest validation loss. If another string, will assume that
is the metric to monitor for the *highest score*. If None, the final model is used.
balance_method : (None, str)
If and how to balance training samples when training. `None` (default) will simply randomly
sample all training samples equally. 'undersample' will sample each class N_min times
where N_min is equal to the number of examples in the minority class. 'oversample' will sample
each class N_max times, where N_max is the number of examples in the majority class.
loader_kwargs :
Any remaining keyword arguments will be passed as such to any DataLoaders that are automatically
constructed. If both training and validation datasets are provided as `DataLoaders`, this will be
ignored.
Notes
-----
If the datasets above are provided as DN3atasets, automatic optimizations are performed to speed up loading.
These include setting the number of workers equal to the number of CPUs/system threads - 1, and pinning memory for
rapid CUDA transfer if leveraging the GPU. Unless you are very comfortable with PyTorch, it's probably better
to not provide your own DataLoader, and let this be done automatically.
Returns
-------
train_log : Dataframe
Metrics after each iteration of training as a pandas dataframe
validation_log : Dataframe
Validation metrics after each epoch of training as a pandas dataframe
"""
return super(StandardClassification, self).fit(training_dataset, epochs=epochs, step_callback=step_callback,
epoch_callback=epoch_callback, batch_size=batch_size,
warmup_frac=warmup_frac, retain_best=retain_best,
validation_dataset=validation_dataset,
balance_method=balance_method,
**loader_kwargs) | [
"def",
"fit",
"(",
"self",
",",
"training_dataset",
",",
"epochs",
"=",
"1",
",",
"validation_dataset",
"=",
"None",
",",
"step_callback",
"=",
"None",
",",
"epoch_callback",
"=",
"None",
",",
"batch_size",
"=",
"8",
",",
"warmup_frac",
"=",
"0.2",
",",
"retain_best",
"=",
"'loss'",
",",
"balance_method",
"=",
"None",
",",
"*",
"*",
"loader_kwargs",
")",
":",
"return",
"super",
"(",
"StandardClassification",
",",
"self",
")",
".",
"fit",
"(",
"training_dataset",
",",
"epochs",
"=",
"epochs",
",",
"step_callback",
"=",
"step_callback",
",",
"epoch_callback",
"=",
"epoch_callback",
",",
"batch_size",
"=",
"batch_size",
",",
"warmup_frac",
"=",
"warmup_frac",
",",
"retain_best",
"=",
"retain_best",
",",
"validation_dataset",
"=",
"validation_dataset",
",",
"balance_method",
"=",
"balance_method",
",",
"*",
"*",
"loader_kwargs",
")"
] | [
655,
4
] | [
709,
71
] | python | en | ['en', 'error', 'th'] | False |
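The StandardClassification variant adds balance_method on top of the same interface; a short hypothetical call (object and dataset names invented):
train_log, val_log = classification_process.fit(
    train_set,
    validation_dataset=val_set,
    epochs=10,
    batch_size=32,
    balance_method='undersample',  # or 'oversample', or None for plain random sampling
)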
_conf_preprocess | (value) | Preprocess alternative configuration formats. | Preprocess alternative configuration formats. | def _conf_preprocess(value):
"""Preprocess alternative configuration formats."""
if not isinstance(value, dict):
value = {CONF_ENTITIES: value}
return value | [
"def",
"_conf_preprocess",
"(",
"value",
")",
":",
"if",
"not",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"value",
"=",
"{",
"CONF_ENTITIES",
":",
"value",
"}",
"return",
"value"
] | [
65,
0
] | [
70,
16
] | python | en | ['en', 'it', 'en'] | True |
is_on | (hass, entity_id) | Test if the group state is in its ON-state. | Test if the group state is in its ON-state. | def is_on(hass, entity_id):
"""Test if the group state is in its ON-state."""
if REG_KEY not in hass.data:
# Integration not setup yet, it cannot be on
return False
state = hass.states.get(entity_id)
if state is not None:
return state.state in hass.data[REG_KEY].on_off_mapping
return False | [
"def",
"is_on",
"(",
"hass",
",",
"entity_id",
")",
":",
"if",
"REG_KEY",
"not",
"in",
"hass",
".",
"data",
":",
"# Integration not setup yet, it cannot be on",
"return",
"False",
"state",
"=",
"hass",
".",
"states",
".",
"get",
"(",
"entity_id",
")",
"if",
"state",
"is",
"not",
"None",
":",
"return",
"state",
".",
"state",
"in",
"hass",
".",
"data",
"[",
"REG_KEY",
"]",
".",
"on_off_mapping",
"return",
"False"
] | [
115,
0
] | [
126,
16
] | python | en | ['en', 'en', 'en'] | True |
expand_entity_ids | (hass: HomeAssistantType, entity_ids: Iterable[Any]) | Return entity_ids with group entity ids replaced by their members.
Async friendly.
| Return entity_ids with group entity ids replaced by their members. | def expand_entity_ids(hass: HomeAssistantType, entity_ids: Iterable[Any]) -> List[str]:
"""Return entity_ids with group entity ids replaced by their members.
Async friendly.
"""
found_ids: List[str] = []
for entity_id in entity_ids:
if not isinstance(entity_id, str) or entity_id in (
ENTITY_MATCH_NONE,
ENTITY_MATCH_ALL,
):
continue
entity_id = entity_id.lower()
try:
# If entity_id points at a group, expand it
domain, _ = ha.split_entity_id(entity_id)
if domain == DOMAIN:
child_entities = get_entity_ids(hass, entity_id)
if entity_id in child_entities:
child_entities = list(child_entities)
child_entities.remove(entity_id)
found_ids.extend(
ent_id
for ent_id in expand_entity_ids(hass, child_entities)
if ent_id not in found_ids
)
else:
if entity_id not in found_ids:
found_ids.append(entity_id)
except AttributeError:
# Raised by split_entity_id if entity_id is not a string
pass
return found_ids | [
"def",
"expand_entity_ids",
"(",
"hass",
":",
"HomeAssistantType",
",",
"entity_ids",
":",
"Iterable",
"[",
"Any",
"]",
")",
"->",
"List",
"[",
"str",
"]",
":",
"found_ids",
":",
"List",
"[",
"str",
"]",
"=",
"[",
"]",
"for",
"entity_id",
"in",
"entity_ids",
":",
"if",
"not",
"isinstance",
"(",
"entity_id",
",",
"str",
")",
"or",
"entity_id",
"in",
"(",
"ENTITY_MATCH_NONE",
",",
"ENTITY_MATCH_ALL",
",",
")",
":",
"continue",
"entity_id",
"=",
"entity_id",
".",
"lower",
"(",
")",
"try",
":",
"# If entity_id points at a group, expand it",
"domain",
",",
"_",
"=",
"ha",
".",
"split_entity_id",
"(",
"entity_id",
")",
"if",
"domain",
"==",
"DOMAIN",
":",
"child_entities",
"=",
"get_entity_ids",
"(",
"hass",
",",
"entity_id",
")",
"if",
"entity_id",
"in",
"child_entities",
":",
"child_entities",
"=",
"list",
"(",
"child_entities",
")",
"child_entities",
".",
"remove",
"(",
"entity_id",
")",
"found_ids",
".",
"extend",
"(",
"ent_id",
"for",
"ent_id",
"in",
"expand_entity_ids",
"(",
"hass",
",",
"child_entities",
")",
"if",
"ent_id",
"not",
"in",
"found_ids",
")",
"else",
":",
"if",
"entity_id",
"not",
"in",
"found_ids",
":",
"found_ids",
".",
"append",
"(",
"entity_id",
")",
"except",
"AttributeError",
":",
"# Raised by split_entity_id if entity_id is not a string",
"pass",
"return",
"found_ids"
] | [
130,
0
] | [
168,
20
] | python | en | ['en', 'en', 'en'] | True |
get_entity_ids | (
hass: HomeAssistantType, entity_id: str, domain_filter: Optional[str] = None
) | Get members of this group.
Async friendly.
| Get members of this group. | def get_entity_ids(
hass: HomeAssistantType, entity_id: str, domain_filter: Optional[str] = None
) -> List[str]:
"""Get members of this group.
Async friendly.
"""
group = hass.states.get(entity_id)
if not group or ATTR_ENTITY_ID not in group.attributes:
return []
entity_ids = group.attributes[ATTR_ENTITY_ID]
if not domain_filter:
return cast(List[str], entity_ids)
domain_filter = f"{domain_filter.lower()}."
return [ent_id for ent_id in entity_ids if ent_id.startswith(domain_filter)] | [
"def",
"get_entity_ids",
"(",
"hass",
":",
"HomeAssistantType",
",",
"entity_id",
":",
"str",
",",
"domain_filter",
":",
"Optional",
"[",
"str",
"]",
"=",
"None",
")",
"->",
"List",
"[",
"str",
"]",
":",
"group",
"=",
"hass",
".",
"states",
".",
"get",
"(",
"entity_id",
")",
"if",
"not",
"group",
"or",
"ATTR_ENTITY_ID",
"not",
"in",
"group",
".",
"attributes",
":",
"return",
"[",
"]",
"entity_ids",
"=",
"group",
".",
"attributes",
"[",
"ATTR_ENTITY_ID",
"]",
"if",
"not",
"domain_filter",
":",
"return",
"cast",
"(",
"List",
"[",
"str",
"]",
",",
"entity_ids",
")",
"domain_filter",
"=",
"f\"{domain_filter.lower()}.\"",
"return",
"[",
"ent_id",
"for",
"ent_id",
"in",
"entity_ids",
"if",
"ent_id",
".",
"startswith",
"(",
"domain_filter",
")",
"]"
] | [
172,
0
] | [
190,
80
] | python | en | ['en', 'en', 'en'] | True |
groups_with_entity | (hass: HomeAssistantType, entity_id: str) | Get all groups that contain this entity.
Async friendly.
| Get all groups that contain this entity. | def groups_with_entity(hass: HomeAssistantType, entity_id: str) -> List[str]:
"""Get all groups that contain this entity.
Async friendly.
"""
if DOMAIN not in hass.data:
return []
groups = []
for group in hass.data[DOMAIN].entities:
if entity_id in group.tracking:
groups.append(group.entity_id)
return groups | [
"def",
"groups_with_entity",
"(",
"hass",
":",
"HomeAssistantType",
",",
"entity_id",
":",
"str",
")",
"->",
"List",
"[",
"str",
"]",
":",
"if",
"DOMAIN",
"not",
"in",
"hass",
".",
"data",
":",
"return",
"[",
"]",
"groups",
"=",
"[",
"]",
"for",
"group",
"in",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
".",
"entities",
":",
"if",
"entity_id",
"in",
"group",
".",
"tracking",
":",
"groups",
".",
"append",
"(",
"group",
".",
"entity_id",
")",
"return",
"groups"
] | [
194,
0
] | [
208,
17
] | python | en | ['en', 'en', 'en'] | True |
async_setup | (hass, config) | Set up all groups found defined in the configuration. | Set up all groups found defined in the configuration. | async def async_setup(hass, config):
"""Set up all groups found defined in the configuration."""
component = hass.data.get(DOMAIN)
if component is None:
component = hass.data[DOMAIN] = EntityComponent(_LOGGER, DOMAIN, hass)
hass.data[REG_KEY] = GroupIntegrationRegistry()
await async_process_integration_platforms(hass, DOMAIN, _process_group_platform)
await _async_process_config(hass, config, component)
async def reload_service_handler(service):
"""Remove all user-defined groups and load new ones from config."""
auto = list(filter(lambda e: not e.user_defined, component.entities))
conf = await component.async_prepare_reload()
if conf is None:
return
await _async_process_config(hass, conf, component)
await component.async_add_entities(auto)
await async_reload_integration_platforms(hass, DOMAIN, PLATFORMS)
hass.services.async_register(
DOMAIN, SERVICE_RELOAD, reload_service_handler, schema=vol.Schema({})
)
service_lock = asyncio.Lock()
async def locked_service_handler(service):
"""Handle a service with an async lock."""
async with service_lock:
await groups_service_handler(service)
async def groups_service_handler(service):
"""Handle dynamic group service functions."""
object_id = service.data[ATTR_OBJECT_ID]
entity_id = f"{DOMAIN}.{object_id}"
group = component.get_entity(entity_id)
# new group
if service.service == SERVICE_SET and group is None:
entity_ids = (
service.data.get(ATTR_ENTITIES)
or service.data.get(ATTR_ADD_ENTITIES)
or None
)
extra_arg = {
attr: service.data[attr]
for attr in (ATTR_ICON,)
if service.data.get(attr) is not None
}
await Group.async_create_group(
hass,
service.data.get(ATTR_NAME, object_id),
object_id=object_id,
entity_ids=entity_ids,
user_defined=False,
mode=service.data.get(ATTR_ALL),
**extra_arg,
)
return
if group is None:
_LOGGER.warning("%s:Group '%s' doesn't exist!", service.service, object_id)
return
# update group
if service.service == SERVICE_SET:
need_update = False
if ATTR_ADD_ENTITIES in service.data:
delta = service.data[ATTR_ADD_ENTITIES]
entity_ids = set(group.tracking) | set(delta)
await group.async_update_tracked_entity_ids(entity_ids)
if ATTR_ENTITIES in service.data:
entity_ids = service.data[ATTR_ENTITIES]
await group.async_update_tracked_entity_ids(entity_ids)
if ATTR_NAME in service.data:
group.name = service.data[ATTR_NAME]
need_update = True
if ATTR_ICON in service.data:
group.icon = service.data[ATTR_ICON]
need_update = True
if ATTR_ALL in service.data:
group.mode = all if service.data[ATTR_ALL] else any
need_update = True
if need_update:
group.async_write_ha_state()
return
# remove group
if service.service == SERVICE_REMOVE:
await component.async_remove_entity(entity_id)
hass.services.async_register(
DOMAIN,
SERVICE_SET,
locked_service_handler,
schema=vol.All(
vol.Schema(
{
vol.Required(ATTR_OBJECT_ID): cv.slug,
vol.Optional(ATTR_NAME): cv.string,
vol.Optional(ATTR_ICON): cv.string,
vol.Optional(ATTR_ALL): cv.boolean,
vol.Exclusive(ATTR_ENTITIES, "entities"): cv.entity_ids,
vol.Exclusive(ATTR_ADD_ENTITIES, "entities"): cv.entity_ids,
}
)
),
)
hass.services.async_register(
DOMAIN,
SERVICE_REMOVE,
groups_service_handler,
schema=vol.Schema({vol.Required(ATTR_OBJECT_ID): cv.slug}),
)
return True | [
"async",
"def",
"async_setup",
"(",
"hass",
",",
"config",
")",
":",
"component",
"=",
"hass",
".",
"data",
".",
"get",
"(",
"DOMAIN",
")",
"if",
"component",
"is",
"None",
":",
"component",
"=",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
"=",
"EntityComponent",
"(",
"_LOGGER",
",",
"DOMAIN",
",",
"hass",
")",
"hass",
".",
"data",
"[",
"REG_KEY",
"]",
"=",
"GroupIntegrationRegistry",
"(",
")",
"await",
"async_process_integration_platforms",
"(",
"hass",
",",
"DOMAIN",
",",
"_process_group_platform",
")",
"await",
"_async_process_config",
"(",
"hass",
",",
"config",
",",
"component",
")",
"async",
"def",
"reload_service_handler",
"(",
"service",
")",
":",
"\"\"\"Remove all user-defined groups and load new ones from config.\"\"\"",
"auto",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"e",
":",
"not",
"e",
".",
"user_defined",
",",
"component",
".",
"entities",
")",
")",
"conf",
"=",
"await",
"component",
".",
"async_prepare_reload",
"(",
")",
"if",
"conf",
"is",
"None",
":",
"return",
"await",
"_async_process_config",
"(",
"hass",
",",
"conf",
",",
"component",
")",
"await",
"component",
".",
"async_add_entities",
"(",
"auto",
")",
"await",
"async_reload_integration_platforms",
"(",
"hass",
",",
"DOMAIN",
",",
"PLATFORMS",
")",
"hass",
".",
"services",
".",
"async_register",
"(",
"DOMAIN",
",",
"SERVICE_RELOAD",
",",
"reload_service_handler",
",",
"schema",
"=",
"vol",
".",
"Schema",
"(",
"{",
"}",
")",
")",
"service_lock",
"=",
"asyncio",
".",
"Lock",
"(",
")",
"async",
"def",
"locked_service_handler",
"(",
"service",
")",
":",
"\"\"\"Handle a service with an async lock.\"\"\"",
"async",
"with",
"service_lock",
":",
"await",
"groups_service_handler",
"(",
"service",
")",
"async",
"def",
"groups_service_handler",
"(",
"service",
")",
":",
"\"\"\"Handle dynamic group service functions.\"\"\"",
"object_id",
"=",
"service",
".",
"data",
"[",
"ATTR_OBJECT_ID",
"]",
"entity_id",
"=",
"f\"{DOMAIN}.{object_id}\"",
"group",
"=",
"component",
".",
"get_entity",
"(",
"entity_id",
")",
"# new group",
"if",
"service",
".",
"service",
"==",
"SERVICE_SET",
"and",
"group",
"is",
"None",
":",
"entity_ids",
"=",
"(",
"service",
".",
"data",
".",
"get",
"(",
"ATTR_ENTITIES",
")",
"or",
"service",
".",
"data",
".",
"get",
"(",
"ATTR_ADD_ENTITIES",
")",
"or",
"None",
")",
"extra_arg",
"=",
"{",
"attr",
":",
"service",
".",
"data",
"[",
"attr",
"]",
"for",
"attr",
"in",
"(",
"ATTR_ICON",
",",
")",
"if",
"service",
".",
"data",
".",
"get",
"(",
"attr",
")",
"is",
"not",
"None",
"}",
"await",
"Group",
".",
"async_create_group",
"(",
"hass",
",",
"service",
".",
"data",
".",
"get",
"(",
"ATTR_NAME",
",",
"object_id",
")",
",",
"object_id",
"=",
"object_id",
",",
"entity_ids",
"=",
"entity_ids",
",",
"user_defined",
"=",
"False",
",",
"mode",
"=",
"service",
".",
"data",
".",
"get",
"(",
"ATTR_ALL",
")",
",",
"*",
"*",
"extra_arg",
",",
")",
"return",
"if",
"group",
"is",
"None",
":",
"_LOGGER",
".",
"warning",
"(",
"\"%s:Group '%s' doesn't exist!\"",
",",
"service",
".",
"service",
",",
"object_id",
")",
"return",
"# update group",
"if",
"service",
".",
"service",
"==",
"SERVICE_SET",
":",
"need_update",
"=",
"False",
"if",
"ATTR_ADD_ENTITIES",
"in",
"service",
".",
"data",
":",
"delta",
"=",
"service",
".",
"data",
"[",
"ATTR_ADD_ENTITIES",
"]",
"entity_ids",
"=",
"set",
"(",
"group",
".",
"tracking",
")",
"|",
"set",
"(",
"delta",
")",
"await",
"group",
".",
"async_update_tracked_entity_ids",
"(",
"entity_ids",
")",
"if",
"ATTR_ENTITIES",
"in",
"service",
".",
"data",
":",
"entity_ids",
"=",
"service",
".",
"data",
"[",
"ATTR_ENTITIES",
"]",
"await",
"group",
".",
"async_update_tracked_entity_ids",
"(",
"entity_ids",
")",
"if",
"ATTR_NAME",
"in",
"service",
".",
"data",
":",
"group",
".",
"name",
"=",
"service",
".",
"data",
"[",
"ATTR_NAME",
"]",
"need_update",
"=",
"True",
"if",
"ATTR_ICON",
"in",
"service",
".",
"data",
":",
"group",
".",
"icon",
"=",
"service",
".",
"data",
"[",
"ATTR_ICON",
"]",
"need_update",
"=",
"True",
"if",
"ATTR_ALL",
"in",
"service",
".",
"data",
":",
"group",
".",
"mode",
"=",
"all",
"if",
"service",
".",
"data",
"[",
"ATTR_ALL",
"]",
"else",
"any",
"need_update",
"=",
"True",
"if",
"need_update",
":",
"group",
".",
"async_write_ha_state",
"(",
")",
"return",
"# remove group",
"if",
"service",
".",
"service",
"==",
"SERVICE_REMOVE",
":",
"await",
"component",
".",
"async_remove_entity",
"(",
"entity_id",
")",
"hass",
".",
"services",
".",
"async_register",
"(",
"DOMAIN",
",",
"SERVICE_SET",
",",
"locked_service_handler",
",",
"schema",
"=",
"vol",
".",
"All",
"(",
"vol",
".",
"Schema",
"(",
"{",
"vol",
".",
"Required",
"(",
"ATTR_OBJECT_ID",
")",
":",
"cv",
".",
"slug",
",",
"vol",
".",
"Optional",
"(",
"ATTR_NAME",
")",
":",
"cv",
".",
"string",
",",
"vol",
".",
"Optional",
"(",
"ATTR_ICON",
")",
":",
"cv",
".",
"string",
",",
"vol",
".",
"Optional",
"(",
"ATTR_ALL",
")",
":",
"cv",
".",
"boolean",
",",
"vol",
".",
"Exclusive",
"(",
"ATTR_ENTITIES",
",",
"\"entities\"",
")",
":",
"cv",
".",
"entity_ids",
",",
"vol",
".",
"Exclusive",
"(",
"ATTR_ADD_ENTITIES",
",",
"\"entities\"",
")",
":",
"cv",
".",
"entity_ids",
",",
"}",
")",
")",
",",
")",
"hass",
".",
"services",
".",
"async_register",
"(",
"DOMAIN",
",",
"SERVICE_REMOVE",
",",
"groups_service_handler",
",",
"schema",
"=",
"vol",
".",
"Schema",
"(",
"{",
"vol",
".",
"Required",
"(",
"ATTR_OBJECT_ID",
")",
":",
"cv",
".",
"slug",
"}",
")",
",",
")",
"return",
"True"
] | [
211,
0
] | [
342,
15
] | python | en | ['en', 'en', 'en'] | True |
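The group.set and group.remove handlers registered above can be exercised with ordinary service calls; a hypothetical example from inside an async context with a running Home Assistant instance (`hass`), using invented entity ids — the payload keys are assumed to be the lowercase names of the ATTR_* constants in the schema:
await hass.services.async_call(
    "group", "set",
    {
        "object_id": "kitchen_lights",
        "name": "Kitchen Lights",
        "entities": ["light.kitchen_ceiling", "light.kitchen_counter"],
        "all": True,  # report 'on' only when every member is on
    },
    blocking=True,
)
await hass.services.async_call("group", "remove", {"object_id": "kitchen_lights"}, blocking=True)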
_process_group_platform | (hass, domain, platform) | Process a group platform. | Process a group platform. | async def _process_group_platform(hass, domain, platform):
"""Process a group platform."""
current_domain.set(domain)
platform.async_describe_on_off_states(hass, hass.data[REG_KEY]) | [
"async",
"def",
"_process_group_platform",
"(",
"hass",
",",
"domain",
",",
"platform",
")",
":",
"current_domain",
".",
"set",
"(",
"domain",
")",
"platform",
".",
"async_describe_on_off_states",
"(",
"hass",
",",
"hass",
".",
"data",
"[",
"REG_KEY",
"]",
")"
] | [
345,
0
] | [
349,
67
] | python | en | ['en', 'lv', 'en'] | True |
_async_process_config | (hass, config, component) | Process group configuration. | Process group configuration. | async def _async_process_config(hass, config, component):
"""Process group configuration."""
hass.data.setdefault(GROUP_ORDER, 0)
tasks = []
for object_id, conf in config.get(DOMAIN, {}).items():
name = conf.get(CONF_NAME, object_id)
entity_ids = conf.get(CONF_ENTITIES) or []
icon = conf.get(CONF_ICON)
mode = conf.get(CONF_ALL)
# We keep track of the order when we are creating the tasks
# in the same way that async_create_group does to make
# sure we use the same ordering system. This overcomes
# the problem with concurrently creating the groups
tasks.append(
Group.async_create_group(
hass,
name,
entity_ids,
icon=icon,
object_id=object_id,
mode=mode,
order=hass.data[GROUP_ORDER],
)
)
# Keep track of the group order without iterating
# every state in the state machine every time
# we setup a new group
hass.data[GROUP_ORDER] += 1
await asyncio.gather(*tasks) | [
"async",
"def",
"_async_process_config",
"(",
"hass",
",",
"config",
",",
"component",
")",
":",
"hass",
".",
"data",
".",
"setdefault",
"(",
"GROUP_ORDER",
",",
"0",
")",
"tasks",
"=",
"[",
"]",
"for",
"object_id",
",",
"conf",
"in",
"config",
".",
"get",
"(",
"DOMAIN",
",",
"{",
"}",
")",
".",
"items",
"(",
")",
":",
"name",
"=",
"conf",
".",
"get",
"(",
"CONF_NAME",
",",
"object_id",
")",
"entity_ids",
"=",
"conf",
".",
"get",
"(",
"CONF_ENTITIES",
")",
"or",
"[",
"]",
"icon",
"=",
"conf",
".",
"get",
"(",
"CONF_ICON",
")",
"mode",
"=",
"conf",
".",
"get",
"(",
"CONF_ALL",
")",
"# We keep track of the order when we are creating the tasks",
"# in the same way that async_create_group does to make",
"# sure we use the same ordering system. This overcomes",
"# the problem with concurrently creating the groups",
"tasks",
".",
"append",
"(",
"Group",
".",
"async_create_group",
"(",
"hass",
",",
"name",
",",
"entity_ids",
",",
"icon",
"=",
"icon",
",",
"object_id",
"=",
"object_id",
",",
"mode",
"=",
"mode",
",",
"order",
"=",
"hass",
".",
"data",
"[",
"GROUP_ORDER",
"]",
",",
")",
")",
"# Keep track of the group order without iterating",
"# every state in the state machine every time",
"# we setup a new group",
"hass",
".",
"data",
"[",
"GROUP_ORDER",
"]",
"+=",
"1",
"await",
"asyncio",
".",
"gather",
"(",
"*",
"tasks",
")"
] | [
352,
0
] | [
385,
32
] | python | en | ['en', 'fr', 'en'] | True |
GroupIntegrationRegistry.exclude_domain | (self) | Exclude the current domain. | Exclude the current domain. | def exclude_domain(self) -> None:
"""Exclude the current domain."""
self.exclude_domains.add(current_domain.get()) | [
"def",
"exclude_domain",
"(",
"self",
")",
"->",
"None",
":",
"self",
".",
"exclude_domains",
".",
"add",
"(",
"current_domain",
".",
"get",
"(",
")",
")"
] | [
98,
4
] | [
100,
54
] | python | en | ['en', 'en', 'en'] | True |
GroupIntegrationRegistry.on_off_states | (self, on_states: Set, off_state: str) | Register on and off states for the current domain. | Register on and off states for the current domain. | def on_off_states(self, on_states: Set, off_state: str) -> None:
"""Register on and off states for the current domain."""
for on_state in on_states:
if on_state not in self.on_off_mapping:
self.on_off_mapping[on_state] = off_state
if len(on_states) == 1 and off_state not in self.off_on_mapping:
self.off_on_mapping[off_state] = list(on_states)[0]
self.on_states_by_domain[current_domain.get()] = set(on_states) | [
"def",
"on_off_states",
"(",
"self",
",",
"on_states",
":",
"Set",
",",
"off_state",
":",
"str",
")",
"->",
"None",
":",
"for",
"on_state",
"in",
"on_states",
":",
"if",
"on_state",
"not",
"in",
"self",
".",
"on_off_mapping",
":",
"self",
".",
"on_off_mapping",
"[",
"on_state",
"]",
"=",
"off_state",
"if",
"len",
"(",
"on_states",
")",
"==",
"1",
"and",
"off_state",
"not",
"in",
"self",
".",
"off_on_mapping",
":",
"self",
".",
"off_on_mapping",
"[",
"off_state",
"]",
"=",
"list",
"(",
"on_states",
")",
"[",
"0",
"]",
"self",
".",
"on_states_by_domain",
"[",
"current_domain",
".",
"get",
"(",
")",
"]",
"=",
"set",
"(",
"on_states",
")"
] | [
102,
4
] | [
111,
71
] | python | en | ['en', 'en', 'en'] | True |
GroupEntity.should_poll | (self) | Disable polling for group. | Disable polling for group. | def should_poll(self) -> bool:
"""Disable polling for group."""
return False | [
"def",
"should_poll",
"(",
"self",
")",
"->",
"bool",
":",
"return",
"False"
] | [
392,
4
] | [
394,
20
] | python | en | ['en', 'en', 'en'] | True |
GroupEntity.async_added_to_hass | (self) | Register listeners. | Register listeners. | async def async_added_to_hass(self) -> None:
"""Register listeners."""
assert self.hass is not None
async def _update_at_start(_):
await self.async_update()
self.async_write_ha_state()
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _update_at_start) | [
"async",
"def",
"async_added_to_hass",
"(",
"self",
")",
"->",
"None",
":",
"assert",
"self",
".",
"hass",
"is",
"not",
"None",
"async",
"def",
"_update_at_start",
"(",
"_",
")",
":",
"await",
"self",
".",
"async_update",
"(",
")",
"self",
".",
"async_write_ha_state",
"(",
")",
"self",
".",
"hass",
".",
"bus",
".",
"async_listen_once",
"(",
"EVENT_HOMEASSISTANT_START",
",",
"_update_at_start",
")"
] | [
396,
4
] | [
404,
84
] | python | en | ['fr', 'no', 'en'] | False |
GroupEntity.async_defer_or_update_ha_state | (self) | Only update once at start. | Only update once at start. | async def async_defer_or_update_ha_state(self) -> None:
"""Only update once at start."""
assert self.hass is not None
if self.hass.state != CoreState.running:
return
await self.async_update()
self.async_write_ha_state() | [
"async",
"def",
"async_defer_or_update_ha_state",
"(",
"self",
")",
"->",
"None",
":",
"assert",
"self",
".",
"hass",
"is",
"not",
"None",
"if",
"self",
".",
"hass",
".",
"state",
"!=",
"CoreState",
".",
"running",
":",
"return",
"await",
"self",
".",
"async_update",
"(",
")",
"self",
".",
"async_write_ha_state",
"(",
")"
] | [
406,
4
] | [
414,
35
] | python | en | ['en', 'en', 'en'] | True |
GroupEntity.async_update | (self) | Abstract method to update the entity. | Abstract method to update the entity. | async def async_update(self) -> None:
"""Abstract method to update the entity.""" | [
"async",
"def",
"async_update",
"(",
"self",
")",
"->",
"None",
":"
] | [
417,
4
] | [
418,
51
] | python | en | ['en', 'en', 'en'] | True |
Group.__init__ | (
self,
hass,
name,
order=None,
icon=None,
user_defined=True,
entity_ids=None,
mode=None,
) | Initialize a group.
This object has a factory function for creation.
| Initialize a group. | def __init__(
self,
hass,
name,
order=None,
icon=None,
user_defined=True,
entity_ids=None,
mode=None,
):
"""Initialize a group.
This object has a factory function for creation.
"""
self.hass = hass
self._name = name
self._state = None
self._icon = icon
self._set_tracked(entity_ids)
self._on_off = None
self._assumed = None
self._on_states = None
self.user_defined = user_defined
self.mode = any
if mode:
self.mode = all
self._order = order
self._assumed_state = False
self._async_unsub_state_changed = None | [
"def",
"__init__",
"(",
"self",
",",
"hass",
",",
"name",
",",
"order",
"=",
"None",
",",
"icon",
"=",
"None",
",",
"user_defined",
"=",
"True",
",",
"entity_ids",
"=",
"None",
",",
"mode",
"=",
"None",
",",
")",
":",
"self",
".",
"hass",
"=",
"hass",
"self",
".",
"_name",
"=",
"name",
"self",
".",
"_state",
"=",
"None",
"self",
".",
"_icon",
"=",
"icon",
"self",
".",
"_set_tracked",
"(",
"entity_ids",
")",
"self",
".",
"_on_off",
"=",
"None",
"self",
".",
"_assumed",
"=",
"None",
"self",
".",
"_on_states",
"=",
"None",
"self",
".",
"user_defined",
"=",
"user_defined",
"self",
".",
"mode",
"=",
"any",
"if",
"mode",
":",
"self",
".",
"mode",
"=",
"all",
"self",
".",
"_order",
"=",
"order",
"self",
".",
"_assumed_state",
"=",
"False",
"self",
".",
"_async_unsub_state_changed",
"=",
"None"
] | [
424,
4
] | [
452,
46
] | python | en | ['en', 'en', 'en'] | True |
Group.create_group | (
hass,
name,
entity_ids=None,
user_defined=True,
icon=None,
object_id=None,
mode=None,
order=None,
) | Initialize a group. | Initialize a group. | def create_group(
hass,
name,
entity_ids=None,
user_defined=True,
icon=None,
object_id=None,
mode=None,
order=None,
):
"""Initialize a group."""
return asyncio.run_coroutine_threadsafe(
Group.async_create_group(
hass, name, entity_ids, user_defined, icon, object_id, mode, order
),
hass.loop,
).result() | [
"def",
"create_group",
"(",
"hass",
",",
"name",
",",
"entity_ids",
"=",
"None",
",",
"user_defined",
"=",
"True",
",",
"icon",
"=",
"None",
",",
"object_id",
"=",
"None",
",",
"mode",
"=",
"None",
",",
"order",
"=",
"None",
",",
")",
":",
"return",
"asyncio",
".",
"run_coroutine_threadsafe",
"(",
"Group",
".",
"async_create_group",
"(",
"hass",
",",
"name",
",",
"entity_ids",
",",
"user_defined",
",",
"icon",
",",
"object_id",
",",
"mode",
",",
"order",
")",
",",
"hass",
".",
"loop",
",",
")",
".",
"result",
"(",
")"
] | [
455,
4
] | [
471,
18
] | python | en | ['en', 'en', 'en'] | True |
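The synchronous wrapper above is the standard bridge for code running outside the event loop: it schedules the coroutine on the running loop and blocks the calling thread until the result is available. A generic, self-contained illustration of that pattern:

    import asyncio
    from typing import Any, Coroutine

    def run_blocking(coro: Coroutine, loop: asyncio.AbstractEventLoop) -> Any:
        """Run a coroutine on an already-running loop from a worker thread."""
        # run_coroutine_threadsafe returns a concurrent.futures.Future;
        # .result() blocks this (non-loop) thread until the coroutine finishes.
        return asyncio.run_coroutine_threadsafe(coro, loop).result()

Calling such a wrapper from inside the event loop itself would deadlock, which is why it is reserved for worker threads.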
Group.async_create_group | (
hass,
name,
entity_ids=None,
user_defined=True,
icon=None,
object_id=None,
mode=None,
order=None,
) | Initialize a group.
This method must be run in the event loop.
| Initialize a group. | async def async_create_group(
hass,
name,
entity_ids=None,
user_defined=True,
icon=None,
object_id=None,
mode=None,
order=None,
):
"""Initialize a group.
This method must be run in the event loop.
"""
if order is None:
hass.data.setdefault(GROUP_ORDER, 0)
order = hass.data[GROUP_ORDER]
# Keep track of the group order without iterating
# every state in the state machine every time
# we setup a new group
hass.data[GROUP_ORDER] += 1
group = Group(
hass,
name,
order=order,
icon=icon,
user_defined=user_defined,
entity_ids=entity_ids,
mode=mode,
)
group.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, object_id or name, hass=hass
)
# If called before the platform async_setup is called (test cases)
component = hass.data.get(DOMAIN)
if component is None:
component = hass.data[DOMAIN] = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_add_entities([group])
return group | [
"async",
"def",
"async_create_group",
"(",
"hass",
",",
"name",
",",
"entity_ids",
"=",
"None",
",",
"user_defined",
"=",
"True",
",",
"icon",
"=",
"None",
",",
"object_id",
"=",
"None",
",",
"mode",
"=",
"None",
",",
"order",
"=",
"None",
",",
")",
":",
"if",
"order",
"is",
"None",
":",
"hass",
".",
"data",
".",
"setdefault",
"(",
"GROUP_ORDER",
",",
"0",
")",
"order",
"=",
"hass",
".",
"data",
"[",
"GROUP_ORDER",
"]",
"# Keep track of the group order without iterating",
"# every state in the state machine every time",
"# we setup a new group",
"hass",
".",
"data",
"[",
"GROUP_ORDER",
"]",
"+=",
"1",
"group",
"=",
"Group",
"(",
"hass",
",",
"name",
",",
"order",
"=",
"order",
",",
"icon",
"=",
"icon",
",",
"user_defined",
"=",
"user_defined",
",",
"entity_ids",
"=",
"entity_ids",
",",
"mode",
"=",
"mode",
",",
")",
"group",
".",
"entity_id",
"=",
"async_generate_entity_id",
"(",
"ENTITY_ID_FORMAT",
",",
"object_id",
"or",
"name",
",",
"hass",
"=",
"hass",
")",
"# If called before the platform async_setup is called (test cases)",
"component",
"=",
"hass",
".",
"data",
".",
"get",
"(",
"DOMAIN",
")",
"if",
"component",
"is",
"None",
":",
"component",
"=",
"hass",
".",
"data",
"[",
"DOMAIN",
"]",
"=",
"EntityComponent",
"(",
"_LOGGER",
",",
"DOMAIN",
",",
"hass",
")",
"await",
"component",
".",
"async_add_entities",
"(",
"[",
"group",
"]",
")",
"return",
"group"
] | [
474,
4
] | [
518,
20
] | python | en | ['en', 'en', 'en'] | True |
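A short usage sketch against the factory above; the entity IDs and object_id are placeholders, and the call is assumed to run inside the event loop (for example from an integration's async setup):

    from homeassistant.components.group import Group  # import path assumed

    async def _example(hass) -> None:
        group = await Group.async_create_group(
            hass,
            "Downstairs lights",
            entity_ids=["light.kitchen", "light.hallway"],  # hypothetical members
            object_id="downstairs_lights",
            mode=None,  # falsy mode keeps the default "any member on" semantics
        )
        # The group is already registered with the EntityComponent at this point;
        # its entity_id is generated from object_id, e.g. "group.downstairs_lights".
        print(group.entity_id)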
Group.should_poll | (self) | No need to poll because groups will update themselves. | No need to poll because groups will update themselves. | def should_poll(self):
"""No need to poll because groups will update themselves."""
return False | [
"def",
"should_poll",
"(",
"self",
")",
":",
"return",
"False"
] | [
521,
4
] | [
523,
20
] | python | en | ['en', 'en', 'en'] | True |
Group.name | (self) | Return the name of the group. | Return the name of the group. | def name(self):
"""Return the name of the group."""
return self._name | [
"def",
"name",
"(",
"self",
")",
":",
"return",
"self",
".",
"_name"
] | [
526,
4
] | [
528,
25
] | python | en | ['en', 'en', 'en'] | True |
Group.name | (self, value) | Set Group name. | Set Group name. | def name(self, value):
"""Set Group name."""
self._name = value | [
"def",
"name",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_name",
"=",
"value"
] | [
531,
4
] | [
533,
26
] | python | en | ['en', 'ceb', 'en'] | True |
Group.state | (self) | Return the state of the group. | Return the state of the group. | def state(self):
"""Return the state of the group."""
return self._state | [
"def",
"state",
"(",
"self",
")",
":",
"return",
"self",
".",
"_state"
] | [
536,
4
] | [
538,
26
] | python | en | ['en', 'en', 'en'] | True |
Group.icon | (self) | Return the icon of the group. | Return the icon of the group. | def icon(self):
"""Return the icon of the group."""
return self._icon | [
"def",
"icon",
"(",
"self",
")",
":",
"return",
"self",
".",
"_icon"
] | [
541,
4
] | [
543,
25
] | python | en | ['en', 'en', 'en'] | True |
Group.icon | (self, value) | Set Icon for group. | Set Icon for group. | def icon(self, value):
"""Set Icon for group."""
self._icon = value | [
"def",
"icon",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_icon",
"=",
"value"
] | [
546,
4
] | [
548,
26
] | python | en | ['en', 'en', 'en'] | True |
Group.state_attributes | (self) | Return the state attributes for the group. | Return the state attributes for the group. | def state_attributes(self):
"""Return the state attributes for the group."""
data = {ATTR_ENTITY_ID: self.tracking, ATTR_ORDER: self._order}
if not self.user_defined:
data[ATTR_AUTO] = True
return data | [
"def",
"state_attributes",
"(",
"self",
")",
":",
"data",
"=",
"{",
"ATTR_ENTITY_ID",
":",
"self",
".",
"tracking",
",",
"ATTR_ORDER",
":",
"self",
".",
"_order",
"}",
"if",
"not",
"self",
".",
"user_defined",
":",
"data",
"[",
"ATTR_AUTO",
"]",
"=",
"True",
"return",
"data"
] | [
551,
4
] | [
557,
19
] | python | en | ['en', 'en', 'en'] | True |
Group.assumed_state | (self) | Test if any member has an assumed state. | Test if any member has an assumed state. | def assumed_state(self):
"""Test if any member has an assumed state."""
return self._assumed_state | [
"def",
"assumed_state",
"(",
"self",
")",
":",
"return",
"self",
".",
"_assumed_state"
] | [
560,
4
] | [
562,
34
] | python | en | ['en', 'en', 'en'] | True |
Group.update_tracked_entity_ids | (self, entity_ids) | Update the member entity IDs. | Update the member entity IDs. | def update_tracked_entity_ids(self, entity_ids):
"""Update the member entity IDs."""
asyncio.run_coroutine_threadsafe(
self.async_update_tracked_entity_ids(entity_ids), self.hass.loop
).result() | [
"def",
"update_tracked_entity_ids",
"(",
"self",
",",
"entity_ids",
")",
":",
"asyncio",
".",
"run_coroutine_threadsafe",
"(",
"self",
".",
"async_update_tracked_entity_ids",
"(",
"entity_ids",
")",
",",
"self",
".",
"hass",
".",
"loop",
")",
".",
"result",
"(",
")"
] | [
564,
4
] | [
568,
18
] | python | en | ['en', 'en', 'en'] | True |
Group.async_update_tracked_entity_ids | (self, entity_ids) | Update the member entity IDs.
This method must be run in the event loop.
| Update the member entity IDs. | async def async_update_tracked_entity_ids(self, entity_ids):
"""Update the member entity IDs.
This method must be run in the event loop.
"""
self._async_stop()
self._set_tracked(entity_ids)
self._reset_tracked_state()
self._async_start() | [
"async",
"def",
"async_update_tracked_entity_ids",
"(",
"self",
",",
"entity_ids",
")",
":",
"self",
".",
"_async_stop",
"(",
")",
"self",
".",
"_set_tracked",
"(",
"entity_ids",
")",
"self",
".",
"_reset_tracked_state",
"(",
")",
"self",
".",
"_async_start",
"(",
")"
] | [
570,
4
] | [
578,
27
] | python | en | ['en', 'en', 'en'] | True |
Group._set_tracked | (self, entity_ids) | Tuple of entities to be tracked. | Tuple of entities to be tracked. | def _set_tracked(self, entity_ids):
"""Tuple of entities to be tracked."""
# tracking are the entities we want to track
# trackable are the entities we actually watch
if not entity_ids:
self.tracking = ()
self.trackable = ()
return
excluded_domains = self.hass.data[REG_KEY].exclude_domains
tracking = []
trackable = []
for ent_id in entity_ids:
ent_id_lower = ent_id.lower()
domain = split_entity_id(ent_id_lower)[0]
tracking.append(ent_id_lower)
if domain not in excluded_domains:
trackable.append(ent_id_lower)
self.trackable = tuple(trackable)
self.tracking = tuple(tracking) | [
"def",
"_set_tracked",
"(",
"self",
",",
"entity_ids",
")",
":",
"# tracking are the entities we want to track",
"# trackable are the entities we actually watch",
"if",
"not",
"entity_ids",
":",
"self",
".",
"tracking",
"=",
"(",
")",
"self",
".",
"trackable",
"=",
"(",
")",
"return",
"excluded_domains",
"=",
"self",
".",
"hass",
".",
"data",
"[",
"REG_KEY",
"]",
".",
"exclude_domains",
"tracking",
"=",
"[",
"]",
"trackable",
"=",
"[",
"]",
"for",
"ent_id",
"in",
"entity_ids",
":",
"ent_id_lower",
"=",
"ent_id",
".",
"lower",
"(",
")",
"domain",
"=",
"split_entity_id",
"(",
"ent_id_lower",
")",
"[",
"0",
"]",
"tracking",
".",
"append",
"(",
"ent_id_lower",
")",
"if",
"domain",
"not",
"in",
"excluded_domains",
":",
"trackable",
".",
"append",
"(",
"ent_id_lower",
")",
"self",
".",
"trackable",
"=",
"tuple",
"(",
"trackable",
")",
"self",
".",
"tracking",
"=",
"tuple",
"(",
"tracking",
")"
] | [
580,
4
] | [
602,
39
] | python | en | ['en', 'en', 'en'] | True |
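A stand-alone restatement of the tracking/trackable split above, useful for seeing the effect of an excluded domain (the excluded domain and entity IDs below are hypothetical):

    def split_tracked(entity_ids, excluded_domains):
        """Lower-case every member; only non-excluded domains are watched."""
        tracking, trackable = [], []
        for ent_id in entity_ids:
            ent_id = ent_id.lower()
            tracking.append(ent_id)
            if ent_id.split(".", 1)[0] not in excluded_domains:
                trackable.append(ent_id)
        return tuple(tracking), tuple(trackable)

    print(split_tracked(["Light.Kitchen", "device_tracker.phone"], {"device_tracker"}))
    # -> (('light.kitchen', 'device_tracker.phone'), ('light.kitchen',))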
Group._async_start | (self, *_) | Start tracking members and write state. | Start tracking members and write state. | def _async_start(self, *_):
"""Start tracking members and write state."""
self._reset_tracked_state()
self._async_start_tracking()
self.async_write_ha_state() | [
"def",
"_async_start",
"(",
"self",
",",
"*",
"_",
")",
":",
"self",
".",
"_reset_tracked_state",
"(",
")",
"self",
".",
"_async_start_tracking",
"(",
")",
"self",
".",
"async_write_ha_state",
"(",
")"
] | [
605,
4
] | [
609,
35
] | python | en | ['en', 'en', 'en'] | True |
Group._async_start_tracking | (self) | Start tracking members.
This method must be run in the event loop.
| Start tracking members. | def _async_start_tracking(self):
"""Start tracking members.
This method must be run in the event loop.
"""
if self.trackable and self._async_unsub_state_changed is None:
self._async_unsub_state_changed = async_track_state_change_event(
self.hass, self.trackable, self._async_state_changed_listener
)
self._async_update_group_state() | [
"def",
"_async_start_tracking",
"(",
"self",
")",
":",
"if",
"self",
".",
"trackable",
"and",
"self",
".",
"_async_unsub_state_changed",
"is",
"None",
":",
"self",
".",
"_async_unsub_state_changed",
"=",
"async_track_state_change_event",
"(",
"self",
".",
"hass",
",",
"self",
".",
"trackable",
",",
"self",
".",
"_async_state_changed_listener",
")",
"self",
".",
"_async_update_group_state",
"(",
")"
] | [
612,
4
] | [
622,
40
] | python | en | ['en', 'sn', 'en'] | True |
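The unsubscribe callable returned by async_track_state_change_event doubles as the "currently tracking" flag in this class. A compressed, hypothetical sketch of that start/stop lifecycle:

    from homeassistant.helpers.event import async_track_state_change_event

    class TrackingLifecycle:
        """Illustrative only; mirrors _async_start_tracking/_async_stop above."""

        def __init__(self, hass, entity_ids, listener):
            self.hass = hass
            self.entity_ids = entity_ids
            self.listener = listener
            self._unsub = None

        def start(self) -> None:
            if self.entity_ids and self._unsub is None:
                self._unsub = async_track_state_change_event(
                    self.hass, self.entity_ids, self.listener
                )

        def stop(self) -> None:
            if self._unsub is not None:
                self._unsub()       # cancel the subscription
                self._unsub = None  # permit a later restart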
Group._async_stop | (self) | Unregister the group from Home Assistant.
This method must be run in the event loop.
| Unregister the group from Home Assistant. | def _async_stop(self):
"""Unregister the group from Home Assistant.
This method must be run in the event loop.
"""
if self._async_unsub_state_changed:
self._async_unsub_state_changed()
self._async_unsub_state_changed = None | [
"def",
"_async_stop",
"(",
"self",
")",
":",
"if",
"self",
".",
"_async_unsub_state_changed",
":",
"self",
".",
"_async_unsub_state_changed",
"(",
")",
"self",
".",
"_async_unsub_state_changed",
"=",
"None"
] | [
625,
4
] | [
632,
50
] | python | en | ['en', 'en', 'en'] | True |
Group.async_update | (self) | Query all members and determine current group state. | Query all members and determine current group state. | async def async_update(self):
"""Query all members and determine current group state."""
self._state = None
self._async_update_group_state() | [
"async",
"def",
"async_update",
"(",
"self",
")",
":",
"self",
".",
"_state",
"=",
"None",
"self",
".",
"_async_update_group_state",
"(",
")"
] | [
634,
4
] | [
637,
40
] | python | en | ['en', 'en', 'en'] | True |
Group.async_added_to_hass | (self) | Handle addition to Home Assistant. | Handle addition to Home Assistant. | async def async_added_to_hass(self):
"""Handle addition to Home Assistant."""
if self.hass.state != CoreState.running:
self.hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_START, self._async_start
)
return
if self.tracking:
self._reset_tracked_state()
self._async_start_tracking() | [
"async",
"def",
"async_added_to_hass",
"(",
"self",
")",
":",
"if",
"self",
".",
"hass",
".",
"state",
"!=",
"CoreState",
".",
"running",
":",
"self",
".",
"hass",
".",
"bus",
".",
"async_listen_once",
"(",
"EVENT_HOMEASSISTANT_START",
",",
"self",
".",
"_async_start",
")",
"return",
"if",
"self",
".",
"tracking",
":",
"self",
".",
"_reset_tracked_state",
"(",
")",
"self",
".",
"_async_start_tracking",
"(",
")"
] | [
639,
4
] | [
649,
36
] | python | en | ['en', 'en', 'en'] | True |
Group.async_will_remove_from_hass | (self) | Handle removal from Home Assistant. | Handle removal from Home Assistant. | async def async_will_remove_from_hass(self):
"""Handle removal from Home Assistant."""
self._async_stop() | [
"async",
"def",
"async_will_remove_from_hass",
"(",
"self",
")",
":",
"self",
".",
"_async_stop",
"(",
")"
] | [
651,
4
] | [
653,
26
] | python | en | ['en', 'en', 'en'] | True |
Group._async_state_changed_listener | (self, event) | Respond to a member state changing.
This method must be run in the event loop.
| Respond to a member state changing. | async def _async_state_changed_listener(self, event):
"""Respond to a member state changing.
This method must be run in the event loop.
"""
# removed
if self._async_unsub_state_changed is None:
return
self.async_set_context(event.context)
new_state = event.data.get("new_state")
if new_state is None:
# The state was removed from the state machine
self._reset_tracked_state()
self._async_update_group_state(new_state)
self.async_write_ha_state() | [
"async",
"def",
"_async_state_changed_listener",
"(",
"self",
",",
"event",
")",
":",
"# removed",
"if",
"self",
".",
"_async_unsub_state_changed",
"is",
"None",
":",
"return",
"self",
".",
"async_set_context",
"(",
"event",
".",
"context",
")",
"new_state",
"=",
"event",
".",
"data",
".",
"get",
"(",
"\"new_state\"",
")",
"if",
"new_state",
"is",
"None",
":",
"# The state was removed from the state machine",
"self",
".",
"_reset_tracked_state",
"(",
")",
"self",
".",
"_async_update_group_state",
"(",
"new_state",
")",
"self",
".",
"async_write_ha_state",
"(",
")"
] | [
655,
4
] | [
672,
35
] | python | en | ['en', 'en', 'en'] | True |
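For reference, the shape of the state_changed event data the listener above consumes (values are illustrative; a None new_state means the member was removed from the state machine, which is why the tracked-state cache is rebuilt before the group state is recomputed):

    event_data = {
        "entity_id": "light.kitchen",  # hypothetical member
        "old_state": ...,              # previous State object, or None if just added
        "new_state": None,             # None => the member no longer exists
    }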