Dataset columns:

identifier: string (lengths 1 to 155)
parameters: string (lengths 2 to 6.09k)
docstring: string (lengths 11 to 63.4k)
docstring_summary: string (lengths 0 to 63.4k)
function: string (lengths 29 to 99.8k)
function_tokens: sequence
start_point: sequence
end_point: sequence
language: string (1 class)
docstring_language: string (lengths 2 to 7)
docstring_language_predictions: string (lengths 18 to 23)
is_langid_reliable: string (2 classes)

identifier: get_m4s
parameters: (segment: io.BytesIO, sequence: int)
docstring: Get m4s section from fragmented mp4.
docstring_summary: Get m4s section from fragmented mp4.
function:

def get_m4s(segment: io.BytesIO, sequence: int) -> bytes:
    """Get m4s section from fragmented mp4."""
    moof_location = next(find_box(segment, b"moof"))
    mfra_location = next(find_box(segment, b"mfra"))
    segment.seek(moof_location)
    return segment.read(mfra_location - moof_location)
start_point: [32, 0]
end_point: [37, 54]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'en', 'en']
is_langid_reliable: True

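A note on dependencies: get_m4s (and get_codec_string in the next row) calls a find_box helper that is not included in these rows. The sketch below is a minimal, hypothetical reconstruction inferred from the call sites, assuming standard ISO BMFF boxes that start with a 4-byte big-endian size followed by a 4-byte type; it is an illustration, not the dataset's own implementation.

import io

def find_box(segment: io.BytesIO, target_type: bytes, box_start: int = 0):
    """Yield offsets of boxes matching target_type (hypothetical helper)."""
    if box_start == 0:
        # Scan top-level boxes across the whole buffer.
        box_end = segment.seek(0, io.SEEK_END)
        index = 0
    else:
        # Scan the children of the box that starts at box_start.
        segment.seek(box_start)
        box_end = box_start + int.from_bytes(segment.read(4), byteorder="big")
        index = box_start + 8  # skip the parent box's own header
    while index <= box_end - 8:
        segment.seek(index)
        header = segment.read(8)
        if header[4:8] == target_type:
            yield index
        size = int.from_bytes(header[0:4], byteorder="big")
        if size == 0:  # malformed box; stop rather than loop forever
            break
        index += size
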
identifier: get_codec_string
parameters: (segment: io.BytesIO)
docstring: Get RFC 6381 codec string.
docstring_summary: Get RFC 6381 codec string.
function:

def get_codec_string(segment: io.BytesIO) -> str:
    """Get RFC 6381 codec string."""
    codecs = []

    # Find moov
    moov_location = next(find_box(segment, b"moov"))

    # Find tracks
    for trak_location in find_box(segment, b"trak", moov_location):
        # Drill down to media info
        mdia_location = next(find_box(segment, b"mdia", trak_location))
        minf_location = next(find_box(segment, b"minf", mdia_location))
        stbl_location = next(find_box(segment, b"stbl", minf_location))
        stsd_location = next(find_box(segment, b"stsd", stbl_location))

        # Get stsd box
        segment.seek(stsd_location)
        stsd_length = int.from_bytes(segment.read(4), byteorder="big")
        segment.seek(stsd_location)
        stsd_box = segment.read(stsd_length)

        # Base Codec
        codec = stsd_box[20:24].decode("utf-8")

        # Handle H264
        if (
            codec in ("avc1", "avc2", "avc3", "avc4")
            and stsd_length > 110
            and stsd_box[106:110] == b"avcC"
        ):
            profile = stsd_box[111:112].hex()
            compatibility = stsd_box[112:113].hex()
            # Cap level at 4.1 for compatibility with some Google Cast devices
            level = hex(min(stsd_box[113], 41))[2:]
            codec += "." + profile + compatibility + level

        # Handle H265
        elif (
            codec in ("hev1", "hvc1")
            and stsd_length > 110
            and stsd_box[106:110] == b"hvcC"
        ):
            tmp_byte = int.from_bytes(stsd_box[111:112], byteorder="big")

            # Profile Space
            codec += "."
            profile_space_map = {0: "", 1: "A", 2: "B", 3: "C"}
            profile_space = tmp_byte >> 6
            codec += profile_space_map[profile_space]
            general_profile_idc = tmp_byte & 31
            codec += str(general_profile_idc)

            # Compatibility
            codec += "."
            general_profile_compatibility = int.from_bytes(
                stsd_box[112:116], byteorder="big"
            )
            reverse = 0
            for i in range(0, 32):
                reverse |= general_profile_compatibility & 1
                if i == 31:
                    break
                reverse <<= 1
                general_profile_compatibility >>= 1
            codec += hex(reverse)[2:]

            # Tier Flag
            if (tmp_byte & 32) >> 5 == 0:
                codec += ".L"
            else:
                codec += ".H"
            codec += str(int.from_bytes(stsd_box[122:123], byteorder="big"))

            # Constraint String
            has_byte = False
            constraint_string = ""
            for i in range(121, 115, -1):
                gci = int.from_bytes(stsd_box[i : i + 1], byteorder="big")
                if gci or has_byte:
                    constraint_string = "." + hex(gci)[2:] + constraint_string
                    has_byte = True
            codec += constraint_string

        # Handle Audio
        elif codec == "mp4a":
            oti = None
            dsi = None

            # Parse ES Descriptors
            oti_loc = stsd_box.find(b"\x04\x80\x80\x80")
            if oti_loc > 0:
                oti = stsd_box[oti_loc + 5 : oti_loc + 6].hex()
                codec += f".{oti}"

            dsi_loc = stsd_box.find(b"\x05\x80\x80\x80")
            if dsi_loc > 0:
                dsi_length = int.from_bytes(
                    stsd_box[dsi_loc + 4 : dsi_loc + 5], byteorder="big"
                )
                dsi_data = stsd_box[dsi_loc + 5 : dsi_loc + 5 + dsi_length]
                dsi0 = int.from_bytes(dsi_data[0:1], byteorder="big")
                dsi = (dsi0 & 248) >> 3
                if dsi == 31 and len(dsi_data) >= 2:
                    dsi1 = int.from_bytes(dsi_data[1:2], byteorder="big")
                    dsi = 32 + ((dsi0 & 7) << 3) + ((dsi1 & 224) >> 5)
                codec += f".{dsi}"

        codecs.append(codec)

    return ",".join(codecs)
start_point: [40, 0]
end_point: [149, 27]
language: python
docstring_language: cy
docstring_language_predictions: ['fr', 'cy', 'en']
is_langid_reliable: False

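The HEVC branch above writes the 32 general_profile_compatibility flags in reversed bit order, which is what the hvc1/hev1 codec string format calls for. The loop is a plain 32-bit bit reversal and can be checked in isolation; the function below extracts it verbatim:

def reverse_bits32(value: int) -> int:
    reverse = 0
    for i in range(32):
        reverse |= value & 1
        if i == 31:
            break
        reverse <<= 1
        value >>= 1
    return reverse

# The lowest input bit becomes the highest output bit, and so on:
assert reverse_bits32(0b1) == 0x80000000
assert reverse_bits32(0b110) == 0x60000000
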
identifier: async_setup_entry
parameters: (hass, config, async_add_entities)
docstring: Set up the blink binary sensors.
docstring_summary: Set up the blink binary sensors.
function:

async def async_setup_entry(hass, config, async_add_entities):
    """Set up the blink binary sensors."""
    data = hass.data[DOMAIN][config.entry_id]

    entities = []

    for camera in data.cameras:
        for sensor_type in BINARY_SENSORS:
            entities.append(BlinkBinarySensor(data, camera, sensor_type))

    async_add_entities(entities)
start_point: [16, 0]
end_point: [24, 32]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'no', 'en']
is_langid_reliable: True

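async_setup_entry iterates over BINARY_SENSORS, a constant defined elsewhere in the integration. From the tuple unpacking in BlinkBinarySensor.__init__ in the next row, each entry evidently maps a sensor type key to a (name, device_class) pair; the keys and values below are invented for illustration only.

BINARY_SENSORS = {
    "motion_enabled": ("Motion Enabled", "motion"),  # hypothetical entry
    "battery": ("Battery", "battery"),               # hypothetical entry
}

for sensor_type, (name, device_class) in BINARY_SENSORS.items():
    print(sensor_type, name, device_class)
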
identifier: BlinkBinarySensor.__init__
parameters: (self, data, camera, sensor_type)
docstring: Initialize the sensor.
docstring_summary: Initialize the sensor.
function:

def __init__(self, data, camera, sensor_type):
    """Initialize the sensor."""
    self.data = data
    self._type = sensor_type
    name, device_class = BINARY_SENSORS[sensor_type]
    self._name = f"{DOMAIN} {camera} {name}"
    self._device_class = device_class
    self._camera = data.cameras[camera]
    self._state = None
    self._unique_id = f"{self._camera.serial}-{self._type}"
start_point: [30, 4]
end_point: [39, 63]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'en', 'en']
is_langid_reliable: True

identifier: BlinkBinarySensor.name
parameters: (self)
docstring: Return the name of the blink sensor.
docstring_summary: Return the name of the blink sensor.
function:

def name(self):
    """Return the name of the blink sensor."""
    return self._name
start_point: [42, 4]
end_point: [44, 25]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'no', 'en']
is_langid_reliable: True

identifier: BlinkBinarySensor.device_class
parameters: (self)
docstring: Return the class of this device.
docstring_summary: Return the class of this device.
function:

def device_class(self):
    """Return the class of this device."""
    return self._device_class
start_point: [47, 4]
end_point: [49, 33]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'en', 'en']
is_langid_reliable: True

identifier: BlinkBinarySensor.is_on
parameters: (self)
docstring: Return the status of the sensor.
docstring_summary: Return the status of the sensor.
function:

def is_on(self):
    """Return the status of the sensor."""
    return self._state
start_point: [52, 4]
end_point: [54, 26]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'id', 'en']
is_langid_reliable: True

identifier: BlinkBinarySensor.update
parameters: (self)
docstring: Update sensor state.
docstring_summary: Update sensor state.
function:

def update(self):
    """Update sensor state."""
    self.data.refresh()
    state = self._camera.attributes[self._type]
    if self._type == TYPE_BATTERY:
        state = state != "ok"
    self._state = state
start_point: [56, 4]
end_point: [62, 27]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'co', 'en']
is_langid_reliable: True

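Note the inversion for TYPE_BATTERY in update(): the camera reports a battery attribute of "ok" when healthy, so the binary sensor reads on exactly when the battery is not ok. A standalone check of that expression (the non-ok value is illustrative):

for reported in ("ok", "low"):
    print(reported, "->", reported != "ok")  # "ok" -> False (sensor off), "low" -> True (sensor on)
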
identifier: TFTrainer.get_train_tfdataset
parameters: (self)
docstring:

Returns the training :class:`~tf.data.Dataset`.

Subclass and override this method if you want to inject some custom behavior.

docstring_summary: Returns the training :class:`~tf.data.Dataset`.
function:

def get_train_tfdataset(self) -> tf.data.Dataset:
    """
    Returns the training :class:`~tf.data.Dataset`.

    Subclass and override this method if you want to inject some custom behavior.
    """
    if self.train_dataset is None:
        raise ValueError("Trainer: training requires a train_dataset.")

    self.total_train_batch_size = self.args.train_batch_size * self.args.gradient_accumulation_steps
    self.num_train_examples = self.train_dataset.cardinality().numpy()

    if self.num_train_examples < 0:
        raise ValueError("The training dataset must have an asserted cardinality")

    ds = (
        self.train_dataset.repeat()
        .shuffle(self.num_train_examples, seed=self.args.seed)
        .batch(self.total_train_batch_size, drop_remainder=self.args.dataloader_drop_last)
        .prefetch(tf.data.experimental.AUTOTUNE)
    )

    return self.args.strategy.experimental_distribute_dataset(ds)
start_point: [130, 4]
end_point: [152, 69]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'error', 'th']
is_langid_reliable: False

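The pipeline in get_train_tfdataset repeats the dataset first, then shuffles over the full asserted cardinality, then batches and prefetches. The same chain can be reproduced on a toy in-memory dataset, where the cardinality is known and therefore non-negative; a minimal sketch, assuming TensorFlow 2 is available:

import tensorflow as tf

dataset = tf.data.Dataset.from_tensor_slices(list(range(10)))
num_examples = dataset.cardinality().numpy()  # 10; a negative value would mean unknown cardinality

ds = (
    dataset.repeat()
    .shuffle(num_examples, seed=42)
    .batch(4, drop_remainder=False)
    .prefetch(tf.data.experimental.AUTOTUNE)
)
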
identifier: TFTrainer.get_eval_tfdataset
parameters: (self, eval_dataset: Optional[tf.data.Dataset] = None)
docstring:

Returns the evaluation :class:`~tf.data.Dataset`.

Args:
    eval_dataset (:class:`~tf.data.Dataset`, `optional`):
        If provided, will override `self.eval_dataset`. The dataset should yield tuples of ``(features, labels)``
        where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor,
        the loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a
        dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead
        calculated by calling ``model(features, **labels)``.

Subclass and override this method if you want to inject some custom behavior.

docstring_summary: Returns the evaluation :class:`~tf.data.Dataset`.
function:

def get_eval_tfdataset(self, eval_dataset: Optional[tf.data.Dataset] = None) -> tf.data.Dataset:
    """
    Returns the evaluation :class:`~tf.data.Dataset`.

    Args:
        eval_dataset (:class:`~tf.data.Dataset`, `optional`):
            If provided, will override `self.eval_dataset`. The dataset should yield tuples of ``(features,
            labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels``
            is a tensor, the loss is calculated by the model by calling ``model(features, labels=labels)``. If
            ``labels`` is a dict, such as when using a QuestionAnswering head model with multiple targets, the
            loss is instead calculated by calling ``model(features, **labels)``.

    Subclass and override this method if you want to inject some custom behavior.
    """
    if eval_dataset is None and self.eval_dataset is None:
        raise ValueError("Trainer: evaluation requires an eval_dataset.")

    eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset
    num_examples = eval_dataset.cardinality().numpy()

    if num_examples < 0:
        raise ValueError("The training dataset must have an asserted cardinality")

    approx = math.floor if self.args.dataloader_drop_last else math.ceil
    steps = approx(num_examples / self.args.eval_batch_size)
    ds = (
        eval_dataset.repeat()
        .batch(self.args.eval_batch_size, drop_remainder=self.args.dataloader_drop_last)
        .prefetch(tf.data.experimental.AUTOTUNE)
    )

    return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
start_point: [154, 4]
end_point: [185, 90]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'error', 'th']
is_langid_reliable: False

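The steps computation in get_eval_tfdataset rounds down when partial batches are dropped and up when they are kept, so the loop in prediction_loop consumes exactly the right number of batches. For example:

import math

num_examples, eval_batch_size = 103, 8
print(math.ceil(num_examples / eval_batch_size))   # 13 steps; the last batch is partial
print(math.floor(num_examples / eval_batch_size))  # 12 steps when dataloader_drop_last is set
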
identifier: TFTrainer.get_test_tfdataset
parameters: (self, test_dataset: tf.data.Dataset)
docstring:

Returns a test :class:`~tf.data.Dataset`.

Args:
    test_dataset (:class:`~tf.data.Dataset`):
        The dataset to use. The dataset should yield tuples of ``(features, labels)`` where ``features`` is a
        dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is calculated
        by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when using
        a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling
        ``model(features, **labels)``.

Subclass and override this method if you want to inject some custom behavior.

docstring_summary: Returns a test :class:`~tf.data.Dataset`.
function:

def get_test_tfdataset(self, test_dataset: tf.data.Dataset) -> tf.data.Dataset:
    """
    Returns a test :class:`~tf.data.Dataset`.

    Args:
        test_dataset (:class:`~tf.data.Dataset`):
            The dataset to use. The dataset should yield tuples of ``(features, labels)`` where ``features`` is
            a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is
            calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such
            as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated
            by calling ``model(features, **labels)``.

    Subclass and override this method if you want to inject some custom behavior.
    """
    num_examples = test_dataset.cardinality().numpy()

    if num_examples < 0:
        raise ValueError("The training dataset must have an asserted cardinality")

    steps = math.ceil(num_examples / self.args.eval_batch_size)
    ds = test_dataset.batch(self.args.eval_batch_size).prefetch(tf.data.experimental.AUTOTUNE)

    return self.args.strategy.experimental_distribute_dataset(ds), steps, num_examples
start_point: [187, 4]
end_point: [210, 90]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'error', 'th']
is_langid_reliable: False

identifier: TFTrainer.create_optimizer_and_scheduler
parameters: (self, num_training_steps: int)
docstring:

Setup the optimizer and the learning rate scheduler.

We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the
TFTrainer's init through :obj:`optimizers`, or subclass and override this method.

docstring_summary: Setup the optimizer and the learning rate scheduler.
function:

def create_optimizer_and_scheduler(self, num_training_steps: int):
    """
    Setup the optimizer and the learning rate scheduler.

    We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in
    the TFTrainer's init through :obj:`optimizers`, or subclass and override this method.
    """
    if not self.optimizer and not self.lr_scheduler:
        warmup_steps = (
            self.args.warmup_steps
            if self.args.warmup_steps > 0
            else math.ceil(num_training_steps * self.args.warmup_ratio)
        )

        self.optimizer, self.lr_scheduler = create_optimizer(
            self.args.learning_rate,
            num_training_steps,
            warmup_steps,
            adam_beta1=self.args.adam_beta1,
            adam_beta2=self.args.adam_beta2,
            adam_epsilon=self.args.adam_epsilon,
            weight_decay_rate=self.args.weight_decay,
            power=self.args.poly_power,
        )
start_point: [212, 4]
end_point: [235, 13]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'error', 'th']
is_langid_reliable: False

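When args.warmup_steps is zero, create_optimizer_and_scheduler derives the warmup length from args.warmup_ratio instead, rounding up. For example:

import math

num_training_steps = 10_000
warmup_ratio = 0.06  # illustrative value for args.warmup_ratio
print(math.ceil(num_training_steps * warmup_ratio))  # 600 warmup steps
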
identifier: TFTrainer.setup_wandb
parameters: (self)
docstring:

Setup the optional Weights & Biases (`wandb`) integration.

One can subclass and override this method to customize the setup if needed. Find more information `here
<https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:

Environment:
    WANDB_PROJECT:
        (Optional): str - "huggingface" by default, set this to a custom string to store results in a different
        project.
    WANDB_DISABLED:
        (Optional): boolean - defaults to false, set to "true" to disable wandb entirely.

docstring_summary: Setup the optional Weights & Biases (`wandb`) integration.
function:

def setup_wandb(self):
    """
    Setup the optional Weights & Biases (`wandb`) integration.

    One can subclass and override this method to customize the setup if needed. Find more information `here
    <https://docs.wandb.com/huggingface>`__. You can also override the following environment variables:

    Environment:
        WANDB_PROJECT:
            (Optional): str - "huggingface" by default, set this to a custom string to store results in a
            different project.
        WANDB_DISABLED:
            (Optional): boolean - defaults to false, set to "true" to disable wandb entirely.
    """
    logger.info('Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"')
    combined_dict = {**self.model.config.to_dict(), **self.args.to_sanitized_dict()}
    wandb.init(project=os.getenv("WANDB_PROJECT", "huggingface"), config=combined_dict, name=self.args.run_name)
start_point: [237, 4]
end_point: [254, 116]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'error', 'th']
is_langid_reliable: False

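Both knobs named in the setup_wandb docstring are plain environment variables, so they can be exported before the trainer starts; the project name below is illustrative:

import os

os.environ["WANDB_PROJECT"] = "my-experiments"  # hypothetical project; defaults to "huggingface"
os.environ["WANDB_DISABLED"] = "true"           # "true" disables wandb logging entirely
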
identifier: TFTrainer.setup_comet
parameters: (self)
docstring:

Setup the optional Comet.ml integration.

Environment:
    COMET_MODE:
        (Optional): str - "OFFLINE", "ONLINE", or "DISABLED"
    COMET_PROJECT_NAME:
        (Optional): str - Comet.ml project name for experiments
    COMET_OFFLINE_DIRECTORY:
        (Optional): str - folder to use for saving offline experiments when `COMET_MODE` is "OFFLINE"

For a number of configurable items in the environment, see `here
<https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables>`__

docstring_summary: Setup the optional Comet.ml integration.
function:

def setup_comet(self):
    """
    Setup the optional Comet.ml integration.

    Environment:
        COMET_MODE:
            (Optional): str - "OFFLINE", "ONLINE", or "DISABLED"
        COMET_PROJECT_NAME:
            (Optional): str - Comet.ml project name for experiments
        COMET_OFFLINE_DIRECTORY:
            (Optional): str - folder to use for saving offline experiments when `COMET_MODE` is "OFFLINE"

    For a number of configurable items in the environment, see `here
    <https://www.comet.ml/docs/python-sdk/advanced/#comet-configuration-variables>`__
    """
    comet_mode = os.getenv("COMET_MODE", "ONLINE").upper()
    args = {"project_name": os.getenv("COMET_PROJECT_NAME", "huggingface")}
    experiment = None

    if comet_mode == "ONLINE":
        experiment = comet_ml.Experiment(**args)
        logger.info("Automatic Comet.ml online logging enabled")
    elif comet_mode == "OFFLINE":
        args["offline_directory"] = os.getenv("COMET_OFFLINE_DIRECTORY", "./")
        experiment = comet_ml.OfflineExperiment(**args)
        logger.info("Automatic Comet.ml offline logging enabled; use `comet upload` when finished")

    if experiment is not None:
        experiment._set_model_graph(self.model, framework="transformers")
        experiment._log_parameters(self.args, prefix="args/", framework="transformers")
        experiment._log_parameters(self.model.config, prefix="config/", framework="transformers")
start_point: [256, 4]
end_point: [284, 101]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'error', 'th']
is_langid_reliable: False

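The same applies to the Comet.ml variables read by setup_comet; the offline directory below is illustrative:

import os

os.environ["COMET_MODE"] = "OFFLINE"                    # "ONLINE", "OFFLINE", or "DISABLED"
os.environ["COMET_OFFLINE_DIRECTORY"] = "./comet-runs"  # hypothetical folder; defaults to "./"
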
identifier: TFTrainer.prediction_loop
parameters: (self, dataset: tf.data.Dataset, steps: int, num_examples: int, description: str, prediction_loss_only: Optional[bool] = None)
docstring:

Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and
:func:`~transformers.TFTrainer.predict`.

Works both with or without labels.

docstring_summary: Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and :func:`~transformers.TFTrainer.predict`.
function:

def prediction_loop(
    self,
    dataset: tf.data.Dataset,
    steps: int,
    num_examples: int,
    description: str,
    prediction_loss_only: Optional[bool] = None,
) -> PredictionOutput:
    """
    Prediction/evaluation loop, shared by :func:`~transformers.TFTrainer.evaluate` and
    :func:`~transformers.TFTrainer.predict`.

    Works both with or without labels.
    """
    prediction_loss_only = (
        prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only
    )

    logger.info("***** Running %s *****", description)
    logger.info("  Num examples in dataset = %d", num_examples)
    if description == "Evaluation":
        logger.info("  Num examples in used in evaluation = %d", self.args.eval_batch_size * steps)
    logger.info("  Batch size = %d", self.args.eval_batch_size)

    label_ids: np.ndarray = None
    preds: np.ndarray = None
    self.eval_loss.reset_states()

    # Reset the past mems state at the beginning of the evaluation if necessary.
    if self.args.past_index >= 0:
        self._past = None

    for step, batch in enumerate(dataset):
        logits = self.distributed_prediction_steps(batch)
        _, labels = batch

        if not prediction_loss_only:
            if isinstance(logits, tuple):
                logits = logits[0]

            if isinstance(labels, tuple):
                labels = labels[0]

            if self.args.n_replicas > 1:
                for val in logits.values:
                    if preds is None:
                        preds = val.numpy()
                    else:
                        preds = np.append(preds, val.numpy(), axis=0)

                for val in labels.values:
                    if label_ids is None:
                        label_ids = val.numpy()
                    else:
                        label_ids = np.append(label_ids, val.numpy(), axis=0)
            else:
                if preds is None:
                    preds = logits.numpy()
                else:
                    preds = np.append(preds, logits.numpy(), axis=0)

                if label_ids is None:
                    label_ids = labels.numpy()
                else:
                    label_ids = np.append(label_ids, labels.numpy(), axis=0)

        if step == steps - 1:
            break

    if self.compute_metrics is not None and preds is not None and label_ids is not None:
        metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
    else:
        metrics = {}

    metrics["eval_loss"] = self.eval_loss.result().numpy() / steps

    for key in list(metrics.keys()):
        if not key.startswith("eval_"):
            metrics[f"eval_{key}"] = metrics.pop(key)

    if self.args.past_index and hasattr(self, "_past"):
        # Clean the state at the end of training
        delattr(self, "_past")

    return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
start_point: [286, 4]
end_point: [371, 88]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'error', 'th']
is_langid_reliable: False

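At the end of prediction_loop, every metric key is normalized to an eval_ prefix in place; iterating over a snapshot of the keys (list(metrics.keys())) makes the pop-and-reinsert safe during iteration. A standalone check with sample metrics:

metrics = {"accuracy": 0.91, "eval_loss": 0.37}
for key in list(metrics.keys()):
    if not key.startswith("eval_"):
        metrics[f"eval_{key}"] = metrics.pop(key)
print(metrics)  # {'eval_loss': 0.37, 'eval_accuracy': 0.91}
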
identifier: TFTrainer.log
parameters: (self, logs: Dict[str, float])
docstring:

Log :obj:`logs` on the various objects watching training.

Subclass and override this method to inject custom behavior.

Args:
    logs (:obj:`Dict[str, float]`):
        The values to log.

docstring_summary: Log :obj:`logs` on the various objects watching training.
function:

def log(self, logs: Dict[str, float]) -> None:
    """
    Log :obj:`logs` on the various objects watching training.

    Subclass and override this method to inject custom behavior.

    Args:
        logs (:obj:`Dict[str, float]`):
            The values to log.
    """
    logs["epoch"] = self.epoch_logging

    if self.tb_writer:
        with self.tb_writer.as_default():
            for k, v in logs.items():
                tf.summary.scalar(k, v, step=self.global_step)
        self.tb_writer.flush()

    if is_wandb_available():
        wandb.log(logs, step=self.global_step)

    if is_comet_available():
        experiment = comet_ml.config.get_global_experiment()
        if experiment is not None:
            experiment._log_metrics(
                logs, step=self.global_step, epoch=self.epoch_logging, framework="transformers"
            )

    output = {**logs, **{"step": self.global_step}}

    logger.info(output)
start_point: [373, 4]
end_point: [403, 27]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'error', 'th']
is_langid_reliable: False

identifier: TFTrainer.evaluate
parameters: (self, eval_dataset: Optional[tf.data.Dataset] = None)
docstring:

Run evaluation and returns metrics.

The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
(pass it to the init :obj:`compute_metrics` argument).

Args:
    eval_dataset (:class:`~tf.data.Dataset`, `optional`):
        Pass a dataset if you wish to override :obj:`self.eval_dataset`. The dataset should yield tuples of
        ``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If
        ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features,
        labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with
        multiple targets, the loss is instead calculated by calling ``model(features, **labels)``.

Returns:
    A dictionary containing the evaluation loss and the potential metrics computed from the predictions.

docstring_summary: Run evaluation and returns metrics.
function:

def evaluate(self, eval_dataset: Optional[tf.data.Dataset] = None) -> Dict[str, float]:
    """
    Run evaluation and returns metrics.

    The calling script will be responsible for providing a method to compute metrics, as they are task-dependent
    (pass it to the init :obj:`compute_metrics` argument).

    Args:
        eval_dataset (:class:`~tf.data.Dataset`, `optional`):
            Pass a dataset if you wish to override :obj:`self.eval_dataset`. The dataset should yield tuples of
            ``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels.
            If ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features,
            labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with
            multiple targets, the loss is instead calculated by calling ``model(features, **labels)``.

    Returns:
        A dictionary containing the evaluation loss and the potential metrics computed from the predictions.
    """
    eval_ds, steps, num_examples = self.get_eval_tfdataset(eval_dataset)

    output = self.prediction_loop(eval_ds, steps, num_examples, description="Evaluation")
    logs = {**output.metrics}
    logs["epoch"] = self.epoch_logging

    self.log(logs)

    return output.metrics
start_point: [405, 4]
end_point: [431, 29]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'error', 'th']
is_langid_reliable: False

identifier: TFTrainer.prediction_step
parameters: (self, features: tf.Tensor, labels: tf.Tensor, nb_instances_in_global_batch: tf.Tensor)
docstring:

Compute the prediction on features and update the loss with labels.

Subclass and override to inject some custom behavior.

docstring_summary: Compute the prediction on features and update the loss with labels.
function:

def prediction_step(
    self, features: tf.Tensor, labels: tf.Tensor, nb_instances_in_global_batch: tf.Tensor
) -> tf.Tensor:
    """
    Compute the prediction on features and update the loss with labels.

    Subclass and override to inject some custom behavior.
    """
    per_example_loss, logits = self.run_model(features, labels, False)
    scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)

    self.eval_loss.update_state(scaled_loss)

    return logits
start_point: [433, 4]
end_point: [446, 21]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'error', 'th']
is_langid_reliable: False

identifier: TFTrainer.train
parameters: (self)
docstring: Train method to train the model.
docstring_summary: Train method to train the model.
function:

def train(self) -> None:
    """
    Train method to train the model.
    """
    train_ds = self.get_train_tfdataset()

    if self.args.debug:
        tf.summary.trace_on(graph=True, profiler=True)

    self.gradient_accumulator.reset()

    num_update_steps_per_epoch = self.num_train_examples / self.total_train_batch_size

    # In fact, ``self.args.dataloader_drop_last`` has no effect in `trainer_tf.py`, because
    # the dataset is repeated before being batched.
    # It has the effect only when TPU is used which requires explicit tensor shape in order to make
    # the gradient accumulation implementation work.
    approx = math.floor if self.args.dataloader_drop_last else math.ceil
    num_update_steps_per_epoch = approx(num_update_steps_per_epoch)

    # At least one update for each epoch.
    num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)
    self.steps_per_epoch = num_update_steps_per_epoch

    if self.args.max_steps > 0:
        t_total = self.args.max_steps
        epochs = (self.args.max_steps // self.steps_per_epoch) + int(
            self.args.max_steps % self.steps_per_epoch > 0
        )
    else:
        t_total = self.steps_per_epoch * self.args.num_train_epochs
        epochs = self.args.num_train_epochs

    # Since ``self.args.num_train_epochs`` can be `float`, we make ``epochs`` be a `float` always.
    epochs = float(epochs)

    with self.args.strategy.scope():
        self.create_optimizer_and_scheduler(num_training_steps=t_total)
        folder = os.path.join(self.args.output_dir, PREFIX_CHECKPOINT_DIR)
        ckpt = tf.train.Checkpoint(optimizer=self.optimizer, model=self.model)
        self.model.ckpt_manager = tf.train.CheckpointManager(ckpt, folder, max_to_keep=self.args.save_total_limit)

        iterations = self.optimizer.iterations
        epochs_trained = 0
        steps_trained_in_current_epoch = 0
        if self.model.ckpt_manager.latest_checkpoint:
            logger.info(
                "Checkpoint file %s found and restoring from checkpoint", self.model.ckpt_manager.latest_checkpoint
            )
            ckpt.restore(self.model.ckpt_manager.latest_checkpoint).expect_partial()

            self.global_step = iterations.numpy()

            epochs_trained = self.global_step // self.steps_per_epoch
            steps_trained_in_current_epoch = self.global_step % self.steps_per_epoch

            logger.info("  Continuing training from checkpoint, will skip to saved global_step")
            logger.info("  Continuing training from epoch %d", epochs_trained)
            logger.info("  Continuing training from global step %d", self.global_step)
            logger.info("  Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)

        tf.summary.experimental.set_step(self.global_step)

        with self.tb_writer.as_default():
            tf.summary.text("args", self.args.to_json_string())

        self.tb_writer.flush()

        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", self.num_train_examples)
        # TODO: We might want to print a more precise ``epochs`` if self.args.max_steps > 0 ?
        logger.info("  Num Epochs = %d", epochs)
        logger.info("  Instantaneous batch size per device = %d", self.args.per_device_train_batch_size)
        logger.info(
            "  Total train batch size (w. parallel, distributed & accumulation) = %d", self.total_train_batch_size
        )
        logger.info("  Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps)
        logger.info("  Steps per epoch = %d", self.steps_per_epoch)
        logger.info("  Total optimization steps = %d", t_total)

        self.train_loss = tf.keras.metrics.Sum()
        start_time = datetime.datetime.now()

        for epoch_iter in range(epochs_trained, int(epochs)):
            # Reset the past mems state at the beginning of each epoch if necessary.
            if self.args.past_index >= 0:
                self._past = None

            for step, batch in enumerate(train_ds):
                # Skip past any already trained steps if resuming training
                if steps_trained_in_current_epoch > 0:
                    steps_trained_in_current_epoch -= 1
                    continue

                self.distributed_training_steps(batch)

                self.global_step = iterations.numpy()
                self.epoch_logging = epoch_iter + (step + 1) / self.steps_per_epoch

                training_loss = self.train_loss.result() / (step + 1)

                if self.args.debug:
                    logs = {}
                    logs["loss"] = training_loss.numpy()
                    logs["epoch"] = self.epoch_logging

                    self.log(logs)

                if self.global_step == 1 and self.args.debug:
                    with self.tb_writer.as_default():
                        tf.summary.trace_export(
                            name="training", step=self.global_step, profiler_outdir=self.args.logging_dir
                        )

                if (
                    self.args.eval_steps > 0
                    and self.args.evaluation_strategy == IntervalStrategy.STEPS
                    and self.global_step % self.args.eval_steps == 0
                ):
                    self.evaluate()

                if (self.args.logging_steps > 0 and self.global_step % self.args.logging_steps == 0) or (
                    self.global_step == 1 and self.args.logging_first_step
                ):
                    logs = {}
                    logs["loss"] = training_loss.numpy()
                    logs["learning_rate"] = self.lr_scheduler(self.global_step).numpy()
                    logs["epoch"] = self.epoch_logging

                    self.log(logs)

                if self.args.save_steps > 0 and self.global_step % self.args.save_steps == 0:
                    ckpt_save_path = self.model.ckpt_manager.save()

                    logger.info("Saving checkpoint for step {} at {}".format(self.global_step, ckpt_save_path))

                if self.args.max_steps > 0 and self.global_step >= t_total:
                    break

                if self.global_step % self.steps_per_epoch == 0:
                    break

            self.train_loss.reset_states()

            if self.args.max_steps > 0 and self.global_step >= self.args.max_steps:
                break

        end_time = datetime.datetime.now()

        logger.info("Training took: {}".format(str(end_time - start_time)))

    if self.args.past_index and hasattr(self, "_past"):
        # Clean the state at the end of training
        delattr(self, "_past")
start_point: [458, 4]
end_point: [613, 34]
language: python
docstring_language: en
docstring_language_predictions: ['en', 'error', 'th']
is_langid_reliable: False

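When max_steps is positive, train() runs exactly max_steps optimizer updates and derives the epoch count by rounding up to cover the final partial epoch. For example:

max_steps, steps_per_epoch = 2500, 400
epochs = (max_steps // steps_per_epoch) + int(max_steps % steps_per_epoch > 0)
print(epochs)  # 7: six full epochs of 400 steps plus a partial seventh
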
identifier: TFTrainer.training_step
parameters: (self, features, labels, nb_instances_in_global_batch)
docstring:

Perform a training step on features and labels.

Subclass and override to inject some custom behavior.

docstring_summary: Perform a training step on features and labels.
function:

def training_step(self, features, labels, nb_instances_in_global_batch):
    """
    Perform a training step on features and labels.

    Subclass and override to inject some custom behavior.
    """
    per_example_loss, _ = self.run_model(features, labels, True)
    scaled_loss = per_example_loss / tf.cast(nb_instances_in_global_batch, dtype=per_example_loss.dtype)
    gradients = tf.gradients(scaled_loss, self.model.trainable_variables)
    gradients = [
        g if g is not None else tf.zeros_like(v) for g, v in zip(gradients, self.model.trainable_variables)
    ]

    if self.args.gradient_accumulation_steps > 1:
        self.gradient_accumulator(gradients)

    self.train_loss.update_state(scaled_loss)

    if self.args.gradient_accumulation_steps == 1:
        return gradients
[ "def", "training_step", "(", "self", ",", "features", ",", "labels", ",", "nb_instances_in_global_batch", ")", ":", "per_example_loss", ",", "_", "=", "self", ".", "run_model", "(", "features", ",", "labels", ",", "True", ")", "scaled_loss", "=", "per_example_loss", "/", "tf", ".", "cast", "(", "nb_instances_in_global_batch", ",", "dtype", "=", "per_example_loss", ".", "dtype", ")", "gradients", "=", "tf", ".", "gradients", "(", "scaled_loss", ",", "self", ".", "model", ".", "trainable_variables", ")", "gradients", "=", "[", "g", "if", "g", "is", "not", "None", "else", "tf", ".", "zeros_like", "(", "v", ")", "for", "g", ",", "v", "in", "zip", "(", "gradients", ",", "self", ".", "model", ".", "trainable_variables", ")", "]", "if", "self", ".", "args", ".", "gradient_accumulation_steps", ">", "1", ":", "self", ".", "gradient_accumulator", "(", "gradients", ")", "self", ".", "train_loss", ".", "update_state", "(", "scaled_loss", ")", "if", "self", ".", "args", ".", "gradient_accumulation_steps", "==", "1", ":", "return", "gradients" ]
[ 615, 4 ]
[ 634, 28 ]
python
en
['en', 'error', 'th']
False
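A note on the None-gradient handling above: TensorFlow returns None for variables not connected to the loss, and the list comprehension swaps in zeros so the accumulator and optimizer always receive tensors. A minimal sketch of the same pattern in eager mode (tf.GradientTape here stands in for the graph-mode tf.gradients used by TFTrainer):

import tensorflow as tf

x = tf.Variable(3.0)
unused = tf.Variable(1.0)  # not connected to the loss below

with tf.GradientTape() as tape:
    loss = x * x

grads = tape.gradient(loss, [x, unused])  # [tf.Tensor(6.0), None]
# Replace None with zeros, mirroring the comprehension in training_step
grads = [g if g is not None else tf.zeros_like(v) for g, v in zip(grads, [x, unused])]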
TFTrainer.run_model
(self, features, labels, training)
Computes the loss of the given features and labels pair. Subclass and override this method if you want to inject some custom behavior. Args: features (:obj:`tf.Tensor`): A batch of input features. labels (:obj:`tf.Tensor`): A batch of labels. training (:obj:`bool`): Whether or not to run the model in training mode. Returns: A tuple of two :obj:`tf.Tensor`: The loss and logits.
Computes the loss of the given features and labels pair.
def run_model(self, features, labels, training): """ Computes the loss of the given features and labels pair. Subclass and override this method if you want to inject some custom behavior. Args: features (:obj:`tf.Tensor`): A batch of input features. labels (:obj:`tf.Tensor`): A batch of labels. training (:obj:`bool`): Whether or not to run the model in training mode. Returns: A tuple of two :obj:`tf.Tensor`: The loss and logits. """ if self.args.past_index >= 0 and getattr(self, "_past", None) is not None: features["mems"] = self._past if isinstance(labels, (dict)): outputs = self.model(features, training=training, **labels)[:2] else: outputs = self.model(features, labels=labels, training=training)[:2] loss, logits = outputs[:2] if self.args.past_index >= 0: self._past = outputs[self.args.past_index] return loss, logits
[ "def", "run_model", "(", "self", ",", "features", ",", "labels", ",", "training", ")", ":", "if", "self", ".", "args", ".", "past_index", ">=", "0", "and", "getattr", "(", "self", ",", "\"_past\"", ",", "None", ")", "is", "not", "None", ":", "features", "[", "\"mems\"", "]", "=", "self", ".", "_past", "if", "isinstance", "(", "labels", ",", "(", "dict", ")", ")", ":", "outputs", "=", "self", ".", "model", "(", "features", ",", "training", "=", "training", ",", "*", "*", "labels", ")", "[", ":", "2", "]", "else", ":", "outputs", "=", "self", ".", "model", "(", "features", ",", "labels", "=", "labels", ",", "training", "=", "training", ")", "[", ":", "2", "]", "loss", ",", "logits", "=", "outputs", "[", ":", "2", "]", "if", "self", ".", "args", ".", "past_index", ">=", "0", ":", "self", ".", "_past", "=", "outputs", "[", "self", ".", "args", ".", "past_index", "]", "return", "loss", ",", "logits" ]
[ 722, 4 ]
[ 750, 27 ]
python
en
['en', 'error', 'th']
False
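Because run_model is the documented override point, custom behavior goes in a subclass that wraps the parent call. A hypothetical sketch (the extra penalty term is illustrative only, not part of TFTrainer):

import tensorflow as tf
from transformers import TFTrainer

class MyTrainer(TFTrainer):
    def run_model(self, features, labels, training):
        loss, logits = super().run_model(features, labels, training)
        # Illustrative tweak: add a small L2 penalty on the logits
        loss += 0.01 * tf.reduce_mean(tf.square(logits))
        return loss, logits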
TFTrainer.predict
(self, test_dataset: tf.data.Dataset)
Run prediction and return predictions and potential metrics. Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method will also return metrics, like in :obj:`evaluate()`. Args: test_dataset (:class:`~tf.data.Dataset`): Dataset to run the predictions on. The dataset should yield tuples of ``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling ``model(features, **labels)`` Returns: `NamedTuple` A namedtuple with the following keys: - predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`. - label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some). - metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset contained labels).
Run prediction and return predictions and potential metrics.
def predict(self, test_dataset: tf.data.Dataset) -> PredictionOutput: """ Run prediction and return predictions and potential metrics. Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method will also return metrics, like in :obj:`evaluate()`. Args: test_dataset (:class:`~tf.data.Dataset`): Dataset to run the predictions on. The dataset should yield tuples of ``(features, labels)`` where ``features`` is a dict of input features and ``labels`` is the labels. If ``labels`` is a tensor, the loss is calculated by the model by calling ``model(features, labels=labels)``. If ``labels`` is a dict, such as when using a QuestionAnswering head model with multiple targets, the loss is instead calculated by calling ``model(features, **labels)`` Returns: `NamedTuple` A namedtuple with the following keys: - predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`. - label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some). - metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset contained labels). """ test_ds, steps, num_examples = self.get_test_tfdataset(test_dataset) return self.prediction_loop(test_ds, steps, num_examples, description="Prediction")
[ "def", "predict", "(", "self", ",", "test_dataset", ":", "tf", ".", "data", ".", "Dataset", ")", "->", "PredictionOutput", ":", "test_ds", ",", "steps", ",", "num_examples", "=", "self", ".", "get_test_tfdataset", "(", "test_dataset", ")", "return", "self", ".", "prediction_loop", "(", "test_ds", ",", "steps", ",", "num_examples", ",", "description", "=", "\"Prediction\"", ")" ]
[ 752, 4 ]
[ 776, 91 ]
python
en
['en', 'error', 'th']
False
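Typical usage, per the docstring, pairs predict with a tf.data.Dataset that yields (features, labels) tuples. A hedged sketch, assuming trainer is an already configured TFTrainer:

output = trainer.predict(test_dataset)
print(output.predictions.shape)  # np.ndarray of raw model outputs
print(output.label_ids)          # labels, if the dataset contained them
print(output.metrics)            # metrics dict, if labels were present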
TFTrainer.save_model
(self, output_dir: Optional[str] = None)
Will save the model, so you can reload it using :obj:`from_pretrained()`.
Will save the model, so you can reload it using :obj:`from_pretrained()`.
def save_model(self, output_dir: Optional[str] = None): """ Will save the model, so you can reload it using :obj:`from_pretrained()`. """ output_dir = output_dir if output_dir is not None else self.args.output_dir logger.info("Saving model in {}".format(output_dir)) if not isinstance(self.model, TFPreTrainedModel): raise ValueError("Trainer.model appears to not be a PreTrainedModel") self.model.save_pretrained(output_dir)
[ "def", "save_model", "(", "self", ",", "output_dir", ":", "Optional", "[", "str", "]", "=", "None", ")", ":", "output_dir", "=", "output_dir", "if", "output_dir", "is", "not", "None", "else", "self", ".", "args", ".", "output_dir", "logger", ".", "info", "(", "\"Saving model in {}\"", ".", "format", "(", "output_dir", ")", ")", "if", "not", "isinstance", "(", "self", ".", "model", ",", "TFPreTrainedModel", ")", ":", "raise", "ValueError", "(", "\"Trainer.model appears to not be a PreTrainedModel\"", ")", "self", ".", "model", ".", "save_pretrained", "(", "output_dir", ")" ]
[ 778, 4 ]
[ 789, 46 ]
python
en
['en', 'error', 'th']
False
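The round trip the docstring promises looks like this; the concrete model class is an assumption for illustration:

from transformers import TFAutoModel

trainer.save_model("./my_model")  # writes config and weights via save_pretrained
# later, reload with the matching from_pretrained
model = TFAutoModel.from_pretrained("./my_model")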
Preprocessor.__init__
(self, corpus, target=None, **kwargs)
The corpus is the `HTMLCorpusReader` to preprocess and pickle. The target is the directory on disk to output the pickled corpus to.
The corpus is the `HTMLCorpusReader` to preprocess and pickle. The target is the directory on disk to output the pickled corpus to.
def __init__(self, corpus, target=None, **kwargs): """ The corpus is the `HTMLCorpusReader` to preprocess and pickle. The target is the directory on disk to output the pickled corpus to. """ self.corpus = corpus self.target = target
[ "def", "__init__", "(", "self", ",", "corpus", ",", "target", "=", "None", ",", "*", "*", "kwargs", ")", ":", "self", ".", "corpus", "=", "corpus", "self", ".", "target", "=", "target" ]
[ 18, 4 ]
[ 24, 28 ]
python
en
['en', 'error', 'th']
False
Preprocessor.fileids
(self, fileids=None, categories=None)
Helper function to access the fileids of the corpus
Helper function to access the fileids of the corpus
def fileids(self, fileids=None, categories=None): """ Helper function to access the fileids of the corpus """ fileids = self.corpus.resolve(fileids, categories) if fileids: return fileids return self.corpus.fileids()
[ "def", "fileids", "(", "self", ",", "fileids", "=", "None", ",", "categories", "=", "None", ")", ":", "fileids", "=", "self", ".", "corpus", ".", "resolve", "(", "fileids", ",", "categories", ")", "if", "fileids", ":", "return", "fileids", "return", "self", ".", "corpus", ".", "fileids", "(", ")" ]
[ 26, 4 ]
[ 33, 36 ]
python
en
['en', 'error', 'th']
False
Preprocessor.abspath
(self, fileid)
Returns the absolute path to the target fileid from the corpus fileid.
Returns the absolute path to the target fileid from the corpus fileid.
def abspath(self, fileid): """ Returns the absolute path to the target fileid from the corpus fileid. """ # Find the directory, relative from the corpus root. parent = os.path.relpath( os.path.dirname(self.corpus.abspath(fileid)), self.corpus.root ) # Compute the name parts to reconstruct basename = os.path.basename(fileid) name, ext = os.path.splitext(basename) # Create the pickle file extension basename = name + '.pickle' # Return the path to the file relative to the target. return os.path.normpath(os.path.join(self.target, parent, basename))
[ "def", "abspath", "(", "self", ",", "fileid", ")", ":", "# Find the directory, relative from the corpus root.", "parent", "=", "os", ".", "path", ".", "relpath", "(", "os", ".", "path", ".", "dirname", "(", "self", ".", "corpus", ".", "abspath", "(", "fileid", ")", ")", ",", "self", ".", "corpus", ".", "root", ")", "# Compute the name parts to reconstruct", "basename", "=", "os", ".", "path", ".", "basename", "(", "fileid", ")", "name", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "basename", ")", "# Create the pickle file extension", "basename", "=", "name", "+", "'.pickle'", "# Return the path to the file relative to the target.", "return", "os", ".", "path", ".", "normpath", "(", "os", ".", "path", ".", "join", "(", "self", ".", "target", ",", "parent", ",", "basename", ")", ")" ]
[ 35, 4 ]
[ 52, 76 ]
python
en
['en', 'error', 'th']
False
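Concretely, abspath mirrors the corpus layout under the target directory and swaps the extension for .pickle. The same path arithmetic in isolation (file names are made up):

import os

target = "corpus_processed"
parent = "news/2020"  # dirname of the fileid, relative to the corpus root
name, ext = os.path.splitext("article1.html")
print(os.path.normpath(os.path.join(target, parent, name + ".pickle")))
# corpus_processed/news/2020/article1.pickle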
Preprocessor.tokenize
(self, fileid)
Segments, tokenizes, and tags a document in the corpus. Returns a generator of paragraphs, which are lists of sentences, which in turn are lists of part of speech tagged words.
Segments, tokenizes, and tags a document in the corpus. Returns a generator of paragraphs, which are lists of sentences, which in turn are lists of part of speech tagged words.
def tokenize(self, fileid): """ Segments, tokenizes, and tags a document in the corpus. Returns a generator of paragraphs, which are lists of sentences, which in turn are lists of part of speech tagged words. """ for paragraph in self.corpus.paras(fileids=fileid): yield [ pos_tag(wordpunct_tokenize(sent)) for sent in sent_tokenize(paragraph) ]
[ "def", "tokenize", "(", "self", ",", "fileid", ")", ":", "for", "paragraph", "in", "self", ".", "corpus", ".", "paras", "(", "fileids", "=", "fileid", ")", ":", "yield", "[", "pos_tag", "(", "wordpunct_tokenize", "(", "sent", ")", ")", "for", "sent", "in", "sent_tokenize", "(", "paragraph", ")", "]" ]
[ 54, 4 ]
[ 64, 13 ]
python
en
['en', 'error', 'th']
False
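The nested structure tokenize yields (paragraphs of sentences of (word, tag) pairs) can be reproduced with the same NLTK calls directly; note that pos_tag and sent_tokenize need the usual NLTK data packages downloaded:

from nltk import pos_tag, sent_tokenize, wordpunct_tokenize

paragraph = "The corpus is small. It still works."
tagged = [pos_tag(wordpunct_tokenize(sent)) for sent in sent_tokenize(paragraph)]
# e.g. [[('The', 'DT'), ('corpus', 'NN'), ...], [('It', 'PRP'), ...]]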
Preprocessor.process
(self, fileid)
For a single file, does the following preprocessing work: 1. Checks the location on disk to make sure no errors occur. 2. Gets all paragraphs for the given text. 3. Segments the paragraphs with the sent_tokenizer 4. Tokenizes the sentences with the wordpunct_tokenizer 5. Tags the sentences using the default pos_tagger 6. Writes the document as a pickle to the target location. This method is called multiple times from the transform runner.
For a single file, does the following preprocessing work: 1. Checks the location on disk to make sure no errors occur. 2. Gets all paragraphs for the given text. 3. Segments the paragraphs with the sent_tokenizer 4. Tokenizes the sentences with the wordpunct_tokenizer 5. Tags the sentences using the default pos_tagger 6. Writes the document as a pickle to the target location. This method is called multiple times from the transform runner.
def process(self, fileid): """ For a single file, does the following preprocessing work: 1. Checks the location on disk to make sure no errors occur. 2. Gets all paragraphs for the given text. 3. Segments the paragraphs with the sent_tokenizer 4. Tokenizes the sentences with the wordpunct_tokenizer 5. Tags the sentences using the default pos_tagger 6. Writes the document as a pickle to the target location. This method is called multiple times from the transform runner. """ # Compute the outpath to write the file to. target = self.abspath(fileid) parent = os.path.dirname(target) # Make sure the directory exists if not os.path.exists(parent): os.makedirs(parent) # Make sure that the parent is a directory and not a file if not os.path.isdir(parent): raise ValueError( "Please supply a directory to write preprocessed data to." ) # Create a data structure for the pickle document = list(self.tokenize(fileid)) # Open and serialize the pickle to disk with open(target, 'wb') as f: pickle.dump(document, f, pickle.HIGHEST_PROTOCOL) # Clean up the document del document # Return the target fileid return target
[ "def", "process", "(", "self", ",", "fileid", ")", ":", "# Compute the outpath to write the file to.", "target", "=", "self", ".", "abspath", "(", "fileid", ")", "parent", "=", "os", ".", "path", ".", "dirname", "(", "target", ")", "# Make sure the directory exists", "if", "not", "os", ".", "path", ".", "exists", "(", "parent", ")", ":", "os", ".", "makedirs", "(", "parent", ")", "# Make sure that the parent is a directory and not a file", "if", "not", "os", ".", "path", ".", "isdir", "(", "parent", ")", ":", "raise", "ValueError", "(", "\"Please supply a directory to write preprocessed data to.\"", ")", "# Create a data structure for the pickle", "document", "=", "list", "(", "self", ".", "tokenize", "(", "fileid", ")", ")", "# Open and serialize the pickle to disk", "with", "open", "(", "target", ",", "'wb'", ")", "as", "f", ":", "pickle", ".", "dump", "(", "document", ",", "f", ",", "pickle", ".", "HIGHEST_PROTOCOL", ")", "# Clean up the document", "del", "document", "# Return the target fileid", "return", "target" ]
[ 66, 4 ]
[ 102, 21 ]
python
en
['en', 'error', 'th']
False
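Reading a document that process wrote back out is a plain pickle.load; the path below assumes the example layout sketched earlier:

import pickle

with open("corpus_processed/news/2020/article1.pickle", "rb") as f:
    document = pickle.load(f)  # paragraphs -> sentences -> (word, tag) tuples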
Preprocessor.transform
(self, fileids=None, categories=None)
Transform the wrapped corpus, writing out the segmented, tokenized, and part of speech tagged corpus as a pickle to the target directory. This method will also directly copy files that are in the corpus.root directory that are not matched by the corpus.fileids().
Transform the wrapped corpus, writing out the segmented, tokenized, and part of speech tagged corpus as a pickle to the target directory. This method will also directly copy files that are in the corpus.root directory that are not matched by the corpus.fileids().
def transform(self, fileids=None, categories=None): """ Transform the wrapped corpus, writing out the segmented, tokenized, and part of speech tagged corpus as a pickle to the target directory. This method will also directly copy files that are in the corpus.root directory that are not matched by the corpus.fileids(). """ # Make the target directory if it doesn't already exist if not os.path.exists(self.target): os.makedirs(self.target) # Resolve the fileids to start processing and return the list of # target file ids to pass to downstream transformers. return [ self.process(fileid) for fileid in self.fileids(fileids, categories) ]
[ "def", "transform", "(", "self", ",", "fileids", "=", "None", ",", "categories", "=", "None", ")", ":", "# Make the target directory if it doesn't already exist", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "target", ")", ":", "os", ".", "makedirs", "(", "self", ".", "target", ")", "# Resolve the fileids to start processing and return the list of ", "# target file ids to pass to downstream transformers. ", "return", "[", "self", ".", "process", "(", "fileid", ")", "for", "fileid", "in", "self", ".", "fileids", "(", "fileids", ",", "categories", ")", "]" ]
[ 104, 4 ]
[ 120, 9 ]
python
en
['en', 'error', 'th']
False
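End to end, the Preprocessor wraps a reader and writes the pickled mirror in one call, as sketched below; the HTMLCorpusReader constructor signature is an assumption:

corpus = HTMLCorpusReader("corpus_raw")  # reader class named in the docstrings above
preprocessor = Preprocessor(corpus, target="corpus_processed")
targets = preprocessor.transform()  # list of written .pickle paths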
test_config_flow_registers_webhook
(hass, aiohttp_client)
Test setting up Twilio and sending webhook.
Test setting up Twilio and sending webhook.
async def test_config_flow_registers_webhook(hass, aiohttp_client): """Test setting up Twilio and sending webhook.""" with patch("homeassistant.util.get_local_ip", return_value="example.com"): result = await hass.config_entries.flow.async_init( "twilio", context={"source": "user"} ) assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result result = await hass.config_entries.flow.async_configure(result["flow_id"], {}) assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY webhook_id = result["result"].data["webhook_id"] twilio_events = [] @callback def handle_event(event): """Handle Twilio event.""" twilio_events.append(event) hass.bus.async_listen(twilio.RECEIVED_DATA, handle_event) client = await aiohttp_client(hass.http.app) await client.post(f"/api/webhook/{webhook_id}", data={"hello": "twilio"}) assert len(twilio_events) == 1 assert twilio_events[0].data["webhook_id"] == webhook_id assert twilio_events[0].data["hello"] == "twilio"
[ "async", "def", "test_config_flow_registers_webhook", "(", "hass", ",", "aiohttp_client", ")", ":", "with", "patch", "(", "\"homeassistant.util.get_local_ip\"", ",", "return_value", "=", "\"example.com\"", ")", ":", "result", "=", "await", "hass", ".", "config_entries", ".", "flow", ".", "async_init", "(", "\"twilio\"", ",", "context", "=", "{", "\"source\"", ":", "\"user\"", "}", ")", "assert", "result", "[", "\"type\"", "]", "==", "data_entry_flow", ".", "RESULT_TYPE_FORM", ",", "result", "result", "=", "await", "hass", ".", "config_entries", ".", "flow", ".", "async_configure", "(", "result", "[", "\"flow_id\"", "]", ",", "{", "}", ")", "assert", "result", "[", "\"type\"", "]", "==", "data_entry_flow", ".", "RESULT_TYPE_CREATE_ENTRY", "webhook_id", "=", "result", "[", "\"result\"", "]", ".", "data", "[", "\"webhook_id\"", "]", "twilio_events", "=", "[", "]", "@", "callback", "def", "handle_event", "(", "event", ")", ":", "\"\"\"Handle Twilio event.\"\"\"", "twilio_events", ".", "append", "(", "event", ")", "hass", ".", "bus", ".", "async_listen", "(", "twilio", ".", "RECEIVED_DATA", ",", "handle_event", ")", "client", "=", "await", "aiohttp_client", "(", "hass", ".", "http", ".", "app", ")", "await", "client", ".", "post", "(", "f\"/api/webhook/{webhook_id}\"", ",", "data", "=", "{", "\"hello\"", ":", "\"twilio\"", "}", ")", "assert", "len", "(", "twilio_events", ")", "==", "1", "assert", "twilio_events", "[", "0", "]", ".", "data", "[", "\"webhook_id\"", "]", "==", "webhook_id", "assert", "twilio_events", "[", "0", "]", ".", "data", "[", "\"hello\"", "]", "==", "\"twilio\"" ]
[ 8, 0 ]
[ 34, 53 ]
python
en
['en', 'zu', 'en']
True
get_service
(hass, config, discovery_info=None)
Get the ClickSend notification service.
Get the ClickSend notification service.
def get_service(hass, config, discovery_info=None): """Get the ClickSend notification service.""" if not _authenticate(config): _LOGGER.error("You are not authorized to access ClickSend") return None return ClicksendNotificationService(config)
[ "def", "get_service", "(", "hass", ",", "config", ",", "discovery_info", "=", "None", ")", ":", "if", "not", "_authenticate", "(", "config", ")", ":", "_LOGGER", ".", "error", "(", "\"You are not authorized to access ClickSend\"", ")", "return", "None", "return", "ClicksendNotificationService", "(", "config", ")" ]
[ 44, 0 ]
[ 49, 47 ]
python
en
['en', 'en', 'en']
True
_authenticate
(config)
Authenticate with ClickSend.
Authenticate with ClickSend.
def _authenticate(config): """Authenticate with ClickSend.""" api_url = f"{BASE_API_URL}/account" resp = requests.get( api_url, headers=HEADERS, auth=(config[CONF_USERNAME], config[CONF_API_KEY]), timeout=TIMEOUT, ) if resp.status_code != HTTP_OK: return False return True
[ "def", "_authenticate", "(", "config", ")", ":", "api_url", "=", "f\"{BASE_API_URL}/account\"", "resp", "=", "requests", ".", "get", "(", "api_url", ",", "headers", "=", "HEADERS", ",", "auth", "=", "(", "config", "[", "CONF_USERNAME", "]", ",", "config", "[", "CONF_API_KEY", "]", ")", ",", "timeout", "=", "TIMEOUT", ",", ")", "if", "resp", ".", "status_code", "!=", "HTTP_OK", ":", "return", "False", "return", "True" ]
[ 94, 0 ]
[ 105, 15 ]
python
en
['en', 'en', 'en']
True
ClicksendNotificationService.__init__
(self, config)
Initialize the service.
Initialize the service.
def __init__(self, config): """Initialize the service.""" self.username = config[CONF_USERNAME] self.api_key = config[CONF_API_KEY] self.recipients = config[CONF_RECIPIENT] self.sender = config[CONF_SENDER]
[ "def", "__init__", "(", "self", ",", "config", ")", ":", "self", ".", "username", "=", "config", "[", "CONF_USERNAME", "]", "self", ".", "api_key", "=", "config", "[", "CONF_API_KEY", "]", "self", ".", "recipients", "=", "config", "[", "CONF_RECIPIENT", "]", "self", ".", "sender", "=", "config", "[", "CONF_SENDER", "]" ]
[ 55, 4 ]
[ 60, 41 ]
python
en
['en', 'en', 'en']
True
ClicksendNotificationService.send_message
(self, message="", **kwargs)
Send a message to a user.
Send a message to a user.
def send_message(self, message="", **kwargs): """Send a message to a user.""" data = {"messages": []} for recipient in self.recipients: data["messages"].append( { "source": "hass.notify", "from": self.sender, "to": recipient, "body": message, } ) api_url = f"{BASE_API_URL}/sms/send" resp = requests.post( api_url, data=json.dumps(data), headers=HEADERS, auth=(self.username, self.api_key), timeout=TIMEOUT, ) if resp.status_code == HTTP_OK: return obj = json.loads(resp.text) response_msg = obj.get("response_msg") response_code = obj.get("response_code") _LOGGER.error( "Error %s : %s (Code %s)", resp.status_code, response_msg, response_code )
[ "def", "send_message", "(", "self", ",", "message", "=", "\"\"", ",", "*", "*", "kwargs", ")", ":", "data", "=", "{", "\"messages\"", ":", "[", "]", "}", "for", "recipient", "in", "self", ".", "recipients", ":", "data", "[", "\"messages\"", "]", ".", "append", "(", "{", "\"source\"", ":", "\"hass.notify\"", ",", "\"from\"", ":", "self", ".", "sender", ",", "\"to\"", ":", "recipient", ",", "\"body\"", ":", "message", ",", "}", ")", "api_url", "=", "f\"{BASE_API_URL}/sms/send\"", "resp", "=", "requests", ".", "post", "(", "api_url", ",", "data", "=", "json", ".", "dumps", "(", "data", ")", ",", "headers", "=", "HEADERS", ",", "auth", "=", "(", "self", ".", "username", ",", "self", ".", "api_key", ")", ",", "timeout", "=", "TIMEOUT", ",", ")", "if", "resp", ".", "status_code", "==", "HTTP_OK", ":", "return", "obj", "=", "json", ".", "loads", "(", "resp", ".", "text", ")", "response_msg", "=", "obj", ".", "get", "(", "\"response_msg\"", ")", "response_code", "=", "obj", ".", "get", "(", "\"response_code\"", ")", "_LOGGER", ".", "error", "(", "\"Error %s : %s (Code %s)\"", ",", "resp", ".", "status_code", ",", "response_msg", ",", "response_code", ")" ]
[ 62, 4 ]
[ 91, 9 ]
python
en
['en', 'en', 'en']
True
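The body send_message posts is a small JSON document, one entry per recipient. A sketch of the payload for two recipients (numbers and sender are placeholders):

import json

payload = {
    "messages": [
        {"source": "hass.notify", "from": "HASS", "to": "+15551230001", "body": "hello"},
        {"source": "hass.notify", "from": "HASS", "to": "+15551230002", "body": "hello"},
    ]
}
body = json.dumps(payload)  # POSTed to {BASE_API_URL}/sms/send with basic auth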
async_setup_entry
(hass, config_entry, async_add_entities)
Set up an Axis binary sensor.
Set up an Axis binary sensor.
async def async_setup_entry(hass, config_entry, async_add_entities): """Set up an Axis binary sensor.""" device = hass.data[AXIS_DOMAIN][config_entry.unique_id] @callback def async_add_sensor(event_id): """Add binary sensor from Axis device.""" event = device.api.event[event_id] if event.CLASS != CLASS_OUTPUT and not ( event.CLASS == CLASS_LIGHT and event.TYPE == "Light" ): async_add_entities([AxisBinarySensor(event, device)]) device.listeners.append( async_dispatcher_connect(hass, device.signal_new_event, async_add_sensor) )
[ "async", "def", "async_setup_entry", "(", "hass", ",", "config_entry", ",", "async_add_entities", ")", ":", "device", "=", "hass", ".", "data", "[", "AXIS_DOMAIN", "]", "[", "config_entry", ".", "unique_id", "]", "@", "callback", "def", "async_add_sensor", "(", "event_id", ")", ":", "\"\"\"Add binary sensor from Axis device.\"\"\"", "event", "=", "device", ".", "api", ".", "event", "[", "event_id", "]", "if", "event", ".", "CLASS", "!=", "CLASS_OUTPUT", "and", "not", "(", "event", ".", "CLASS", "==", "CLASS_LIGHT", "and", "event", ".", "TYPE", "==", "\"Light\"", ")", ":", "async_add_entities", "(", "[", "AxisBinarySensor", "(", "event", ",", "device", ")", "]", ")", "device", ".", "listeners", ".", "append", "(", "async_dispatcher_connect", "(", "hass", ",", "device", ".", "signal_new_event", ",", "async_add_sensor", ")", ")" ]
[ 39, 0 ]
[ 55, 5 ]
python
en
['en', 'haw', 'en']
True
AxisBinarySensor.__init__
(self, event, device)
Initialize the Axis binary sensor.
Initialize the Axis binary sensor.
def __init__(self, event, device): """Initialize the Axis binary sensor.""" super().__init__(event, device) self.cancel_scheduled_update = None
[ "def", "__init__", "(", "self", ",", "event", ",", "device", ")", ":", "super", "(", ")", ".", "__init__", "(", "event", ",", "device", ")", "self", ".", "cancel_scheduled_update", "=", "None" ]
[ 61, 4 ]
[ 64, 43 ]
python
en
['en', 'en', 'en']
True
AxisBinarySensor.update_callback
(self, no_delay=False)
Update the sensor's state, if needed. Parameter no_delay is True when device_event_reachable is sent.
Update the sensor's state, if needed.
def update_callback(self, no_delay=False): """Update the sensor's state, if needed. Parameter no_delay is True when device_event_reachable is sent. """ @callback def scheduled_update(now): """Timer callback for sensor update.""" self.cancel_scheduled_update = None self.async_write_ha_state() if self.cancel_scheduled_update is not None: self.cancel_scheduled_update() self.cancel_scheduled_update = None if self.is_on or self.device.option_trigger_time == 0 or no_delay: self.async_write_ha_state() return self.cancel_scheduled_update = async_track_point_in_utc_time( self.hass, scheduled_update, utcnow() + timedelta(seconds=self.device.option_trigger_time), )
[ "def", "update_callback", "(", "self", ",", "no_delay", "=", "False", ")", ":", "@", "callback", "def", "scheduled_update", "(", "now", ")", ":", "\"\"\"Timer callback for sensor update.\"\"\"", "self", ".", "cancel_scheduled_update", "=", "None", "self", ".", "async_write_ha_state", "(", ")", "if", "self", ".", "cancel_scheduled_update", "is", "not", "None", ":", "self", ".", "cancel_scheduled_update", "(", ")", "self", ".", "cancel_scheduled_update", "=", "None", "if", "self", ".", "is_on", "or", "self", ".", "device", ".", "option_trigger_time", "==", "0", "or", "no_delay", ":", "self", ".", "async_write_ha_state", "(", ")", "return", "self", ".", "cancel_scheduled_update", "=", "async_track_point_in_utc_time", "(", "self", ".", "hass", ",", "scheduled_update", ",", "utcnow", "(", ")", "+", "timedelta", "(", "seconds", "=", "self", ".", "device", ".", "option_trigger_time", ")", ",", ")" ]
[ 67, 4 ]
[ 91, 9 ]
python
en
['en', 'en', 'en']
True
AxisBinarySensor.is_on
(self)
Return true if event is active.
Return true if event is active.
def is_on(self): """Return true if event is active.""" return self.event.is_tripped
[ "def", "is_on", "(", "self", ")", ":", "return", "self", ".", "event", ".", "is_tripped" ]
[ 94, 4 ]
[ 96, 36 ]
python
en
['en', 'nl', 'en']
True
AxisBinarySensor.name
(self)
Return the name of the event.
Return the name of the event.
def name(self): """Return the name of the event.""" if ( self.event.CLASS == CLASS_INPUT and self.event.id and self.device.api.vapix.ports[self.event.id].name ): return ( f"{self.device.name} {self.device.api.vapix.ports[self.event.id].name}" ) if self.event.CLASS == CLASS_MOTION: for event_class, event_data in ( (FenceGuard, self.device.api.vapix.fence_guard), (LoiteringGuard, self.device.api.vapix.loitering_guard), (MotionGuard, self.device.api.vapix.motion_guard), (Vmd4, self.device.api.vapix.vmd4), ): if ( isinstance(self.event, event_class) and event_data and self.event.id in event_data ): return f"{self.device.name} {self.event.TYPE} {event_data[self.event.id].name}" return super().name
[ "def", "name", "(", "self", ")", ":", "if", "(", "self", ".", "event", ".", "CLASS", "==", "CLASS_INPUT", "and", "self", ".", "event", ".", "id", "and", "self", ".", "device", ".", "api", ".", "vapix", ".", "ports", "[", "self", ".", "event", ".", "id", "]", ".", "name", ")", ":", "return", "(", "f\"{self.device.name} {self.device.api.vapix.ports[self.event.id].name}\"", ")", "if", "self", ".", "event", ".", "CLASS", "==", "CLASS_MOTION", ":", "for", "event_class", ",", "event_data", "in", "(", "(", "FenceGuard", ",", "self", ".", "device", ".", "api", ".", "vapix", ".", "fence_guard", ")", ",", "(", "LoiteringGuard", ",", "self", ".", "device", ".", "api", ".", "vapix", ".", "loitering_guard", ")", ",", "(", "MotionGuard", ",", "self", ".", "device", ".", "api", ".", "vapix", ".", "motion_guard", ")", ",", "(", "Vmd4", ",", "self", ".", "device", ".", "api", ".", "vapix", ".", "vmd4", ")", ",", ")", ":", "if", "(", "isinstance", "(", "self", ".", "event", ",", "event_class", ")", "and", "event_data", "and", "self", ".", "event", ".", "id", "in", "event_data", ")", ":", "return", "f\"{self.device.name} {self.event.TYPE} {event_data[self.event.id].name}\"", "return", "super", "(", ")", ".", "name" ]
[ 99, 4 ]
[ 125, 27 ]
python
en
['en', 'en', 'en']
True
AxisBinarySensor.device_class
(self)
Return the class of the sensor.
Return the class of the sensor.
def device_class(self): """Return the class of the sensor.""" return DEVICE_CLASS.get(self.event.CLASS)
[ "def", "device_class", "(", "self", ")", ":", "return", "DEVICE_CLASS", ".", "get", "(", "self", ".", "event", ".", "CLASS", ")" ]
[ 128, 4 ]
[ 130, 49 ]
python
en
['en', 'pt', 'en']
True
TFAlbertEmbeddings.call
( self, input_ids: tf.Tensor = None, position_ids: tf.Tensor = None, token_type_ids: tf.Tensor = None, inputs_embeds: tf.Tensor = None, training: bool = False, )
Applies embedding based on inputs tensor. Returns: final_embeddings (:obj:`tf.Tensor`): output embedding tensor.
Applies embedding based on inputs tensor.
def call( self, input_ids: tf.Tensor = None, position_ids: tf.Tensor = None, token_type_ids: tf.Tensor = None, inputs_embeds: tf.Tensor = None, training: bool = False, ) -> tf.Tensor: """ Applies embedding based on inputs tensor. Returns: final_embeddings (:obj:`tf.Tensor`): output embedding tensor. """ assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) if position_ids is None: position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1)) token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings = self.embeddings_sum(inputs=[inputs_embeds, position_embeds, token_type_embeds]) final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings
[ "def", "call", "(", "self", ",", "input_ids", ":", "tf", ".", "Tensor", "=", "None", ",", "position_ids", ":", "tf", ".", "Tensor", "=", "None", ",", "token_type_ids", ":", "tf", ".", "Tensor", "=", "None", ",", "inputs_embeds", ":", "tf", ".", "Tensor", "=", "None", ",", "training", ":", "bool", "=", "False", ",", ")", "->", "tf", ".", "Tensor", ":", "assert", "not", "(", "input_ids", "is", "None", "and", "inputs_embeds", "is", "None", ")", "if", "input_ids", "is", "not", "None", ":", "inputs_embeds", "=", "tf", ".", "gather", "(", "params", "=", "self", ".", "weight", ",", "indices", "=", "input_ids", ")", "input_shape", "=", "shape_list", "(", "inputs_embeds", ")", "[", ":", "-", "1", "]", "if", "token_type_ids", "is", "None", ":", "token_type_ids", "=", "tf", ".", "fill", "(", "dims", "=", "input_shape", ",", "value", "=", "0", ")", "if", "position_ids", "is", "None", ":", "position_ids", "=", "tf", ".", "expand_dims", "(", "tf", ".", "range", "(", "start", "=", "0", ",", "limit", "=", "input_shape", "[", "-", "1", "]", ")", ",", "axis", "=", "0", ")", "position_embeds", "=", "tf", ".", "gather", "(", "params", "=", "self", ".", "position_embeddings", ",", "indices", "=", "position_ids", ")", "position_embeds", "=", "tf", ".", "tile", "(", "input", "=", "position_embeds", ",", "multiples", "=", "(", "input_shape", "[", "0", "]", ",", "1", ",", "1", ")", ")", "token_type_embeds", "=", "tf", ".", "gather", "(", "params", "=", "self", ".", "token_type_embeddings", ",", "indices", "=", "token_type_ids", ")", "final_embeddings", "=", "self", ".", "embeddings_sum", "(", "inputs", "=", "[", "inputs_embeds", ",", "position_embeds", ",", "token_type_embeds", "]", ")", "final_embeddings", "=", "self", ".", "LayerNorm", "(", "inputs", "=", "final_embeddings", ")", "final_embeddings", "=", "self", ".", "dropout", "(", "inputs", "=", "final_embeddings", ",", "training", "=", "training", ")", "return", "final_embeddings" ]
[ 153, 4 ]
[ 187, 31 ]
python
en
['en', 'error', 'th']
False
_inplace_fused_prox_jv_slow
(y_hat, dout)
Not efficient in Python for long sequences, but a template for a Cython implementation.
Not efficient in Python for long sequences, but a template for a Cython implementation.
def _inplace_fused_prox_jv_slow(y_hat, dout): """Not efficient in Python for long sequences, but a template for a Cython implementation.""" n_features = len(dout) for i in range(n_features + 1): if i in (0, n_features) or y_hat[i] != y_hat[i - 1]: if i > 0: dout[last_ix:i] = acc / n if i < n_features: last_ix = i acc = dout[i] n = 1 else: acc += dout[i] n += 1 return dout
[ "def", "_inplace_fused_prox_jv_slow", "(", "y_hat", ",", "dout", ")", ":", "n_features", "=", "len", "(", "dout", ")", "for", "i", "in", "range", "(", "n_features", "+", "1", ")", ":", "if", "i", "in", "(", "0", ",", "n_features", ")", "or", "y_hat", "[", "i", "]", "!=", "y_hat", "[", "i", "-", "1", "]", ":", "if", "i", ">", "0", ":", "dout", "[", "last_ix", ":", "i", "]", "=", "acc", "/", "n", "if", "i", "<", "n_features", ":", "last_ix", "=", "i", "acc", "=", "dout", "[", "i", "]", "n", "=", "1", "else", ":", "acc", "+=", "dout", "[", "i", "]", "n", "+=", "1", "return", "dout" ]
[ 21, 0 ]
[ 38, 15 ]
python
en
['en', 'en', 'en']
True
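What the function computes: dout is averaged over each constant run of y_hat, which is the Jacobian-vector product of the fused-lasso prox. A worked example (a NumPy array is needed, since the slice assignment broadcasts a scalar):

import numpy as np

y_hat = np.array([1.0, 1.0, 2.0, 2.0, 2.0])  # two constant runs
dout = np.array([1.0, 3.0, 3.0, 6.0, 0.0])
_inplace_fused_prox_jv_slow(y_hat, dout)
print(dout)  # [2. 2. 3. 3. 3.] -- each run replaced by its mean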
RetiariiAdvisor.handle_initialize
(self, data)
callback for initializing the advisor Parameters ---------- data: dict search space
callback for initializing the advisor Parameters ---------- data: dict search space
def handle_initialize(self, data): """callback for initializing the advisor Parameters ---------- data: dict search space """ self.handle_update_search_space(data) send(CommandType.Initialized, '')
[ "def", "handle_initialize", "(", "self", ",", "data", ")", ":", "self", ".", "handle_update_search_space", "(", "data", ")", "send", "(", "CommandType", ".", "Initialized", ",", "''", ")" ]
[ 60, 4 ]
[ 68, 41 ]
python
en
['en', 'zu', 'en']
True
RetiariiAdvisor.send_trial
(self, parameters)
Send parameters to NNI. Parameters ---------- parameters : Any Any payload. Returns ------- int Parameter ID that is assigned to this parameter, which will be used for identification in future.
Send parameters to NNI.
def send_trial(self, parameters): """ Send parameters to NNI. Parameters ---------- parameters : Any Any payload. Returns ------- int Parameter ID that is assigned to this parameter, which will be used for identification in future. """ self.parameters_count += 1 new_trial = { 'parameter_id': self.parameters_count, 'parameters': parameters, 'parameter_source': 'algorithm' } _logger.debug('New trial sent: %s', new_trial) send(CommandType.NewTrialJob, json_dumps(new_trial)) if self.send_trial_callback is not None: self.send_trial_callback(parameters) # pylint: disable=not-callable return self.parameters_count
[ "def", "send_trial", "(", "self", ",", "parameters", ")", ":", "self", ".", "parameters_count", "+=", "1", "new_trial", "=", "{", "'parameter_id'", ":", "self", ".", "parameters_count", ",", "'parameters'", ":", "parameters", ",", "'parameter_source'", ":", "'algorithm'", "}", "_logger", ".", "debug", "(", "'New trial sent: %s'", ",", "new_trial", ")", "send", "(", "CommandType", ".", "NewTrialJob", ",", "json_dumps", "(", "new_trial", ")", ")", "if", "self", ".", "send_trial_callback", "is", "not", "None", ":", "self", ".", "send_trial_callback", "(", "parameters", ")", "# pylint: disable=not-callable", "return", "self", ".", "parameters_count" ]
[ 70, 4 ]
[ 95, 36 ]
python
en
['en', 'error', 'th']
False
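Each call increments parameters_count and ships a NewTrialJob command; a sketch, assuming advisor is a constructed RetiariiAdvisor:

pid = advisor.send_trial({"lr": 0.01})
# the command payload sent over the channel is:
# {"parameter_id": 1, "parameters": {"lr": 0.01}, "parameter_source": "algorithm"}
assert pid == advisor.parameters_count  # this id identifies the trial in later callbacks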
test_intent_set_humidity
(hass)
Test the set humidity intent.
Test the set humidity intent.
async def test_intent_set_humidity(hass): """Test the set humidity intent.""" hass.states.async_set( "humidifier.bedroom_humidifier", STATE_ON, {ATTR_HUMIDITY: 40} ) humidity_calls = async_mock_service(hass, DOMAIN, SERVICE_SET_HUMIDITY) turn_on_calls = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON) await intent.async_setup_intents(hass) result = await hass.helpers.intent.async_handle( "test", intent.INTENT_HUMIDITY, {"name": {"value": "Bedroom humidifier"}, "humidity": {"value": "50"}}, ) await hass.async_block_till_done() assert result.speech["plain"]["speech"] == "The bedroom humidifier is set to 50%" assert len(turn_on_calls) == 0 assert len(humidity_calls) == 1 call = humidity_calls[0] assert call.domain == DOMAIN assert call.service == SERVICE_SET_HUMIDITY assert call.data.get(ATTR_ENTITY_ID) == "humidifier.bedroom_humidifier" assert call.data.get(ATTR_HUMIDITY) == 50
[ "async", "def", "test_intent_set_humidity", "(", "hass", ")", ":", "hass", ".", "states", ".", "async_set", "(", "\"humidifier.bedroom_humidifier\"", ",", "STATE_ON", ",", "{", "ATTR_HUMIDITY", ":", "40", "}", ")", "humidity_calls", "=", "async_mock_service", "(", "hass", ",", "DOMAIN", ",", "SERVICE_SET_HUMIDITY", ")", "turn_on_calls", "=", "async_mock_service", "(", "hass", ",", "DOMAIN", ",", "SERVICE_TURN_ON", ")", "await", "intent", ".", "async_setup_intents", "(", "hass", ")", "result", "=", "await", "hass", ".", "helpers", ".", "intent", ".", "async_handle", "(", "\"test\"", ",", "intent", ".", "INTENT_HUMIDITY", ",", "{", "\"name\"", ":", "{", "\"value\"", ":", "\"Bedroom humidifier\"", "}", ",", "\"humidity\"", ":", "{", "\"value\"", ":", "\"50\"", "}", "}", ",", ")", "await", "hass", ".", "async_block_till_done", "(", ")", "assert", "result", ".", "speech", "[", "\"plain\"", "]", "[", "\"speech\"", "]", "==", "\"The bedroom humidifier is set to 50%\"", "assert", "len", "(", "turn_on_calls", ")", "==", "0", "assert", "len", "(", "humidity_calls", ")", "==", "1", "call", "=", "humidity_calls", "[", "0", "]", "assert", "call", ".", "domain", "==", "DOMAIN", "assert", "call", ".", "service", "==", "SERVICE_SET_HUMIDITY", "assert", "call", ".", "data", ".", "get", "(", "ATTR_ENTITY_ID", ")", "==", "\"humidifier.bedroom_humidifier\"", "assert", "call", ".", "data", ".", "get", "(", "ATTR_HUMIDITY", ")", "==", "50" ]
[ 22, 0 ]
[ 46, 45 ]
python
en
['en', 'en', 'en']
True
test_intent_set_humidity_and_turn_on
(hass)
Test the set humidity intent for turned off humidifier.
Test the set humidity intent for turned off humidifier.
async def test_intent_set_humidity_and_turn_on(hass): """Test the set humidity intent for turned off humidifier.""" hass.states.async_set( "humidifier.bedroom_humidifier", STATE_OFF, {ATTR_HUMIDITY: 40} ) humidity_calls = async_mock_service(hass, DOMAIN, SERVICE_SET_HUMIDITY) turn_on_calls = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON) await intent.async_setup_intents(hass) result = await hass.helpers.intent.async_handle( "test", intent.INTENT_HUMIDITY, {"name": {"value": "Bedroom humidifier"}, "humidity": {"value": "50"}}, ) await hass.async_block_till_done() assert ( result.speech["plain"]["speech"] == "Turned bedroom humidifier on and set humidity to 50%" ) assert len(turn_on_calls) == 1 call = turn_on_calls[0] assert call.domain == DOMAIN assert call.service == SERVICE_TURN_ON assert call.data.get(ATTR_ENTITY_ID) == "humidifier.bedroom_humidifier" assert len(humidity_calls) == 1 call = humidity_calls[0] assert call.domain == DOMAIN assert call.service == SERVICE_SET_HUMIDITY assert call.data.get(ATTR_ENTITY_ID) == "humidifier.bedroom_humidifier" assert call.data.get(ATTR_HUMIDITY) == 50
[ "async", "def", "test_intent_set_humidity_and_turn_on", "(", "hass", ")", ":", "hass", ".", "states", ".", "async_set", "(", "\"humidifier.bedroom_humidifier\"", ",", "STATE_OFF", ",", "{", "ATTR_HUMIDITY", ":", "40", "}", ")", "humidity_calls", "=", "async_mock_service", "(", "hass", ",", "DOMAIN", ",", "SERVICE_SET_HUMIDITY", ")", "turn_on_calls", "=", "async_mock_service", "(", "hass", ",", "DOMAIN", ",", "SERVICE_TURN_ON", ")", "await", "intent", ".", "async_setup_intents", "(", "hass", ")", "result", "=", "await", "hass", ".", "helpers", ".", "intent", ".", "async_handle", "(", "\"test\"", ",", "intent", ".", "INTENT_HUMIDITY", ",", "{", "\"name\"", ":", "{", "\"value\"", ":", "\"Bedroom humidifier\"", "}", ",", "\"humidity\"", ":", "{", "\"value\"", ":", "\"50\"", "}", "}", ",", ")", "await", "hass", ".", "async_block_till_done", "(", ")", "assert", "(", "result", ".", "speech", "[", "\"plain\"", "]", "[", "\"speech\"", "]", "==", "\"Turned bedroom humidifier on and set humidity to 50%\"", ")", "assert", "len", "(", "turn_on_calls", ")", "==", "1", "call", "=", "turn_on_calls", "[", "0", "]", "assert", "call", ".", "domain", "==", "DOMAIN", "assert", "call", ".", "service", "==", "SERVICE_TURN_ON", "assert", "call", ".", "data", ".", "get", "(", "ATTR_ENTITY_ID", ")", "==", "\"humidifier.bedroom_humidifier\"", "assert", "len", "(", "humidity_calls", ")", "==", "1", "call", "=", "humidity_calls", "[", "0", "]", "assert", "call", ".", "domain", "==", "DOMAIN", "assert", "call", ".", "service", "==", "SERVICE_SET_HUMIDITY", "assert", "call", ".", "data", ".", "get", "(", "ATTR_ENTITY_ID", ")", "==", "\"humidifier.bedroom_humidifier\"", "assert", "call", ".", "data", ".", "get", "(", "ATTR_HUMIDITY", ")", "==", "50" ]
[ 49, 0 ]
[ 80, 45 ]
python
en
['en', 'en', 'en']
True
test_intent_set_mode
(hass)
Test the set mode intent.
Test the set mode intent.
async def test_intent_set_mode(hass): """Test the set mode intent.""" hass.states.async_set( "humidifier.bedroom_humidifier", STATE_ON, { ATTR_HUMIDITY: 40, ATTR_SUPPORTED_FEATURES: 1, ATTR_AVAILABLE_MODES: ["home", "away"], ATTR_MODE: "home", }, ) mode_calls = async_mock_service(hass, DOMAIN, SERVICE_SET_MODE) turn_on_calls = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON) await intent.async_setup_intents(hass) result = await hass.helpers.intent.async_handle( "test", intent.INTENT_MODE, {"name": {"value": "Bedroom humidifier"}, "mode": {"value": "away"}}, ) await hass.async_block_till_done() assert ( result.speech["plain"]["speech"] == "The mode for bedroom humidifier is set to away" ) assert len(turn_on_calls) == 0 assert len(mode_calls) == 1 call = mode_calls[0] assert call.domain == DOMAIN assert call.service == SERVICE_SET_MODE assert call.data.get(ATTR_ENTITY_ID) == "humidifier.bedroom_humidifier" assert call.data.get(ATTR_MODE) == "away"
[ "async", "def", "test_intent_set_mode", "(", "hass", ")", ":", "hass", ".", "states", ".", "async_set", "(", "\"humidifier.bedroom_humidifier\"", ",", "STATE_ON", ",", "{", "ATTR_HUMIDITY", ":", "40", ",", "ATTR_SUPPORTED_FEATURES", ":", "1", ",", "ATTR_AVAILABLE_MODES", ":", "[", "\"home\"", ",", "\"away\"", "]", ",", "ATTR_MODE", ":", "\"home\"", ",", "}", ",", ")", "mode_calls", "=", "async_mock_service", "(", "hass", ",", "DOMAIN", ",", "SERVICE_SET_MODE", ")", "turn_on_calls", "=", "async_mock_service", "(", "hass", ",", "DOMAIN", ",", "SERVICE_TURN_ON", ")", "await", "intent", ".", "async_setup_intents", "(", "hass", ")", "result", "=", "await", "hass", ".", "helpers", ".", "intent", ".", "async_handle", "(", "\"test\"", ",", "intent", ".", "INTENT_MODE", ",", "{", "\"name\"", ":", "{", "\"value\"", ":", "\"Bedroom humidifier\"", "}", ",", "\"mode\"", ":", "{", "\"value\"", ":", "\"away\"", "}", "}", ",", ")", "await", "hass", ".", "async_block_till_done", "(", ")", "assert", "(", "result", ".", "speech", "[", "\"plain\"", "]", "[", "\"speech\"", "]", "==", "\"The mode for bedroom humidifier is set to away\"", ")", "assert", "len", "(", "turn_on_calls", ")", "==", "0", "assert", "len", "(", "mode_calls", ")", "==", "1", "call", "=", "mode_calls", "[", "0", "]", "assert", "call", ".", "domain", "==", "DOMAIN", "assert", "call", ".", "service", "==", "SERVICE_SET_MODE", "assert", "call", ".", "data", ".", "get", "(", "ATTR_ENTITY_ID", ")", "==", "\"humidifier.bedroom_humidifier\"", "assert", "call", ".", "data", ".", "get", "(", "ATTR_MODE", ")", "==", "\"away\"" ]
[ 83, 0 ]
[ 117, 45 ]
python
en
['en', 'en', 'en']
True
test_intent_set_mode_and_turn_on
(hass)
Test the set mode intent.
Test the set mode intent.
async def test_intent_set_mode_and_turn_on(hass): """Test the set mode intent.""" hass.states.async_set( "humidifier.bedroom_humidifier", STATE_OFF, { ATTR_HUMIDITY: 40, ATTR_SUPPORTED_FEATURES: 1, ATTR_AVAILABLE_MODES: ["home", "away"], ATTR_MODE: "home", }, ) mode_calls = async_mock_service(hass, DOMAIN, SERVICE_SET_MODE) turn_on_calls = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON) await intent.async_setup_intents(hass) result = await hass.helpers.intent.async_handle( "test", intent.INTENT_MODE, {"name": {"value": "Bedroom humidifier"}, "mode": {"value": "away"}}, ) await hass.async_block_till_done() assert ( result.speech["plain"]["speech"] == "Turned bedroom humidifier on and set away mode" ) assert len(turn_on_calls) == 1 call = turn_on_calls[0] assert call.domain == DOMAIN assert call.service == SERVICE_TURN_ON assert call.data.get(ATTR_ENTITY_ID) == "humidifier.bedroom_humidifier" assert len(mode_calls) == 1 call = mode_calls[0] assert call.domain == DOMAIN assert call.service == SERVICE_SET_MODE assert call.data.get(ATTR_ENTITY_ID) == "humidifier.bedroom_humidifier" assert call.data.get(ATTR_MODE) == "away"
[ "async", "def", "test_intent_set_mode_and_turn_on", "(", "hass", ")", ":", "hass", ".", "states", ".", "async_set", "(", "\"humidifier.bedroom_humidifier\"", ",", "STATE_OFF", ",", "{", "ATTR_HUMIDITY", ":", "40", ",", "ATTR_SUPPORTED_FEATURES", ":", "1", ",", "ATTR_AVAILABLE_MODES", ":", "[", "\"home\"", ",", "\"away\"", "]", ",", "ATTR_MODE", ":", "\"home\"", ",", "}", ",", ")", "mode_calls", "=", "async_mock_service", "(", "hass", ",", "DOMAIN", ",", "SERVICE_SET_MODE", ")", "turn_on_calls", "=", "async_mock_service", "(", "hass", ",", "DOMAIN", ",", "SERVICE_TURN_ON", ")", "await", "intent", ".", "async_setup_intents", "(", "hass", ")", "result", "=", "await", "hass", ".", "helpers", ".", "intent", ".", "async_handle", "(", "\"test\"", ",", "intent", ".", "INTENT_MODE", ",", "{", "\"name\"", ":", "{", "\"value\"", ":", "\"Bedroom humidifier\"", "}", ",", "\"mode\"", ":", "{", "\"value\"", ":", "\"away\"", "}", "}", ",", ")", "await", "hass", ".", "async_block_till_done", "(", ")", "assert", "(", "result", ".", "speech", "[", "\"plain\"", "]", "[", "\"speech\"", "]", "==", "\"Turned bedroom humidifier on and set away mode\"", ")", "assert", "len", "(", "turn_on_calls", ")", "==", "1", "call", "=", "turn_on_calls", "[", "0", "]", "assert", "call", ".", "domain", "==", "DOMAIN", "assert", "call", ".", "service", "==", "SERVICE_TURN_ON", "assert", "call", ".", "data", ".", "get", "(", "ATTR_ENTITY_ID", ")", "==", "\"humidifier.bedroom_humidifier\"", "assert", "len", "(", "mode_calls", ")", "==", "1", "call", "=", "mode_calls", "[", "0", "]", "assert", "call", ".", "domain", "==", "DOMAIN", "assert", "call", ".", "service", "==", "SERVICE_SET_MODE", "assert", "call", ".", "data", ".", "get", "(", "ATTR_ENTITY_ID", ")", "==", "\"humidifier.bedroom_humidifier\"", "assert", "call", ".", "data", ".", "get", "(", "ATTR_MODE", ")", "==", "\"away\"" ]
[ 120, 0 ]
[ 158, 45 ]
python
en
['en', 'en', 'en']
True
test_intent_set_mode_tests_feature
(hass)
Test the set mode intent where modes are not supported.
Test the set mode intent where modes are not supported.
async def test_intent_set_mode_tests_feature(hass): """Test the set mode intent where modes are not supported.""" hass.states.async_set( "humidifier.bedroom_humidifier", STATE_ON, {ATTR_HUMIDITY: 40} ) mode_calls = async_mock_service(hass, DOMAIN, SERVICE_SET_MODE) await intent.async_setup_intents(hass) try: await hass.helpers.intent.async_handle( "test", intent.INTENT_MODE, {"name": {"value": "Bedroom humidifier"}, "mode": {"value": "away"}}, ) assert False, "handling intent should have raised" except IntentHandleError as err: assert str(err) == "Entity bedroom humidifier does not support modes" assert len(mode_calls) == 0
[ "async", "def", "test_intent_set_mode_tests_feature", "(", "hass", ")", ":", "hass", ".", "states", ".", "async_set", "(", "\"humidifier.bedroom_humidifier\"", ",", "STATE_ON", ",", "{", "ATTR_HUMIDITY", ":", "40", "}", ")", "mode_calls", "=", "async_mock_service", "(", "hass", ",", "DOMAIN", ",", "SERVICE_SET_MODE", ")", "await", "intent", ".", "async_setup_intents", "(", "hass", ")", "try", ":", "await", "hass", ".", "helpers", ".", "intent", ".", "async_handle", "(", "\"test\"", ",", "intent", ".", "INTENT_MODE", ",", "{", "\"name\"", ":", "{", "\"value\"", ":", "\"Bedroom humidifier\"", "}", ",", "\"mode\"", ":", "{", "\"value\"", ":", "\"away\"", "}", "}", ",", ")", "assert", "False", ",", "\"handling intent should have raised\"", "except", "IntentHandleError", "as", "err", ":", "assert", "str", "(", "err", ")", "==", "\"Entity bedroom humidifier does not support modes\"", "assert", "len", "(", "mode_calls", ")", "==", "0" ]
[ 161, 0 ]
[ 179, 31 ]
python
en
['en', 'en', 'en']
True
test_intent_set_unknown_mode
(hass)
Test the set mode intent for unsupported mode.
Test the set mode intent for unsupported mode.
async def test_intent_set_unknown_mode(hass): """Test the set mode intent for unsupported mode.""" hass.states.async_set( "humidifier.bedroom_humidifier", STATE_ON, { ATTR_HUMIDITY: 40, ATTR_SUPPORTED_FEATURES: 1, ATTR_AVAILABLE_MODES: ["home", "away"], ATTR_MODE: "home", }, ) mode_calls = async_mock_service(hass, DOMAIN, SERVICE_SET_MODE) await intent.async_setup_intents(hass) try: await hass.helpers.intent.async_handle( "test", intent.INTENT_MODE, {"name": {"value": "Bedroom humidifier"}, "mode": {"value": "eco"}}, ) assert False, "handling intent should have raised" except IntentHandleError as err: assert str(err) == "Entity bedroom humidifier does not support eco mode" assert len(mode_calls) == 0
[ "async", "def", "test_intent_set_unknown_mode", "(", "hass", ")", ":", "hass", ".", "states", ".", "async_set", "(", "\"humidifier.bedroom_humidifier\"", ",", "STATE_ON", ",", "{", "ATTR_HUMIDITY", ":", "40", ",", "ATTR_SUPPORTED_FEATURES", ":", "1", ",", "ATTR_AVAILABLE_MODES", ":", "[", "\"home\"", ",", "\"away\"", "]", ",", "ATTR_MODE", ":", "\"home\"", ",", "}", ",", ")", "mode_calls", "=", "async_mock_service", "(", "hass", ",", "DOMAIN", ",", "SERVICE_SET_MODE", ")", "await", "intent", ".", "async_setup_intents", "(", "hass", ")", "try", ":", "await", "hass", ".", "helpers", ".", "intent", ".", "async_handle", "(", "\"test\"", ",", "intent", ".", "INTENT_MODE", ",", "{", "\"name\"", ":", "{", "\"value\"", ":", "\"Bedroom humidifier\"", "}", ",", "\"mode\"", ":", "{", "\"value\"", ":", "\"eco\"", "}", "}", ",", ")", "assert", "False", ",", "\"handling intent should have raised\"", "except", "IntentHandleError", "as", "err", ":", "assert", "str", "(", "err", ")", "==", "\"Entity bedroom humidifier does not support eco mode\"", "assert", "len", "(", "mode_calls", ")", "==", "0" ]
[ 182, 0 ]
[ 207, 31 ]
python
en
['en', 'en', 'en']
True
display_temp
( hass: HomeAssistant, temperature: Optional[float], unit: str, precision: float )
Convert temperature into preferred units/precision for display.
Convert temperature into preferred units/precision for display.
def display_temp( hass: HomeAssistant, temperature: Optional[float], unit: str, precision: float ) -> Optional[float]: """Convert temperature into preferred units/precision for display.""" temperature_unit = unit ha_unit = hass.config.units.temperature_unit if temperature is None: return temperature # If the temperature is not a number this can cause issues # with Polymer components, so bail early there. if not isinstance(temperature, Number): raise TypeError(f"Temperature is not a number: {temperature}") if temperature_unit != ha_unit: temperature = convert_temperature(temperature, temperature_unit, ha_unit) # Round in the units appropriate if precision == PRECISION_HALVES: temperature = round(temperature * 2) / 2.0 elif precision == PRECISION_TENTHS: temperature = round(temperature, 1) # Integer as a fall back (PRECISION_WHOLE) else: temperature = round(temperature) return temperature
[ "def", "display_temp", "(", "hass", ":", "HomeAssistant", ",", "temperature", ":", "Optional", "[", "float", "]", ",", "unit", ":", "str", ",", "precision", ":", "float", ")", "->", "Optional", "[", "float", "]", ":", "temperature_unit", "=", "unit", "ha_unit", "=", "hass", ".", "config", ".", "units", ".", "temperature_unit", "if", "temperature", "is", "None", ":", "return", "temperature", "# If the temperature is not a number this can cause issues", "# with Polymer components, so bail early there.", "if", "not", "isinstance", "(", "temperature", ",", "Number", ")", ":", "raise", "TypeError", "(", "f\"Temperature is not a number: {temperature}\"", ")", "if", "temperature_unit", "!=", "ha_unit", ":", "temperature", "=", "convert_temperature", "(", "temperature", ",", "temperature_unit", ",", "ha_unit", ")", "# Round in the units appropriate", "if", "precision", "==", "PRECISION_HALVES", ":", "temperature", "=", "round", "(", "temperature", "*", "2", ")", "/", "2.0", "elif", "precision", "==", "PRECISION_TENTHS", ":", "temperature", "=", "round", "(", "temperature", ",", "1", ")", "# Integer as a fall back (PRECISION_WHOLE)", "else", ":", "temperature", "=", "round", "(", "temperature", ")", "return", "temperature" ]
[ 9, 0 ]
[ 36, 22 ]
python
en
['en', 'it', 'en']
True
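The precision branch is the part worth internalizing: halves round to the nearest 0.5, tenths to one decimal, whole to an integer. The rounding core in isolation (constant values mirror Home Assistant's, assumed here):

PRECISION_HALVES, PRECISION_TENTHS, PRECISION_WHOLE = 0.5, 0.1, 1

t = 23.34
print(round(t * 2) / 2.0)  # 23.5  (PRECISION_HALVES)
print(round(t, 1))         # 23.3  (PRECISION_TENTHS)
print(round(t))            # 23    (PRECISION_WHOLE)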
run_information
(hass, point_in_time: Optional[datetime] = None)
Return information about current run. There is also the run that covers point_in_time.
Return information about current run.
def run_information(hass, point_in_time: Optional[datetime] = None): """Return information about current run. There is also the run that covers point_in_time. """ run_info = run_information_from_instance(hass, point_in_time) if run_info: return run_info with session_scope(hass=hass) as session: return run_information_with_session(session, point_in_time)
[ "def", "run_information", "(", "hass", ",", "point_in_time", ":", "Optional", "[", "datetime", "]", "=", "None", ")", ":", "run_info", "=", "run_information_from_instance", "(", "hass", ",", "point_in_time", ")", "if", "run_info", ":", "return", "run_info", "with", "session_scope", "(", "hass", "=", "hass", ")", "as", "session", ":", "return", "run_information_with_session", "(", "session", ",", "point_in_time", ")" ]
[ 116, 0 ]
[ 126, 67 ]
python
en
['en', 'en', 'en']
True
run_information_from_instance
(hass, point_in_time: Optional[datetime] = None)
Return information about current run from the existing instance. Does not query the database for older runs.
Return information about current run from the existing instance.
def run_information_from_instance(hass, point_in_time: Optional[datetime] = None): """Return information about current run from the existing instance. Does not query the database for older runs. """ ins = hass.data[DATA_INSTANCE] if point_in_time is None or point_in_time > ins.recording_start: return ins.run_info
[ "def", "run_information_from_instance", "(", "hass", ",", "point_in_time", ":", "Optional", "[", "datetime", "]", "=", "None", ")", ":", "ins", "=", "hass", ".", "data", "[", "DATA_INSTANCE", "]", "if", "point_in_time", "is", "None", "or", "point_in_time", ">", "ins", ".", "recording_start", ":", "return", "ins", ".", "run_info" ]
[ 129, 0 ]
[ 137, 27 ]
python
en
['en', 'en', 'en']
True
run_information_with_session
(session, point_in_time: Optional[datetime] = None)
Return information about current run from the database.
Return information about current run from the database.
def run_information_with_session(session, point_in_time: Optional[datetime] = None): """Return information about current run from the database.""" recorder_runs = RecorderRuns query = session.query(recorder_runs) if point_in_time: query = query.filter( (recorder_runs.start < point_in_time) & (recorder_runs.end > point_in_time) ) res = query.first() if res: session.expunge(res) return res
[ "def", "run_information_with_session", "(", "session", ",", "point_in_time", ":", "Optional", "[", "datetime", "]", "=", "None", ")", ":", "recorder_runs", "=", "RecorderRuns", "query", "=", "session", ".", "query", "(", "recorder_runs", ")", "if", "point_in_time", ":", "query", "=", "query", ".", "filter", "(", "(", "recorder_runs", ".", "start", "<", "point_in_time", ")", "&", "(", "recorder_runs", ".", "end", ">", "point_in_time", ")", ")", "res", "=", "query", ".", "first", "(", ")", "if", "res", ":", "session", ".", "expunge", "(", "res", ")", "return", "res" ]
[ 140, 0 ]
[ 153, 14 ]
python
en
['en', 'en', 'en']
True
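The interesting part of run_information_with_session is the window filter (start < point_in_time < end). A self-contained sketch of that query against an in-memory SQLite database, assuming SQLAlchemy 1.4+ and a simplified stand-in for the real RecorderRuns model:

from datetime import datetime
from sqlalchemy import Column, DateTime, Integer, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class Run(Base):  # simplified stand-in for the recorder's RecorderRuns model
    __tablename__ = "recorder_runs"
    run_id = Column(Integer, primary_key=True)
    start = Column(DateTime)
    end = Column(DateTime)

engine = create_engine("sqlite://")  # in-memory database for the sketch
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(Run(start=datetime(2021, 1, 1), end=datetime(2021, 1, 2)))
session.commit()

# The same (start < t) & (end > t) window filter used above:
point = datetime(2021, 1, 1, 12)
run = session.query(Run).filter((Run.start < point) & (Run.end > point)).first()
assert run is not None and run.run_id == 1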
async_setup
(hass: HomeAssistant, config: ConfigType)
Set up the recorder.
Set up the recorder.
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool: """Set up the recorder.""" conf = config[DOMAIN] entity_filter = convert_include_exclude_filter(conf) auto_purge = conf[CONF_AUTO_PURGE] keep_days = conf[CONF_PURGE_KEEP_DAYS] commit_interval = conf[CONF_COMMIT_INTERVAL] db_max_retries = conf[CONF_DB_MAX_RETRIES] db_retry_wait = conf[CONF_DB_RETRY_WAIT] db_integrity_check = conf[CONF_DB_INTEGRITY_CHECK] db_url = conf.get(CONF_DB_URL) if not db_url: db_url = DEFAULT_URL.format(hass_config_path=hass.config.path(DEFAULT_DB_FILE)) exclude = conf[CONF_EXCLUDE] exclude_t = exclude.get(CONF_EVENT_TYPES, []) instance = hass.data[DATA_INSTANCE] = Recorder( hass=hass, auto_purge=auto_purge, keep_days=keep_days, commit_interval=commit_interval, uri=db_url, db_max_retries=db_max_retries, db_retry_wait=db_retry_wait, entity_filter=entity_filter, exclude_t=exclude_t, db_integrity_check=db_integrity_check, ) instance.async_initialize() instance.start() async def async_handle_purge_service(service): """Handle calls to the purge service.""" instance.do_adhoc_purge(**service.data) hass.services.async_register( DOMAIN, SERVICE_PURGE, async_handle_purge_service, schema=SERVICE_PURGE_SCHEMA ) return await instance.async_db_ready
[ "async", "def", "async_setup", "(", "hass", ":", "HomeAssistant", ",", "config", ":", "ConfigType", ")", "->", "bool", ":", "conf", "=", "config", "[", "DOMAIN", "]", "entity_filter", "=", "convert_include_exclude_filter", "(", "conf", ")", "auto_purge", "=", "conf", "[", "CONF_AUTO_PURGE", "]", "keep_days", "=", "conf", "[", "CONF_PURGE_KEEP_DAYS", "]", "commit_interval", "=", "conf", "[", "CONF_COMMIT_INTERVAL", "]", "db_max_retries", "=", "conf", "[", "CONF_DB_MAX_RETRIES", "]", "db_retry_wait", "=", "conf", "[", "CONF_DB_RETRY_WAIT", "]", "db_integrity_check", "=", "conf", "[", "CONF_DB_INTEGRITY_CHECK", "]", "db_url", "=", "conf", ".", "get", "(", "CONF_DB_URL", ")", "if", "not", "db_url", ":", "db_url", "=", "DEFAULT_URL", ".", "format", "(", "hass_config_path", "=", "hass", ".", "config", ".", "path", "(", "DEFAULT_DB_FILE", ")", ")", "exclude", "=", "conf", "[", "CONF_EXCLUDE", "]", "exclude_t", "=", "exclude", ".", "get", "(", "CONF_EVENT_TYPES", ",", "[", "]", ")", "instance", "=", "hass", ".", "data", "[", "DATA_INSTANCE", "]", "=", "Recorder", "(", "hass", "=", "hass", ",", "auto_purge", "=", "auto_purge", ",", "keep_days", "=", "keep_days", ",", "commit_interval", "=", "commit_interval", ",", "uri", "=", "db_url", ",", "db_max_retries", "=", "db_max_retries", ",", "db_retry_wait", "=", "db_retry_wait", ",", "entity_filter", "=", "entity_filter", ",", "exclude_t", "=", "exclude_t", ",", "db_integrity_check", "=", "db_integrity_check", ",", ")", "instance", ".", "async_initialize", "(", ")", "instance", ".", "start", "(", ")", "async", "def", "async_handle_purge_service", "(", "service", ")", ":", "\"\"\"Handle calls to the purge service.\"\"\"", "instance", ".", "do_adhoc_purge", "(", "*", "*", "service", ".", "data", ")", "hass", ".", "services", ".", "async_register", "(", "DOMAIN", ",", "SERVICE_PURGE", ",", "async_handle_purge_service", ",", "schema", "=", "SERVICE_PURGE_SCHEMA", ")", "return", "await", "instance", ".", "async_db_ready" ]
[ 156, 0 ]
[ 195, 40 ]
python
en
['en', 'en', 'en']
True
Recorder.__init__
( self, hass: HomeAssistant, auto_purge: bool, keep_days: int, commit_interval: int, uri: str, db_max_retries: int, db_retry_wait: int, entity_filter: Callable[[str], bool], exclude_t: List[str], db_integrity_check: bool, )
Initialize the recorder.
Initialize the recorder.
def __init__( self, hass: HomeAssistant, auto_purge: bool, keep_days: int, commit_interval: int, uri: str, db_max_retries: int, db_retry_wait: int, entity_filter: Callable[[str], bool], exclude_t: List[str], db_integrity_check: bool, ) -> None: """Initialize the recorder.""" threading.Thread.__init__(self, name="Recorder") self.hass = hass self.auto_purge = auto_purge self.keep_days = keep_days self.commit_interval = commit_interval self.queue: Any = queue.SimpleQueue() self.recording_start = dt_util.utcnow() self.db_url = uri self.db_max_retries = db_max_retries self.db_retry_wait = db_retry_wait self.db_integrity_check = db_integrity_check self.async_db_ready = asyncio.Future() self._queue_watch = threading.Event() self.engine: Any = None self.run_info: Any = None self.entity_filter = entity_filter self.exclude_t = exclude_t self._timechanges_seen = 0 self._commits_without_expire = 0 self._keepalive_count = 0 self._old_states = {} self._pending_expunge = [] self.event_session = None self.get_session = None self._completed_database_setup = False
[ "def", "__init__", "(", "self", ",", "hass", ":", "HomeAssistant", ",", "auto_purge", ":", "bool", ",", "keep_days", ":", "int", ",", "commit_interval", ":", "int", ",", "uri", ":", "str", ",", "db_max_retries", ":", "int", ",", "db_retry_wait", ":", "int", ",", "entity_filter", ":", "Callable", "[", "[", "str", "]", ",", "bool", "]", ",", "exclude_t", ":", "List", "[", "str", "]", ",", "db_integrity_check", ":", "bool", ",", ")", "->", "None", ":", "threading", ".", "Thread", ".", "__init__", "(", "self", ",", "name", "=", "\"Recorder\"", ")", "self", ".", "hass", "=", "hass", "self", ".", "auto_purge", "=", "auto_purge", "self", ".", "keep_days", "=", "keep_days", "self", ".", "commit_interval", "=", "commit_interval", "self", ".", "queue", ":", "Any", "=", "queue", ".", "SimpleQueue", "(", ")", "self", ".", "recording_start", "=", "dt_util", ".", "utcnow", "(", ")", "self", ".", "db_url", "=", "uri", "self", ".", "db_max_retries", "=", "db_max_retries", "self", ".", "db_retry_wait", "=", "db_retry_wait", "self", ".", "db_integrity_check", "=", "db_integrity_check", "self", ".", "async_db_ready", "=", "asyncio", ".", "Future", "(", ")", "self", ".", "_queue_watch", "=", "threading", ".", "Event", "(", ")", "self", ".", "engine", ":", "Any", "=", "None", "self", ".", "run_info", ":", "Any", "=", "None", "self", ".", "entity_filter", "=", "entity_filter", "self", ".", "exclude_t", "=", "exclude_t", "self", ".", "_timechanges_seen", "=", "0", "self", ".", "_commits_without_expire", "=", "0", "self", ".", "_keepalive_count", "=", "0", "self", ".", "_old_states", "=", "{", "}", "self", ".", "_pending_expunge", "=", "[", "]", "self", ".", "event_session", "=", "None", "self", ".", "get_session", "=", "None", "self", ".", "_completed_database_setup", "=", "False" ]
[ 208, 4 ]
[ 249, 46 ]
python
en
['en', 'en', 'en']
True
Recorder.async_initialize
(self)
Initialize the recorder.
Initialize the recorder.
def async_initialize(self): """Initialize the recorder.""" self.hass.bus.async_listen(MATCH_ALL, self.event_listener)
[ "def", "async_initialize", "(", "self", ")", ":", "self", ".", "hass", ".", "bus", ".", "async_listen", "(", "MATCH_ALL", ",", "self", ".", "event_listener", ")" ]
[ 252, 4 ]
[ 254, 66 ]
python
en
['en', 'en', 'en']
True
Recorder.do_adhoc_purge
(self, **kwargs)
Trigger an adhoc purge retaining keep_days worth of data.
Trigger an adhoc purge retaining keep_days worth of data.
def do_adhoc_purge(self, **kwargs): """Trigger an adhoc purge retaining keep_days worth of data.""" keep_days = kwargs.get(ATTR_KEEP_DAYS, self.keep_days) repack = kwargs.get(ATTR_REPACK) self.queue.put(PurgeTask(keep_days, repack))
[ "def", "do_adhoc_purge", "(", "self", ",", "*", "*", "kwargs", ")", ":", "keep_days", "=", "kwargs", ".", "get", "(", "ATTR_KEEP_DAYS", ",", "self", ".", "keep_days", ")", "repack", "=", "kwargs", ".", "get", "(", "ATTR_REPACK", ")", "self", ".", "queue", ".", "put", "(", "PurgeTask", "(", "keep_days", ",", "repack", ")", ")" ]
[ 256, 4 ]
[ 261, 52 ]
python
en
['en', 'sn', 'en']
True
Recorder.run
(self)
Start processing events to save.
Start processing events to save.
def run(self): """Start processing events to save.""" tries = 1 connected = False while not connected and tries <= self.db_max_retries: if tries != 1: time.sleep(self.db_retry_wait) try: self._setup_connection() migration.migrate_schema(self) self._setup_run() connected = True _LOGGER.debug("Connected to recorder database") except Exception as err: # pylint: disable=broad-except _LOGGER.error( "Error during connection setup: %s (retrying in %s seconds)", err, self.db_retry_wait, ) tries += 1 if not connected: @callback def connection_failed(): """Connect failed tasks.""" self.async_db_ready.set_result(False) persistent_notification.async_create( self.hass, "The recorder could not start, please check the log", "Recorder", ) self.hass.add_job(connection_failed) return shutdown_task = object() hass_started = concurrent.futures.Future() @callback def register(): """Post connection initialize.""" self.async_db_ready.set_result(True) def shutdown(event): """Shut down the Recorder.""" if not hass_started.done(): hass_started.set_result(shutdown_task) self.queue.put(None) self.join() self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, shutdown) if self.hass.state == CoreState.running: hass_started.set_result(None) else: @callback def notify_hass_started(event): """Notify that hass has started.""" hass_started.set_result(None) self.hass.bus.async_listen_once( EVENT_HOMEASSISTANT_START, notify_hass_started ) self.hass.add_job(register) result = hass_started.result() # If shutdown happened before Home Assistant finished starting if result is shutdown_task: return # Start periodic purge if self.auto_purge: @callback def async_purge(now): """Trigger the purge.""" self.queue.put(PurgeTask(self.keep_days, repack=False)) # Purge every night at 4:12am self.hass.helpers.event.track_time_change( async_purge, hour=4, minute=12, second=0 ) self.event_session = self.get_session() self.event_session.expire_on_commit = False # Use a session for the event read loop # with a commit every time the event time # has changed. This reduces the disk io. 
while True: event = self.queue.get() if event is None: self._close_run() self._close_connection() return if isinstance(event, PurgeTask): # Schedule a new purge task if this one didn't finish if not purge.purge_old_data(self, event.keep_days, event.repack): self.queue.put(PurgeTask(event.keep_days, event.repack)) continue if isinstance(event, WaitTask): self._queue_watch.set() continue if event.event_type == EVENT_TIME_CHANGED: self._keepalive_count += 1 if self._keepalive_count >= KEEPALIVE_TIME: self._keepalive_count = 0 self._send_keep_alive() if self.commit_interval: self._timechanges_seen += 1 if self._timechanges_seen >= self.commit_interval: self._timechanges_seen = 0 self._commit_event_session_or_retry() continue if event.event_type in self.exclude_t: continue entity_id = event.data.get(ATTR_ENTITY_ID) if entity_id is not None: if not self.entity_filter(entity_id): continue try: if event.event_type == EVENT_STATE_CHANGED: dbevent = Events.from_event(event, event_data="{}") else: dbevent = Events.from_event(event) dbevent.created = event.time_fired self.event_session.add(dbevent) except (TypeError, ValueError): _LOGGER.warning("Event is not JSON serializable: %s", event) except Exception as err: # pylint: disable=broad-except # Must catch the exception to prevent the loop from collapsing _LOGGER.exception("Error adding event: %s", err) if dbevent and event.event_type == EVENT_STATE_CHANGED: try: dbstate = States.from_event(event) has_new_state = event.data.get("new_state") if dbstate.entity_id in self._old_states: old_state = self._old_states.pop(dbstate.entity_id) if old_state.state_id: dbstate.old_state_id = old_state.state_id else: dbstate.old_state = old_state if not has_new_state: dbstate.state = None dbstate.event = dbevent dbstate.created = event.time_fired self.event_session.add(dbstate) if has_new_state: self._old_states[dbstate.entity_id] = dbstate self._pending_expunge.append(dbstate) except (TypeError, ValueError): _LOGGER.warning( "State is not JSON serializable: %s", event.data.get("new_state"), ) except Exception as err: # pylint: disable=broad-except # Must catch the exception to prevent the loop from collapsing _LOGGER.exception("Error adding state change: %s", err) # If they do not have a commit interval # than we commit right away if not self.commit_interval: self._commit_event_session_or_retry()
[ "def", "run", "(", "self", ")", ":", "tries", "=", "1", "connected", "=", "False", "while", "not", "connected", "and", "tries", "<=", "self", ".", "db_max_retries", ":", "if", "tries", "!=", "1", ":", "time", ".", "sleep", "(", "self", ".", "db_retry_wait", ")", "try", ":", "self", ".", "_setup_connection", "(", ")", "migration", ".", "migrate_schema", "(", "self", ")", "self", ".", "_setup_run", "(", ")", "connected", "=", "True", "_LOGGER", ".", "debug", "(", "\"Connected to recorder database\"", ")", "except", "Exception", "as", "err", ":", "# pylint: disable=broad-except", "_LOGGER", ".", "error", "(", "\"Error during connection setup: %s (retrying in %s seconds)\"", ",", "err", ",", "self", ".", "db_retry_wait", ",", ")", "tries", "+=", "1", "if", "not", "connected", ":", "@", "callback", "def", "connection_failed", "(", ")", ":", "\"\"\"Connect failed tasks.\"\"\"", "self", ".", "async_db_ready", ".", "set_result", "(", "False", ")", "persistent_notification", ".", "async_create", "(", "self", ".", "hass", ",", "\"The recorder could not start, please check the log\"", ",", "\"Recorder\"", ",", ")", "self", ".", "hass", ".", "add_job", "(", "connection_failed", ")", "return", "shutdown_task", "=", "object", "(", ")", "hass_started", "=", "concurrent", ".", "futures", ".", "Future", "(", ")", "@", "callback", "def", "register", "(", ")", ":", "\"\"\"Post connection initialize.\"\"\"", "self", ".", "async_db_ready", ".", "set_result", "(", "True", ")", "def", "shutdown", "(", "event", ")", ":", "\"\"\"Shut down the Recorder.\"\"\"", "if", "not", "hass_started", ".", "done", "(", ")", ":", "hass_started", ".", "set_result", "(", "shutdown_task", ")", "self", ".", "queue", ".", "put", "(", "None", ")", "self", ".", "join", "(", ")", "self", ".", "hass", ".", "bus", ".", "async_listen_once", "(", "EVENT_HOMEASSISTANT_STOP", ",", "shutdown", ")", "if", "self", ".", "hass", ".", "state", "==", "CoreState", ".", "running", ":", "hass_started", ".", "set_result", "(", "None", ")", "else", ":", "@", "callback", "def", "notify_hass_started", "(", "event", ")", ":", "\"\"\"Notify that hass has started.\"\"\"", "hass_started", ".", "set_result", "(", "None", ")", "self", ".", "hass", ".", "bus", ".", "async_listen_once", "(", "EVENT_HOMEASSISTANT_START", ",", "notify_hass_started", ")", "self", ".", "hass", ".", "add_job", "(", "register", ")", "result", "=", "hass_started", ".", "result", "(", ")", "# If shutdown happened before Home Assistant finished starting", "if", "result", "is", "shutdown_task", ":", "return", "# Start periodic purge", "if", "self", ".", "auto_purge", ":", "@", "callback", "def", "async_purge", "(", "now", ")", ":", "\"\"\"Trigger the purge.\"\"\"", "self", ".", "queue", ".", "put", "(", "PurgeTask", "(", "self", ".", "keep_days", ",", "repack", "=", "False", ")", ")", "# Purge every night at 4:12am", "self", ".", "hass", ".", "helpers", ".", "event", ".", "track_time_change", "(", "async_purge", ",", "hour", "=", "4", ",", "minute", "=", "12", ",", "second", "=", "0", ")", "self", ".", "event_session", "=", "self", ".", "get_session", "(", ")", "self", ".", "event_session", ".", "expire_on_commit", "=", "False", "# Use a session for the event read loop", "# with a commit every time the event time", "# has changed. 
This reduces the disk io.", "while", "True", ":", "event", "=", "self", ".", "queue", ".", "get", "(", ")", "if", "event", "is", "None", ":", "self", ".", "_close_run", "(", ")", "self", ".", "_close_connection", "(", ")", "return", "if", "isinstance", "(", "event", ",", "PurgeTask", ")", ":", "# Schedule a new purge task if this one didn't finish", "if", "not", "purge", ".", "purge_old_data", "(", "self", ",", "event", ".", "keep_days", ",", "event", ".", "repack", ")", ":", "self", ".", "queue", ".", "put", "(", "PurgeTask", "(", "event", ".", "keep_days", ",", "event", ".", "repack", ")", ")", "continue", "if", "isinstance", "(", "event", ",", "WaitTask", ")", ":", "self", ".", "_queue_watch", ".", "set", "(", ")", "continue", "if", "event", ".", "event_type", "==", "EVENT_TIME_CHANGED", ":", "self", ".", "_keepalive_count", "+=", "1", "if", "self", ".", "_keepalive_count", ">=", "KEEPALIVE_TIME", ":", "self", ".", "_keepalive_count", "=", "0", "self", ".", "_send_keep_alive", "(", ")", "if", "self", ".", "commit_interval", ":", "self", ".", "_timechanges_seen", "+=", "1", "if", "self", ".", "_timechanges_seen", ">=", "self", ".", "commit_interval", ":", "self", ".", "_timechanges_seen", "=", "0", "self", ".", "_commit_event_session_or_retry", "(", ")", "continue", "if", "event", ".", "event_type", "in", "self", ".", "exclude_t", ":", "continue", "entity_id", "=", "event", ".", "data", ".", "get", "(", "ATTR_ENTITY_ID", ")", "if", "entity_id", "is", "not", "None", ":", "if", "not", "self", ".", "entity_filter", "(", "entity_id", ")", ":", "continue", "try", ":", "if", "event", ".", "event_type", "==", "EVENT_STATE_CHANGED", ":", "dbevent", "=", "Events", ".", "from_event", "(", "event", ",", "event_data", "=", "\"{}\"", ")", "else", ":", "dbevent", "=", "Events", ".", "from_event", "(", "event", ")", "dbevent", ".", "created", "=", "event", ".", "time_fired", "self", ".", "event_session", ".", "add", "(", "dbevent", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "_LOGGER", ".", "warning", "(", "\"Event is not JSON serializable: %s\"", ",", "event", ")", "except", "Exception", "as", "err", ":", "# pylint: disable=broad-except", "# Must catch the exception to prevent the loop from collapsing", "_LOGGER", ".", "exception", "(", "\"Error adding event: %s\"", ",", "err", ")", "if", "dbevent", "and", "event", ".", "event_type", "==", "EVENT_STATE_CHANGED", ":", "try", ":", "dbstate", "=", "States", ".", "from_event", "(", "event", ")", "has_new_state", "=", "event", ".", "data", ".", "get", "(", "\"new_state\"", ")", "if", "dbstate", ".", "entity_id", "in", "self", ".", "_old_states", ":", "old_state", "=", "self", ".", "_old_states", ".", "pop", "(", "dbstate", ".", "entity_id", ")", "if", "old_state", ".", "state_id", ":", "dbstate", ".", "old_state_id", "=", "old_state", ".", "state_id", "else", ":", "dbstate", ".", "old_state", "=", "old_state", "if", "not", "has_new_state", ":", "dbstate", ".", "state", "=", "None", "dbstate", ".", "event", "=", "dbevent", "dbstate", ".", "created", "=", "event", ".", "time_fired", "self", ".", "event_session", ".", "add", "(", "dbstate", ")", "if", "has_new_state", ":", "self", ".", "_old_states", "[", "dbstate", ".", "entity_id", "]", "=", "dbstate", "self", ".", "_pending_expunge", ".", "append", "(", "dbstate", ")", "except", "(", "TypeError", ",", "ValueError", ")", ":", "_LOGGER", ".", "warning", "(", "\"State is not JSON serializable: %s\"", ",", "event", ".", "data", ".", "get", "(", "\"new_state\"", ")", ",", ")", "except", 
"Exception", "as", "err", ":", "# pylint: disable=broad-except", "# Must catch the exception to prevent the loop from collapsing", "_LOGGER", ".", "exception", "(", "\"Error adding state change: %s\"", ",", "err", ")", "# If they do not have a commit interval", "# than we commit right away", "if", "not", "self", ".", "commit_interval", ":", "self", ".", "_commit_event_session_or_retry", "(", ")" ]
[ 263, 4 ]
[ 431, 53 ]
python
en
['en', 'en', 'en']
True
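Recorder.run drains its queue in a dedicated thread and treats None as a shutdown sentinel (shutdown() puts None and then joins the thread). A stripped-down sketch of just that worker discipline, without the database:

import queue
import threading

q = queue.SimpleQueue()
processed = []

def worker():
    while True:
        item = q.get()
        if item is None:  # shutdown sentinel, as in Recorder.run
            return
        processed.append(item)

t = threading.Thread(target=worker, name="Recorder-sketch")
t.start()
q.put("event-1")
q.put("event-2")
q.put(None)  # ask the worker to exit...
t.join()     # ...and wait for it, as the shutdown() callback does
assert processed == ["event-1", "event-2"]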
Recorder.event_listener
(self, event)
Listen for new events and put them in the process queue.
Listen for new events and put them in the process queue.
def event_listener(self, event): """Listen for new events and put them in the process queue.""" self.queue.put(event)
[ "def", "event_listener", "(", "self", ",", "event", ")", ":", "self", ".", "queue", ".", "put", "(", "event", ")" ]
[ 529, 4 ]
[ 531, 29 ]
python
en
['en', 'en', 'en']
True
Recorder.block_till_done
(self)
Block till all events processed. This is only called in tests. This only blocks until the queue is empty which does not mean the recorder is done. Call tests.common's wait_recording_done after calling this to ensure the data is in the database.
Block till all events processed.
def block_till_done(self): """Block till all events processed. This is only called in tests. This only blocks until the queue is empty which does not mean the recorder is done. Call tests.common's wait_recording_done after calling this to ensure the data is in the database. """ self._queue_watch.clear() self.queue.put(WaitTask()) self._queue_watch.wait()
[ "def", "block_till_done", "(", "self", ")", ":", "self", ".", "_queue_watch", ".", "clear", "(", ")", "self", ".", "queue", ".", "put", "(", "WaitTask", "(", ")", ")", "self", ".", "_queue_watch", ".", "wait", "(", ")" ]
[ 533, 4 ]
[ 547, 32 ]
python
en
['sv', 'en', 'en']
True
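block_till_done pairs a queued marker object (WaitTask) with a threading.Event: once the worker dequeues the marker, everything queued before it has been seen. A minimal sketch of that handshake:

import queue
import threading

class WaitTask:  # stand-in for the recorder's marker class
    pass

q = queue.SimpleQueue()
queue_watch = threading.Event()

def worker():
    while True:
        item = q.get()
        if item is None:
            return
        if isinstance(item, WaitTask):
            queue_watch.set()  # everything queued earlier was processed

t = threading.Thread(target=worker)
t.start()
q.put("some event")

# block_till_done(), in miniature:
queue_watch.clear()
q.put(WaitTask())
queue_watch.wait()

q.put(None)
t.join()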
Recorder._setup_connection
(self)
Ensure database is ready to fly.
Ensure database is ready to fly.
def _setup_connection(self): """Ensure database is ready to fly.""" kwargs = {} def setup_recorder_connection(dbapi_connection, connection_record): """Dbapi specific connection settings.""" if self._completed_database_setup: return # We do not import sqlite3 here so mysql/other # users do not have to pay for it to be loaded in # memory if self.db_url.startswith(SQLITE_URL_PREFIX): old_isolation = dbapi_connection.isolation_level dbapi_connection.isolation_level = None cursor = dbapi_connection.cursor() cursor.execute("PRAGMA journal_mode=WAL") cursor.close() dbapi_connection.isolation_level = old_isolation # WAL mode only needs to be setup once # instead of every time we open the sqlite connection # as its persistent and isn't free to call every time. self._completed_database_setup = True elif self.db_url.startswith("mysql"): cursor = dbapi_connection.cursor() cursor.execute("SET session wait_timeout=28800") cursor.close() if self.db_url == SQLITE_URL_PREFIX or ":memory:" in self.db_url: kwargs["connect_args"] = {"check_same_thread": False} kwargs["poolclass"] = StaticPool kwargs["pool_reset_on_return"] = None else: kwargs["echo"] = False if self.db_url != SQLITE_URL_PREFIX and self.db_url.startswith( SQLITE_URL_PREFIX ): with self.hass.timeout.freeze(DOMAIN): # # Here we run an sqlite3 quick_check. In the majority # of cases, the quick_check takes under 10 seconds. # # On systems with very large databases and # very slow disk or cpus, this can take a while. # validate_or_move_away_sqlite_database( self.db_url, self.db_integrity_check ) if self.engine is not None: self.engine.dispose() self.engine = create_engine(self.db_url, **kwargs) sqlalchemy_event.listen(self.engine, "connect", setup_recorder_connection) Base.metadata.create_all(self.engine) self.get_session = scoped_session(sessionmaker(bind=self.engine))
[ "def", "_setup_connection", "(", "self", ")", ":", "kwargs", "=", "{", "}", "def", "setup_recorder_connection", "(", "dbapi_connection", ",", "connection_record", ")", ":", "\"\"\"Dbapi specific connection settings.\"\"\"", "if", "self", ".", "_completed_database_setup", ":", "return", "# We do not import sqlite3 here so mysql/other", "# users do not have to pay for it to be loaded in", "# memory", "if", "self", ".", "db_url", ".", "startswith", "(", "SQLITE_URL_PREFIX", ")", ":", "old_isolation", "=", "dbapi_connection", ".", "isolation_level", "dbapi_connection", ".", "isolation_level", "=", "None", "cursor", "=", "dbapi_connection", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "\"PRAGMA journal_mode=WAL\"", ")", "cursor", ".", "close", "(", ")", "dbapi_connection", ".", "isolation_level", "=", "old_isolation", "# WAL mode only needs to be setup once", "# instead of every time we open the sqlite connection", "# as its persistent and isn't free to call every time.", "self", ".", "_completed_database_setup", "=", "True", "elif", "self", ".", "db_url", ".", "startswith", "(", "\"mysql\"", ")", ":", "cursor", "=", "dbapi_connection", ".", "cursor", "(", ")", "cursor", ".", "execute", "(", "\"SET session wait_timeout=28800\"", ")", "cursor", ".", "close", "(", ")", "if", "self", ".", "db_url", "==", "SQLITE_URL_PREFIX", "or", "\":memory:\"", "in", "self", ".", "db_url", ":", "kwargs", "[", "\"connect_args\"", "]", "=", "{", "\"check_same_thread\"", ":", "False", "}", "kwargs", "[", "\"poolclass\"", "]", "=", "StaticPool", "kwargs", "[", "\"pool_reset_on_return\"", "]", "=", "None", "else", ":", "kwargs", "[", "\"echo\"", "]", "=", "False", "if", "self", ".", "db_url", "!=", "SQLITE_URL_PREFIX", "and", "self", ".", "db_url", ".", "startswith", "(", "SQLITE_URL_PREFIX", ")", ":", "with", "self", ".", "hass", ".", "timeout", ".", "freeze", "(", "DOMAIN", ")", ":", "#", "# Here we run an sqlite3 quick_check. In the majority", "# of cases, the quick_check takes under 10 seconds.", "#", "# On systems with very large databases and", "# very slow disk or cpus, this can take a while.", "#", "validate_or_move_away_sqlite_database", "(", "self", ".", "db_url", ",", "self", ".", "db_integrity_check", ")", "if", "self", ".", "engine", "is", "not", "None", ":", "self", ".", "engine", ".", "dispose", "(", ")", "self", ".", "engine", "=", "create_engine", "(", "self", ".", "db_url", ",", "*", "*", "kwargs", ")", "sqlalchemy_event", ".", "listen", "(", "self", ".", "engine", ",", "\"connect\"", ",", "setup_recorder_connection", ")", "Base", ".", "metadata", ".", "create_all", "(", "self", ".", "engine", ")", "self", ".", "get_session", "=", "scoped_session", "(", "sessionmaker", "(", "bind", "=", "self", ".", "engine", ")", ")" ]
[ 549, 4 ]
[ 607, 73 ]
python
en
['en', 'en', 'en']
True
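The SQLite branch of _setup_connection switches the database to write-ahead logging exactly once per process, temporarily entering autocommit so the PRAGMA runs outside a transaction. The same dance with the standard-library sqlite3 module (an in-memory database here, so the reported mode is 'memory' rather than 'wal'):

import sqlite3

conn = sqlite3.connect(":memory:")  # a real file path in production
old_isolation = conn.isolation_level
conn.isolation_level = None  # autocommit, so the PRAGMA is not inside a transaction
cursor = conn.cursor()
cursor.execute("PRAGMA journal_mode=WAL")
print(cursor.fetchone())  # ('memory',) here; ('wal',) for an on-disk file
cursor.close()
conn.isolation_level = old_isolation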
Recorder._close_connection
(self)
Close the connection.
Close the connection.
def _close_connection(self): """Close the connection.""" self.engine.dispose() self.engine = None self.get_session = None
[ "def", "_close_connection", "(", "self", ")", ":", "self", ".", "engine", ".", "dispose", "(", ")", "self", ".", "engine", "=", "None", "self", ".", "get_session", "=", "None" ]
[ 609, 4 ]
[ 613, 31 ]
python
en
['en', 'en', 'en']
True
Recorder._setup_run
(self)
Log the start of the current run.
Log the start of the current run.
def _setup_run(self): """Log the start of the current run.""" with session_scope(session=self.get_session()) as session: for run in session.query(RecorderRuns).filter_by(end=None): run.closed_incorrect = True run.end = self.recording_start _LOGGER.warning( "Ended unfinished session (id=%s from %s)", run.run_id, run.start ) session.add(run) self.run_info = RecorderRuns( start=self.recording_start, created=dt_util.utcnow() ) session.add(self.run_info) session.flush() session.expunge(self.run_info)
[ "def", "_setup_run", "(", "self", ")", ":", "with", "session_scope", "(", "session", "=", "self", ".", "get_session", "(", ")", ")", "as", "session", ":", "for", "run", "in", "session", ".", "query", "(", "RecorderRuns", ")", ".", "filter_by", "(", "end", "=", "None", ")", ":", "run", ".", "closed_incorrect", "=", "True", "run", ".", "end", "=", "self", ".", "recording_start", "_LOGGER", ".", "warning", "(", "\"Ended unfinished session (id=%s from %s)\"", ",", "run", ".", "run_id", ",", "run", ".", "start", ")", "session", ".", "add", "(", "run", ")", "self", ".", "run_info", "=", "RecorderRuns", "(", "start", "=", "self", ".", "recording_start", ",", "created", "=", "dt_util", ".", "utcnow", "(", ")", ")", "session", ".", "add", "(", "self", ".", "run_info", ")", "session", ".", "flush", "(", ")", "session", ".", "expunge", "(", "self", ".", "run_info", ")" ]
[ 615, 4 ]
[ 631, 42 ]
python
en
['en', 'en', 'en']
True
Recorder._close_run
(self)
Save end time for current run.
Save end time for current run.
def _close_run(self): """Save end time for current run.""" if self.event_session is not None: self.run_info.end = dt_util.utcnow() self.event_session.add(self.run_info) self._commit_event_session_or_retry() self.event_session.close() self.run_info = None
[ "def", "_close_run", "(", "self", ")", ":", "if", "self", ".", "event_session", "is", "not", "None", ":", "self", ".", "run_info", ".", "end", "=", "dt_util", ".", "utcnow", "(", ")", "self", ".", "event_session", ".", "add", "(", "self", ".", "run_info", ")", "self", ".", "_commit_event_session_or_retry", "(", ")", "self", ".", "event_session", ".", "close", "(", ")", "self", ".", "run_info", "=", "None" ]
[ 633, 4 ]
[ 641, 28 ]
python
en
['en', 'en', 'en']
True
mockup_raise
(*args, **kwargs)
Mockup to replace regular functions for error injection.
Mockup to replace regular functions for error injection.
def mockup_raise(*args, **kwargs): """Mockup to replace regular functions for error injection.""" raise NumatoGpioError("Error mockup")
[ "def", "mockup_raise", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "raise", "NumatoGpioError", "(", "\"Error mockup\"", ")" ]
[ 41, 0 ]
[ 43, 41 ]
python
en
['da', 'en', 'en']
True
mockup_return
(*args, **kwargs)
Mockup to replace regular functions for error injection.
Mockup to replace regular functions for error injection.
def mockup_return(*args, **kwargs): """Mockup to replace regular functions for error injection.""" return False
[ "def", "mockup_return", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "False" ]
[ 46, 0 ]
[ 48, 16 ]
python
en
['da', 'en', 'en']
True
test_cover_async_setup_entry
(hass, aioclient_mock)
Test cover setup without sensors.
Test cover setup without sensors.
async def test_cover_async_setup_entry(hass, aioclient_mock): """Test climate setup without sensors.""" aioclient_mock.get( TEST_SYSTEM_URL, text=TEST_SYSTEM_DATA, ) aioclient_mock.get( TEST_SET_URL, text=TEST_SET_RESPONSE, ) await add_mock_config(hass) registry = await hass.helpers.entity_registry.async_get_registry() assert len(aioclient_mock.mock_calls) == 1 # Test Cover Zone Entity entity_id = "cover.zone_open_without_sensor" state = hass.states.get(entity_id) assert state assert state.state == STATE_OPEN assert state.attributes.get("device_class") == DEVICE_CLASS_DAMPER assert state.attributes.get("current_position") == 100 entry = registry.async_get(entity_id) assert entry assert entry.unique_id == "uniqueid-ac2-z01" await hass.services.async_call( COVER_DOMAIN, SERVICE_CLOSE_COVER, {ATTR_ENTITY_ID: [entity_id]}, blocking=True, ) assert len(aioclient_mock.mock_calls) == 3 assert aioclient_mock.mock_calls[-2][0] == "GET" assert aioclient_mock.mock_calls[-2][1].path == "/setAircon" data = loads(aioclient_mock.mock_calls[-2][1].query["json"]) assert data["ac2"]["zones"]["z01"]["state"] == ADVANTAGE_AIR_STATE_CLOSE assert aioclient_mock.mock_calls[-1][0] == "GET" assert aioclient_mock.mock_calls[-1][1].path == "/getSystemData" await hass.services.async_call( COVER_DOMAIN, SERVICE_OPEN_COVER, {ATTR_ENTITY_ID: [entity_id]}, blocking=True, ) assert len(aioclient_mock.mock_calls) == 5 assert aioclient_mock.mock_calls[-2][0] == "GET" assert aioclient_mock.mock_calls[-2][1].path == "/setAircon" data = loads(aioclient_mock.mock_calls[-2][1].query["json"]) assert data["ac2"]["zones"]["z01"]["state"] == ADVANTAGE_AIR_STATE_OPEN assert data["ac2"]["zones"]["z01"]["value"] == 100 assert aioclient_mock.mock_calls[-1][0] == "GET" assert aioclient_mock.mock_calls[-1][1].path == "/getSystemData" await hass.services.async_call( COVER_DOMAIN, SERVICE_SET_COVER_POSITION, {ATTR_ENTITY_ID: [entity_id], ATTR_POSITION: 50}, blocking=True, ) assert len(aioclient_mock.mock_calls) == 7 assert aioclient_mock.mock_calls[-2][0] == "GET" assert aioclient_mock.mock_calls[-2][1].path == "/setAircon" data = loads(aioclient_mock.mock_calls[-2][1].query["json"]) assert data["ac2"]["zones"]["z01"]["value"] == 50 assert aioclient_mock.mock_calls[-1][0] == "GET" assert aioclient_mock.mock_calls[-1][1].path == "/getSystemData" await hass.services.async_call( COVER_DOMAIN, SERVICE_SET_COVER_POSITION, {ATTR_ENTITY_ID: [entity_id], ATTR_POSITION: 0}, blocking=True, ) assert len(aioclient_mock.mock_calls) == 9 assert aioclient_mock.mock_calls[-2][0] == "GET" assert aioclient_mock.mock_calls[-2][1].path == "/setAircon" data = loads(aioclient_mock.mock_calls[-2][1].query["json"]) assert data["ac2"]["zones"]["z01"]["state"] == ADVANTAGE_AIR_STATE_CLOSE assert aioclient_mock.mock_calls[-1][0] == "GET" assert aioclient_mock.mock_calls[-1][1].path == "/getSystemData"
[ "async", "def", "test_cover_async_setup_entry", "(", "hass", ",", "aioclient_mock", ")", ":", "aioclient_mock", ".", "get", "(", "TEST_SYSTEM_URL", ",", "text", "=", "TEST_SYSTEM_DATA", ",", ")", "aioclient_mock", ".", "get", "(", "TEST_SET_URL", ",", "text", "=", "TEST_SET_RESPONSE", ",", ")", "await", "add_mock_config", "(", "hass", ")", "registry", "=", "await", "hass", ".", "helpers", ".", "entity_registry", ".", "async_get_registry", "(", ")", "assert", "len", "(", "aioclient_mock", ".", "mock_calls", ")", "==", "1", "# Test Cover Zone Entity", "entity_id", "=", "\"cover.zone_open_without_sensor\"", "state", "=", "hass", ".", "states", ".", "get", "(", "entity_id", ")", "assert", "state", "assert", "state", ".", "state", "==", "STATE_OPEN", "assert", "state", ".", "attributes", ".", "get", "(", "\"device_class\"", ")", "==", "DEVICE_CLASS_DAMPER", "assert", "state", ".", "attributes", ".", "get", "(", "\"current_position\"", ")", "==", "100", "entry", "=", "registry", ".", "async_get", "(", "entity_id", ")", "assert", "entry", "assert", "entry", ".", "unique_id", "==", "\"uniqueid-ac2-z01\"", "await", "hass", ".", "services", ".", "async_call", "(", "COVER_DOMAIN", ",", "SERVICE_CLOSE_COVER", ",", "{", "ATTR_ENTITY_ID", ":", "[", "entity_id", "]", "}", ",", "blocking", "=", "True", ",", ")", "assert", "len", "(", "aioclient_mock", ".", "mock_calls", ")", "==", "3", "assert", "aioclient_mock", ".", "mock_calls", "[", "-", "2", "]", "[", "0", "]", "==", "\"GET\"", "assert", "aioclient_mock", ".", "mock_calls", "[", "-", "2", "]", "[", "1", "]", ".", "path", "==", "\"/setAircon\"", "data", "=", "loads", "(", "aioclient_mock", ".", "mock_calls", "[", "-", "2", "]", "[", "1", "]", ".", "query", "[", "\"json\"", "]", ")", "assert", "data", "[", "\"ac2\"", "]", "[", "\"zones\"", "]", "[", "\"z01\"", "]", "[", "\"state\"", "]", "==", "ADVANTAGE_AIR_STATE_CLOSE", "assert", "aioclient_mock", ".", "mock_calls", "[", "-", "1", "]", "[", "0", "]", "==", "\"GET\"", "assert", "aioclient_mock", ".", "mock_calls", "[", "-", "1", "]", "[", "1", "]", ".", "path", "==", "\"/getSystemData\"", "await", "hass", ".", "services", ".", "async_call", "(", "COVER_DOMAIN", ",", "SERVICE_OPEN_COVER", ",", "{", "ATTR_ENTITY_ID", ":", "[", "entity_id", "]", "}", ",", "blocking", "=", "True", ",", ")", "assert", "len", "(", "aioclient_mock", ".", "mock_calls", ")", "==", "5", "assert", "aioclient_mock", ".", "mock_calls", "[", "-", "2", "]", "[", "0", "]", "==", "\"GET\"", "assert", "aioclient_mock", ".", "mock_calls", "[", "-", "2", "]", "[", "1", "]", ".", "path", "==", "\"/setAircon\"", "data", "=", "loads", "(", "aioclient_mock", ".", "mock_calls", "[", "-", "2", "]", "[", "1", "]", ".", "query", "[", "\"json\"", "]", ")", "assert", "data", "[", "\"ac2\"", "]", "[", "\"zones\"", "]", "[", "\"z01\"", "]", "[", "\"state\"", "]", "==", "ADVANTAGE_AIR_STATE_OPEN", "assert", "data", "[", "\"ac2\"", "]", "[", "\"zones\"", "]", "[", "\"z01\"", "]", "[", "\"value\"", "]", "==", "100", "assert", "aioclient_mock", ".", "mock_calls", "[", "-", "1", "]", "[", "0", "]", "==", "\"GET\"", "assert", "aioclient_mock", ".", "mock_calls", "[", "-", "1", "]", "[", "1", "]", ".", "path", "==", "\"/getSystemData\"", "await", "hass", ".", "services", ".", "async_call", "(", "COVER_DOMAIN", ",", "SERVICE_SET_COVER_POSITION", ",", "{", "ATTR_ENTITY_ID", ":", "[", "entity_id", "]", ",", "ATTR_POSITION", ":", "50", "}", ",", "blocking", "=", "True", ",", ")", "assert", "len", "(", "aioclient_mock", ".", "mock_calls", ")", "==", "7", 
"assert", "aioclient_mock", ".", "mock_calls", "[", "-", "2", "]", "[", "0", "]", "==", "\"GET\"", "assert", "aioclient_mock", ".", "mock_calls", "[", "-", "2", "]", "[", "1", "]", ".", "path", "==", "\"/setAircon\"", "data", "=", "loads", "(", "aioclient_mock", ".", "mock_calls", "[", "-", "2", "]", "[", "1", "]", ".", "query", "[", "\"json\"", "]", ")", "assert", "data", "[", "\"ac2\"", "]", "[", "\"zones\"", "]", "[", "\"z01\"", "]", "[", "\"value\"", "]", "==", "50", "assert", "aioclient_mock", ".", "mock_calls", "[", "-", "1", "]", "[", "0", "]", "==", "\"GET\"", "assert", "aioclient_mock", ".", "mock_calls", "[", "-", "1", "]", "[", "1", "]", ".", "path", "==", "\"/getSystemData\"", "await", "hass", ".", "services", ".", "async_call", "(", "COVER_DOMAIN", ",", "SERVICE_SET_COVER_POSITION", ",", "{", "ATTR_ENTITY_ID", ":", "[", "entity_id", "]", ",", "ATTR_POSITION", ":", "0", "}", ",", "blocking", "=", "True", ",", ")", "assert", "len", "(", "aioclient_mock", ".", "mock_calls", ")", "==", "9", "assert", "aioclient_mock", ".", "mock_calls", "[", "-", "2", "]", "[", "0", "]", "==", "\"GET\"", "assert", "aioclient_mock", ".", "mock_calls", "[", "-", "2", "]", "[", "1", "]", ".", "path", "==", "\"/setAircon\"", "data", "=", "loads", "(", "aioclient_mock", ".", "mock_calls", "[", "-", "2", "]", "[", "1", "]", ".", "query", "[", "\"json\"", "]", ")", "assert", "data", "[", "\"ac2\"", "]", "[", "\"zones\"", "]", "[", "\"z01\"", "]", "[", "\"state\"", "]", "==", "ADVANTAGE_AIR_STATE_CLOSE", "assert", "aioclient_mock", ".", "mock_calls", "[", "-", "1", "]", "[", "0", "]", "==", "\"GET\"", "assert", "aioclient_mock", ".", "mock_calls", "[", "-", "1", "]", "[", "1", "]", ".", "path", "==", "\"/getSystemData\"" ]
[ 27, 0 ]
[ 112, 68 ]
python
en
['en', 'en', 'en']
True
assert_tensors_close
(a, b, atol=1e-12, prefix="")
If tensors have different shapes, different values, or a and b are not both tensors, raise a descriptive AssertionError.
If tensors have different shapes, different values, or a and b are not both tensors, raise a descriptive AssertionError.
def assert_tensors_close(a, b, atol=1e-12, prefix=""): """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item() if a.numel() > 100: msg = f"tensor values are {pct_different:.1%} percent different." else: msg = f"{a} != {b}" if prefix: msg = prefix + ": " + msg raise AssertionError(msg)
[ "def", "assert_tensors_close", "(", "a", ",", "b", ",", "atol", "=", "1e-12", ",", "prefix", "=", "\"\"", ")", ":", "if", "a", "is", "None", "and", "b", "is", "None", ":", "return", "True", "try", ":", "if", "torch", ".", "allclose", "(", "a", ",", "b", ",", "atol", "=", "atol", ")", ":", "return", "True", "raise", "except", "Exception", ":", "pct_different", "=", "(", "torch", ".", "gt", "(", "(", "a", "-", "b", ")", ".", "abs", "(", ")", ",", "atol", ")", ")", ".", "float", "(", ")", ".", "mean", "(", ")", ".", "item", "(", ")", "if", "a", ".", "numel", "(", ")", ">", "100", ":", "msg", "=", "f\"tensor values are {pct_different:.1%} percent different.\"", "else", ":", "msg", "=", "f\"{a} != {b}\"", "if", "prefix", ":", "msg", "=", "prefix", "+", "\": \"", "+", "msg", "raise", "AssertionError", "(", "msg", ")" ]
[ 267, 0 ]
[ 283, 33 ]
python
en
['en', 'en', 'en']
True
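A usage sketch for assert_tensors_close as defined above (PyTorch required); note that the bare raise inside the try block surfaces as a RuntimeError, which the broad except converts into the descriptive AssertionError:

import torch

a = torch.zeros(3)
b = torch.tensor([0.0, 0.0, 1e-3])

assert_tensors_close(a, a)  # identical tensors pass and return True
try:
    assert_tensors_close(a, b, atol=1e-6, prefix="logits")
except AssertionError as err:
    print(err)  # small tensors are printed verbatim: "logits: tensor(...) != tensor(...)"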
test_cors_middleware_loaded_by_default
(hass)
Test that the CORS middleware is loaded by default.
Test that the CORS middleware is loaded by default.
async def test_cors_middleware_loaded_by_default(hass): """Test accessing to server from banned IP when feature is off.""" with patch("homeassistant.components.http.setup_cors") as mock_setup: await async_setup_component(hass, "http", {"http": {}}) assert len(mock_setup.mock_calls) == 1
[ "async", "def", "test_cors_middleware_loaded_by_default", "(", "hass", ")", ":", "with", "patch", "(", "\"homeassistant.components.http.setup_cors\"", ")", "as", "mock_setup", ":", "await", "async_setup_component", "(", "hass", ",", "\"http\"", ",", "{", "\"http\"", ":", "{", "}", "}", ")", "assert", "len", "(", "mock_setup", ".", "mock_calls", ")", "==", "1" ]
[ 25, 0 ]
[ 30, 42 ]
python
en
['en', 'en', 'en']
True
test_cors_middleware_loaded_from_config
(hass)
Test that the CORS middleware is loaded from the config.
Test that the CORS middleware is loaded from the config.
async def test_cors_middleware_loaded_from_config(hass): """Test accessing to server from banned IP when feature is off.""" with patch("homeassistant.components.http.setup_cors") as mock_setup: await async_setup_component( hass, "http", {"http": {"cors_allowed_origins": ["http://home-assistant.io"]}}, ) assert len(mock_setup.mock_calls) == 1
[ "async", "def", "test_cors_middleware_loaded_from_config", "(", "hass", ")", ":", "with", "patch", "(", "\"homeassistant.components.http.setup_cors\"", ")", "as", "mock_setup", ":", "await", "async_setup_component", "(", "hass", ",", "\"http\"", ",", "{", "\"http\"", ":", "{", "\"cors_allowed_origins\"", ":", "[", "\"http://home-assistant.io\"", "]", "}", "}", ",", ")", "assert", "len", "(", "mock_setup", ".", "mock_calls", ")", "==", "1" ]
[ 33, 0 ]
[ 42, 42 ]
python
en
['en', 'en', 'en']
True
mock_handler
(request)
Return if request was authenticated.
Return if request was authenticated.
async def mock_handler(request): """Return if request was authenticated.""" return web.Response(status=200)
[ "async", "def", "mock_handler", "(", "request", ")", ":", "return", "web", ".", "Response", "(", "status", "=", "200", ")" ]
[ 45, 0 ]
[ 47, 35 ]
python
en
['en', 'en', 'en']
True
client
(loop, aiohttp_client)
Fixture to set up a web.Application.
Fixture to set up a web.Application.
def client(loop, aiohttp_client): """Fixture to set up a web.Application.""" app = web.Application() app.router.add_get("/", mock_handler) setup_cors(app, [TRUSTED_ORIGIN]) return loop.run_until_complete(aiohttp_client(app))
[ "def", "client", "(", "loop", ",", "aiohttp_client", ")", ":", "app", "=", "web", ".", "Application", "(", ")", "app", ".", "router", ".", "add_get", "(", "\"/\"", ",", "mock_handler", ")", "setup_cors", "(", "app", ",", "[", "TRUSTED_ORIGIN", "]", ")", "return", "loop", ".", "run_until_complete", "(", "aiohttp_client", "(", "app", ")", ")" ]
[ 51, 0 ]
[ 56, 55 ]
python
en
['en', 'en', 'en']
True
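Home Assistant's setup_cors wraps the aiohttp_cors package. A hedged sketch of roughly equivalent raw aiohttp_cors configuration (the exact defaults setup_cors applies may differ):

from aiohttp import web
import aiohttp_cors

async def handler(request):
    return web.Response(status=200)

app = web.Application()
route = app.router.add_get("/", handler)
cors = aiohttp_cors.setup(
    app,
    defaults={
        "http://home-assistant.io": aiohttp_cors.ResourceOptions(
            allow_headers=["X-Requested-With"], allow_methods=["GET"]
        )
    },
)
cors.add(route)  # responses to this route now carry the CORS headers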
test_cors_requests
(client)
Test cross origin requests.
Test cross origin requests.
async def test_cors_requests(client): """Test cross origin requests.""" req = await client.get("/", headers={ORIGIN: TRUSTED_ORIGIN}) assert req.status == 200 assert req.headers[ACCESS_CONTROL_ALLOW_ORIGIN] == TRUSTED_ORIGIN # With password in URL req = await client.get( "/", params={"api_password": "some-pass"}, headers={ORIGIN: TRUSTED_ORIGIN} ) assert req.status == 200 assert req.headers[ACCESS_CONTROL_ALLOW_ORIGIN] == TRUSTED_ORIGIN # With password in headers req = await client.get( "/", headers={HTTP_HEADER_HA_AUTH: "some-pass", ORIGIN: TRUSTED_ORIGIN} ) assert req.status == 200 assert req.headers[ACCESS_CONTROL_ALLOW_ORIGIN] == TRUSTED_ORIGIN # With auth token in headers req = await client.get( "/", headers={AUTHORIZATION: "Bearer some-token", ORIGIN: TRUSTED_ORIGIN} ) assert req.status == 200 assert req.headers[ACCESS_CONTROL_ALLOW_ORIGIN] == TRUSTED_ORIGIN
[ "async", "def", "test_cors_requests", "(", "client", ")", ":", "req", "=", "await", "client", ".", "get", "(", "\"/\"", ",", "headers", "=", "{", "ORIGIN", ":", "TRUSTED_ORIGIN", "}", ")", "assert", "req", ".", "status", "==", "200", "assert", "req", ".", "headers", "[", "ACCESS_CONTROL_ALLOW_ORIGIN", "]", "==", "TRUSTED_ORIGIN", "# With password in URL", "req", "=", "await", "client", ".", "get", "(", "\"/\"", ",", "params", "=", "{", "\"api_password\"", ":", "\"some-pass\"", "}", ",", "headers", "=", "{", "ORIGIN", ":", "TRUSTED_ORIGIN", "}", ")", "assert", "req", ".", "status", "==", "200", "assert", "req", ".", "headers", "[", "ACCESS_CONTROL_ALLOW_ORIGIN", "]", "==", "TRUSTED_ORIGIN", "# With password in headers", "req", "=", "await", "client", ".", "get", "(", "\"/\"", ",", "headers", "=", "{", "HTTP_HEADER_HA_AUTH", ":", "\"some-pass\"", ",", "ORIGIN", ":", "TRUSTED_ORIGIN", "}", ")", "assert", "req", ".", "status", "==", "200", "assert", "req", ".", "headers", "[", "ACCESS_CONTROL_ALLOW_ORIGIN", "]", "==", "TRUSTED_ORIGIN", "# With auth token in headers", "req", "=", "await", "client", ".", "get", "(", "\"/\"", ",", "headers", "=", "{", "AUTHORIZATION", ":", "\"Bearer some-token\"", ",", "ORIGIN", ":", "TRUSTED_ORIGIN", "}", ")", "assert", "req", ".", "status", "==", "200", "assert", "req", ".", "headers", "[", "ACCESS_CONTROL_ALLOW_ORIGIN", "]", "==", "TRUSTED_ORIGIN" ]
[ 59, 0 ]
[ 84, 69 ]
python
en
['en', 'nl', 'en']
True
test_cors_preflight_allowed
(client)
Test cross origin resource sharing preflight (OPTIONS) request.
Test cross origin resource sharing preflight (OPTIONS) request.
async def test_cors_preflight_allowed(client): """Test cross origin resource sharing preflight (OPTIONS) request.""" req = await client.options( "/", headers={ ORIGIN: TRUSTED_ORIGIN, ACCESS_CONTROL_REQUEST_METHOD: "GET", ACCESS_CONTROL_REQUEST_HEADERS: "x-requested-with", }, ) assert req.status == 200 assert req.headers[ACCESS_CONTROL_ALLOW_ORIGIN] == TRUSTED_ORIGIN assert req.headers[ACCESS_CONTROL_ALLOW_HEADERS] == "X-REQUESTED-WITH"
[ "async", "def", "test_cors_preflight_allowed", "(", "client", ")", ":", "req", "=", "await", "client", ".", "options", "(", "\"/\"", ",", "headers", "=", "{", "ORIGIN", ":", "TRUSTED_ORIGIN", ",", "ACCESS_CONTROL_REQUEST_METHOD", ":", "\"GET\"", ",", "ACCESS_CONTROL_REQUEST_HEADERS", ":", "\"x-requested-with\"", ",", "}", ",", ")", "assert", "req", ".", "status", "==", "200", "assert", "req", ".", "headers", "[", "ACCESS_CONTROL_ALLOW_ORIGIN", "]", "==", "TRUSTED_ORIGIN", "assert", "req", ".", "headers", "[", "ACCESS_CONTROL_ALLOW_HEADERS", "]", "==", "\"X-REQUESTED-WITH\"" ]
[ 87, 0 ]
[ 100, 74 ]
python
en
['en', 'lb', 'en']
True
test_cors_middleware_with_cors_allowed_view
(hass)
Test that we can configure cors and have a cors_allowed view.
Test that we can configure cors and have a cors_allowed view.
async def test_cors_middleware_with_cors_allowed_view(hass): """Test that we can configure cors and have a cors_allowed view.""" class MyView(HomeAssistantView): """Test view that allows CORS.""" requires_auth = False cors_allowed = True def __init__(self, url, name): """Initialize test view.""" self.url = url self.name = name async def get(self, request): """Test response.""" return "test" assert await async_setup_component( hass, "http", {"http": {"cors_allowed_origins": ["http://home-assistant.io"]}} ) hass.http.register_view(MyView("/api/test", "api:test")) hass.http.register_view(MyView("/api/test", "api:test2")) hass.http.register_view(MyView("/api/test2", "api:test")) hass.http.app._on_startup.freeze() await hass.http.app.startup()
[ "async", "def", "test_cors_middleware_with_cors_allowed_view", "(", "hass", ")", ":", "class", "MyView", "(", "HomeAssistantView", ")", ":", "\"\"\"Test view that allows CORS.\"\"\"", "requires_auth", "=", "False", "cors_allowed", "=", "True", "def", "__init__", "(", "self", ",", "url", ",", "name", ")", ":", "\"\"\"Initialize test view.\"\"\"", "self", ".", "url", "=", "url", "self", ".", "name", "=", "name", "async", "def", "get", "(", "self", ",", "request", ")", ":", "\"\"\"Test response.\"\"\"", "return", "\"test\"", "assert", "await", "async_setup_component", "(", "hass", ",", "\"http\"", ",", "{", "\"http\"", ":", "{", "\"cors_allowed_origins\"", ":", "[", "\"http://home-assistant.io\"", "]", "}", "}", ")", "hass", ".", "http", ".", "register_view", "(", "MyView", "(", "\"/api/test\"", ",", "\"api:test\"", ")", ")", "hass", ".", "http", ".", "register_view", "(", "MyView", "(", "\"/api/test\"", ",", "\"api:test2\"", ")", ")", "hass", ".", "http", ".", "register_view", "(", "MyView", "(", "\"/api/test2\"", ",", "\"api:test\"", ")", ")", "hass", ".", "http", ".", "app", ".", "_on_startup", ".", "freeze", "(", ")", "await", "hass", ".", "http", ".", "app", ".", "startup", "(", ")" ]
[ 103, 0 ]
[ 130, 33 ]
python
en
['en', 'en', 'en']
True
test_cors_works_with_frontend
(hass, hass_client)
Test CORS works with the frontend.
Test CORS works with the frontend.
async def test_cors_works_with_frontend(hass, hass_client): """Test CORS works with the frontend.""" assert await async_setup_component( hass, "frontend", {"http": {"cors_allowed_origins": ["http://home-assistant.io"]}}, ) client = await hass_client() resp = await client.get("/") assert resp.status == 200
[ "async", "def", "test_cors_works_with_frontend", "(", "hass", ",", "hass_client", ")", ":", "assert", "await", "async_setup_component", "(", "hass", ",", "\"frontend\"", ",", "{", "\"http\"", ":", "{", "\"cors_allowed_origins\"", ":", "[", "\"http://home-assistant.io\"", "]", "}", "}", ",", ")", "client", "=", "await", "hass_client", "(", ")", "resp", "=", "await", "client", ".", "get", "(", "\"/\"", ")", "assert", "resp", ".", "status", "==", "200" ]
[ 133, 0 ]
[ 142, 29 ]
python
en
['en', 'en', 'en']
True
test_cors_on_static_files
(hass, hass_client)
Test that we enable CORS for static files.
Test that we enable CORS for static files.
async def test_cors_on_static_files(hass, hass_client): """Test that we enable CORS for static files.""" assert await async_setup_component( hass, "frontend", {"http": {"cors_allowed_origins": ["http://www.example.com"]}} ) hass.http.register_static_path("/something", str(Path(__file__).parent)) client = await hass_client() resp = await client.options( "/something/__init__.py", headers={ "origin": "http://www.example.com", ACCESS_CONTROL_REQUEST_METHOD: "GET", }, ) assert resp.status == 200 assert resp.headers[ACCESS_CONTROL_ALLOW_ORIGIN] == "http://www.example.com"
[ "async", "def", "test_cors_on_static_files", "(", "hass", ",", "hass_client", ")", ":", "assert", "await", "async_setup_component", "(", "hass", ",", "\"frontend\"", ",", "{", "\"http\"", ":", "{", "\"cors_allowed_origins\"", ":", "[", "\"http://www.example.com\"", "]", "}", "}", ")", "hass", ".", "http", ".", "register_static_path", "(", "\"/something\"", ",", "str", "(", "Path", "(", "__file__", ")", ".", "parent", ")", ")", "client", "=", "await", "hass_client", "(", ")", "resp", "=", "await", "client", ".", "options", "(", "\"/something/__init__.py\"", ",", "headers", "=", "{", "\"origin\"", ":", "\"http://www.example.com\"", ",", "ACCESS_CONTROL_REQUEST_METHOD", ":", "\"GET\"", ",", "}", ",", ")", "assert", "resp", ".", "status", "==", "200", "assert", "resp", ".", "headers", "[", "ACCESS_CONTROL_ALLOW_ORIGIN", "]", "==", "\"http://www.example.com\"" ]
[ 145, 0 ]
[ 161, 80 ]
python
en
['en', 'en', 'en']
True
async_get_next_ping_id
(hass)
Find the next id to use in the outbound ping. Must be called in the event loop.
Find the next id to use in the outbound ping.
def async_get_next_ping_id(hass): """Find the next id to use in the outbound ping. Must be called in async """ current_id = hass.data.setdefault(DOMAIN, {}).get(PING_ID, DEFAULT_START_ID) if current_id == MAX_PING_ID: next_id = DEFAULT_START_ID else: next_id = current_id + 1 hass.data[DOMAIN][PING_ID] = next_id return next_id
[ "def", "async_get_next_ping_id", "(", "hass", ")", ":", "current_id", "=", "hass", ".", "data", ".", "setdefault", "(", "DOMAIN", ",", "{", "}", ")", ".", "get", "(", "PING_ID", ",", "DEFAULT_START_ID", ")", "if", "current_id", "==", "MAX_PING_ID", ":", "next_id", "=", "DEFAULT_START_ID", "else", ":", "next_id", "=", "current_id", "+", "1", "hass", ".", "data", "[", "DOMAIN", "]", "[", "PING_ID", "]", "=", "next_id", "return", "next_id" ]
[ 13, 0 ]
[ 27, 18 ]
python
en
['en', 'en', 'en']
True
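The rollover arithmetic in async_get_next_ping_id is easiest to see in isolation; DEFAULT_START_ID and MAX_PING_ID below are illustrative stand-ins for the component's real constants:

DEFAULT_START_ID = 33  # illustrative; the ping component defines its own values
MAX_PING_ID = 35

def next_ping_id(current_id):
    if current_id == MAX_PING_ID:
        return DEFAULT_START_ID  # wrap around instead of growing without bound
    return current_id + 1

ids, current = [], DEFAULT_START_ID
for _ in range(5):
    current = next_ping_id(current)
    ids.append(current)
assert ids == [34, 35, 33, 34, 35]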
accuracy
(output, target, topk=(1,))
Computes the precision@k for the specified values of k
Computes the precision@k for the specified values of k
def accuracy(output, target, topk=(1,)): """ Computes the precision@k for the specified values of k """ maxk = max(topk) batch_size = target.size(0) _, pred = output.topk(maxk, 1, True, True) pred = pred.t() # one-hot case if target.ndimension() > 1: target = target.max(1)[1] correct = pred.eq(target.view(1, -1).expand_as(pred)) res = dict() for k in topk: correct_k = correct[:k].view(-1).float().sum(0) res["acc{}".format(k)] = correct_k.mul_(1.0 / batch_size).item() return res
[ "def", "accuracy", "(", "output", ",", "target", ",", "topk", "=", "(", "1", ",", ")", ")", ":", "maxk", "=", "max", "(", "topk", ")", "batch_size", "=", "target", ".", "size", "(", "0", ")", "_", ",", "pred", "=", "output", ".", "topk", "(", "maxk", ",", "1", ",", "True", ",", "True", ")", "pred", "=", "pred", ".", "t", "(", ")", "# one-hot case", "if", "target", ".", "ndimension", "(", ")", ">", "1", ":", "target", "=", "target", ".", "max", "(", "1", ")", "[", "1", "]", "correct", "=", "pred", ".", "eq", "(", "target", ".", "view", "(", "1", ",", "-", "1", ")", ".", "expand_as", "(", "pred", ")", ")", "res", "=", "dict", "(", ")", "for", "k", "in", "topk", ":", "correct_k", "=", "correct", "[", ":", "k", "]", ".", "view", "(", "-", "1", ")", ".", "float", "(", ")", ".", "sum", "(", "0", ")", "res", "[", "\"acc{}\"", ".", "format", "(", "k", ")", "]", "=", "correct_k", ".", "mul_", "(", "1.0", "/", "batch_size", ")", ".", "item", "(", ")", "return", "res" ]
[ 3, 0 ]
[ 20, 14 ]
python
en
['en', 'en', 'en']
True
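A usage sketch for accuracy above (PyTorch required). One caveat worth hedging: on some recent PyTorch versions the correct[:k].view(-1) line can raise because the slice may be non-contiguous; .reshape(-1) or .flatten() sidesteps that if it bites:

import torch

torch.manual_seed(0)
logits = torch.randn(8, 10)          # batch of 8 samples, 10 classes
target = torch.randint(0, 10, (8,))  # integer class labels

res = accuracy(logits, target, topk=(1, 5))
print(res)  # e.g. {'acc1': 0.125, 'acc5': 0.5}; values depend on the seed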
test_setup
(hass, requests_mock)
Test for successfully setting up the platform.
Test for successfully setting up the platform.
async def test_setup(hass, requests_mock): """Test for successfully setting up the platform.""" config = { "sensor": { "platform": "openhardwaremonitor", "host": "localhost", "port": 8085, } } requests_mock.get( "http://localhost:8085/data.json", text=load_fixture("openhardwaremonitor.json"), ) await async_setup_component(hass, "sensor", config) await hass.async_block_till_done() entities = hass.states.async_entity_ids("sensor") assert len(entities) == 38 state = hass.states.get("sensor.test_pc_intel_core_i7_7700_temperatures_cpu_core_1") assert state is not None assert state.state == "31.0" state = hass.states.get("sensor.test_pc_intel_core_i7_7700_temperatures_cpu_core_2") assert state is not None assert state.state == "30.0"
[ "async", "def", "test_setup", "(", "hass", ",", "requests_mock", ")", ":", "config", "=", "{", "\"sensor\"", ":", "{", "\"platform\"", ":", "\"openhardwaremonitor\"", ",", "\"host\"", ":", "\"localhost\"", ",", "\"port\"", ":", "8085", ",", "}", "}", "requests_mock", ".", "get", "(", "\"http://localhost:8085/data.json\"", ",", "text", "=", "load_fixture", "(", "\"openhardwaremonitor.json\"", ")", ",", ")", "await", "async_setup_component", "(", "hass", ",", "\"sensor\"", ",", "config", ")", "await", "hass", ".", "async_block_till_done", "(", ")", "entities", "=", "hass", ".", "states", ".", "async_entity_ids", "(", "\"sensor\"", ")", "assert", "len", "(", "entities", ")", "==", "38", "state", "=", "hass", ".", "states", ".", "get", "(", "\"sensor.test_pc_intel_core_i7_7700_temperatures_cpu_core_1\"", ")", "assert", "state", "is", "not", "None", "assert", "state", ".", "state", "==", "\"31.0\"", "state", "=", "hass", ".", "states", ".", "get", "(", "\"sensor.test_pc_intel_core_i7_7700_temperatures_cpu_core_2\"", ")", "assert", "state", "is", "not", "None", "assert", "state", ".", "state", "==", "\"30.0\"" ]
[ 6, 0 ]
[ 35, 32 ]
python
en
['en', 'en', 'en']
True
BertGenerationPreTrainedModel._init_weights
(self, module)
Initialize the weights
Initialize the weights
    def _init_weights(self, module):
        """ Initialize the weights """
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
[ "def", "_init_weights", "(", "self", ",", "module", ")", ":", "if", "isinstance", "(", "module", ",", "nn", ".", "Linear", ")", ":", "# Slightly different from the TF version which uses truncated_normal for initialization", "# cf https://github.com/pytorch/pytorch/pull/5617", "module", ".", "weight", ".", "data", ".", "normal_", "(", "mean", "=", "0.0", ",", "std", "=", "self", ".", "config", ".", "initializer_range", ")", "if", "module", ".", "bias", "is", "not", "None", ":", "module", ".", "bias", ".", "data", ".", "zero_", "(", ")", "elif", "isinstance", "(", "module", ",", "nn", ".", "Embedding", ")", ":", "module", ".", "weight", ".", "data", ".", "normal_", "(", "mean", "=", "0.0", ",", "std", "=", "self", ".", "config", ".", "initializer_range", ")", "if", "module", ".", "padding_idx", "is", "not", "None", ":", "module", ".", "weight", ".", "data", "[", "module", ".", "padding_idx", "]", ".", "zero_", "(", ")", "elif", "isinstance", "(", "module", ",", "nn", ".", "LayerNorm", ")", ":", "module", ".", "bias", ".", "data", ".", "zero_", "(", ")", "module", ".", "weight", ".", "data", ".", "fill_", "(", "1.0", ")" ]
[ 178, 4 ]
[ 192, 41 ]
python
en
['en', 'en', 'en']
True
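_init_weights is a per-module callback dispatched over the module tree; below is a self-contained sketch of the same dispatch driven through nn.Module.apply, with a plain std argument standing in for self.config.initializer_range (the toy module tree and sizes are invented for illustration):

import torch.nn as nn

def init_weights(module, std=0.02):
    # Local re-implementation of the dispatch above, with std in place of
    # self.config.initializer_range, so the sketch is self-contained.
    if isinstance(module, nn.Linear):
        module.weight.data.normal_(mean=0.0, std=std)
        if module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.Embedding):
        module.weight.data.normal_(mean=0.0, std=std)
        if module.padding_idx is not None:
            module.weight.data[module.padding_idx].zero_()
    elif isinstance(module, nn.LayerNorm):
        module.bias.data.zero_()
        module.weight.data.fill_(1.0)

model = nn.Sequential(
    nn.Embedding(100, 16, padding_idx=0),
    nn.Linear(16, 16),
    nn.LayerNorm(16),
)
model.apply(init_weights)  # nn.Module.apply visits every submodule recursively
assert model[0].weight.data[0].abs().sum().item() == 0.0  # padding row zeroed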
setup_config
(hass)
Fixture that sets up the auth provider homeassistant module.
Fixture that sets up the auth provider homeassistant module.
def setup_config(hass):
    """Fixture that sets up the auth provider homeassistant module."""
    hass.loop.run_until_complete(
        register_auth_provider(hass, {"type": "homeassistant"})
    )
    hass.loop.run_until_complete(auth_ha.async_setup(hass))
[ "def", "setup_config", "(", "hass", ")", ":", "hass", ".", "loop", ".", "run_until_complete", "(", "register_auth_provider", "(", "hass", ",", "{", "\"type\"", ":", "\"homeassistant\"", "}", ")", ")", "hass", ".", "loop", ".", "run_until_complete", "(", "auth_ha", ".", "async_setup", "(", "hass", ")", ")" ]
[ 10, 0 ]
[ 15, 59 ]
python
en
['en', 'en', 'en']
True
auth_provider
(hass)
Hass auth provider.
Hass auth provider.
async def auth_provider(hass):
    """Hass auth provider."""
    provider = hass.auth.auth_providers[0]
    await provider.async_initialize()
    return provider
[ "async", "def", "auth_provider", "(", "hass", ")", ":", "provider", "=", "hass", ".", "auth", ".", "auth_providers", "[", "0", "]", "await", "provider", ".", "async_initialize", "(", ")", "return", "provider" ]
[ 19, 0 ]
[ 23, 19 ]
python
en
['fr', 'en', 'en']
True
owner_access_token
(hass, hass_owner_user)
Access token for owner user.
Access token for owner user.
async def owner_access_token(hass, hass_owner_user):
    """Access token for owner user."""
    refresh_token = await hass.auth.async_create_refresh_token(
        hass_owner_user, CLIENT_ID
    )
    return hass.auth.async_create_access_token(refresh_token)
[ "async", "def", "owner_access_token", "(", "hass", ",", "hass_owner_user", ")", ":", "refresh_token", "=", "await", "hass", ".", "auth", ".", "async_create_refresh_token", "(", "hass_owner_user", ",", "CLIENT_ID", ")", "return", "hass", ".", "auth", ".", "async_create_access_token", "(", "refresh_token", ")" ]
[ 27, 0 ]
[ 32, 61 ]
python
en
['en', 'no', 'en']
True
test_user_credential
(hass, auth_provider)
Add a test user.
Add a test user.
async def test_user_credential(hass, auth_provider):
    """Add a test user."""
    await hass.async_add_executor_job(
        auth_provider.data.add_auth, "test-user", "test-pass"
    )

    return await auth_provider.async_get_or_create_credentials(
        {"username": "test-user"}
    )
[ "async", "def", "test_user_credential", "(", "hass", ",", "auth_provider", ")", ":", "await", "hass", ".", "async_add_executor_job", "(", "auth_provider", ".", "data", ".", "add_auth", ",", "\"test-user\"", ",", "\"test-pass\"", ")", "return", "await", "auth_provider", ".", "async_get_or_create_credentials", "(", "{", "\"username\"", ":", "\"test-user\"", "}", ")" ]
[ 36, 0 ]
[ 44, 5 ]
python
en
['en', 'cy', 'en']
True
test_create_auth_system_generated_user
(hass, hass_ws_client)
Test we can't add auth to system generated users.
Test we can't add auth to system generated users.
async def test_create_auth_system_generated_user(hass, hass_ws_client):
    """Test we can't add auth to system generated users."""
    system_user = MockUser(system_generated=True).add_to_hass(hass)
    client = await hass_ws_client(hass)

    await client.send_json(
        {
            "id": 5,
            "type": "config/auth_provider/homeassistant/create",
            "user_id": system_user.id,
            "username": "test-user",
            "password": "test-pass",
        }
    )

    result = await client.receive_json()
    assert not result["success"], result
    assert result["error"]["code"] == "system_generated"
[ "async", "def", "test_create_auth_system_generated_user", "(", "hass", ",", "hass_ws_client", ")", ":", "system_user", "=", "MockUser", "(", "system_generated", "=", "True", ")", ".", "add_to_hass", "(", "hass", ")", "client", "=", "await", "hass_ws_client", "(", "hass", ")", "await", "client", ".", "send_json", "(", "{", "\"id\"", ":", "5", ",", "\"type\"", ":", "\"config/auth_provider/homeassistant/create\"", ",", "\"user_id\"", ":", "system_user", ".", "id", ",", "\"username\"", ":", "\"test-user\"", ",", "\"password\"", ":", "\"test-pass\"", ",", "}", ")", "result", "=", "await", "client", ".", "receive_json", "(", ")", "assert", "not", "result", "[", "\"success\"", "]", ",", "result", "assert", "result", "[", "\"error\"", "]", "[", "\"code\"", "]", "==", "\"system_generated\"" ]
[ 47, 0 ]
[ 65, 56 ]
python
en
['en', 'en', 'en']
True
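The websocket tests in the records below all repeat the same send_json/receive_json round trip; here is a hedged sketch of a helper that could factor that pattern out (the name ws_call is invented and is not part of the original suite):

async def ws_call(client, msg_type, msg_id=5, **payload):
    """Send one websocket command and return the parsed result frame.

    Invented convenience wrapper around the hass_ws_client test client;
    it mirrors the literal send_json/receive_json pairs used in these tests.
    """
    await client.send_json({"id": msg_id, "type": msg_type, **payload})
    return await client.receive_json()

# Example, equivalent to the body of the test above:
# result = await ws_call(
#     client,
#     "config/auth_provider/homeassistant/create",
#     user_id=system_user.id,
#     username="test-user",
#     password="test-pass",
# )
# assert not result["success"] and result["error"]["code"] == "system_generated"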
test_create_auth_user_already_credentials
()
Test we can't create auth for user with pre-existing credentials.
Test we can't create auth for user with pre-existing credentials.
async def test_create_auth_user_already_credentials():
    """Test we can't create auth for user with pre-existing credentials."""
[ "async", "def", "test_create_auth_user_already_credentials", "(", ")", ":" ]
[ 68, 0 ]
[ 69, 75 ]
python
en
['en', 'en', 'en']
True
test_create_auth_unknown_user
(hass_ws_client, hass)
Test create pointing at unknown user.
Test create pointing at unknown user.
async def test_create_auth_unknown_user(hass_ws_client, hass):
    """Test create pointing at unknown user."""
    client = await hass_ws_client(hass)

    await client.send_json(
        {
            "id": 5,
            "type": "config/auth_provider/homeassistant/create",
            "user_id": "test-id",
            "username": "test-user",
            "password": "test-pass",
        }
    )

    result = await client.receive_json()
    assert not result["success"], result
    assert result["error"]["code"] == "not_found"
[ "async", "def", "test_create_auth_unknown_user", "(", "hass_ws_client", ",", "hass", ")", ":", "client", "=", "await", "hass_ws_client", "(", "hass", ")", "await", "client", ".", "send_json", "(", "{", "\"id\"", ":", "5", ",", "\"type\"", ":", "\"config/auth_provider/homeassistant/create\"", ",", "\"user_id\"", ":", "\"test-id\"", ",", "\"username\"", ":", "\"test-user\"", ",", "\"password\"", ":", "\"test-pass\"", ",", "}", ")", "result", "=", "await", "client", ".", "receive_json", "(", ")", "assert", "not", "result", "[", "\"success\"", "]", ",", "result", "assert", "result", "[", "\"error\"", "]", "[", "\"code\"", "]", "==", "\"not_found\"" ]
[ 73, 0 ]
[ 90, 49 ]
python
en
['en', 'en', 'en']
True
test_create_auth_requires_admin
( hass, hass_ws_client, hass_read_only_access_token )
Test create requires admin to call API.
Test create requires admin to call API.
async def test_create_auth_requires_admin(
    hass, hass_ws_client, hass_read_only_access_token
):
    """Test create requires admin to call API."""
    client = await hass_ws_client(hass, hass_read_only_access_token)

    await client.send_json(
        {
            "id": 5,
            "type": "config/auth_provider/homeassistant/create",
            "user_id": "test-id",
            "username": "test-user",
            "password": "test-pass",
        }
    )

    result = await client.receive_json()
    assert not result["success"], result
    assert result["error"]["code"] == "unauthorized"
[ "async", "def", "test_create_auth_requires_admin", "(", "hass", ",", "hass_ws_client", ",", "hass_read_only_access_token", ")", ":", "client", "=", "await", "hass_ws_client", "(", "hass", ",", "hass_read_only_access_token", ")", "await", "client", ".", "send_json", "(", "{", "\"id\"", ":", "5", ",", "\"type\"", ":", "\"config/auth_provider/homeassistant/create\"", ",", "\"user_id\"", ":", "\"test-id\"", ",", "\"username\"", ":", "\"test-user\"", ",", "\"password\"", ":", "\"test-pass\"", ",", "}", ")", "result", "=", "await", "client", ".", "receive_json", "(", ")", "assert", "not", "result", "[", "\"success\"", "]", ",", "result", "assert", "result", "[", "\"error\"", "]", "[", "\"code\"", "]", "==", "\"unauthorized\"" ]
[ 93, 0 ]
[ 111, 52 ]
python
en
['en', 'en', 'en']
True
test_create_auth
(hass, hass_ws_client, hass_storage)
Test create auth command works.
Test create auth command works.
async def test_create_auth(hass, hass_ws_client, hass_storage):
    """Test create auth command works."""
    client = await hass_ws_client(hass)
    user = MockUser().add_to_hass(hass)

    assert len(user.credentials) == 0

    await client.send_json(
        {
            "id": 5,
            "type": "config/auth_provider/homeassistant/create",
            "user_id": user.id,
            "username": "test-user",
            "password": "test-pass",
        }
    )

    result = await client.receive_json()
    assert result["success"], result
    assert len(user.credentials) == 1

    creds = user.credentials[0]
    assert creds.auth_provider_type == "homeassistant"
    assert creds.auth_provider_id is None
    assert creds.data == {"username": "test-user"}

    assert prov_ha.STORAGE_KEY in hass_storage
    entry = hass_storage[prov_ha.STORAGE_KEY]["data"]["users"][0]
    assert entry["username"] == "test-user"
[ "async", "def", "test_create_auth", "(", "hass", ",", "hass_ws_client", ",", "hass_storage", ")", ":", "client", "=", "await", "hass_ws_client", "(", "hass", ")", "user", "=", "MockUser", "(", ")", ".", "add_to_hass", "(", "hass", ")", "assert", "len", "(", "user", ".", "credentials", ")", "==", "0", "await", "client", ".", "send_json", "(", "{", "\"id\"", ":", "5", ",", "\"type\"", ":", "\"config/auth_provider/homeassistant/create\"", ",", "\"user_id\"", ":", "user", ".", "id", ",", "\"username\"", ":", "\"test-user\"", ",", "\"password\"", ":", "\"test-pass\"", ",", "}", ")", "result", "=", "await", "client", ".", "receive_json", "(", ")", "assert", "result", "[", "\"success\"", "]", ",", "result", "assert", "len", "(", "user", ".", "credentials", ")", "==", "1", "creds", "=", "user", ".", "credentials", "[", "0", "]", "assert", "creds", ".", "auth_provider_type", "==", "\"homeassistant\"", "assert", "creds", ".", "auth_provider_id", "is", "None", "assert", "creds", ".", "data", "==", "{", "\"username\"", ":", "\"test-user\"", "}", "assert", "prov_ha", ".", "STORAGE_KEY", "in", "hass_storage", "entry", "=", "hass_storage", "[", "prov_ha", ".", "STORAGE_KEY", "]", "[", "\"data\"", "]", "[", "\"users\"", "]", "[", "0", "]", "assert", "entry", "[", "\"username\"", "]", "==", "\"test-user\"" ]
[ 114, 0 ]
[ 140, 43 ]
python
en
['en', 'en', 'en']
True
test_create_auth_duplicate_username
(hass, hass_ws_client, hass_storage)
Test we can't create auth with a duplicate username.
Test we can't create auth with a duplicate username.
async def test_create_auth_duplicate_username(hass, hass_ws_client, hass_storage):
    """Test we can't create auth with a duplicate username."""
    client = await hass_ws_client(hass)
    user = MockUser().add_to_hass(hass)
    hass_storage[prov_ha.STORAGE_KEY] = {
        "version": 1,
        "data": {"users": [{"username": "test-user"}]},
    }

    await client.send_json(
        {
            "id": 5,
            "type": "config/auth_provider/homeassistant/create",
            "user_id": user.id,
            "username": "test-user",
            "password": "test-pass",
        }
    )

    result = await client.receive_json()
    assert not result["success"], result
    assert result["error"]["code"] == "username_exists"
[ "async", "def", "test_create_auth_duplicate_username", "(", "hass", ",", "hass_ws_client", ",", "hass_storage", ")", ":", "client", "=", "await", "hass_ws_client", "(", "hass", ")", "user", "=", "MockUser", "(", ")", ".", "add_to_hass", "(", "hass", ")", "hass_storage", "[", "prov_ha", ".", "STORAGE_KEY", "]", "=", "{", "\"version\"", ":", "1", ",", "\"data\"", ":", "{", "\"users\"", ":", "[", "{", "\"username\"", ":", "\"test-user\"", "}", "]", "}", ",", "}", "await", "client", ".", "send_json", "(", "{", "\"id\"", ":", "5", ",", "\"type\"", ":", "\"config/auth_provider/homeassistant/create\"", ",", "\"user_id\"", ":", "user", ".", "id", ",", "\"username\"", ":", "\"test-user\"", ",", "\"password\"", ":", "\"test-pass\"", ",", "}", ")", "result", "=", "await", "client", ".", "receive_json", "(", ")", "assert", "not", "result", "[", "\"success\"", "]", ",", "result", "assert", "result", "[", "\"error\"", "]", "[", "\"code\"", "]", "==", "\"username_exists\"" ]
[ 143, 0 ]
[ 165, 55 ]
python
en
['en', 'en', 'en']
True
test_delete_removes_just_auth
(hass_ws_client, hass, hass_storage)
Test deleting an auth without being connected to a user.
Test deleting an auth without being connected to a user.
async def test_delete_removes_just_auth(hass_ws_client, hass, hass_storage):
    """Test deleting an auth without being connected to a user."""
    client = await hass_ws_client(hass)
    hass_storage[prov_ha.STORAGE_KEY] = {
        "version": 1,
        "data": {"users": [{"username": "test-user"}]},
    }

    await client.send_json(
        {
            "id": 5,
            "type": "config/auth_provider/homeassistant/delete",
            "username": "test-user",
        }
    )

    result = await client.receive_json()
    assert result["success"], result
    assert len(hass_storage[prov_ha.STORAGE_KEY]["data"]["users"]) == 0
[ "async", "def", "test_delete_removes_just_auth", "(", "hass_ws_client", ",", "hass", ",", "hass_storage", ")", ":", "client", "=", "await", "hass_ws_client", "(", "hass", ")", "hass_storage", "[", "prov_ha", ".", "STORAGE_KEY", "]", "=", "{", "\"version\"", ":", "1", ",", "\"data\"", ":", "{", "\"users\"", ":", "[", "{", "\"username\"", ":", "\"test-user\"", "}", "]", "}", ",", "}", "await", "client", ".", "send_json", "(", "{", "\"id\"", ":", "5", ",", "\"type\"", ":", "\"config/auth_provider/homeassistant/delete\"", ",", "\"username\"", ":", "\"test-user\"", ",", "}", ")", "result", "=", "await", "client", ".", "receive_json", "(", ")", "assert", "result", "[", "\"success\"", "]", ",", "result", "assert", "len", "(", "hass_storage", "[", "prov_ha", ".", "STORAGE_KEY", "]", "[", "\"data\"", "]", "[", "\"users\"", "]", ")", "==", "0" ]
[ 168, 0 ]
[ 187, 71 ]
python
en
['en', 'en', 'en']
True
test_delete_removes_credential
(hass, hass_ws_client, hass_storage)
Test deleting auth that is connected to a user.
Test deleting auth that is connected to a user.
async def test_delete_removes_credential(hass, hass_ws_client, hass_storage):
    """Test deleting auth that is connected to a user."""
    client = await hass_ws_client(hass)
    user = MockUser().add_to_hass(hass)
    hass_storage[prov_ha.STORAGE_KEY] = {
        "version": 1,
        "data": {"users": [{"username": "test-user"}]},
    }
    user.credentials.append(
        await hass.auth.auth_providers[0].async_get_or_create_credentials(
            {"username": "test-user"}
        )
    )

    await client.send_json(
        {
            "id": 5,
            "type": "config/auth_provider/homeassistant/delete",
            "username": "test-user",
        }
    )

    result = await client.receive_json()
    assert result["success"], result
    assert len(hass_storage[prov_ha.STORAGE_KEY]["data"]["users"]) == 0
[ "async", "def", "test_delete_removes_credential", "(", "hass", ",", "hass_ws_client", ",", "hass_storage", ")", ":", "client", "=", "await", "hass_ws_client", "(", "hass", ")", "user", "=", "MockUser", "(", ")", ".", "add_to_hass", "(", "hass", ")", "hass_storage", "[", "prov_ha", ".", "STORAGE_KEY", "]", "=", "{", "\"version\"", ":", "1", ",", "\"data\"", ":", "{", "\"users\"", ":", "[", "{", "\"username\"", ":", "\"test-user\"", "}", "]", "}", ",", "}", "user", ".", "credentials", ".", "append", "(", "await", "hass", ".", "auth", ".", "auth_providers", "[", "0", "]", ".", "async_get_or_create_credentials", "(", "{", "\"username\"", ":", "\"test-user\"", "}", ")", ")", "await", "client", ".", "send_json", "(", "{", "\"id\"", ":", "5", ",", "\"type\"", ":", "\"config/auth_provider/homeassistant/delete\"", ",", "\"username\"", ":", "\"test-user\"", ",", "}", ")", "result", "=", "await", "client", ".", "receive_json", "(", ")", "assert", "result", "[", "\"success\"", "]", ",", "result", "assert", "len", "(", "hass_storage", "[", "prov_ha", ".", "STORAGE_KEY", "]", "[", "\"data\"", "]", "[", "\"users\"", "]", ")", "==", "0" ]
[ 190, 0 ]
[ 216, 71 ]
python
en
['en', 'en', 'en']
True
test_delete_requires_admin
(hass, hass_ws_client, hass_read_only_access_token)
Test delete requires admin.
Test delete requires admin.
async def test_delete_requires_admin(hass, hass_ws_client, hass_read_only_access_token):
    """Test delete requires admin."""
    client = await hass_ws_client(hass, hass_read_only_access_token)

    await client.send_json(
        {
            "id": 5,
            "type": "config/auth_provider/homeassistant/delete",
            "username": "test-user",
        }
    )

    result = await client.receive_json()
    assert not result["success"], result
    assert result["error"]["code"] == "unauthorized"
[ "async", "def", "test_delete_requires_admin", "(", "hass", ",", "hass_ws_client", ",", "hass_read_only_access_token", ")", ":", "client", "=", "await", "hass_ws_client", "(", "hass", ",", "hass_read_only_access_token", ")", "await", "client", ".", "send_json", "(", "{", "\"id\"", ":", "5", ",", "\"type\"", ":", "\"config/auth_provider/homeassistant/delete\"", ",", "\"username\"", ":", "\"test-user\"", ",", "}", ")", "result", "=", "await", "client", ".", "receive_json", "(", ")", "assert", "not", "result", "[", "\"success\"", "]", ",", "result", "assert", "result", "[", "\"error\"", "]", "[", "\"code\"", "]", "==", "\"unauthorized\"" ]
[ 219, 0 ]
[ 233, 52 ]
python
en
['wa', 'la', 'en']
False
test_delete_unknown_auth
(hass, hass_ws_client)
Test trying to delete an unknown auth username.
Test trying to delete an unknown auth username.
async def test_delete_unknown_auth(hass, hass_ws_client):
    """Test trying to delete an unknown auth username."""
    client = await hass_ws_client(hass)

    await client.send_json(
        {
            "id": 5,
            "type": "config/auth_provider/homeassistant/delete",
            "username": "test-user",
        }
    )

    result = await client.receive_json()
    assert not result["success"], result
    assert result["error"]["code"] == "auth_not_found"
[ "async", "def", "test_delete_unknown_auth", "(", "hass", ",", "hass_ws_client", ")", ":", "client", "=", "await", "hass_ws_client", "(", "hass", ")", "await", "client", ".", "send_json", "(", "{", "\"id\"", ":", "5", ",", "\"type\"", ":", "\"config/auth_provider/homeassistant/delete\"", ",", "\"username\"", ":", "\"test-user\"", ",", "}", ")", "result", "=", "await", "client", ".", "receive_json", "(", ")", "assert", "not", "result", "[", "\"success\"", "]", ",", "result", "assert", "result", "[", "\"error\"", "]", "[", "\"code\"", "]", "==", "\"auth_not_found\"" ]
[ 236, 0 ]
[ 250, 54 ]
python
en
['en', 'en', 'en']
True
test_change_password
( hass, hass_ws_client, hass_admin_user, auth_provider, test_user_credential )
Test that change password succeeds with valid password.
Test that change password succeeds with valid password.
async def test_change_password(
    hass, hass_ws_client, hass_admin_user, auth_provider, test_user_credential
):
    """Test that change password succeeds with valid password."""
    await hass.auth.async_link_user(hass_admin_user, test_user_credential)
    client = await hass_ws_client(hass)

    await client.send_json(
        {
            "id": 6,
            "type": "config/auth_provider/homeassistant/change_password",
            "current_password": "test-pass",
            "new_password": "new-pass",
        }
    )

    result = await client.receive_json()
    assert result["success"], result

    await auth_provider.async_validate_login("test-user", "new-pass")
[ "async", "def", "test_change_password", "(", "hass", ",", "hass_ws_client", ",", "hass_admin_user", ",", "auth_provider", ",", "test_user_credential", ")", ":", "await", "hass", ".", "auth", ".", "async_link_user", "(", "hass_admin_user", ",", "test_user_credential", ")", "client", "=", "await", "hass_ws_client", "(", "hass", ")", "await", "client", ".", "send_json", "(", "{", "\"id\"", ":", "6", ",", "\"type\"", ":", "\"config/auth_provider/homeassistant/change_password\"", ",", "\"current_password\"", ":", "\"test-pass\"", ",", "\"new_password\"", ":", "\"new-pass\"", ",", "}", ")", "result", "=", "await", "client", ".", "receive_json", "(", ")", "assert", "result", "[", "\"success\"", "]", ",", "result", "await", "auth_provider", ".", "async_validate_login", "(", "\"test-user\"", ",", "\"new-pass\"", ")" ]
[ 253, 0 ]
[ 271, 69 ]
python
en
['en', 'nl', 'en']
True
test_change_password_wrong_pw
( hass, hass_ws_client, hass_admin_user, auth_provider, test_user_credential )
Test that change password fails with invalid password.
Test that change password fails with invalid password.
async def test_change_password_wrong_pw(
    hass, hass_ws_client, hass_admin_user, auth_provider, test_user_credential
):
    """Test that change password fails with invalid password."""
    await hass.auth.async_link_user(hass_admin_user, test_user_credential)
    client = await hass_ws_client(hass)

    await client.send_json(
        {
            "id": 6,
            "type": "config/auth_provider/homeassistant/change_password",
            "current_password": "wrong-pass",
            "new_password": "new-pass",
        }
    )

    result = await client.receive_json()
    assert not result["success"], result
    assert result["error"]["code"] == "invalid_password"

    with pytest.raises(prov_ha.InvalidAuth):
        await auth_provider.async_validate_login("test-user", "new-pass")
[ "async", "def", "test_change_password_wrong_pw", "(", "hass", ",", "hass_ws_client", ",", "hass_admin_user", ",", "auth_provider", ",", "test_user_credential", ")", ":", "await", "hass", ".", "auth", ".", "async_link_user", "(", "hass_admin_user", ",", "test_user_credential", ")", "client", "=", "await", "hass_ws_client", "(", "hass", ")", "await", "client", ".", "send_json", "(", "{", "\"id\"", ":", "6", ",", "\"type\"", ":", "\"config/auth_provider/homeassistant/change_password\"", ",", "\"current_password\"", ":", "\"wrong-pass\"", ",", "\"new_password\"", ":", "\"new-pass\"", ",", "}", ")", "result", "=", "await", "client", ".", "receive_json", "(", ")", "assert", "not", "result", "[", "\"success\"", "]", ",", "result", "assert", "result", "[", "\"error\"", "]", "[", "\"code\"", "]", "==", "\"invalid_password\"", "with", "pytest", ".", "raises", "(", "prov_ha", ".", "InvalidAuth", ")", ":", "await", "auth_provider", ".", "async_validate_login", "(", "\"test-user\"", ",", "\"new-pass\"", ")" ]
[ 274, 0 ]
[ 294, 73 ]
python
en
['en', 'en', 'en']
True
test_change_password_no_creds
(hass, hass_ws_client)
Test that change password fails with no credentials.
Test that change password fails with no credentials.
async def test_change_password_no_creds(hass, hass_ws_client):
    """Test that change password fails with no credentials."""
    client = await hass_ws_client(hass)

    await client.send_json(
        {
            "id": 6,
            "type": "config/auth_provider/homeassistant/change_password",
            "current_password": "test-pass",
            "new_password": "new-pass",
        }
    )

    result = await client.receive_json()
    assert not result["success"], result
    assert result["error"]["code"] == "credentials_not_found"
[ "async", "def", "test_change_password_no_creds", "(", "hass", ",", "hass_ws_client", ")", ":", "client", "=", "await", "hass_ws_client", "(", "hass", ")", "await", "client", ".", "send_json", "(", "{", "\"id\"", ":", "6", ",", "\"type\"", ":", "\"config/auth_provider/homeassistant/change_password\"", ",", "\"current_password\"", ":", "\"test-pass\"", ",", "\"new_password\"", ":", "\"new-pass\"", ",", "}", ")", "result", "=", "await", "client", ".", "receive_json", "(", ")", "assert", "not", "result", "[", "\"success\"", "]", ",", "result", "assert", "result", "[", "\"error\"", "]", "[", "\"code\"", "]", "==", "\"credentials_not_found\"" ]
[ 297, 0 ]
[ 312, 61 ]
python
en
['en', 'en', 'en']
True
test_admin_change_password_not_owner
( hass, hass_ws_client, auth_provider, test_user_credential )
Test that change password fails when not owner.
Test that change password fails when not owner.
async def test_admin_change_password_not_owner(
    hass, hass_ws_client, auth_provider, test_user_credential
):
    """Test that change password fails when not owner."""
    client = await hass_ws_client(hass)

    await client.send_json(
        {
            "id": 6,
            "type": "config/auth_provider/homeassistant/admin_change_password",
            "user_id": "test-user",
            "password": "new-pass",
        }
    )

    result = await client.receive_json()
    assert not result["success"], result
    assert result["error"]["code"] == "unauthorized"

    # Validate old login still works
    await auth_provider.async_validate_login("test-user", "test-pass")
[ "async", "def", "test_admin_change_password_not_owner", "(", "hass", ",", "hass_ws_client", ",", "auth_provider", ",", "test_user_credential", ")", ":", "client", "=", "await", "hass_ws_client", "(", "hass", ")", "await", "client", ".", "send_json", "(", "{", "\"id\"", ":", "6", ",", "\"type\"", ":", "\"config/auth_provider/homeassistant/admin_change_password\"", ",", "\"user_id\"", ":", "\"test-user\"", ",", "\"password\"", ":", "\"new-pass\"", ",", "}", ")", "result", "=", "await", "client", ".", "receive_json", "(", ")", "assert", "not", "result", "[", "\"success\"", "]", ",", "result", "assert", "result", "[", "\"error\"", "]", "[", "\"code\"", "]", "==", "\"unauthorized\"", "# Validate old login still works", "await", "auth_provider", ".", "async_validate_login", "(", "\"test-user\"", ",", "\"test-pass\"", ")" ]
[ 315, 0 ]
[ 335, 70 ]
python
en
['en', 'en', 'en']
True
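Several of the failure-path tests above differ only in command type, payload, and expected error code; below is an illustrative consolidation with pytest.mark.parametrize (a sketch, not part of the original suite; the hass and hass_ws_client fixtures are the ones used throughout these tests):

import pytest

@pytest.mark.parametrize(
    ("command", "payload", "error_code"),
    [
        (
            "config/auth_provider/homeassistant/create",
            {"user_id": "test-id", "username": "test-user", "password": "test-pass"},
            "not_found",
        ),
        (
            "config/auth_provider/homeassistant/delete",
            {"username": "test-user"},
            "auth_not_found",
        ),
    ],
)
async def test_command_failures(hass, hass_ws_client, command, payload, error_code):
    """Each command should fail with its expected error code."""
    client = await hass_ws_client(hass)
    await client.send_json({"id": 5, "type": command, **payload})

    result = await client.receive_json()
    assert not result["success"], result
    assert result["error"]["code"] == error_code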